bug 668. Fix existing flavor as not created
[osm/RO.git] / osm_ro / vimconn_openstack.py
index 55f910b..073a752 100644
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 
 ##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
 # This file is part of openmano
 # All Rights Reserved.
 #
@@ -32,7 +32,7 @@ to the VIM connector's SFC resources as follows:
 - Service Function (OSM) -> Port Pair Group (Neutron)
 - Service Function Path (OSM) -> Port Chain (Neutron)
 '''
-__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C."
+__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
 __date__  = "$22-sep-2017 23:59:59$"
 
 import vimconn
@@ -44,6 +44,8 @@ import yaml
 import random
 import re
 import copy
+from pprint import pformat
+from types import StringTypes
 
 from novaclient import client as nClient, exceptions as nvExceptions
 from keystoneauth1.identity import v2, v3
@@ -52,7 +54,6 @@ import keystoneclient.exceptions as ksExceptions
 import keystoneclient.v3.client as ksClient_v3
 import keystoneclient.v2_0.client as ksClient_v2
 from glanceclient import client as glClient
-import glanceclient.client as gl1Client
 import glanceclient.exc as gl1Exceptions
 from  cinderclient import client as cClient
 from httplib import HTTPException
@@ -78,6 +79,18 @@ supportedClassificationTypes = ['legacy_flow_classifier']
 volume_timeout = 600
 server_timeout = 600
 
+
+class SafeDumper(yaml.SafeDumper):
+    def represent_data(self, data):
+        # OpenStack APIs use custom subclasses of dict, which the YAML safe
+        # dumper is not designed to handle (see pyyaml issue 142)
+        if isinstance(data, dict) and data.__class__ != dict:
+            # A simple solution is to convert those items back to dicts
+            data = dict(data.items())
+
+        return super(SafeDumper, self).represent_data(data)
+
+
 class vimconnector(vimconn.vimconnector):
     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
                  log_level=None, config={}, persistent_info={}):
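
For illustration, a minimal standalone sketch of what the SafeDumper subclass above solves (FakeNeutronDict and the sample data are invented for the demo): plain yaml.SafeDumper refuses dict subclasses such as the ones returned by the OpenStack clients, while the patched dumper converts them back to plain dicts first.

import yaml

class FakeNeutronDict(dict):
    """Stand-in for the dict subclasses returned by the OpenStack clients."""
    pass

class SafeDumper(yaml.SafeDumper):
    # same logic as the class added above
    def represent_data(self, data):
        if isinstance(data, dict) and data.__class__ != dict:
            data = dict(data.items())
        return super(SafeDumper, self).represent_data(data)

net = FakeNeutronDict(id="fake-id", status="ACTIVE")
try:
    yaml.dump(net, Dumper=yaml.SafeDumper)   # raises RepresenterError (pyyaml issue 142)
except yaml.representer.RepresenterError:
    pass
print(yaml.dump(net, Dumper=SafeDumper, default_flow_style=True, width=256))
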
@@ -96,22 +109,34 @@ class vimconnector(vimconn.vimconnector):
 
         if config.get('dataplane_net_vlan_range') is not None:
             #validate vlan ranges provided by user
-            self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'))
+            self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
+
+        if config.get('multisegment_vlan_range') is not None:
+            #validate vlan ranges provided by user
+            self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
 
         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
                                       config)
 
-        self.insecure = self.config.get("insecure", False)
+        if self.config.get("insecure") and self.config.get("ca_cert"):
+            raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive")
+        self.verify = True
+        if self.config.get("insecure"):
+            self.verify = False
+        if self.config.get("ca_cert"):
+            self.verify = self.config.get("ca_cert")
+
         if not url:
             raise TypeError('url param can not be NoneType')
         self.persistent_info = persistent_info
         self.availability_zone = persistent_info.get('availability_zone', None)
         self.session = persistent_info.get('session', {'reload_client': True})
+        self.my_tenant_id = self.session.get('my_tenant_id')
         self.nova = self.session.get('nova')
         self.neutron = self.session.get('neutron')
         self.cinder = self.session.get('cinder')
         self.glance = self.session.get('glance')
-        self.glancev1 = self.session.get('glancev1')
+        self.glancev1 = self.session.get('glancev1')
         self.keystone = self.session.get('keystone')
         self.api_version3 = self.session.get('api_version3')
         self.vim_type = self.config.get("vim_type")
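
As a quick illustration of the new insecure/ca_cert handling above, here is a hypothetical standalone helper (resolve_verify is not part of the connector) reproducing how the verify value passed to the keystoneauth Session is derived:

def resolve_verify(config):
    # mirrors the insecure/ca_cert checks in __init__ above
    if config.get("insecure") and config.get("ca_cert"):
        raise ValueError("options insecure and ca_cert are mutually exclusive")
    if config.get("insecure"):
        return False                      # disable TLS verification
    if config.get("ca_cert"):
        return config["ca_cert"]          # verify against the given CA bundle
    return True                           # default: verify with system CAs

assert resolve_verify({}) is True
assert resolve_verify({"insecure": True}) is False
assert resolve_verify({"ca_cert": "/path/to/vim-ca.pem"}) == "/path/to/vim-ca.pem"
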
@@ -124,6 +149,11 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger = logging.getLogger('openmano.vim.openstack')
 
+        # allow security_groups to be a list or a single string
+        if isinstance(self.config.get('security_groups'), str):
+            self.config['security_groups'] = [self.config['security_groups']]
+        self.security_groups_id = None
+
         ####### VIO Specific Changes #########
         if self.vim_type == "VIO":
             self.logger = logging.getLogger('openmano.vim.vio')
@@ -152,11 +182,30 @@ class vimconnector(vimconn.vimconnector):
             vimconn.vimconnector.__setitem__(self, index, value)
         self.session['reload_client'] = True
 
+    def serialize(self, value):
+        """Serialization of python basic types.
+
+        In the case value is not serializable a message will be logged and a
+        simple representation of the data that cannot be converted back to
+        python is returned.
+        """
+        if isinstance(value, StringTypes):
+            return value
+
+        try:
+            return yaml.dump(value, Dumper=SafeDumper,
+                             default_flow_style=True, width=256)
+        except yaml.representer.RepresenterError:
+            self.logger.debug(
+                'The following entity cannot be serialized in YAML:'
+                '\n\n%s\n\n', pformat(value), exc_info=True)
+            return str(value)
+
     def _reload_connection(self):
         '''Called before any operation, it check if credentials has changed
         Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
         '''
-        #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) 
+            #TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
         if self.session['reload_client']:
             if self.config.get('APIversion'):
                 self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
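
The serialize() helper above centralizes the yaml.safe_dump/str fallback that the refresh methods below use for the vim_info fields. A small standalone sketch of the two paths (CustomObject is made up for the demo):

import yaml

class CustomObject(object):
    # stands in for any value YAML cannot represent
    pass

value = {"status": "ACTIVE", "callback": CustomObject()}
try:
    text = yaml.safe_dump(value, default_flow_style=True, width=256)
except yaml.representer.RepresenterError:
    # serialize() logs pformat(value) at debug level and falls back to str()
    text = str(value)
print(text)
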
@@ -187,9 +236,11 @@ class vimconnector(vimconn.vimconnector):
                                    password=self.passwd,
                                    tenant_name=self.tenant_name,
                                    tenant_id=self.tenant_id)
-            sess = session.Session(auth=auth, verify=not self.insecure)
+            sess = session.Session(auth=auth, verify=self.verify)
+            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+            region_name = self.config.get('region_name')
             if self.api_version3:
-                self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type)
+                self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
             else:
                 self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
             self.session['keystone'] = self.keystone
@@ -202,23 +253,29 @@ class vimconnector(vimconn.vimconnector):
             version = self.config.get("microversion")
             if not version:
                 version = "2.1"
-            self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type)
-            self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type)
-            self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type)
+            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+            self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+            try:
+                self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+            except Exception as e:
+                self.logger.error("Cannot get project_id from session", exc_info=True)
             if self.endpoint_type == "internalURL":
                 glance_service_id = self.keystone.services.list(name="glance")[0].id
                 glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
             else:
                 glance_endpoint = None
             self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
-            #using version 1 of glance client in new_image()
-            self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
-                                                                       endpoint=glance_endpoint)
+            # using version 1 of glance client in new_image()
+            self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+                                                                       endpoint=glance_endpoint)
             self.session['reload_client'] = False
             self.persistent_info['session'] = self.session
             # add availablity zone info inside  self.persistent_info
             self._set_availablity_zones()
             self.persistent_info['availability_zone'] = self.availability_zone
+            self.security_groups_id = None  # force to get again security_groups_ids next time they are needed
 
     def __net_os2mano(self, net_list_dict):
         '''Transform the net openstack format to mano format
@@ -341,22 +398,55 @@ class vimconnector(vimconn.vimconnector):
 
     def _format_exception(self, exception):
         '''Transform a keystone, nova, neutron  exception into a vimconn exception'''
-        if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
-                                  ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed
-                                  )):
-            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+
+        # Fixing bug 665 https://osm.etsi.org/bugzilla/show_bug.cgi?id=665
+        # Some OpenStack versions return the error message as unicode with non-English characters
+        message_error = exception.message
+        if isinstance(message_error, unicode):
+            message_error = message_error.encode("utf")
+
+        if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
+                                  gl1Exceptions.HTTPNotFound)):
+            raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+                               ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
+            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+        elif isinstance(exception,  (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+            raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
         elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
-                                    neExceptions.NeutronException, nvExceptions.BadRequest)):
-            raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
-        elif isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound)):
-            raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
+                                    neExceptions.NeutronException)):
+            raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
         elif isinstance(exception, nvExceptions.Conflict):
-            raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
+            raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
         elif isinstance(exception, vimconn.vimconnException):
             raise exception
         else:  # ()
-            self.logger.error("General Exception " + str(exception), exc_info=True)
-            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+            self.logger.error("General Exception " + message_error, exc_info=True)
+            raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+
+    def _get_ids_from_name(self):
+        """
+        Obtain the ids of the tenant and of the configured security_groups from their names. Store them at self.security_groups_id
+        :return: None
+        """
+        # get tenant_id if only tenant_name is supplied
+        self._reload_connection()
+        if not self.my_tenant_id:
+            raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+                                                     format(self.tenant_name, self.tenant_id))
+        if self.config.get('security_groups') and not self.security_groups_id:
+            # convert from name to id
+            neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+
+            self.security_groups_id = []
+            for sg in self.config.get('security_groups'):
+                for neutron_sg in neutron_sg_list:
+                    if sg in (neutron_sg["id"], neutron_sg["name"]):
+                        self.security_groups_id.append(neutron_sg["id"])
+                        break
+                else:
+                    self.security_groups_id = None
+                    raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
 
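
A self-contained sketch of the for/else lookup performed by _get_ids_from_name() above (group names and the Neutron answer are invented): every configured security group must match a Neutron group by id or name, otherwise the whole resolution is aborted.

configured_groups = ["default", "osm-mgmt"]
neutron_sg_list = [{"id": "sg-1111", "name": "default"},
                   {"id": "sg-2222", "name": "osm-mgmt"}]

security_groups_id = []
for sg in configured_groups:
    for neutron_sg in neutron_sg_list:
        if sg in (neutron_sg["id"], neutron_sg["name"]):
            security_groups_id.append(neutron_sg["id"])
            break
    else:   # no break: the configured group does not exist in this tenant
        raise LookupError("Not found security group {} for this tenant".format(sg))
print(security_groups_id)   # ['sg-1111', 'sg-2222']
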
     def get_tenant_list(self, filter_dict={}):
         '''Obtain tenants of VIM
@@ -393,7 +483,7 @@ class vimconnector(vimconn.vimconnector):
             else:
                 project = self.keystone.tenants.create(tenant_name, tenant_description)
             return project.id
-        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
+        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)  as e:
             self._format_exception(e)
 
     def delete_tenant(self, tenant_id):
@@ -406,24 +496,67 @@ class vimconnector(vimconn.vimconnector):
             else:
                 self.keystone.tenants.delete(tenant_id)
             return tenant_id
-        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
+        except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)  as e:
             self._format_exception(e)
 
     def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
-        '''Adds a tenant network to VIM. Returns the network identifier'''
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
         self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
-        #self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
+        # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
         try:
             new_net = None
+            created_items = {}
             self._reload_connection()
             network_dict = {'name': net_name, 'admin_state_up': True}
             if net_type=="data" or net_type=="ptp":
                 if self.config.get('dataplane_physical_net') == None:
                     raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
-                network_dict["provider:physical_network"] = self.config['dataplane_physical_net'] #"physnet_sriov" #TODO physical
-                network_dict["provider:network_type"]     = "vlan"
-                if vlan!=None:
-                    network_dict["provider:network_type"] = vlan
+                if not self.config.get('multisegment_support'):
+                    network_dict["provider:physical_network"] = self.config[
+                        'dataplane_physical_net']  # "physnet_sriov" #TODO physical
+                    network_dict["provider:network_type"] = "vlan"
+                    if vlan!=None:
+                        network_dict["provider:network_type"] = vlan
+                else:
+                    ###### Multi-segment case ######
+                    segment_list = []
+                    segment1_dict = {}
+                    segment1_dict["provider:physical_network"] = ''
+                    segment1_dict["provider:network_type"]     = 'vxlan'
+                    segment_list.append(segment1_dict)
+                    segment2_dict = {}
+                    segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net']
+                    segment2_dict["provider:network_type"]     = "vlan"
+                    if self.config.get('multisegment_vlan_range'):
+                        vlanID = self._generate_multisegment_vlanID()
+                        segment2_dict["provider:segmentation_id"] = vlanID
+                    # else
+                    #     raise vimconn.vimconnConflictException(
+                    #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network")
+                    segment_list.append(segment2_dict)
+                    network_dict["segments"] = segment_list
 
                 ####### VIO Specific Changes #########
                 if self.vim_type == "VIO":
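
For reference, a hypothetical call to the extended new_network() above (all values invented, conn is an already-initialized vimconnector): the ip_profile keys follow the docstring, and with multisegment_support enabled the Neutron request carries a vxlan + vlan segments list instead of the single provider:* fields.

ip_profile = {
    "ip_version": "IPv4",
    "subnet_address": "10.0.100.0/24",
    "gateway_address": "10.0.100.1",
    "dns_address": "8.8.8.8;1.1.1.1",      # this connector splits the list on ';'
    "dhcp_enabled": True,
    "dhcp_start_address": "10.0.100.20",
    "dhcp_count": 50,
}
# net_id, created_items = conn.new_network("vnf-data", "data", ip_profile=ip_profile, vlan=2001)
# With multisegment_support the network body would look roughly like:
# {"name": "vnf-data", "admin_state_up": True, "shared": False,
#  "segments": [{"provider:physical_network": "", "provider:network_type": "vxlan"},
#               {"provider:physical_network": "<dataplane_physical_net>", "provider:network_type": "vlan",
#                "provider:segmentation_id": <picked from multisegment_vlan_range>}]}
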
@@ -435,12 +568,14 @@ class vimconnector(vimconn.vimconnector):
                                 "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
                                 "at config value before creating sriov network with vlan tag")
 
-                        network_dict["provider:segmentation_id"] = self._genrate_vlanID()
+                        network_dict["provider:segmentation_id"] = self._generate_vlanID()
 
-            network_dict["shared"]=shared
-            new_net=self.neutron.create_network({'network':network_dict})
-            #print new_net
-            #create subnetwork, even if there is no profile
+            network_dict["shared"] = shared
+            if self.config.get("disable_network_port_security"):
+                network_dict["port_security_enabled"] = False
+            new_net = self.neutron.create_network({'network':network_dict})
+            # print new_net
+            # create subnetwork, even if there is no profile
             if not ip_profile:
                 ip_profile = {}
             if not ip_profile.get('subnet_address'):
@@ -449,14 +584,16 @@ class vimconnector(vimconn.vimconnector):
                 ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
             if 'ip_version' not in ip_profile:
                 ip_profile['ip_version'] = "IPv4"
-            subnet = {"name":net_name+"-subnet",
+            subnet = {"name": net_name+"-subnet",
                     "network_id": new_net["network"]["id"],
                     "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
                     "cidr": ip_profile['subnet_address']
                     }
             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
             if ip_profile.get('gateway_address'):
-                subnet['gateway_ip'] = ip_profile.get('gateway_address')
+                subnet['gateway_ip'] = ip_profile['gateway_address']
+            else:
+                subnet['gateway_ip'] = None
             if ip_profile.get('dns_address'):
                 subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
             if 'dhcp_enabled' in ip_profile:
@@ -475,8 +612,29 @@ class vimconnector(vimconn.vimconnector):
                 subnet['allocation_pools'][0]['end'] = ip_str
             #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet} )
-            return new_net["network"]["id"]
+
+            if net_type == "data" and self.config.get('multisegment_support'):
+                if self.config.get('l2gw_support'):
+                    l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
+                    for l2gw in l2gw_list:
+                        l2gw_conn = {}
+                        l2gw_conn["l2_gateway_id"] = l2gw["id"]
+                        l2gw_conn["network_id"] = new_net["network"]["id"]
+                        l2gw_conn["segmentation_id"] = str(vlanID)
+                        new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
+                        created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
+            return new_net["network"]["id"], created_items
         except Exception as e:
+            #delete l2gw connections (if any) before deleting the network
+            for k, v in created_items.items():
+                if not v:  # skip already deleted
+                    continue
+                try:
+                    k_item, _, k_id = k.partition(":")
+                    if k_item == "l2gwconn":
+                        self.neutron.delete_l2_gateway_connection(k_id)
+                except Exception as e2:
+                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
             if new_net:
                 self.neutron.delete_network(new_net['network']['id'])
             self._format_exception(e)
@@ -526,14 +684,33 @@ class vimconnector(vimconn.vimconnector):
             subnets.append(subnet)
         net["subnets"] = subnets
         net["encapsulation"] = net.get('provider:network_type')
+        net["encapsulation_type"] = net.get('provider:network_type')
         net["segmentation_id"] = net.get('provider:segmentation_id')
+        net["encapsulation_id"] = net.get('provider:segmentation_id')
         return net
 
-    def delete_network(self, net_id):
-        '''Deletes a tenant network from VIM. Returns the old network identifier'''
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
         self.logger.debug("Deleting network '%s' from VIM", net_id)
+        if created_items is None:
+            created_items = {}
         try:
             self._reload_connection()
+            #delete l2gw connections (if any) before deleting the network
+            for k, v in created_items.items():
+                if not v:  # skip already deleted
+                    continue
+                try:
+                    k_item, _, k_id = k.partition(":")
+                    if k_item == "l2gwconn":
+                        self.neutron.delete_l2_gateway_connection(k_id)
+                except Exception as e:
+                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
             #delete VM ports attached to this networks before the network
             ports = self.neutron.list_ports(network_id=net_id)
             for p in ports['ports']:
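
Both the rollback path of new_network() and delete_network() above walk the same flat created_items convention; a small standalone sketch with invented ids:

# keys encode "<item-type>:<vim-id>"; a true-ish value means the item still has to be deleted
created_items = {"l2gwconn:fake-conn-1": True,
                 "l2gwconn:fake-conn-2": None}   # None/False: already deleted, skip

for k, v in created_items.items():
    if not v:
        continue
    k_item, _, k_id = k.partition(":")
    if k_item == "l2gwconn":
        print("would call neutron.delete_l2_gateway_connection(%r)" % k_id)
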
@@ -554,13 +731,13 @@ class vimconnector(vimconn.vimconnector):
                 net_id:         #VIM id of this network
                     status:     #Mandatory. Text with one of:
                                 #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                 #  OTHER (Vim reported other status not understood)
                                 #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  ACTIVE, INACTIVE, DOWN (admin down),
                                 #  BUILD (on building process)
                                 #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
 
         '''
@@ -577,10 +754,9 @@ class vimconnector(vimconn.vimconnector):
 
                 if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
                     net['status'] = 'DOWN'
-                try:
-                    net['vim_info'] = yaml.safe_dump(net_vim, default_flow_style=True, width=256)
-                except yaml.representer.RepresenterError:
-                    net['vim_info'] = str(net_vim)
+
+                net['vim_info'] = self.serialize(net_vim)
+
                 if net_vim.get('fault'):  #TODO
                     net['error_msg'] = str(net_vim['fault'])
             except vimconn.vimconnNotFoundException as e:
@@ -620,10 +796,10 @@ class vimconnector(vimconn.vimconnector):
             flavor_candidate_data = (10000, 10000, 10000)
             flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
             # numa=None
-            numas = flavor_dict.get("extended", {}).get("numas")
-            if numas:
+            extended = flavor_dict.get("extended", {})
+            if extended:
                 #TODO
-                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
+                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
                 # if len(numas) > 1:
                 #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
                 # numa=numas[0]
@@ -645,6 +821,20 @@ class vimconnector(vimconn.vimconnector):
         except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
             self._format_exception(e)
 
+    def process_resource_quota(self, quota, prefix, extra_specs):
+        """Translate a resource quota into flavor extra_specs entries.
+        :param quota: dict that may contain 'limit', 'reserve' and/or 'shares'
+        :param prefix: resource prefix: 'cpu', 'memory', 'vif' or 'disk_io'
+        :param extra_specs: dict updated in place with the derived 'quota:...' keys
+        :return: None
+        """
+        if 'limit' in quota:
+            extra_specs["quota:" + prefix + "_limit"] = quota['limit']
+        if 'reserve' in quota:
+            extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
+        if 'shares' in quota:
+            extra_specs["quota:" + prefix + "_shares_level"] = "custom"
+            extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+
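
A worked example of the mapping performed by process_resource_quota() (the quota numbers are arbitrary):

extra_specs = {}
quota = {"limit": 4000, "reserve": 1000, "shares": 2000}
# process_resource_quota(quota, "cpu", extra_specs) leaves extra_specs as:
# {"quota:cpu_limit": 4000,
#  "quota:cpu_reservation": 1000,
#  "quota:cpu_shares_level": "custom",
#  "quota:cpu_shares_share": 2000}
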
     def new_flavor(self, flavor_data, change_name_if_used=True):
         '''Adds a tenant flavor to openstack VIM
         if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
@@ -654,83 +844,92 @@ class vimconnector(vimconn.vimconnector):
         retry=0
         max_retries=3
         name_suffix = 0
-        name=flavor_data['name']
-        while retry<max_retries:
-            retry+=1
-            try:
-                self._reload_connection()
-                if change_name_if_used:
-                    #get used names
-                    fl_names=[]
-                    fl=self.nova.flavors.list()
-                    for f in fl:
-                        fl_names.append(f.name)
-                    while name in fl_names:
-                        name_suffix += 1
-                        name = flavor_data['name']+"-" + str(name_suffix)
-
-                ram = flavor_data.get('ram',64)
-                vcpus = flavor_data.get('vcpus',1)
-                numa_properties=None
-
-                extended = flavor_data.get("extended")
-                if extended:
-                    numas=extended.get("numas")
-                    if numas:
-                        numa_nodes = len(numas)
-                        if numa_nodes > 1:
-                            return -1, "Can not add flavor with more than one numa"
-                        numa_properties = {"hw:numa_nodes":str(numa_nodes)}
-                        numa_properties["hw:mem_page_size"] = "large"
-                        numa_properties["hw:cpu_policy"] = "dedicated"
-                        numa_properties["hw:numa_mempolicy"] = "strict"
-                        if self.vim_type == "VIO":
-                            numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-                            numa_properties["vmware:latency_sensitivity_level"] = "high"
-                        for numa in numas:
-                            #overwrite ram and vcpus
-                            #check if key 'memory' is present in numa else use ram value at flavor
-                            if 'memory' in numa:
-                                ram = numa['memory']*1024
-                            #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
-                            if 'paired-threads' in numa:
-                                vcpus = numa['paired-threads']*2
-                                #cpu_thread_policy "require" implies that the compute node must have an STM architecture
-                                numa_properties["hw:cpu_thread_policy"] = "require"
-                                numa_properties["hw:cpu_policy"] = "dedicated"
-                            elif 'cores' in numa:
-                                vcpus = numa['cores']
-                                # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
-                                numa_properties["hw:cpu_thread_policy"] = "isolate"
-                                numa_properties["hw:cpu_policy"] = "dedicated"
-                            elif 'threads' in numa:
-                                vcpus = numa['threads']
-                                # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
-                                numa_properties["hw:cpu_thread_policy"] = "prefer"
-                                numa_properties["hw:cpu_policy"] = "dedicated"
-                            # for interface in numa.get("interfaces",() ):
-                            #     if interface["dedicated"]=="yes":
-                            #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                            #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
-
-                #create flavor                 
-                new_flavor=self.nova.flavors.create(name,
-                                ram,
-                                vcpus,
-                                flavor_data.get('disk',0),
-                                is_public=flavor_data.get('is_public', True)
-                            )
-                #add metadata
-                if numa_properties:
-                    new_flavor.set_keys(numa_properties)
-                return new_flavor.id
-            except nvExceptions.Conflict as e:
-                if change_name_if_used and retry < max_retries:
-                    continue
-                self._format_exception(e)
-            #except nvExceptions.BadRequest as e:
-            except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
-                self._format_exception(e)
+        try:
+            name=flavor_data['name']
+            while retry<max_retries:
+                retry+=1
+                try:
+                    self._reload_connection()
+                    if change_name_if_used:
+                        #get used names
+                        fl_names=[]
+                        fl=self.nova.flavors.list()
+                        for f in fl:
+                            fl_names.append(f.name)
+                        while name in fl_names:
+                            name_suffix += 1
+                            name = flavor_data['name']+"-" + str(name_suffix)
+
+                    ram = flavor_data.get('ram',64)
+                    vcpus = flavor_data.get('vcpus',1)
+                    extra_specs={}
+
+                    extended = flavor_data.get("extended")
+                    if extended:
+                        numas=extended.get("numas")
+                        if numas:
+                            numa_nodes = len(numas)
+                            if numa_nodes > 1:
+                                return -1, "Can not add flavor with more than one numa"
+                            extra_specs["hw:numa_nodes"] = str(numa_nodes)
+                            extra_specs["hw:mem_page_size"] = "large"
+                            extra_specs["hw:cpu_policy"] = "dedicated"
+                            extra_specs["hw:numa_mempolicy"] = "strict"
+                            if self.vim_type == "VIO":
+                                extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+                                extra_specs["vmware:latency_sensitivity_level"] = "high"
+                            for numa in numas:
+                                #overwrite ram and vcpus
+                                #check if key 'memory' is present in numa else use ram value at flavor
+                                if 'memory' in numa:
+                                    ram = numa['memory']*1024
+                                #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+                                extra_specs["hw:cpu_sockets"] = 1
+                                if 'paired-threads' in numa:
+                                    vcpus = numa['paired-threads']*2
+                                    #cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+                                    extra_specs["hw:cpu_thread_policy"] = "require"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                elif 'cores' in numa:
+                                    vcpus = numa['cores']
+                                    # cpu_thread_policy "isolate" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+                                    extra_specs["hw:cpu_thread_policy"] = "isolate"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                elif 'threads' in numa:
+                                    vcpus = numa['threads']
+                                    # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+                                    extra_specs["hw:cpu_thread_policy"] = "prefer"
+                                    extra_specs["hw:cpu_policy"] = "dedicated"
+                                # for interface in numa.get("interfaces",() ):
+                                #     if interface["dedicated"]=="yes":
+                                #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+                                #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+                        elif extended.get("cpu-quota"):
+                            self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
+                        if extended.get("mem-quota"):
+                            self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
+                        if extended.get("vif-quota"):
+                            self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
+                        if extended.get("disk-io-quota"):
+                            self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
+                    #create flavor
+                    new_flavor=self.nova.flavors.create(name,
+                                    ram,
+                                    vcpus,
+                                    flavor_data.get('disk',0),
+                                    is_public=flavor_data.get('is_public', True)
+                                )
+                    #add metadata
+                    if extra_specs:
+                        new_flavor.set_keys(extra_specs)
+                    return new_flavor.id
+                except nvExceptions.Conflict as e:
+                    if change_name_if_used and retry < max_retries:
+                        continue
+                    self._format_exception(e)
+        #except nvExceptions.BadRequest as e:
+        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+            self._format_exception(e)
 
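
To make the NUMA handling above concrete, a hypothetical flavor_data with one NUMA node using paired threads, and the values new_flavor() would derive from it:

flavor_data = {
    "name": "vnf-small",
    "disk": 10,
    "extended": {"numas": [{"memory": 4, "paired-threads": 2}]},
}
# The loop above computes:
#   ram   = 4 * 1024      # GB in the descriptor converted to MB for nova
#   vcpus = 2 * 2         # paired-threads count both hardware threads
#   extra_specs = {"hw:numa_nodes": "1", "hw:mem_page_size": "large",
#                  "hw:cpu_policy": "dedicated", "hw:numa_mempolicy": "strict",
#                  "hw:cpu_sockets": 1, "hw:cpu_thread_policy": "require"}
# (the vmware:* keys are only added when vim_type == "VIO")
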
     def delete_flavor(self,flavor_id):
         '''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
@@ -763,40 +962,48 @@ class vimconnector(vimconn.vimconnector):
                 if "disk_format" in image_dict:
                     disk_format=image_dict["disk_format"]
                 else: #autodiscover based on extension
-                    if image_dict['location'][-6:]==".qcow2":
+                    if image_dict['location'].endswith(".qcow2"):
                         disk_format="qcow2"
-                    elif image_dict['location'][-4:]==".vhd":
+                    elif image_dict['location'].endswith(".vhd"):
                         disk_format="vhd"
-                    elif image_dict['location'][-5:]==".vmdk":
+                    elif image_dict['location'].endswith(".vmdk"):
                         disk_format="vmdk"
-                    elif image_dict['location'][-4:]==".vdi":
+                    elif image_dict['location'].endswith(".vdi"):
                         disk_format="vdi"
-                    elif image_dict['location'][-4:]==".iso":
+                    elif image_dict['location'].endswith(".iso"):
                         disk_format="iso"
-                    elif image_dict['location'][-4:]==".aki":
+                    elif image_dict['location'].endswith(".aki"):
                         disk_format="aki"
-                    elif image_dict['location'][-4:]==".ari":
+                    elif image_dict['location'].endswith(".ari"):
                         disk_format="ari"
-                    elif image_dict['location'][-4:]==".ami":
+                    elif image_dict['location'].endswith(".ami"):
                         disk_format="ami"
                     else:
                         disk_format="raw"
                 self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
-                if image_dict['location'][0:4]=="http":
-                    new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
-                            container_format="bare", location=image_dict['location'], disk_format=disk_format)
+                if self.vim_type == "VIO":
+                    container_format = "bare"
+                    if 'container_format' in image_dict:
+                        container_format = image_dict['container_format']
+                    new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
+                                                          disk_format=disk_format)
+                else:
+                    new_image = self.glance.images.create(name=image_dict['name'])
+                if image_dict['location'].startswith("http"):
+                    # TODO there is not a method to direct download. It must be downloaded locally with requests
+                    raise vimconn.vimconnNotImplemented("Cannot create image from URL")
                 else: #local path
                     with open(image_dict['location']) as fimage:
-                        new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
-                            container_format="bare", data=fimage, disk_format=disk_format)
-                #insert metadata. We cannot use 'new_image.properties.setdefault' 
-                #because nova and glance are "INDEPENDENT" and we are using nova for reading metadata
-                new_image_nova=self.nova.images.find(id=new_image.id)
-                new_image_nova.metadata.setdefault('location',image_dict['location'])
+                        self.glance.images.upload(new_image.id, fimage)
+                        #new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+                        #    container_format="bare", data=fimage, disk_format=disk_format)
                 metadata_to_load = image_dict.get('metadata')
-                if metadata_to_load:
-                    for k,v in yaml.load(metadata_to_load).iteritems():
-                        new_image_nova.metadata.setdefault(k,v)
+                # TODO 'location' is a reserved word in current openstack versions. Fixed for VIO; please check for plain openstack
+                if self.vim_type == "VIO":
+                    metadata_to_load['upload_location'] = image_dict['location']
+                else:
+                    metadata_to_load['location'] = image_dict['location']
+                self.glance.images.update(new_image.id, **metadata_to_load)
                 return new_image.id
             except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
                 self._format_exception(e)
@@ -813,16 +1020,16 @@ class vimconnector(vimconn.vimconnector):
         '''
         try:
             self._reload_connection()
-            self.nova.images.delete(image_id)
+            self.glance.images.delete(image_id)
             return image_id
-        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: #TODO remove
+        except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
             self._format_exception(e)
 
     def get_image_id_from_path(self, path):
         '''Get the image id from image path in the VIM database. Returns the image_id'''
         try:
             self._reload_connection()
-            images = self.nova.images.list()
+            images = self.glance.images.list()
             for image in images:
                 if image.metadata.get("location")==path:
                     return image.id
@@ -845,17 +1052,18 @@ class vimconnector(vimconn.vimconnector):
             self._reload_connection()
             filter_dict_os = filter_dict.copy()
             #First we filter by the available filter fields: name, id. The others are removed.
-            filter_dict_os.pop('checksum', None)
-            image_list = self.nova.images.findall(**filter_dict_os)
-            if len(image_list) == 0:
-                return []
-            #Then we filter by the rest of filter fields: checksum
+            image_list = self.glance.images.list()
             filtered_list = []
             for image in image_list:
                 try:
-                    image_class = self.glance.images.get(image.id)
-                    if 'checksum' not in filter_dict or image_class['checksum'] == filter_dict.get('checksum'):
-                        filtered_list.append(image_class.copy())
+                    if filter_dict.get("name") and image["name"] != filter_dict["name"]:
+                        continue
+                    if filter_dict.get("id") and image["id"] != filter_dict["id"]:
+                        continue
+                    if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
+                        continue
+
+                    filtered_list.append(image.copy())
                 except gl1Exceptions.HTTPNotFound:
                     pass
             return filtered_list
@@ -873,8 +1081,8 @@ class vimconnector(vimconn.vimconnector):
                 return True
             if vm_status == 'ERROR':
                 return False
-            time.sleep(1)
-            elapsed_time += 1
+            time.sleep(5)
+            elapsed_time += 5
 
         # if we exceeded the timeout rollback
         if elapsed_time >= server_timeout:
@@ -972,6 +1180,7 @@ class vimconnector(vimconn.vimconnector):
             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                 'size': (mandatory) string with the size of the disk in GB
+                'vim_id' (optional) should use this existing volume id
             availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
             availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
                 availability_zone_index is None
@@ -993,15 +1202,22 @@ class vimconnector(vimconn.vimconnector):
             self._reload_connection()
             # metadata_vpci = {}   # For a specific neutron plugin
             block_device_mapping = None
+
             for net in net_list:
                 if not net.get("net_id"):   # skip non connected iface
                     continue
 
-                port_dict={
+                port_dict = {
                     "network_id": net["net_id"],
                     "name": net.get("name"),
                     "admin_state_up": True
                 }
+                if self.config.get("security_groups") and net.get("port_security") is not False and \
+                        not self.config.get("no_port_security_extension"):
+                    if not self.security_groups_id:
+                        self._get_ids_from_name()
+                    port_dict["security_groups"] = self.security_groups_id
+
                 if net["type"]=="virtual":
                     pass
                     # if "vpci" in net:
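
For context, a hypothetical net_list entry as consumed by the port-creation loop above; when security_groups are configured, port_security is not disabled for the interface and no_port_security_extension is unset, the resolved group ids are attached to the Neutron port.

net = {
    "net_id": "fake-network-uuid",   # Neutron network to attach the port to
    "name": "eth0",
    "type": "virtual",               # plain virtio interface
    "port_security": True,
}
# resulting request body, roughly:
# {"port": {"network_id": "fake-network-uuid", "name": "eth0",
#           "admin_state_up": True, "security_groups": ["<resolved sg ids>"]}}
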
@@ -1064,7 +1280,7 @@ class vimconnector(vimconn.vimconnector):
 
                 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
                 # As a workaround we wait until the VM is active and then disable the port-security
-                if net.get("port_security") == False:
+                if net.get("port_security") == False and not self.config.get("no_port_security_extension"):
                     no_secured_ports.append(new_port["port"]["id"])
 
             # if metadata_vpci:
@@ -1078,39 +1294,39 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
                               name, image_id, flavor_id, str(net_list_vim), description)
 
-            security_groups = self.config.get('security_groups')
-            if type(security_groups) is str:
-                security_groups = ( security_groups, )
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
             # Create additional volumes in case these are present in disk_list
             base_disk_index = ord('b')
-            if disk_list != None:
+            if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
-                    if 'image_id' in disk:
-                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
-                                    chr(base_disk_index), imageRef = disk['image_id'])
+                    if disk.get('vim_id'):
+                        block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
                     else:
-                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
-                                    chr(base_disk_index))
-                    created_items["volume:" + str(volume.id)] = True
-                    block_device_mapping['_vd' +  chr(base_disk_index)] = volume.id
+                        if 'image_id' in disk:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index), imageRef=disk['image_id'])
+                        else:
+                            volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                chr(base_disk_index))
+                        created_items["volume:" + str(volume.id)] = True
+                        block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
                     base_disk_index += 1
 
-                # Wait until volumes are with status available
-                keep_waiting = True
+                # Wait until created volumes are with status available
                 elapsed_time = 0
-                while keep_waiting and elapsed_time < volume_timeout:
-                    keep_waiting = False
-                    for volume_id in block_device_mapping.itervalues():
-                        if self.cinder.volumes.get(volume_id).status != 'available':
-                            keep_waiting = True
-                    if keep_waiting:
-                        time.sleep(1)
-                        elapsed_time += 1
-
+                while elapsed_time < volume_timeout:
+                    for created_item in created_items:
+                        v, _, volume_id = created_item.partition(":")
+                        if v == 'volume':
+                            if self.cinder.volumes.get(volume_id).status != 'available':
+                                break
+                    else:  # all ready: break from while
+                        break
+                    time.sleep(5)
+                    elapsed_time += 5
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
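
A hypothetical disk_list and the block_device_mapping the loop above builds from it (ids invented); only the volumes created here are tracked in created_items, while a volume referenced through 'vim_id' is reused as-is.

disk_list = [
    {"size": "10", "image_id": "fake-image-uuid"},     # new cinder volume from an image -> '_vdb'
    {"size": "20"},                                    # new empty cinder volume         -> '_vdc'
    {"size": "5", "vim_id": "fake-existing-volume"},   # reuse an existing volume        -> '_vdd'
]
# block_device_mapping == {"_vdb": "<new-volume-1>",
#                          "_vdc": "<new-volume-2>",
#                          "_vdd": "fake-existing-volume"}
# created_items gains only "volume:<new-volume-1>" and "volume:<new-volume-2>",
# and the code polls every 5s (up to volume_timeout) until the new volumes are 'available'
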
@@ -1121,10 +1337,12 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                               "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
                               "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
-                                                                security_groups, vm_av_zone, self.config.get('keypair'),
-                                                                userdata, config_drive, block_device_mapping))
+                                                                self.config.get("security_groups"), vm_av_zone,
+                                                                self.config.get('keypair'), userdata, config_drive,
+                                                                block_device_mapping))
             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
-                                              security_groups=security_groups,
+                                              security_groups=self.config.get("security_groups"),
+                                              # TODO remove security_groups in future versions. Already at neutron port
                                               availability_zone=vm_av_zone,
                                               key_name=self.config.get('keypair'),
                                               userdata=userdata,
@@ -1139,14 +1357,14 @@ class vimconnector(vimconn.vimconnector):
 
             for port_id in no_secured_ports:
                 try:
-                    self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} })
+                    self.neutron.update_port(port_id,
+                                             {"port": {"port_security_enabled": False, "security_groups": None}})
                 except Exception as e:
-                    self.logger.error("It was not possible to disable port security for port {}".format(port_id))
-                    raise
-
+                    raise vimconn.vimconnException("It was not possible to disable port security for port {}".format(
+                        port_id))
             # print "DONE :-)", server
 
-            pool_id = None
+            pool_id = None
             if external_network:
                 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
             for floating_network in external_network:
@@ -1162,10 +1380,11 @@ class vimconnector(vimconn.vimconnector):
                                     continue
                             free_floating_ip = ip.get("floating_ip_address")
                         else:
-                            if isinstance(floating_network['floating_ip'], str):
+                            if isinstance(floating_network['floating_ip'], str) and \
+                                floating_network['floating_ip'].lower() != "true":
                                 pool_id = floating_network['floating_ip']
                             else:
-                                #Find the external network
+                                # Find the external network
                                 external_nets = list()
                                 for net in self.neutron.list_networks()['networks']:
                                     if net['router:external']:
@@ -1183,7 +1402,7 @@ class vimconnector(vimconn.vimconnector):
                                 pool_id = external_nets[0].get('id')
                             param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
                             try:
-                                #self.logger.debug("Creating floating IP")
+                                # self.logger.debug("Creating floating IP")
                                 new_floating_ip = self.neutron.create_floatingip(param)
                                 free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
                             except Exception as e:
@@ -1196,13 +1415,15 @@ class vimconnector(vimconn.vimconnector):
                                 server.add_floating_ip(free_floating_ip, fix_ip)
                                 assigned = True
                             except Exception as e:
+                                # OpenStack needs some time after VM creation to assign an IP, so retry if it fails
                                 vm_status = self.nova.servers.get(server.id).status
                                 if vm_status != 'ACTIVE' and vm_status != 'ERROR':
                                     if time.time() - vm_start_time < server_timeout:
                                         time.sleep(5)
                                         continue
-                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e),
-                                                               http_code=vimconn.HTTP_Conflict)
+                                raise vimconn.vimconnException(
+                                    "Cannot create floating_ip: {} {}".format(type(e).__name__, e),
+                                    http_code=vimconn.HTTP_Conflict)
 
                 except Exception as e:
                     if not floating_network['exit_on_floating_ip_error']:
@@ -1245,13 +1466,13 @@ class vimconnector(vimconn.vimconnector):
         Params:
             vm_id: uuid of the VM
             console_type, can be:
-                "novnc" (by default), "xvpvnc" for VNC types, 
+                "novnc" (by default), "xvpvnc" for VNC types,
                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
         Returns dict with the console parameters:
                 protocol: ssh, ftp, http, https, ...
-                server:   usually ip address 
-                port:     the http, ssh, ... port 
-                suffix:   extra text, e.g. the http path and query string   
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
         '''
         self.logger.debug("Getting VM CONSOLE from VIM")
         try:
@@ -1351,14 +1572,14 @@ class vimconnector(vimconn.vimconnector):
                 vm_id:          #VIM id of this Virtual Machine
                     status:     #Mandatory. Text with one of:
                                 #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                 #  OTHER (Vim reported other status not understood)
                                 #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
                                 #  CREATING (on building process), ERROR
                                 #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
                                 #
-                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
                     interfaces:
                      -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
@@ -1381,33 +1602,29 @@ class vimconnector(vimconn.vimconnector):
                 else:
                     vm['status']    = "OTHER"
                     vm['error_msg'] = "VIM status reported " + vm_vim['status']
-                try:
-                    vm['vim_info']  = yaml.safe_dump(vm_vim, default_flow_style=True, width=256)
-                except yaml.representer.RepresenterError:
-                    vm['vim_info'] = str(vm_vim)
+
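+                # self.serialize() replaces the inline yaml.safe_dump call and renders the raw VIM record as text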
+                vm['vim_info'] = self.serialize(vm_vim)
+
                 vm["interfaces"] = []
                 if vm_vim.get('fault'):
                     vm['error_msg'] = str(vm_vim['fault'])
                 #get interfaces
                 try:
                     self._reload_connection()
-                    port_dict=self.neutron.list_ports(device_id=vm_id)
+                    port_dict = self.neutron.list_ports(device_id=vm_id)
                     for port in port_dict["ports"]:
                         interface={}
-                        try:
-                            interface['vim_info'] = yaml.safe_dump(port, default_flow_style=True, width=256)
-                        except yaml.representer.RepresenterError:
-                            interface['vim_info'] = str(port)
+                        interface['vim_info'] = self.serialize(port)
                         interface["mac_address"] = port.get("mac_address")
                         interface["vim_net_id"] = port["network_id"]
                         interface["vim_interface_id"] = port["id"]
-                        # check if OS-EXT-SRV-ATTR:host is there, 
+                        # check if OS-EXT-SRV-ATTR:host is there,
                         # in case of non-admin credentials, it will be missing
                         if vm_vim.get('OS-EXT-SRV-ATTR:host'):
                             interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
                         interface["pci"] = None
 
-                        # check if binding:profile is there, 
+                        # check if binding:profile is there,
                         # in case of non-admin credentials, it will be missing
                         if port.get('binding:profile'):
                             if port['binding:profile'].get('pci_slot'):
@@ -1425,16 +1642,20 @@ class vimconnector(vimconn.vimconnector):
                             interface["vlan"] = network['network'].get('provider:segmentation_id')
                         ips=[]
                         #look for floating ip address
-                        floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
-                        if floating_ip_dict.get("floatingips"):
-                            ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") )
+                        try:
+                            floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
+                            if floating_ip_dict.get("floatingips"):
+                                ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address"))
+                        except Exception:
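+                            # tolerate failures here (e.g. insufficient permissions to list floating IPs); fixed IPs are still collected below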
+                            pass
 
                         for subnet in port["fixed_ips"]:
                             ips.append(subnet["ip_address"])
                         interface["ip_address"] = ";".join(ips)
                         vm["interfaces"].append(interface)
                 except Exception as e:
-                    self.logger.error("Error getting vm interface information " + type(e).__name__ + ": "+  str(e))
+                    self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e),
+                                      exc_info=True)
             except vimconn.vimconnNotFoundException as e:
                 self.logger.error("Exception getting vm status: %s", str(e))
                 vm['status'] = "DELETED"
@@ -1521,7 +1742,7 @@ class vimconnector(vimconn.vimconnector):
         #TODO insert exception vimconn.HTTP_Unauthorized
 
     ####### VIO Specific Changes #########
-    def _genrate_vlanID(self):
+    def _generate_vlanID(self):
         """
          Method to get unused vlanID
             Args:
@@ -1551,35 +1772,69 @@ class vimconnector(vimconn.vimconnector):
                 " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
 
 
-    def _validate_vlan_ranges(self, dataplane_net_vlan_range):
+    def _generate_multisegment_vlanID(self):
+        """
+         Method to get an unused VLAN ID from the 'multisegment_vlan_range' configuration
+            Args:
+                None
+            Returns:
+                vlanID
+        """
+        #Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
+                usedVlanIDs.append(net.get('provider:segmentation_id'))
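+            # multi-segment networks expose their per-segment details under 'segments' instead of the top-level provider keys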
+            elif net.get('segments'):
+                for segment in net.get('segments'):
+                    if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
+                        usedVlanIDs.append(segment.get('provider:segmentation_id'))
+        used_vlanIDs = set(usedVlanIDs)
+
+        # Find an unused VLAN ID
+        for vlanID_range in self.config.get('multisegment_vlan_range'):
+            try:
+                start_vlanid, end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
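+                # ranges are inclusive: a 'start-end' entry covers both endpoints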
+                for vlanID in xrange(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
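+        # for/else: this branch runs only when every configured range is exhausted without returning a free ID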
+        else:
+            raise vimconn.vimconnConflictException("Unable to create the VLAN segment."\
+                " All VLAN IDs {} are in use.".format(self.config.get('multisegment_vlan_range')))
+
+
+    def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
         """
         Method to validate user given vlanID ranges
             Args:  None
             Returns: None
         """
-        for vlanID_range in dataplane_net_vlan_range:
+        for vlanID_range in input_vlan_range:
             vlan_range = vlanID_range.replace(" ", "")
             #validate format
             vlanID_pattern = r'(\d)*-(\d)*$'
             match_obj = re.match(vlanID_pattern, vlan_range)
             if not match_obj:
-                raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}.You must provide "\
-                "'dataplane_net_vlan_range' in format [start_ID - end_ID].".format(vlanID_range))
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. You must provide "\
+                "'{}' in format [start_ID - end_ID].".format(text_vlan_range, vlanID_range, text_vlan_range))
 
             start_vlanid , end_vlanid = map(int,vlan_range.split("-"))
             if start_vlanid <= 0 :
-                raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. "\
                 "Start ID can not be zero. For VLAN "\
-                "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
             if end_vlanid > 4094 :
-                raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}."\
                 "End VLAN ID can not be greater than 4094. For VLAN "\
-                "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+                "networks valid IDs are 1 to 4094 ".format(text_vlan_range, vlanID_range))
 
             if start_vlanid > end_vlanid:
-                raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
-                    "You must provide a 'dataplane_net_vlan_range' in format start_ID - end_ID and "\
-                    "start_ID < end_ID ".format(vlanID_range))
+                raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. "\
+                    "You must provide '{}' in format start_ID - end_ID and "\
+                    "start_ID < end_ID ".format(text_vlan_range, vlanID_range, text_vlan_range))
 
 #NOT USED FUNCTIONS
 
@@ -1601,7 +1856,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("osconnector: Adding a new user to VIM")
         try:
             self._reload_connection()
-            user=self.keystone.users.create(user_name, user_passwd, tenant_id=tenant_id)
+            user=self.keystone.users.create(user_name, password=user_passwd, default_project=tenant_id)
             #self.keystone.tenants.add_user(self.k_creds["username"], #role)
             return user.id
         except ksExceptions.ConnectionError as e:
@@ -1916,7 +2171,7 @@ class vimconnector(vimconn.vimconnector):
             self._reload_connection()
             # In networking-sfc the MPLS encapsulation is legacy
             # should be used when no full SFC Encapsulation is intended
-            sfc_encap = 'mpls'
+            correlation = 'mpls'
             if sfc_encap:
                 correlation = 'nsh'
             sfp_dict = {'name': name,