import yaml
import random
-from novaclient import client as nClient_v2, exceptions as nvExceptions
-from novaclient import api_versions
-import keystoneclient.v2_0.client as ksClient_v2
-from novaclient.v2.client import Client as nClient
-import keystoneclient.v3.client as ksClient
+from novaclient import client as nClient, exceptions as nvExceptions
+from keystoneauth1.identity import v2, v3
+from keystoneauth1 import session
import keystoneclient.exceptions as ksExceptions
-import glanceclient.v2.client as glClient
+import keystoneclient.v3.client as ksClient_v3
+import keystoneclient.v2_0.client as ksClient_v2
+from glanceclient import client as glClient
import glanceclient.client as gl1Client
import glanceclient.exc as gl1Exceptions
-import cinderclient.v2.client as cClient_v2
+from cinderclient import client as cClient
from httplib import HTTPException
-from neutronclient.neutron import client as neClient_v2
-from neutronclient.v2_0 import client as neClient
+from neutronclient.neutron import client as neClient
from neutronclient.common import exceptions as neExceptions
from requests.exceptions import ConnectionError
# Global variables holding timeouts (in seconds) for volume and server operations
volume_timeout = 60
-server_timeout = 60
+server_timeout = 300
class vimconnector(vimconn.vimconnector):
def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
'url' is the keystone authorization url,
'url_admin' is not use
'''
- self.osc_api_version = 'v2.0'
- if config.get('APIversion') == 'v3.3':
- self.osc_api_version = 'v3.3'
- vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+ api_version = config.get('APIversion')
+ if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
+ raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. "
+ "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
+ vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+ config)
- self.persistent_info = persistent_info
- self.k_creds={}
- self.n_creds={}
- if self.config.get("insecure"):
- self.k_creds["insecure"] = True
- self.n_creds["insecure"] = True
+ self.insecure = self.config.get("insecure", False)
if not url:
raise TypeError, 'url param can not be NoneType'
- self.k_creds['auth_url'] = url
- self.n_creds['auth_url'] = url
- if tenant_name:
- self.k_creds['tenant_name'] = tenant_name
- self.n_creds['project_id'] = tenant_name
- if tenant_id:
- self.k_creds['tenant_id'] = tenant_id
- self.n_creds['tenant_id'] = tenant_id
- if user:
- self.k_creds['username'] = user
- self.n_creds['username'] = user
- if passwd:
- self.k_creds['password'] = passwd
- self.n_creds['api_key'] = passwd
- if self.osc_api_version == 'v3.3':
- self.k_creds['project_name'] = tenant_name
- self.k_creds['project_id'] = tenant_id
- if config.get('region_name'):
- self.k_creds['region_name'] = config.get('region_name')
- self.n_creds['region_name'] = config.get('region_name')
-
- self.reload_client = True
+ self.persistent_info = persistent_info
+ self.availability_zone = persistent_info.get('availability_zone', None)
+ self.session = persistent_info.get('session', {'reload_client': True})
+ self.nova = self.session.get('nova')
+ self.neutron = self.session.get('neutron')
+ self.cinder = self.session.get('cinder')
+ self.glance = self.session.get('glance')
+ self.glancev1 = self.session.get('glancev1')
+ self.keystone = self.session.get('keystone')
+ self.api_version3 = self.session.get('api_version3')
+
self.logger = logging.getLogger('openmano.vim.openstack')
if log_level:
self.logger.setLevel( getattr(logging, log_level) )
-
- def __setitem__(self,index, value):
- '''Set individuals parameters
- Throw TypeError, KeyError
- '''
- if index=='tenant_id':
- self.reload_client=True
- self.tenant_id = value
- if self.osc_api_version == 'v3.3':
- if value:
- self.k_creds['project_id'] = value
- self.n_creds['project_id'] = value
- else:
- del self.k_creds['project_id']
- del self.n_creds['project_id']
- else:
- if value:
- self.k_creds['tenant_id'] = value
- self.n_creds['tenant_id'] = value
- else:
- del self.k_creds['tenant_id']
- del self.n_creds['tenant_id']
- elif index=='tenant_name':
- self.reload_client=True
- self.tenant_name = value
- if self.osc_api_version == 'v3.3':
- if value:
- self.k_creds['project_name'] = value
- self.n_creds['project_name'] = value
- else:
- del self.k_creds['project_name']
- del self.n_creds['project_name']
- else:
- if value:
- self.k_creds['tenant_name'] = value
- self.n_creds['project_id'] = value
- else:
- del self.k_creds['tenant_name']
- del self.n_creds['project_id']
- elif index=='user':
- self.reload_client=True
- self.user = value
- if value:
- self.k_creds['username'] = value
- self.n_creds['username'] = value
- else:
- del self.k_creds['username']
- del self.n_creds['username']
- elif index=='passwd':
- self.reload_client=True
- self.passwd = value
- if value:
- self.k_creds['password'] = value
- self.n_creds['api_key'] = value
- else:
- del self.k_creds['password']
- del self.n_creds['api_key']
- elif index=='url':
- self.reload_client=True
- self.url = value
- if value:
- self.k_creds['auth_url'] = value
- self.n_creds['auth_url'] = value
- else:
- raise TypeError, 'url param can not be NoneType'
+
+ def __getitem__(self, index):
+ """Get individuals parameters.
+ Throw KeyError"""
+ if index == 'project_domain_id':
+ return self.config.get("project_domain_id")
+ elif index == 'user_domain_id':
+ return self.config.get("user_domain_id")
else:
- vimconn.vimconnector.__setitem__(self,index, value)
-
+ return vimconn.vimconnector.__getitem__(self, index)
+
+ def __setitem__(self, index, value):
+ """Set individuals parameters and it is marked as dirty so to force connection reload.
+ Throw KeyError"""
+ if index == 'project_domain_id':
+ self.config["project_domain_id"] = value
+ elif index == 'user_domain_id':
+ self.config["user_domain_id"] = value
+ else:
+ vimconn.vimconnector.__setitem__(self, index, value)
+ self.session['reload_client'] = True
+
def _reload_connection(self):
'''Called before any operation, it check if credentials has changed
Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
'''
        #TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
- if self.reload_client:
- #test valid params
- if len(self.n_creds) <4:
- raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
- if self.osc_api_version == 'v3.3':
- self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds)
- #TODO To be updated for v3
- #self.cinder = cClient.Client(**self.n_creds)
- self.keystone = ksClient.Client(**self.k_creds)
- self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
- self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
+ if self.session['reload_client']:
+ if self.config.get('APIversion'):
+ self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
+            else:  # determine from the auth_url ending: "v3" means v3, otherwise v2.0
+ self.api_version3 = self.url.split("/")[-1] == "v3"
+ self.session['api_version3'] = self.api_version3
+ if self.api_version3:
+ auth = v3.Password(auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ project_name=self.tenant_name,
+ project_id=self.tenant_id,
+ project_domain_id=self.config.get('project_domain_id', 'default'),
+ user_domain_id=self.config.get('user_domain_id', 'default'))
+ else:
+ auth = v2.Password(auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ tenant_name=self.tenant_name,
+ tenant_id=self.tenant_id)
+ sess = session.Session(auth=auth, verify=not self.insecure)
+ if self.api_version3:
+ self.keystone = ksClient_v3.Client(session=sess)
else:
- self.nova = nClient_v2.Client(version='2', **self.n_creds)
- self.cinder = cClient_v2.Client(**self.n_creds)
- self.keystone = ksClient_v2.Client(**self.k_creds)
- self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
- self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
- self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
- self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
- self.reload_client = False
+ self.keystone = ksClient_v2.Client(session=sess)
+ self.session['keystone'] = self.keystone
+ # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+ # This implementation approach is due to the warning message in
+ # https://developer.openstack.org/api-guide/compute/microversions.html
+ # where it is stated that microversion backwards compatibility is not guaranteed and clients should
+ # always require an specific microversion.
+ # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+ version = self.config.get("microversion")
+ if not version:
+ version = "2.1"
+ self.nova = self.session['nova'] = nClient.Client(str(version), session=sess)
+ self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess)
+ self.cinder = self.session['cinder'] = cClient.Client(2, session=sess)
+ self.glance = self.session['glance'] = glClient.Client(2, session=sess)
+ self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess)
+ self.session['reload_client'] = False
+ self.persistent_info['session'] = self.session
+        # add availability zone info inside self.persistent_info
+ self._set_availablity_zones()
+ self.persistent_info['availability_zone'] = self.availability_zone
def __net_os2mano(self, net_list_dict):
'''Transform the net openstack format to mano format
net['type']='data'
else:
net['type']='bridge'
-
-
-
+
def _format_exception(self, exception):
'''Transform a keystone, nova, neutron exception into a vimconn exception'''
if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, nvExceptions.Conflict):
raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
- else: # ()
+ elif isinstance(exception, vimconn.vimconnException):
+ raise
+ else: # ()
+ self.logger.error("General Exception " + str(exception), exc_info=True)
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
def get_tenant_list(self, filter_dict={}):
self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
- project_class_list=self.keystone.projects.findall(**filter_dict)
+ if self.api_version3:
+ project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
else:
- project_class_list=self.keystone.tenants.findall(**filter_dict)
+ project_class_list = self.keystone.tenants.findall(**filter_dict)
project_list=[]
for project in project_class_list:
+ if filter_dict.get('id') and filter_dict["id"] != project.id:
+ continue
project_list.append(project.to_dict())
return project_list
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
def new_tenant(self, tenant_name, tenant_description):
self.logger.debug("Adding a new tenant name: %s", tenant_name)
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
- project=self.keystone.projects.create(tenant_name, tenant_description)
+ if self.api_version3:
+ project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
+ description=tenant_description, is_domain=False)
else:
- project=self.keystone.tenants.create(tenant_name, tenant_description)
+ project = self.keystone.tenants.create(tenant_name, tenant_description)
return project.id
except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
self.logger.debug("Deleting tenant %s from VIM", tenant_id)
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
+ if self.api_version3:
self.keystone.projects.delete(tenant_id)
else:
self.keystone.tenants.delete(tenant_id)
ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
if 'ip_version' not in ip_profile:
ip_profile['ip_version'] = "IPv4"
- subnet={"name":net_name+"-subnet",
+ subnet = {"name":net_name+"-subnet",
"network_id": new_net["network"]["id"],
"ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
"cidr": ip_profile['subnet_address']
}
- if 'gateway_address' in ip_profile:
- subnet['gateway_ip'] = ip_profile['gateway_address']
+ # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
+ subnet['gateway_ip'] = ip_profile.get('gateway_address')
if ip_profile.get('dns_address'):
- #TODO: manage dns_address as a list of addresses separated by commas
- subnet['dns_nameservers'] = []
- subnet['dns_nameservers'].append(ip_profile['dns_address'])
+ subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
if 'dhcp_enabled' in ip_profile:
subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
if 'dhcp_start_address' in ip_profile:
- subnet['allocation_pools']=[]
+ subnet['allocation_pools'] = []
subnet['allocation_pools'].append(dict())
subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
if 'dhcp_count' in ip_profile:
self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
net_dict=self.neutron.list_networks(**filter_dict)
net_list=net_dict["networks"]
self.__net_os2mano(net_list)
subnet = {"id": subnet_id, "fault": str(e)}
subnets.append(subnet)
net["subnets"] = subnets
+ net["encapsulation"] = net.get('provider:network_type')
+ net["segmentation_id"] = net.get('provider:segmentation_id')
return net
def delete_network(self, net_id):
def get_flavor_id_from_data(self, flavor_dict):
"""Obtain flavor id that match the flavor description
Returns the flavor_id or raises a vimconnNotFoundException
+ flavor_dict: contains the required ram, vcpus, disk
+ If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
+ and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
+ vimconnNotFoundException is raised
"""
+ exact_match = False if self.config.get('use_existing_flavors') else True
try:
self._reload_connection()
- numa=None
- numas = flavor_dict.get("extended",{}).get("numas")
+ flavor_candidate_id = None
+ flavor_candidate_data = (10000, 10000, 10000)
+ flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+ # numa=None
+ numas = flavor_dict.get("extended", {}).get("numas")
if numas:
#TODO
raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
epa = flavor.get_keys()
if epa:
continue
- #TODO
- if flavor.ram != flavor_dict["ram"]:
- continue
- if flavor.vcpus != flavor_dict["vcpus"]:
- continue
- if flavor.disk != flavor_dict["disk"]:
- continue
- return flavor.id
+ # TODO
+ flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+ if flavor_data == flavor_target:
+ return flavor.id
+ elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+ flavor_candidate_id = flavor.id
+ flavor_candidate_data = flavor_data
+ if not exact_match and flavor_candidate_id:
+ return flavor_candidate_id
raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
for numa in numas:
#overwrite ram and vcpus
ram = numa['memory']*1024
+ #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
if 'paired-threads' in numa:
vcpus = numa['paired-threads']*2
- numa_properties["hw:cpu_threads_policy"] = "prefer"
+                    #cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "require"
+ numa_properties["hw:cpu_policy"] = "dedicated"
elif 'cores' in numa:
vcpus = numa['cores']
- #numa_properties["hw:cpu_threads_policy"] = "prefer"
+                    # cpu_thread_policy "isolate" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+ numa_properties["hw:cpu_thread_policy"] = "isolate"
+ numa_properties["hw:cpu_policy"] = "dedicated"
elif 'threads' in numa:
vcpus = numa['threads']
- numa_properties["hw:cpu_policy"] = "isolated"
- for interface in numa.get("interfaces",() ):
- if interface["dedicated"]=="yes":
- raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+ # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "prefer"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ # for interface in numa.get("interfaces",() ):
+ # if interface["dedicated"]=="yes":
+ # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+ # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
#create flavor
new_flavor=self.nova.flavors.create(name,
metadata: metadata of the image
Returns the image_id
'''
- #using version 1 of glance client
- glancev1 = gl1Client.Client('1',self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
retry=0
max_retries=3
while retry<max_retries:
disk_format="raw"
self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
if image_dict['location'][0:4]=="http":
- new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
container_format="bare", location=image_dict['location'], disk_format=disk_format)
else: #local path
with open(image_dict['location']) as fimage:
- new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
container_format="bare", data=fimage, disk_format=disk_format)
#insert metadata. We cannot use 'new_image.properties.setdefault'
#because nova and glance are "INDEPENDENT" and we are using nova for reading metadata
except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
self._format_exception(e)
- def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
+ def __wait_for_vm(self, vm_id, status):
+ """wait until vm is in the desired status and return True.
+ If the VM gets in ERROR status, return false.
+ If the timeout is reached generate an exception"""
+ elapsed_time = 0
+ while elapsed_time < server_timeout:
+ vm_status = self.nova.servers.get(vm_id).status
+ if vm_status == status:
+ return True
+ if vm_status == 'ERROR':
+ return False
+ time.sleep(1)
+ elapsed_time += 1
+
+ # if we exceeded the timeout rollback
+ if elapsed_time >= server_timeout:
+ raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
+ http_code=vimconn.HTTP_Request_Timeout)
+
+ def _get_openstack_availablity_zones(self):
+ """
+        Get the list of availability zones available in OpenStack
+ :return:
+ """
+ try:
+ openstack_availability_zone = self.nova.availability_zones.list()
+ openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
+ if zone.zoneName != 'internal']
+ return openstack_availability_zone
+ except Exception as e:
+ return None
+
+ def _set_availablity_zones(self):
+ """
+        Set the VIM availability zone
+ :return:
+ """
+
+ if 'availability_zone' in self.config:
+ vim_availability_zones = self.config.get('availability_zone')
+ if isinstance(vim_availability_zones, str):
+ self.availability_zone = [vim_availability_zones]
+ elif isinstance(vim_availability_zones, list):
+ self.availability_zone = vim_availability_zones
+ else:
+ self.availability_zone = self._get_openstack_availablity_zones()
+
+ def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+ """
+        Return the availability zone to be used by the created VM.
+ :return: The VIM availability zone to be used or None
+ """
+ if availability_zone_index is None:
+ if not self.config.get('availability_zone'):
+ return None
+ elif isinstance(self.config.get('availability_zone'), str):
+ return self.config['availability_zone']
+ else:
+ # TODO consider using a different parameter at config for default AV and AV list match
+ return self.config['availability_zone'][0]
+
+ vim_availability_zones = self.availability_zone
+ # check if VIM offer enough availability zones describe in the VNFD
+ if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+ # check if all the names of NFV AV match VIM AV names
+ match_by_index = False
+ for av in availability_zone_list:
+ if av not in vim_availability_zones:
+ match_by_index = True
+ break
+ if match_by_index:
+ return vim_availability_zones[availability_zone_index]
+ else:
+ return availability_zone_list[availability_zone_index]
+ else:
+ raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+ def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+ availability_zone_index=None, availability_zone_list=None):
'''Adds a VM instance to VIM
Params:
start: indicates if VM must start or boot in pause mode. Ignored
type: 'virtual', 'PF', 'VF', 'VFnotShared'
vim_id: filled/added by this function
floating_ip: True/False (or it can be None)
+ 'cloud_config': (optional) dictionary with:
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+ 'users': (optional) list of users to be inserted, each item is a dict with:
+ 'name': (mandatory) user name,
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+ 'user-data': (optional) string is a text script to be passed directly to cloud-init
+ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+ 'dest': (mandatory) string with the destination absolute path
+ 'encoding': (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ 'content' (mandatory): string with the content of the file
+ 'permissions': (optional) string with file permissions, typically octal notation '0644'
+ 'owner': (optional) file owner, string with the format 'owner:group'
+ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+ 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+ 'size': (mandatory) string with the size of the disk in GB
+ availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+ availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
+ availability_zone_index is None
#TODO ip, security groups
Returns the instance identifier
'''
self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
try:
+ server = None
metadata={}
net_list_vim=[]
- external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
+ external_network=[] # list of external networks to be connected to instance, later on used to create floating_ip
+            no_secured_ports = []  # List of port ids with port-security disabled
self._reload_connection()
- metadata_vpci={} #For a specific neutron plugin
+ metadata_vpci={} # For a specific neutron plugin
+ block_device_mapping = None
for net in net_list:
if not net.get("net_id"): #skip non connected iface
continue
- if net["type"]=="virtual" or net["type"]=="VF":
- port_dict={
- "network_id": net["net_id"],
- "name": net.get("name"),
- "admin_state_up": True
- }
- if net["type"]=="virtual":
- if "vpci" in net:
- metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- else: # for VF
- if "vpci" in net:
- if "VF" not in metadata_vpci:
- metadata_vpci["VF"]=[]
- metadata_vpci["VF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"]="direct"
- if not port_dict["name"]:
- port_dict["name"]=name
- if net.get("mac_address"):
- port_dict["mac_address"]=net["mac_address"]
- if net.get("port_security") == False:
- port_dict["port_security_enabled"]=net["port_security"]
- new_port = self.neutron.create_port({"port": port_dict })
- net["mac_adress"] = new_port["port"]["mac_address"]
- net["vim_id"] = new_port["port"]["id"]
- net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
- net_list_vim.append({"port-id": new_port["port"]["id"]})
- else: # for PF
- self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
- #TODO insert this when openstack consider passthrough ports as openstack neutron ports
+
+ port_dict={
+ "network_id": net["net_id"],
+ "name": net.get("name"),
+ "admin_state_up": True
+ }
+ if net["type"]=="virtual":
+ if "vpci" in net:
+ metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+ elif net["type"]=="VF": # for VF
+ if "vpci" in net:
+ if "VF" not in metadata_vpci:
+ metadata_vpci["VF"]=[]
+ metadata_vpci["VF"].append([ net["vpci"], "" ])
+ port_dict["binding:vnic_type"]="direct"
+ else: # For PT
+ if "vpci" in net:
+ if "PF" not in metadata_vpci:
+ metadata_vpci["PF"]=[]
+ metadata_vpci["PF"].append([ net["vpci"], "" ])
+ port_dict["binding:vnic_type"]="direct-physical"
+ if not port_dict["name"]:
+ port_dict["name"]=name
+ if net.get("mac_address"):
+ port_dict["mac_address"]=net["mac_address"]
+ new_port = self.neutron.create_port({"port": port_dict })
+ net["mac_adress"] = new_port["port"]["mac_address"]
+ net["vim_id"] = new_port["port"]["id"]
+                # if the network has no subnet, "fixed_ips" comes back as an empty list
+ fixed_ips = new_port["port"].get("fixed_ips")
+ if fixed_ips:
+ net["ip"] = fixed_ips[0].get("ip_address")
+ else:
+ net["ip"] = None
+
+ port = {"port-id": new_port["port"]["id"]}
+ if float(self.nova.api_version.get_string()) >= 2.32:
+ port["tag"] = new_port["port"]["name"]
+ net_list_vim.append(port)
+
if net.get('floating_ip', False):
net['exit_on_floating_ip_error'] = True
external_network.append(net)
net['exit_on_floating_ip_error'] = False
external_network.append(net)
+ # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
+ # As a workaround we wait until the VM is active and then disable the port-security
+ if net.get("port_security") == False:
+ no_secured_ports.append(new_port["port"]["id"])
+
if metadata_vpci:
metadata = {"pci_assignement": json.dumps(metadata_vpci)}
if len(metadata["pci_assignement"]) >255:
userdata = cloud_config
#Create additional volumes in case these are present in disk_list
- block_device_mapping = None
base_disk_index = ord('b')
if disk_list != None:
- block_device_mapping = dict()
+ block_device_mapping = {}
for disk in disk_list:
if 'image_id' in disk:
volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
http_code=vimconn.HTTP_Request_Timeout)
+ # get availability Zone
+ vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
+ self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, "
+ "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+ "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata,
+ security_groups, vm_av_zone, self.config.get('keypair'),
+ userdata, config_drive, block_device_mapping))
server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
security_groups=security_groups,
- availability_zone=self.config.get('availability_zone'),
+ availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
userdata=userdata,
- config_drive = config_drive,
- block_device_mapping = block_device_mapping
+ config_drive=config_drive,
+ block_device_mapping=block_device_mapping
) # , description=description)
+
+ # Previously mentioned workaround to wait until the VM is active and then disable the port-security
+ if no_secured_ports:
+ self.__wait_for_vm(server.id, 'ACTIVE')
+
+ for port_id in no_secured_ports:
+ try:
+ self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} })
+
+ except Exception as e:
+ self.logger.error("It was not possible to disable port security for port {}".format(port_id))
+ self.delete_vminstance(server.id)
+ raise
+
#print "DONE :-)", server
pool_id = None
floating_ips = self.neutron.list_floatingips().get("floatingips", ())
- for floating_network in external_network:
- try:
- # wait until vm is active
- elapsed_time = 0
- while elapsed_time < server_timeout:
- status = self.nova.servers.get(server.id).status
- if status == 'ACTIVE':
- break
- time.sleep(1)
- elapsed_time += 1
- #if we exceeded the timeout rollback
- if elapsed_time >= server_timeout:
- raise vimconn.vimconnException('Timeout creating instance ' + name,
- http_code=vimconn.HTTP_Request_Timeout)
+ if external_network:
+ self.__wait_for_vm(server.id, 'ACTIVE')
+ for floating_network in external_network:
+ try:
assigned = False
while(assigned == False):
if floating_ips:
if not floating_network['exit_on_floating_ip_error']:
self.logger.warn("Cannot create floating_ip. %s", str(e))
continue
- self.delete_vminstance(server.id)
raise
return server.id
# except nvExceptions.NotFound as e:
# error_value=-vimconn.HTTP_Not_Found
# error_text= "vm instance %s not found" % vm_id
- except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+# except TypeError as e:
+# raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
+
+ except Exception as e:
# delete the volumes we just created
- if block_device_mapping != None:
+ if block_device_mapping:
for volume_id in block_device_mapping.itervalues():
self.cinder.volumes.delete(volume_id)
- # delete ports we just created
- for net_item in net_list_vim:
- if 'port-id' in net_item:
- self.neutron.delete_port(net_item['port-id'])
+ # Delete the VM
+ if server != None:
+ self.delete_vminstance(server.id)
+ else:
+ # delete ports we just created
+ for net_item in net_list_vim:
+ if 'port-id' in net_item:
+ self.neutron.delete_port(net_item['port-id'])
+
self._format_exception(e)
- except TypeError as e:
- raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
def get_vminstance(self,vm_id):
'''Returns the VM instance information from VIM'''
vim_net_id: #network id where this interface is connected
vim_interface_id: #interface/port VIM id
ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
'''
vm_dict={}
self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
interface["mac_address"] = port.get("mac_address")
interface["vim_net_id"] = port["network_id"]
interface["vim_interface_id"] = port["id"]
+ # check if OS-EXT-SRV-ATTR:host is there,
+ # in case of non-admin credentials, it will be missing
+ if vm_vim.get('OS-EXT-SRV-ATTR:host'):
+ interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
+ interface["pci"] = None
+
+ # check if binding:profile is there,
+ # in case of non-admin credentials, it will be missing
+ if port.get('binding:profile'):
+ if port['binding:profile'].get('pci_slot'):
+ # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
+ # TODO: This is just a workaround valid for niantinc. Find a better way to do so
+ # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
+ pci = port['binding:profile']['pci_slot']
+ # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+ interface["pci"] = pci
+ interface["vlan"] = None
+ #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
+ network = self.neutron.show_network(port["network_id"])
+ if network['network'].get('provider:network_type') == 'vlan' and \
+ port.get("binding:vnic_type") == "direct":
+ interface["vlan"] = network['network'].get('provider:segmentation_id')
ips=[]
#look for floating ip address
floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])