# -*- coding: utf-8 -*-
##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
__date__ = "$22-sep-2017 23:59:59$"
import vimconn
-import json
+# import json
import logging
import netaddr
import time
self.persistent_info = persistent_info
self.availability_zone = persistent_info.get('availability_zone', None)
self.session = persistent_info.get('session', {'reload_client': True})
+ self.my_tenant_id = self.session.get('my_tenant_id')
self.nova = self.session.get('nova')
self.neutron = self.session.get('neutron')
self.cinder = self.session.get('cinder')
self.logger = logging.getLogger('openmano.vim.openstack')
+ # allow security_groups to be a list or a single string
+ if isinstance(self.config.get('security_groups'), str):
+ self.config['security_groups'] = [self.config['security_groups']]
+ self.security_groups_id = None
+
####### VIO Specific Changes #########
if self.vim_type == "VIO":
self.logger = logging.getLogger('openmano.vim.vio')
tenant_name=self.tenant_name,
tenant_id=self.tenant_id)
sess = session.Session(auth=auth, verify=self.verify)
+            # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+ region_name = self.config.get('region_name')
if self.api_version3:
- self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type)
+ self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
else:
self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
self.session['keystone'] = self.keystone
version = self.config.get("microversion")
if not version:
version = "2.1"
- self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type)
- self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type)
- self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type)
+                # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+ self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+ try:
+ self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+ except Exception as e:
+ self.logger.error("Cannot get project_id from session", exc_info=True)
if self.endpoint_type == "internalURL":
glance_service_id = self.keystone.services.list(name="glance")[0].id
glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
else:
glance_endpoint = None
self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
- #using version 1 of glance client in new_image()
+ # using version 1 of glance client in new_image()
# self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
# endpoint=glance_endpoint)
self.session['reload_client'] = False
# add availablity zone info inside self.persistent_info
self._set_availablity_zones()
self.persistent_info['availability_zone'] = self.availability_zone
+ self.security_groups_id = None # force to get again security_groups_ids next time they are needed
def __net_os2mano(self, net_list_dict):
'''Transform the net openstack format to mano format
def _format_exception(self, exception):
'''Transform a keystone, nova, neutron exception into a vimconn exception'''
- if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
- ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed
- )):
+ if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound, gl1Exceptions.HTTPNotFound)):
+ raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+ ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+ raise vimconn.vimconnException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
- neExceptions.NeutronException, nvExceptions.BadRequest)):
+ neExceptions.NeutronException)):
raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
- elif isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound)):
- raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, nvExceptions.Conflict):
raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, vimconn.vimconnException):
self.logger.error("General Exception " + str(exception), exc_info=True)
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ def _get_ids_from_name(self):
+ """
+        Obtain ids from name of tenant and security_groups. Store at self.security_groups_id.
+ :return: None
+ """
+ # get tenant_id if only tenant_name is supplied
+ self._reload_connection()
+ if not self.my_tenant_id:
+ raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+ format(self.tenant_name, self.tenant_id))
+ if self.config.get('security_groups') and not self.security_groups_id:
+ # convert from name to id
+ neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+
+ self.security_groups_id = []
+ for sg in self.config.get('security_groups'):
+ for neutron_sg in neutron_sg_list:
+ if sg in (neutron_sg["id"], neutron_sg["name"]):
+ self.security_groups_id.append(neutron_sg["id"])
+ break
+ else:
+ self.security_groups_id = None
+ raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
+
def get_tenant_list(self, filter_dict={}):
'''Obtain tenants of VIM
filter_dict can contain the following keys:
else:
project = self.keystone.tenants.create(tenant_name, tenant_description)
return project.id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError) as e:
self._format_exception(e)
def delete_tenant(self, tenant_id):
else:
self.keystone.tenants.delete(tenant_id)
return tenant_id
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError) as e:
self._format_exception(e)
def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
subnets.append(subnet)
net["subnets"] = subnets
net["encapsulation"] = net.get('provider:network_type')
+ net["encapsulation_type"] = net.get('provider:network_type')
net["segmentation_id"] = net.get('provider:segmentation_id')
+ net["encapsulation_id"] = net.get('provider:segmentation_id')
return net
def delete_network(self, net_id):
retry=0
max_retries=3
name_suffix = 0
- name=flavor_data['name']
- while retry<max_retries:
- retry+=1
- try:
- self._reload_connection()
- if change_name_if_used:
- #get used names
- fl_names=[]
- fl=self.nova.flavors.list()
- for f in fl:
- fl_names.append(f.name)
- while name in fl_names:
- name_suffix += 1
- name = flavor_data['name']+"-" + str(name_suffix)
-
- ram = flavor_data.get('ram',64)
- vcpus = flavor_data.get('vcpus',1)
- numa_properties=None
-
- extended = flavor_data.get("extended")
- if extended:
- numas=extended.get("numas")
- if numas:
- numa_nodes = len(numas)
- if numa_nodes > 1:
- return -1, "Can not add flavor with more than one numa"
- numa_properties = {"hw:numa_nodes":str(numa_nodes)}
- numa_properties["hw:mem_page_size"] = "large"
- numa_properties["hw:cpu_policy"] = "dedicated"
- numa_properties["hw:numa_mempolicy"] = "strict"
- if self.vim_type == "VIO":
- numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
- numa_properties["vmware:latency_sensitivity_level"] = "high"
- for numa in numas:
- #overwrite ram and vcpus
- #check if key 'memory' is present in numa else use ram value at flavor
- if 'memory' in numa:
- ram = numa['memory']*1024
- #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
- if 'paired-threads' in numa:
- vcpus = numa['paired-threads']*2
- #cpu_thread_policy "require" implies that the compute node must have an STM architecture
- numa_properties["hw:cpu_thread_policy"] = "require"
- numa_properties["hw:cpu_policy"] = "dedicated"
- elif 'cores' in numa:
- vcpus = numa['cores']
- # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
- numa_properties["hw:cpu_thread_policy"] = "isolate"
- numa_properties["hw:cpu_policy"] = "dedicated"
- elif 'threads' in numa:
- vcpus = numa['threads']
- # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
- numa_properties["hw:cpu_thread_policy"] = "prefer"
- numa_properties["hw:cpu_policy"] = "dedicated"
- # for interface in numa.get("interfaces",() ):
- # if interface["dedicated"]=="yes":
- # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
-
- #create flavor
- new_flavor=self.nova.flavors.create(name,
- ram,
- vcpus,
- flavor_data.get('disk',0),
- is_public=flavor_data.get('is_public', True)
- )
- #add metadata
- if numa_properties:
- new_flavor.set_keys(numa_properties)
- return new_flavor.id
- except nvExceptions.Conflict as e:
- if change_name_if_used and retry < max_retries:
- continue
- self._format_exception(e)
- #except nvExceptions.BadRequest as e:
- except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
- self._format_exception(e)
+ try:
+ name=flavor_data['name']
+ while retry<max_retries:
+ retry+=1
+ try:
+ self._reload_connection()
+ if change_name_if_used:
+ #get used names
+ fl_names=[]
+ fl=self.nova.flavors.list()
+ for f in fl:
+ fl_names.append(f.name)
+ while name in fl_names:
+ name_suffix += 1
+ name = flavor_data['name']+"-" + str(name_suffix)
+
+ ram = flavor_data.get('ram',64)
+ vcpus = flavor_data.get('vcpus',1)
+ numa_properties=None
+
+ extended = flavor_data.get("extended")
+ if extended:
+ numas=extended.get("numas")
+ if numas:
+ numa_nodes = len(numas)
+ if numa_nodes > 1:
+ return -1, "Can not add flavor with more than one numa"
+ numa_properties = {"hw:numa_nodes":str(numa_nodes)}
+ numa_properties["hw:mem_page_size"] = "large"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ numa_properties["hw:numa_mempolicy"] = "strict"
+ if self.vim_type == "VIO":
+ numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+ numa_properties["vmware:latency_sensitivity_level"] = "high"
+ for numa in numas:
+ #overwrite ram and vcpus
+ #check if key 'memory' is present in numa else use ram value at flavor
+ if 'memory' in numa:
+ ram = numa['memory']*1024
+ #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+ if 'paired-threads' in numa:
+ vcpus = numa['paired-threads']*2
+                            #cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "require"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ elif 'cores' in numa:
+ vcpus = numa['cores']
+                            # cpu_thread_policy "isolate" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+ numa_properties["hw:cpu_thread_policy"] = "isolate"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ elif 'threads' in numa:
+ vcpus = numa['threads']
+ # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "prefer"
+ numa_properties["hw:cpu_policy"] = "dedicated"
+ # for interface in numa.get("interfaces",() ):
+ # if interface["dedicated"]=="yes":
+ # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+ # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+
+ #create flavor
+ new_flavor=self.nova.flavors.create(name,
+ ram,
+ vcpus,
+ flavor_data.get('disk',0),
+ is_public=flavor_data.get('is_public', True)
+ )
+ #add metadata
+ if numa_properties:
+ new_flavor.set_keys(numa_properties)
+ return new_flavor.id
+ except nvExceptions.Conflict as e:
+ if change_name_if_used and retry < max_retries:
+ continue
+ self._format_exception(e)
+ #except nvExceptions.BadRequest as e:
+ except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+ self._format_exception(e)
def delete_flavor(self,flavor_id):
'''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
else:
disk_format="raw"
self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
- new_image = self.glance.images.create(name=image_dict['name'])
+ if self.vim_type == "VIO":
+ container_format = "bare"
+ if 'container_format' in image_dict:
+ container_format = image_dict['container_format']
+ new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
+ disk_format=disk_format)
+ else:
+ new_image = self.glance.images.create(name=image_dict['name'])
if image_dict['location'].startswith("http"):
# TODO there is not a method to direct download. It must be downloaded locally with requests
raise vimconn.vimconnNotImplemented("Cannot create image from URL")
#new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
# container_format="bare", data=fimage, disk_format=disk_format)
metadata_to_load = image_dict.get('metadata')
- #TODO location is a reserved word for current openstack versions. Use another word
- metadata_to_load['location'] = image_dict['location']
+ # TODO location is a reserved word for current openstack versions. fixed for VIO please check for openstack
+ if self.vim_type == "VIO":
+ metadata_to_load['upload_location'] = image_dict['location']
+ else:
+ metadata_to_load['location'] = image_dict['location']
self.glance.images.update(new_image.id, **metadata_to_load)
return new_image.id
except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
self._reload_connection()
self.glance.images.delete(image_id)
return image_id
- except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e: #TODO remove
+ except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
self._format_exception(e)
def get_image_id_from_path(self, path):
self._reload_connection()
# metadata_vpci = {} # For a specific neutron plugin
block_device_mapping = None
+
for net in net_list:
if not net.get("net_id"): # skip non connected iface
continue
- port_dict={
+ port_dict = {
"network_id": net["net_id"],
"name": net.get("name"),
"admin_state_up": True
}
+ if self.config.get("security_groups") and net.get("port_security") is not False and \
+ not self.config.get("no_port_security_extension"):
+ if not self.security_groups_id:
+ self._get_ids_from_name()
+ port_dict["security_groups"] = self.security_groups_id
+
if net["type"]=="virtual":
pass
# if "vpci" in net:
self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
name, image_id, flavor_id, str(net_list_vim), description)
- security_groups = self.config.get('security_groups')
- if type(security_groups) is str:
- security_groups = ( security_groups, )
# cloud config
config_drive, userdata = self._create_user_data(cloud_config)
self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
"availability_zone={}, key_name={}, userdata={}, config_drive={}, "
"block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
- security_groups, vm_av_zone, self.config.get('keypair'),
- userdata, config_drive, block_device_mapping))
+ self.config.get("security_groups"), vm_av_zone,
+ self.config.get('keypair'), userdata, config_drive,
+ block_device_mapping))
server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
- security_groups=security_groups,
+ security_groups=self.config.get("security_groups"),
+ # TODO remove security_groups in future versions. Already at neutron port
availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
userdata=userdata,
self._reload_connection()
# In networking-sfc the MPLS encapsulation is legacy
# should be used when no full SFC Encapsulation is intended
- sfc_encap = 'mpls'
+ correlation = 'mpls'
if sfc_encap:
correlation = 'nsh'
sfp_dict = {'name': name,