##
'''
-osconnector implements all the methods to interact with openstack using the python-client.
+osconnector implements all the methods to interact with openstack using the python-neutronclient.
+
+For the VNF forwarding graph, the OpenStack VIM connector calls the
+networking-sfc Neutron extension methods, whose resources are mapped
+to the VIM connector's SFC resources as follows:
+- Classification (OSM) -> Flow Classifier (Neutron)
+- Service Function Instance (OSM) -> Port Pair (Neutron)
+- Service Function (OSM) -> Port Pair Group (Neutron)
+- Service Function Path (OSM) -> Port Chain (Neutron)
'''
-__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research"
-__date__ ="$22-jun-2014 11:19:29$"
+__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C."
+__date__ = "$22-sep-2017 23:59:59$"
import vimconn
import json
-import yaml
import logging
import netaddr
import time
import yaml
import random
+import sys
+import re
+import copy
-from novaclient import client as nClient_v2, exceptions as nvExceptions
-from novaclient import api_versions
-import keystoneclient.v2_0.client as ksClient_v2
-from novaclient.v2.client import Client as nClient
-import keystoneclient.v3.client as ksClient
+from novaclient import client as nClient, exceptions as nvExceptions
+from keystoneauth1.identity import v2, v3
+from keystoneauth1 import session
import keystoneclient.exceptions as ksExceptions
-import glanceclient.v2.client as glClient
+import keystoneclient.v3.client as ksClient_v3
+import keystoneclient.v2_0.client as ksClient_v2
+from glanceclient import client as glClient
import glanceclient.client as gl1Client
import glanceclient.exc as gl1Exceptions
-import cinderclient.v2.client as cClient_v2
+from cinderclient import client as cClient
from httplib import HTTPException
-from neutronclient.neutron import client as neClient_v2
-from neutronclient.v2_0 import client as neClient
+from neutronclient.neutron import client as neClient
from neutronclient.common import exceptions as neExceptions
from requests.exceptions import ConnectionError
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
-'''contain the openstack virtual machine status to openmano status'''
+"""Mapping of openstack virtual machine status to openmano status"""
vmStatus2manoFormat={'ACTIVE':'ACTIVE',
'PAUSED':'PAUSED',
'SUSPENDED': 'SUSPENDED',
netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
}
+supportedClassificationTypes = ['legacy_flow_classifier']
+
#global var to have a timeout creating and deleting volumes
volume_timeout = 60
-server_timeout = 60
+server_timeout = 300
class vimconnector(vimconn.vimconnector):
def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
'url' is the keystone authorization url,
'url_admin' is not use
'''
- self.osc_api_version = 'v2.0'
- if config.get('APIversion') == 'v3.3':
- self.osc_api_version = 'v3.3'
- vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+ api_version = config.get('APIversion')
+ if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
+ raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. "
+ "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
+ vim_type = config.get('vim_type')
+ if vim_type and vim_type not in ('vio', 'VIO'):
+ raise vimconn.vimconnException("Invalid value '{}' for config:vim_type."
+ "Allowed values are 'vio' or 'VIO'".format(vim_type))
- self.persistent_info = persistent_info
- self.k_creds={}
- self.n_creds={}
- if self.config.get("insecure"):
- self.k_creds["insecure"] = True
- self.n_creds["insecure"] = True
+ if config.get('dataplane_net_vlan_range') is not None:
+ #validate vlan ranges provided by user
+ self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'))
+
+ vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+ config)
+
+ self.insecure = self.config.get("insecure", False)
if not url:
- raise TypeError, 'url param can not be NoneType'
- self.k_creds['auth_url'] = url
- self.n_creds['auth_url'] = url
- if tenant_name:
- self.k_creds['tenant_name'] = tenant_name
- self.n_creds['project_id'] = tenant_name
- if tenant_id:
- self.k_creds['tenant_id'] = tenant_id
- self.n_creds['tenant_id'] = tenant_id
- if user:
- self.k_creds['username'] = user
- self.n_creds['username'] = user
- if passwd:
- self.k_creds['password'] = passwd
- self.n_creds['api_key'] = passwd
- if self.osc_api_version == 'v3.3':
- self.k_creds['project_name'] = tenant_name
- self.k_creds['project_id'] = tenant_id
- if config.get('region_name'):
- self.k_creds['region_name'] = config.get('region_name')
- self.n_creds['region_name'] = config.get('region_name')
-
- self.reload_client = True
+ raise TypeError('url param can not be NoneType')
+ self.persistent_info = persistent_info
+ self.availability_zone = persistent_info.get('availability_zone', None)
+ self.session = persistent_info.get('session', {'reload_client': True})
+ self.nova = self.session.get('nova')
+ self.neutron = self.session.get('neutron')
+ self.cinder = self.session.get('cinder')
+ self.glance = self.session.get('glance')
+ self.glancev1 = self.session.get('glancev1')
+ self.keystone = self.session.get('keystone')
+ self.api_version3 = self.session.get('api_version3')
+ self.vim_type = self.config.get("vim_type")
+ if self.vim_type:
+ self.vim_type = self.vim_type.upper()
+ if self.config.get("use_internal_endpoint"):
+ self.endpoint_type = "internalURL"
+ else:
+ self.endpoint_type = None
+
self.logger = logging.getLogger('openmano.vim.openstack')
+
+ ####### VIO Specific Changes #########
+ if self.vim_type == "VIO":
+ self.logger = logging.getLogger('openmano.vim.vio')
+
if log_level:
- self.logger.setLevel( getattr(logging, log_level) )
-
- def __setitem__(self,index, value):
- '''Set individuals parameters
- Throw TypeError, KeyError
- '''
- if index=='tenant_id':
- self.reload_client=True
- self.tenant_id = value
- if self.osc_api_version == 'v3.3':
- if value:
- self.k_creds['project_id'] = value
- self.n_creds['project_id'] = value
- else:
- del self.k_creds['project_id']
- del self.n_creds['project_id']
- else:
- if value:
- self.k_creds['tenant_id'] = value
- self.n_creds['tenant_id'] = value
- else:
- del self.k_creds['tenant_id']
- del self.n_creds['tenant_id']
- elif index=='tenant_name':
- self.reload_client=True
- self.tenant_name = value
- if self.osc_api_version == 'v3.3':
- if value:
- self.k_creds['project_name'] = value
- self.n_creds['project_name'] = value
- else:
- del self.k_creds['project_name']
- del self.n_creds['project_name']
- else:
- if value:
- self.k_creds['tenant_name'] = value
- self.n_creds['project_id'] = value
- else:
- del self.k_creds['tenant_name']
- del self.n_creds['project_id']
- elif index=='user':
- self.reload_client=True
- self.user = value
- if value:
- self.k_creds['username'] = value
- self.n_creds['username'] = value
- else:
- del self.k_creds['username']
- del self.n_creds['username']
- elif index=='passwd':
- self.reload_client=True
- self.passwd = value
- if value:
- self.k_creds['password'] = value
- self.n_creds['api_key'] = value
- else:
- del self.k_creds['password']
- del self.n_creds['api_key']
- elif index=='url':
- self.reload_client=True
- self.url = value
- if value:
- self.k_creds['auth_url'] = value
- self.n_creds['auth_url'] = value
- else:
- raise TypeError, 'url param can not be NoneType'
+ self.logger.setLevel( getattr(logging, log_level))
+
+ def __getitem__(self, index):
+ """Get individuals parameters.
+ Throw KeyError"""
+ if index == 'project_domain_id':
+ return self.config.get("project_domain_id")
+ elif index == 'user_domain_id':
+ return self.config.get("user_domain_id")
+ else:
+ return vimconn.vimconnector.__getitem__(self, index)
+
+ def __setitem__(self, index, value):
+ """Set individuals parameters and it is marked as dirty so to force connection reload.
+ Throw KeyError"""
+ if index == 'project_domain_id':
+ self.config["project_domain_id"] = value
+ elif index == 'user_domain_id':
+ self.config["user_domain_id"] = value
else:
- vimconn.vimconnector.__setitem__(self,index, value)
-
+ vimconn.vimconnector.__setitem__(self, index, value)
+ self.session['reload_client'] = True
+
def _reload_connection(self):
'''Called before any operation, it check if credentials has changed
Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
'''
#TODO control the timing and possible token timeout, but it seams that python client does this task for us :-)
- if self.reload_client:
- #test valid params
- if len(self.n_creds) <4:
- raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
- if self.osc_api_version == 'v3.3':
- self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds)
- #TODO To be updated for v3
- #self.cinder = cClient.Client(**self.n_creds)
- self.keystone = ksClient.Client(**self.k_creds)
- self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
- self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
+ if self.session['reload_client']:
+ if self.config.get('APIversion'):
+ self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
+ else: # get from ending auth_url that end with v3 or with v2.0
+ self.api_version3 = self.url.split("/")[-1] == "v3"
+ self.session['api_version3'] = self.api_version3
+ if self.api_version3:
+ auth = v3.Password(auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ project_name=self.tenant_name,
+ project_id=self.tenant_id,
+ project_domain_id=self.config.get('project_domain_id', 'default'),
+ user_domain_id=self.config.get('user_domain_id', 'default'))
+ else:
+ auth = v2.Password(auth_url=self.url,
+ username=self.user,
+ password=self.passwd,
+ tenant_name=self.tenant_name,
+ tenant_id=self.tenant_id)
+ sess = session.Session(auth=auth, verify=not self.insecure)
+ if self.api_version3:
+ self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type)
+ else:
+ self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
+ self.session['keystone'] = self.keystone
+ # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+ # This implementation approach is due to the warning message in
+ # https://developer.openstack.org/api-guide/compute/microversions.html
+ # where it is stated that microversion backwards compatibility is not guaranteed and clients should
+ # always require an specific microversion.
+ # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+ version = self.config.get("microversion")
+ if not version:
+ version = "2.1"
+ self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type)
+ self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type)
+ self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type)
+ if self.endpoint_type == "internalURL":
+ glance_service_id = self.keystone.services.list(name="glance")[0].id
+ glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
else:
- self.nova = nClient_v2.Client(version='2', **self.n_creds)
- self.cinder = cClient_v2.Client(**self.n_creds)
- self.keystone = ksClient_v2.Client(**self.k_creds)
- self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
- self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
- self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
- self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
- self.reload_client = False
+ glance_endpoint = None
+ self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
+ #using version 1 of glance client in new_image()
+ self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+ endpoint=glance_endpoint)
+ self.session['reload_client'] = False
+ self.persistent_info['session'] = self.session
+    # add availability zone info inside self.persistent_info
+ self._set_availablity_zones()
+ self.persistent_info['availability_zone'] = self.availability_zone
def __net_os2mano(self, net_list_dict):
'''Transform the net openstack format to mano format
net['type']='data'
else:
net['type']='bridge'
-
-
-
+
+ def __classification_os2mano(self, class_list_dict):
+ """Transform the openstack format (Flow Classifier) to mano format
+ (Classification) class_list_dict can be a list of dict or a single dict
+ """
+ if isinstance(class_list_dict, dict):
+ class_list_ = [class_list_dict]
+ elif isinstance(class_list_dict, list):
+ class_list_ = class_list_dict
+ else:
+ raise TypeError(
+ "param class_list_dict must be a list or a dictionary")
+ for classification in class_list_:
+ id = classification.pop('id')
+ name = classification.pop('name')
+ description = classification.pop('description')
+ project_id = classification.pop('project_id')
+ tenant_id = classification.pop('tenant_id')
+ original_classification = copy.deepcopy(classification)
+ classification.clear()
+ classification['ctype'] = 'legacy_flow_classifier'
+ classification['definition'] = original_classification
+ classification['id'] = id
+ classification['name'] = name
+ classification['description'] = description
+ classification['project_id'] = project_id
+ classification['tenant_id'] = tenant_id
+
+ def __sfi_os2mano(self, sfi_list_dict):
+ """Transform the openstack format (Port Pair) to mano format (SFI)
+ sfi_list_dict can be a list of dict or a single dict
+ """
+ if isinstance(sfi_list_dict, dict):
+ sfi_list_ = [sfi_list_dict]
+ elif isinstance(sfi_list_dict, list):
+ sfi_list_ = sfi_list_dict
+ else:
+ raise TypeError(
+ "param sfi_list_dict must be a list or a dictionary")
+ for sfi in sfi_list_:
+ sfi['ingress_ports'] = []
+ sfi['egress_ports'] = []
+ if sfi.get('ingress'):
+ sfi['ingress_ports'].append(sfi['ingress'])
+ if sfi.get('egress'):
+ sfi['egress_ports'].append(sfi['egress'])
+ del sfi['ingress']
+ del sfi['egress']
+ params = sfi.get('service_function_parameters')
+ sfc_encap = False
+ if params:
+ correlation = params.get('correlation')
+ if correlation:
+ sfc_encap = True
+ sfi['sfc_encap'] = sfc_encap
+ del sfi['service_function_parameters']
+
+ def __sf_os2mano(self, sf_list_dict):
+ """Transform the openstack format (Port Pair Group) to mano format (SF)
+ sf_list_dict can be a list of dict or a single dict
+ """
+ if isinstance(sf_list_dict, dict):
+ sf_list_ = [sf_list_dict]
+ elif isinstance(sf_list_dict, list):
+ sf_list_ = sf_list_dict
+ else:
+ raise TypeError(
+ "param sf_list_dict must be a list or a dictionary")
+ for sf in sf_list_:
+ del sf['port_pair_group_parameters']
+ sf['sfis'] = sf['port_pairs']
+ del sf['port_pairs']
+
+ def __sfp_os2mano(self, sfp_list_dict):
+ """Transform the openstack format (Port Chain) to mano format (SFP)
+ sfp_list_dict can be a list of dict or a single dict
+ """
+ if isinstance(sfp_list_dict, dict):
+ sfp_list_ = [sfp_list_dict]
+ elif isinstance(sfp_list_dict, list):
+ sfp_list_ = sfp_list_dict
+ else:
+ raise TypeError(
+ "param sfp_list_dict must be a list or a dictionary")
+ for sfp in sfp_list_:
+ params = sfp.pop('chain_parameters')
+ sfc_encap = False
+ if params:
+ correlation = params.get('correlation')
+ if correlation:
+ sfc_encap = True
+ sfp['sfc_encap'] = sfc_encap
+ sfp['spi'] = sfp.pop('chain_id')
+ sfp['classifications'] = sfp.pop('flow_classifiers')
+ sfp['service_functions'] = sfp.pop('port_pair_groups')
+
    # placeholder for now; read TODO note below
    def _validate_classification(self, type, definition):
        """Validate a classification 'definition' against its 'type'.

        Currently a no-op that accepts everything, since
        'legacy_flow_classifier' is the only supported classification type.
        :return: True (always, for now)
        """
        # only legacy_flow_classifier Type is supported at this point
        return True
        # TODO(igordcard): this method should be an abstract method of an
        # abstract Classification class to be implemented by the specific
        # Types. Also, abstract vimconnector should call the validation
        # method before the implemented VIM connectors are called.
+
def _format_exception(self, exception):
'''Transform a keystone, nova, neutron exception into a vimconn exception'''
if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed
)):
- raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
- elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
+ raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
+ elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
neExceptions.NeutronException, nvExceptions.BadRequest)):
raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound)):
raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
elif isinstance(exception, nvExceptions.Conflict):
raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
- else: # ()
+ elif isinstance(exception, vimconn.vimconnException):
+ raise
+ else: # ()
+ self.logger.error("General Exception " + str(exception), exc_info=True)
raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
def get_tenant_list(self, filter_dict={}):
self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
- project_class_list=self.keystone.projects.findall(**filter_dict)
+ if self.api_version3:
+ project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
else:
- project_class_list=self.keystone.tenants.findall(**filter_dict)
+ project_class_list = self.keystone.tenants.findall(**filter_dict)
project_list=[]
for project in project_class_list:
+ if filter_dict.get('id') and filter_dict["id"] != project.id:
+ continue
project_list.append(project.to_dict())
return project_list
- except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+ except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
def new_tenant(self, tenant_name, tenant_description):
self.logger.debug("Adding a new tenant name: %s", tenant_name)
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
- project=self.keystone.projects.create(tenant_name, tenant_description)
+ if self.api_version3:
+ project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
+ description=tenant_description, is_domain=False)
else:
- project=self.keystone.tenants.create(tenant_name, tenant_description)
+ project = self.keystone.tenants.create(tenant_name, tenant_description)
return project.id
except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
self.logger.debug("Deleting tenant %s from VIM", tenant_id)
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3':
+ if self.api_version3:
self.keystone.projects.delete(tenant_id)
else:
self.keystone.tenants.delete(tenant_id)
network_dict["provider:network_type"] = "vlan"
if vlan!=None:
network_dict["provider:network_type"] = vlan
+
+ ####### VIO Specific Changes #########
+ if self.vim_type == "VIO":
+ if vlan is not None:
+ network_dict["provider:segmentation_id"] = vlan
+ else:
+ if self.config.get('dataplane_net_vlan_range') is None:
+ raise vimconn.vimconnConflictException("You must provide "\
+ "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
+ "at config value before creating sriov network with vlan tag")
+
+ network_dict["provider:segmentation_id"] = self._genrate_vlanID()
+
network_dict["shared"]=shared
new_net=self.neutron.create_network({'network':network_dict})
#print new_net
#Fake subnet is required
subnet_rand = random.randint(0, 255)
ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
- if 'ip_version' not in ip_profile:
+ if 'ip_version' not in ip_profile:
ip_profile['ip_version'] = "IPv4"
- subnet={"name":net_name+"-subnet",
+ subnet = {"name":net_name+"-subnet",
"network_id": new_net["network"]["id"],
"ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
"cidr": ip_profile['subnet_address']
}
- if 'gateway_address' in ip_profile:
- subnet['gateway_ip'] = ip_profile['gateway_address']
+ # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
+ subnet['gateway_ip'] = ip_profile.get('gateway_address')
if ip_profile.get('dns_address'):
- #TODO: manage dns_address as a list of addresses separated by commas
- subnet['dns_nameservers'] = []
- subnet['dns_nameservers'].append(ip_profile['dns_address'])
+ subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
if 'dhcp_enabled' in ip_profile:
subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
if 'dhcp_start_address' in ip_profile:
- subnet['allocation_pools']=[]
+ subnet['allocation_pools'] = []
subnet['allocation_pools'].append(dict())
subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
if 'dhcp_count' in ip_profile:
self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
try:
self._reload_connection()
- if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
- filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check
net_dict=self.neutron.list_networks(**filter_dict)
net_list=net_dict["networks"]
self.__net_os2mano(net_list)
error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- '''
+ '''
net_dict={}
for net_id in net_list:
net = {}
else:
net["status"] = "OTHER"
net["error_msg"] = "VIM status reported " + net_vim['status']
-
+
if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
net['status'] = 'DOWN'
try:
def get_flavor_id_from_data(self, flavor_dict):
"""Obtain flavor id that match the flavor description
Returns the flavor_id or raises a vimconnNotFoundException
+ flavor_dict: contains the required ram, vcpus, disk
+ If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
+ and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
+ vimconnNotFoundException is raised
"""
+ exact_match = False if self.config.get('use_existing_flavors') else True
try:
self._reload_connection()
- numa=None
- numas = flavor_dict.get("extended",{}).get("numas")
+ flavor_candidate_id = None
+ flavor_candidate_data = (10000, 10000, 10000)
+ flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+ # numa=None
+ numas = flavor_dict.get("extended", {}).get("numas")
if numas:
#TODO
raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
epa = flavor.get_keys()
if epa:
continue
- #TODO
- if flavor.ram != flavor_dict["ram"]:
- continue
- if flavor.vcpus != flavor_dict["vcpus"]:
- continue
- if flavor.disk != flavor_dict["disk"]:
- continue
- return flavor.id
+ # TODO
+ flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+ if flavor_data == flavor_target:
+ return flavor.id
+ elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+ flavor_candidate_id = flavor.id
+ flavor_candidate_data = flavor_data
+ if not exact_match and flavor_candidate_id:
+ return flavor_candidate_id
raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
self._format_exception(e)
while name in fl_names:
name_suffix += 1
name = flavor_data['name']+"-" + str(name_suffix)
-
+
ram = flavor_data.get('ram',64)
vcpus = flavor_data.get('vcpus',1)
numa_properties=None
numa_properties["hw:mem_page_size"] = "large"
numa_properties["hw:cpu_policy"] = "dedicated"
numa_properties["hw:numa_mempolicy"] = "strict"
+ if self.vim_type == "VIO":
+ numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+ numa_properties["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
#overwrite ram and vcpus
ram = numa['memory']*1024
+ #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
if 'paired-threads' in numa:
vcpus = numa['paired-threads']*2
- numa_properties["hw:cpu_threads_policy"] = "prefer"
+                        #cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "require"
+ numa_properties["hw:cpu_policy"] = "dedicated"
elif 'cores' in numa:
vcpus = numa['cores']
- #numa_properties["hw:cpu_threads_policy"] = "prefer"
+                        # cpu_thread_policy "isolate" implies that the host either has no SMT, or that the thread siblings of the used cores are left unused
+ numa_properties["hw:cpu_thread_policy"] = "isolate"
+ numa_properties["hw:cpu_policy"] = "dedicated"
elif 'threads' in numa:
vcpus = numa['threads']
- numa_properties["hw:cpu_policy"] = "isolated"
+ # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+ numa_properties["hw:cpu_thread_policy"] = "prefer"
+ numa_properties["hw:cpu_policy"] = "dedicated"
# for interface in numa.get("interfaces",() ):
# if interface["dedicated"]=="yes":
# raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
# #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
-
+
#create flavor
- new_flavor=self.nova.flavors.create(name,
- ram,
- vcpus,
+ new_flavor=self.nova.flavors.create(name,
+ ram,
+ vcpus,
flavor_data.get('disk',1),
is_public=flavor_data.get('is_public', True)
- )
+ )
#add metadata
if numa_properties:
new_flavor.set_keys(numa_properties)
metadata: metadata of the image
Returns the image_id
'''
- #using version 1 of glance client
- glancev1 = gl1Client.Client('1',self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds
retry=0
max_retries=3
while retry<max_retries:
disk_format="raw"
self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
if image_dict['location'][0:4]=="http":
- new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
container_format="bare", location=image_dict['location'], disk_format=disk_format)
else: #local path
with open(image_dict['location']) as fimage:
- new_image = glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+ new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
container_format="bare", data=fimage, disk_format=disk_format)
#insert metadata. We cannot use 'new_image.properties.setdefault'
#because nova and glance are "INDEPENDENT" and we are using nova for reading metadata
except IOError as e: #can not open the file
raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)+ " for " + image_dict['location'],
http_code=vimconn.HTTP_Bad_Request)
-
+
def delete_image(self, image_id):
'''Deletes a tenant image from openstack VIM. Returns the old id
'''
self._format_exception(e)
def get_image_id_from_path(self, path):
- '''Get the image id from image path in the VIM database. Returns the image_id'''
+ '''Get the image id from image path in the VIM database. Returns the image_id'''
try:
self._reload_connection()
images = self.nova.images.list()
raise vimconn.vimconnNotFoundException("image with location '{}' not found".format( path))
except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
self._format_exception(e)
-
+
def get_image_list(self, filter_dict={}):
'''Obtain tenant images from VIM
Filter_dict can be:
except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
self._format_exception(e)
- def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
+ @staticmethod
+ def _create_mimemultipart(content_list):
+ """Creates a MIMEmultipart text combining the content_list
+ :param content_list: list of text scripts to be combined
+ :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
+ element MIMEmultipart is not created and this content is returned
+ """
+ if not content_list:
+ return None
+ elif len(content_list) == 1:
+ return content_list[0]
+ combined_message = MIMEMultipart()
+ for content in content_list:
+ if content.startswith('#include'):
+ format = 'text/x-include-url'
+ elif content.startswith('#include-once'):
+ format = 'text/x-include-once-url'
+ elif content.startswith('#!'):
+ format = 'text/x-shellscript'
+ elif content.startswith('#cloud-config'):
+ format = 'text/cloud-config'
+ elif content.startswith('#cloud-config-archive'):
+ format = 'text/cloud-config-archive'
+ elif content.startswith('#upstart-job'):
+ format = 'text/upstart-job'
+ elif content.startswith('#part-handler'):
+ format = 'text/part-handler'
+ elif content.startswith('#cloud-boothook'):
+ format = 'text/cloud-boothook'
+ else: # by default
+ format = 'text/x-shellscript'
+ sub_message = MIMEText(content, format, sys.getdefaultencoding())
+ combined_message.attach(sub_message)
+ return combined_message.as_string()
+
+ def __wait_for_vm(self, vm_id, status):
+ """wait until vm is in the desired status and return True.
+ If the VM gets in ERROR status, return false.
+ If the timeout is reached generate an exception"""
+ elapsed_time = 0
+ while elapsed_time < server_timeout:
+ vm_status = self.nova.servers.get(vm_id).status
+ if vm_status == status:
+ return True
+ if vm_status == 'ERROR':
+ return False
+ time.sleep(1)
+ elapsed_time += 1
+
+ # if we exceeded the timeout rollback
+ if elapsed_time >= server_timeout:
+ raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
+ http_code=vimconn.HTTP_Request_Timeout)
+
+ def _get_openstack_availablity_zones(self):
+ """
+ Get from openstack availability zones available
+ :return:
+ """
+ try:
+ openstack_availability_zone = self.nova.availability_zones.list()
+ openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
+ if zone.zoneName != 'internal']
+ return openstack_availability_zone
+ except Exception as e:
+ return None
+
+ def _set_availablity_zones(self):
+ """
+ Set vim availablity zone
+ :return:
+ """
+
+ if 'availability_zone' in self.config:
+ vim_availability_zones = self.config.get('availability_zone')
+ if isinstance(vim_availability_zones, str):
+ self.availability_zone = [vim_availability_zones]
+ elif isinstance(vim_availability_zones, list):
+ self.availability_zone = vim_availability_zones
+ else:
+ self.availability_zone = self._get_openstack_availablity_zones()
+
+ def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+ """
+ Return thge availability zone to be used by the created VM.
+ :return: The VIM availability zone to be used or None
+ """
+ if availability_zone_index is None:
+ if not self.config.get('availability_zone'):
+ return None
+ elif isinstance(self.config.get('availability_zone'), str):
+ return self.config['availability_zone']
+ else:
+ # TODO consider using a different parameter at config for default AV and AV list match
+ return self.config['availability_zone'][0]
+
+ vim_availability_zones = self.availability_zone
+ # check if VIM offer enough availability zones describe in the VNFD
+ if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+ # check if all the names of NFV AV match VIM AV names
+ match_by_index = False
+ for av in availability_zone_list:
+ if av not in vim_availability_zones:
+ match_by_index = True
+ break
+ if match_by_index:
+ return vim_availability_zones[availability_zone_index]
+ else:
+ return availability_zone_list[availability_zone_index]
+ else:
+ raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+ def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+ availability_zone_index=None, availability_zone_list=None):
'''Adds a VM instance to VIM
Params:
start: indicates if VM must start or boot in pause mode. Ignored
type: 'virtual', 'PF', 'VF', 'VFnotShared'
vim_id: filled/added by this function
floating_ip: True/False (or it can be None)
+ 'cloud_config': (optional) dictionary with:
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+ 'users': (optional) list of users to be inserted, each item is a dict with:
+ 'name': (mandatory) user name,
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+ 'user-data': (optional) string is a text script to be passed directly to cloud-init
+ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+ 'dest': (mandatory) string with the destination absolute path
+ 'encoding': (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ 'content' (mandatory): string with the content of the file
+ 'permissions': (optional) string with file permissions, typically octal notation '0644'
+ 'owner': (optional) file owner, string with the format 'owner:group'
+ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+ 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+ 'size': (mandatory) string with the size of the disk in GB
+ availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+ availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
+ availability_zone_index is None
#TODO ip, security groups
Returns the instance identifier
'''
self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
try:
+ server = None
metadata={}
net_list_vim=[]
- external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
+ external_network=[] # list of external networks to be connected to instance, later on used to create floating_ip
+ no_secured_ports = [] # List of port-is with port-security disabled
self._reload_connection()
- metadata_vpci={} #For a specific neutron plugin
+ metadata_vpci={} # For a specific neutron plugin
+ block_device_mapping = None
for net in net_list:
if not net.get("net_id"): #skip non connected iface
continue
metadata_vpci["VF"]=[]
metadata_vpci["VF"].append([ net["vpci"], "" ])
port_dict["binding:vnic_type"]="direct"
+ ########## VIO specific Changes #######
+ if self.vim_type == "VIO":
+ #Need to create port with port_security_enabled = False and no-security-groups
+ port_dict["port_security_enabled"]=False
+ port_dict["provider_security_groups"]=[]
+ port_dict["security_groups"]=[]
else: #For PT
+ ########## VIO specific Changes #######
+ #Current VIO release does not support port with type 'direct-physical'
+ #So no need to create virtual port in case of PCI-device.
+ #Will update port_dict code when support gets added in next VIO release
+ if self.vim_type == "VIO":
+ raise vimconn.vimconnNotSupportedException("Current VIO release does not support full passthrough (PT)")
if "vpci" in net:
if "PF" not in metadata_vpci:
metadata_vpci["PF"]=[]
port_dict["name"]=name
if net.get("mac_address"):
port_dict["mac_address"]=net["mac_address"]
- if net.get("port_security") == False:
- port_dict["port_security_enabled"]=net["port_security"]
new_port = self.neutron.create_port({"port": port_dict })
net["mac_adress"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
- net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
- net_list_vim.append({"port-id": new_port["port"]["id"]})
+ # if try to use a network without subnetwork, it will return a emtpy list
+ fixed_ips = new_port["port"].get("fixed_ips")
+ if fixed_ips:
+ net["ip"] = fixed_ips[0].get("ip_address")
+ else:
+ net["ip"] = None
+
+ port = {"port-id": new_port["port"]["id"]}
+ if float(self.nova.api_version.get_string()) >= 2.32:
+ port["tag"] = new_port["port"]["name"]
+ net_list_vim.append(port)
if net.get('floating_ip', False):
net['exit_on_floating_ip_error'] = True
net['exit_on_floating_ip_error'] = False
external_network.append(net)
+ # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
+ # As a workaround we wait until the VM is active and then disable the port-security
+ if net.get("port_security") == False:
+ no_secured_ports.append(new_port["port"]["id"])
+
if metadata_vpci:
metadata = {"pci_assignement": json.dumps(metadata_vpci)}
if len(metadata["pci_assignement"]) >255:
#metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
metadata = {}
-
+
self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s",
name, image_id, flavor_id, str(net_list_vim), description, str(metadata))
-
+
security_groups = self.config.get('security_groups')
if type(security_groups) is str:
security_groups = ( security_groups, )
#cloud config
userdata=None
config_drive = None
+ userdata_list = []
if isinstance(cloud_config, dict):
if cloud_config.get("user-data"):
- userdata=cloud_config["user-data"]
+ if isinstance(cloud_config["user-data"], str):
+ userdata_list.append(cloud_config["user-data"])
+ else:
+ for u in cloud_config["user-data"]:
+ userdata_list.append(u)
if cloud_config.get("boot-data-drive") != None:
config_drive = cloud_config["boot-data-drive"]
if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
- if userdata:
- raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
userdata_dict={}
#default user
if cloud_config.get("key-pairs"):
if file.get("owner"):
file_info["owner"] = file["owner"]
userdata_dict["write_files"].append(file_info)
- userdata = "#cloud-config\n"
- userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
+ userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
+ default_flow_style=False))
+ userdata = self._create_mimemultipart(userdata_list)
self.logger.debug("userdata: %s", userdata)
elif isinstance(cloud_config, str):
userdata = cloud_config
#Create additional volumes in case these are present in disk_list
- block_device_mapping = None
base_disk_index = ord('b')
if disk_list != None:
- block_device_mapping = dict()
+ block_device_mapping = {}
for disk in disk_list:
if 'image_id' in disk:
volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
http_code=vimconn.HTTP_Request_Timeout)
+ # get availability Zone
+ vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
+ self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, "
+ "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+ "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata,
+ security_groups, vm_av_zone, self.config.get('keypair'),
+ userdata, config_drive, block_device_mapping))
server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
security_groups=security_groups,
- availability_zone=self.config.get('availability_zone'),
+ availability_zone=vm_av_zone,
key_name=self.config.get('keypair'),
userdata=userdata,
- config_drive = config_drive,
- block_device_mapping = block_device_mapping
+ config_drive=config_drive,
+ block_device_mapping=block_device_mapping
) # , description=description)
+
+ # Previously mentioned workaround to wait until the VM is active and then disable the port-security
+ if no_secured_ports:
+ self.__wait_for_vm(server.id, 'ACTIVE')
+
+ for port_id in no_secured_ports:
+ try:
+ self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} })
+
+ except Exception as e:
+ self.logger.error("It was not possible to disable port security for port {}".format(port_id))
+ self.delete_vminstance(server.id)
+ raise
+
#print "DONE :-)", server
pool_id = None
floating_ips = self.neutron.list_floatingips().get("floatingips", ())
- for floating_network in external_network:
- try:
- # wait until vm is active
- elapsed_time = 0
- while elapsed_time < server_timeout:
- status = self.nova.servers.get(server.id).status
- if status == 'ACTIVE':
- break
- time.sleep(1)
- elapsed_time += 1
- #if we exceeded the timeout rollback
- if elapsed_time >= server_timeout:
- raise vimconn.vimconnException('Timeout creating instance ' + name,
- http_code=vimconn.HTTP_Request_Timeout)
+ if external_network:
+ self.__wait_for_vm(server.id, 'ACTIVE')
+ for floating_network in external_network:
+ try:
assigned = False
while(assigned == False):
if floating_ips:
if not floating_network['exit_on_floating_ip_error']:
self.logger.warn("Cannot create floating_ip. %s", str(e))
continue
- self.delete_vminstance(server.id)
raise
return server.id
# except nvExceptions.NotFound as e:
# error_value=-vimconn.HTTP_Not_Found
# error_text= "vm instance %s not found" % vm_id
- except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+# except TypeError as e:
+# raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
+
+ except Exception as e:
# delete the volumes we just created
- if block_device_mapping != None:
+ if block_device_mapping:
for volume_id in block_device_mapping.itervalues():
self.cinder.volumes.delete(volume_id)
- # delete ports we just created
- for net_item in net_list_vim:
- if 'port-id' in net_item:
- self.neutron.delete_port(net_item['port-id'])
+ # Delete the VM
+ if server != None:
+ self.delete_vminstance(server.id)
+ else:
+ # delete ports we just created
+ for net_item in net_list_vim:
+ if 'port-id' in net_item:
+ self.neutron.delete_port(net_item['port-id'])
+
self._format_exception(e)
- except TypeError as e:
- raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
def get_vminstance(self,vm_id):
'''Returns the VM instance information from VIM'''
console_dict = server.get_spice_console(console_type)
else:
raise vimconn.vimconnException("console type '{}' not allowed".format(console_type), http_code=vimconn.HTTP_Bad_Request)
-
+
console_dict1 = console_dict.get("console")
if console_dict1:
console_url = console_dict1.get("url")
if protocol_index < 0 or port_index<0 or suffix_index<0:
return -vimconn.HTTP_Internal_Server_Error, "Unexpected response from VIM"
console_dict={"protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index+2:port_index],
- "port": console_url[port_index:suffix_index],
- "suffix": console_url[suffix_index+1:]
+ "server": console_url[protocol_index+2:port_index],
+ "port": console_url[port_index:suffix_index],
+ "suffix": console_url[suffix_index+1:]
}
protocol_index += 2
return console_dict
raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")
-
+
except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
self._format_exception(e)
interface["mac_address"] = port.get("mac_address")
interface["vim_net_id"] = port["network_id"]
interface["vim_interface_id"] = port["id"]
- interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
+ # check if OS-EXT-SRV-ATTR:host is there,
+ # in case of non-admin credentials, it will be missing
+ if vm_vim.get('OS-EXT-SRV-ATTR:host'):
+ interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
interface["pci"] = None
- if port['binding:profile'].get('pci_slot'):
- # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
- # TODO: This is just a workaround valid for niantinc. Find a better way to do so
- # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
- pci = port['binding:profile']['pci_slot']
- # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
- interface["pci"] = pci
+
+ # check if binding:profile is there,
+ # in case of non-admin credentials, it will be missing
+ if port.get('binding:profile'):
+ if port['binding:profile'].get('pci_slot'):
+ # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
+ # TODO: This is just a workaround valid for niantinc. Find a better way to do so
+ # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
+ pci = port['binding:profile']['pci_slot']
+ # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+ interface["pci"] = pci
interface["vlan"] = None
#if network is of type vlan and port is of type direct (sr-iov) then set vlan id
network = self.neutron.show_network(port["network_id"])
vm['error_msg'] = str(e)
vm_dict[vm_id] = vm
return vm_dict
-
+
def action_vminstance(self, vm_id, action_dict):
'''Send and action over a VM instance from VIM
Returns the vm_id if the action was successfully sent to the VIM'''
self._reload_connection()
server = self.nova.servers.find(id=vm_id)
if "start" in action_dict:
- if action_dict["start"]=="rebuild":
+ if action_dict["start"]=="rebuild":
server.rebuild()
else:
if server.status=="PAUSED":
elif console_type == "spice-html5":
console_dict = server.get_spice_console(console_type)
else:
- raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
+ raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
http_code=vimconn.HTTP_Bad_Request)
try:
console_url = console_dict["console"]["url"]
if protocol_index < 0 or port_index<0 or suffix_index<0:
raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
console_dict2={"protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index+2 : port_index],
- "port": int(console_url[port_index+1 : suffix_index]),
- "suffix": console_url[suffix_index+1:]
+ "server": console_url[protocol_index+2 : port_index],
+ "port": int(console_url[port_index+1 : suffix_index]),
+ "suffix": console_url[suffix_index+1:]
}
- return console_dict2
+ return console_dict2
except Exception as e:
raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
-
+
return vm_id
except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
self._format_exception(e)
#TODO insert exception vimconn.HTTP_Unauthorized
+ ####### VIO Specific Changes #########
+    def _genrate_vlanID(self):
+        """
+        Method to get unused vlanID
+        Args:
+            None
+        Returns:
+            vlanID (int): first VLAN ID found in the configured
+            'dataplane_net_vlan_range' ranges that is not already used as a
+            'provider:segmentation_id' by an existing network
+        Raises:
+            vimconnException: a configured range string could not be parsed
+            vimconnConflictException: every ID in every range is in use
+        """
+        # NOTE(review): the 'genrate' typo in the method name is kept because
+        # callers elsewhere may reference it; renaming would break the API.
+        #Get used VLAN IDs
+        usedVlanIDs = []
+        networks = self.get_network_list()
+        for net in networks:
+            if net.get('provider:segmentation_id'):
+                usedVlanIDs.append(net.get('provider:segmentation_id'))
+        used_vlanIDs = set(usedVlanIDs)  # set gives O(1) membership tests below
+
+        #find unused VLAN ID
+        for vlanID_range in self.config.get('dataplane_net_vlan_range'):
+            try:
+                # ranges are strings like "3000-3100"; embedded spaces tolerated
+                start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+                for vlanID in xrange(start_vlanid, end_vlanid + 1):
+                    if vlanID not in used_vlanIDs:
+                        return vlanID
+            except Exception as exp:
+                raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+        else:
+            # for/else: reached only when no range produced a free ID
+            raise vimconn.vimconnConflictException("Unable to create the SRIOV VLAN network."\
+                " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
+
+
+ def _validate_vlan_ranges(self, dataplane_net_vlan_range):
+ """
+ Method to validate user given vlanID ranges
+ Args: None
+ Returns: None
+ """
+ for vlanID_range in dataplane_net_vlan_range:
+ vlan_range = vlanID_range.replace(" ", "")
+ #validate format
+ vlanID_pattern = r'(\d)*-(\d)*$'
+ match_obj = re.match(vlanID_pattern, vlan_range)
+ if not match_obj:
+ raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}.You must provide "\
+ "'dataplane_net_vlan_range' in format [start_ID - end_ID].".format(vlanID_range))
+
+ start_vlanid , end_vlanid = map(int,vlan_range.split("-"))
+ if start_vlanid <= 0 :
+ raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+ "Start ID can not be zero. For VLAN "\
+ "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+ if end_vlanid > 4094 :
+ raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+ "End VLAN ID can not be greater than 4094. For VLAN "\
+ "networks valid IDs are 1 to 4094 ".format(vlanID_range))
+
+ if start_vlanid > end_vlanid:
+ raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\
+ "You must provide a 'dataplane_net_vlan_range' in format start_ID - end_ID and "\
+ "start_ID < end_ID ".format(vlanID_range))
+
#NOT USED FUNCTIONS
-
+
def new_external_port(self, port_data):
#TODO openstack if needed
'''Adds a external port to VIM'''
'''Returns the port identifier'''
- return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
-
+ return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
+
def connect_port_network(self, port_id, network_id, admin=False):
#TODO openstack if needed
'''Connects a external port to a network'''
'''Returns status code of the VIM response'''
- return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
-
+ return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
+
def new_user(self, user_name, user_passwd, tenant_id=None):
'''Adds a new user to openstack VIM'''
'''Returns the user identifier'''
#if reaching here is because an exception
if self.debug:
self.logger.debug("new_user " + error_text)
- return error_value, error_text
+ return error_value, error_text
def delete_user(self, user_id):
'''Delete a user from openstack VIM'''
'''Returns the user identifier'''
if self.debug:
- print "osconnector: Deleting a user from VIM"
+ print("osconnector: Deleting a user from VIM")
try:
self._reload_connection()
self.keystone.users.delete(user_id)
#TODO insert exception vimconn.HTTP_Unauthorized
#if reaching here is because an exception
if self.debug:
- print "delete_tenant " + error_text
+ print("delete_tenant " + error_text)
return error_value, error_text
-
+
def get_hosts_info(self):
'''Get the information of deployed hosts
Returns the hosts content'''
if self.debug:
- print "osconnector: Getting Host info from VIM"
+ print("osconnector: Getting Host info from VIM")
try:
h_list=[]
self._reload_connection()
#TODO insert exception vimconn.HTTP_Unauthorized
#if reaching here is because an exception
if self.debug:
- print "get_hosts_info " + error_text
- return error_value, error_text
+ print("get_hosts_info " + error_text)
+ return error_value, error_text
def get_hosts(self, vim_tenant):
'''Get the hosts and deployed instances
#TODO insert exception vimconn.HTTP_Unauthorized
#if reaching here is because an exception
if self.debug:
- print "get_hosts " + error_text
- return error_value, error_text
-
+ print("get_hosts " + error_text)
+ return error_value, error_text
+
+    def new_classification(self, name, ctype, definition):
+        """Create a (Traffic) Classification in the VIM as a Neutron
+        networking-sfc flow classifier.
+
+        :param name: name given to the flow classifier
+        :param ctype: classification type; must be one of
+            supportedClassificationTypes
+        :param definition: dict with the classifier fields, passed through to
+            Neutron after validation
+        :return: uuid of the created flow classifier
+        :raises vimconnNotSupportedException: ctype is not supported
+        :raises vimconnException: definition fails _validate_classification
+        """
+        self.logger.debug(
+            'Adding a new (Traffic) Classification to VIM, named %s', name)
+        try:
+            new_class = None
+            self._reload_connection()
+            # supportedClassificationTypes is defined elsewhere in this file
+            # (module/class level) -- TODO confirm its contents
+            if ctype not in supportedClassificationTypes:
+                raise vimconn.vimconnNotSupportedException(
+                    'OpenStack VIM connector doesn\'t support provided '
+                    'Classification Type {}, supported ones are: '
+                    '{}'.format(ctype, supportedClassificationTypes))
+            if not self._validate_classification(ctype, definition):
+                raise vimconn.vimconnException(
+                    'Incorrect Classification definition '
+                    'for the type specified.')
+            # the definition dict doubles as the Neutron request body;
+            # note this also adds the 'name' key to the caller's dict
+            classification_dict = definition
+            classification_dict['name'] = name
+
+            new_class = self.neutron.create_flow_classifier(
+                {'flow_classifier': classification_dict})
+            return new_class['flow_classifier']['id']
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            self.logger.error(
+                'Creation of Classification failed.')
+            self._format_exception(e)
+
+ def get_classification(self, class_id):
+ self.logger.debug(" Getting Classification %s from VIM", class_id)
+ filter_dict = {"id": class_id}
+ class_list = self.get_classification_list(filter_dict)
+ if len(class_list) == 0:
+ raise vimconn.vimconnNotFoundException(
+ "Classification '{}' not found".format(class_id))
+ elif len(class_list) > 1:
+ raise vimconn.vimconnConflictException(
+ "Found more than one Classification with this criteria")
+ classification = class_list[0]
+ return classification
+
+ def get_classification_list(self, filter_dict={}):
+ self.logger.debug("Getting Classifications from VIM filter: '%s'",
+ str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ classification_dict = self.neutron.list_flow_classifier(
+ **filter_dict)
+ classification_list = classification_dict["flow_classifiers"]
+ self.__classification_os2mano(classification_list)
+ return classification_list
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+ neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+ def delete_classification(self, class_id):
+ self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+ try:
+ self._reload_connection()
+ self.neutron.delete_flow_classifier(class_id)
+ return class_id
+ except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+ ksExceptions.ClientException, neExceptions.NeutronException,
+ ConnectionError) as e:
+ self._format_exception(e)
+
+ def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+ self.logger.debug(
+ "Adding a new Service Function Instance to VIM, named '%s'", name)
+ try:
+ new_sfi = None
+ self._reload_connection()
+ correlation = None
+ if sfc_encap:
+ # TODO(igordc): must be changed to NSH in Queens
+ # (MPLS is a workaround)
+ correlation = 'mpls'
+ if len(ingress_ports) != 1:
+ raise vimconn.vimconnNotSupportedException(
+ "OpenStack VIM connector can only have "
+ "1 ingress port per SFI")
+ if len(egress_ports) != 1:
+ raise vimconn.vimconnNotSupportedException(
+ "OpenStack VIM connector can only have "
+ "1 egress port per SFI")
+ sfi_dict = {'name': name,
+ 'ingress': ingress_ports[0],
+ 'egress': egress_ports[0],
+ 'service_function_parameters': {
+ 'correlation': correlation}}
+ new_sfi = self.neutron.create_port_pair({'port_pair': sfi_dict})
+ return new_sfi['port_pair']['id']
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+ neExceptions.NeutronException, ConnectionError) as e:
+ if new_sfi:
+ try:
+ self.neutron.delete_port_pair_group(
+ new_sfi['port_pair']['id'])
+ except Exception:
+ self.logger.error(
+ 'Creation of Service Function Instance failed, with '
+ 'subsequent deletion failure as well.')
+ self._format_exception(e)
+
+ def get_sfi(self, sfi_id):
+ self.logger.debug(
+ 'Getting Service Function Instance %s from VIM', sfi_id)
+ filter_dict = {"id": sfi_id}
+ sfi_list = self.get_sfi_list(filter_dict)
+ if len(sfi_list) == 0:
+ raise vimconn.vimconnNotFoundException(
+ "Service Function Instance '{}' not found".format(sfi_id))
+ elif len(sfi_list) > 1:
+ raise vimconn.vimconnConflictException(
+ 'Found more than one Service Function Instance '
+ 'with this criteria')
+ sfi = sfi_list[0]
+ return sfi
+
+ def get_sfi_list(self, filter_dict={}):
+ self.logger.debug("Getting Service Function Instances from "
+ "VIM filter: '%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ sfi_dict = self.neutron.list_port_pair(**filter_dict)
+ sfi_list = sfi_dict["port_pairs"]
+ self.__sfi_os2mano(sfi_list)
+ return sfi_list
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+ neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def delete_sfi(self, sfi_id):
+ self.logger.debug("Deleting Service Function Instance '%s' "
+ "from VIM", sfi_id)
+ try:
+ self._reload_connection()
+ self.neutron.delete_port_pair(sfi_id)
+ return sfi_id
+ except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+ ksExceptions.ClientException, neExceptions.NeutronException,
+ ConnectionError) as e:
+ self._format_exception(e)
+
+    def new_sf(self, name, sfis, sfc_encap=True):
+        """Create a Service Function in the VIM as a networking-sfc
+        port pair group.
+
+        :param name: name for the port pair group
+        :param sfis: list of SFI (port pair) uuids to group
+        :param sfc_encap: when True, require MPLS correlation on all SFIs
+            (NSH workaround, see TODO below)
+        :return: uuid of the created port pair group
+        :raises vimconnNotSupportedException: SFIs disagree on encapsulation
+        """
+        self.logger.debug("Adding a new Service Function to VIM, "
+                          "named '%s'", name)
+        try:
+            new_sf = None
+            self._reload_connection()
+            correlation = None
+            if sfc_encap:
+                # TODO(igordc): must be changed to NSH in Queens
+                # (MPLS is a workaround)
+                correlation = 'mpls'
+            for instance in sfis:
+                sfi = self.get_sfi(instance)
+                # NOTE(review): this assumes __sfi_os2mano leaves the
+                # correlation string ('mpls'/None) in the 'sfc_encap' field;
+                # if it maps it to a boolean this check always fails -- verify
+                if sfi.get('sfc_encap') != correlation:
+                    raise vimconn.vimconnNotSupportedException(
+                        "OpenStack VIM connector requires all SFIs of the "
+                        "same SF to share the same SFC Encapsulation")
+            sf_dict = {'name': name,
+                       'port_pairs': sfis}
+            new_sf = self.neutron.create_port_pair_group({
+                'port_pair_group': sf_dict})
+            return new_sf['port_pair_group']['id']
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            # rollback: remove the group if it was created before the failure
+            if new_sf:
+                try:
+                    self.neutron.delete_port_pair_group(
+                        new_sf['port_pair_group']['id'])
+                except Exception:
+                    self.logger.error(
+                        'Creation of Service Function failed, with '
+                        'subsequent deletion failure as well.')
+            self._format_exception(e)
+
+ def get_sf(self, sf_id):
+ self.logger.debug("Getting Service Function %s from VIM", sf_id)
+ filter_dict = {"id": sf_id}
+ sf_list = self.get_sf_list(filter_dict)
+ if len(sf_list) == 0:
+ raise vimconn.vimconnNotFoundException(
+ "Service Function '{}' not found".format(sf_id))
+ elif len(sf_list) > 1:
+ raise vimconn.vimconnConflictException(
+ "Found more than one Service Function with this criteria")
+ sf = sf_list[0]
+ return sf
+
+ def get_sf_list(self, filter_dict={}):
+ self.logger.debug("Getting Service Function from VIM filter: '%s'",
+ str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ sf_dict = self.neutron.list_port_pair_group(**filter_dict)
+ sf_list = sf_dict["port_pair_groups"]
+ self.__sf_os2mano(sf_list)
+ return sf_list
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+ neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def delete_sf(self, sf_id):
+ self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+ try:
+ self._reload_connection()
+ self.neutron.delete_port_pair_group(sf_id)
+ return sf_id
+ except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+ ksExceptions.ClientException, neExceptions.NeutronException,
+ ConnectionError) as e:
+ self._format_exception(e)
+
+    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+        """Create a Service Function Path in the VIM as a networking-sfc
+        port chain.
+
+        :param name: name for the port chain
+        :param classifications: list of flow classifier uuids
+        :param sfs: ordered list of SF (port pair group) uuids
+        :param sfc_encap: must be True; only SFC-encapsulated chains supported
+        :param spi: optional service path identifier, passed as 'chain_id'
+        :return: uuid of the created port chain
+        :raises vimconnNotSupportedException: sfc_encap is False
+        """
+        self.logger.debug("Adding a new Service Function Path to VIM, "
+                          "named '%s'", name)
+        try:
+            new_sfp = None
+            self._reload_connection()
+            if not sfc_encap:
+                raise vimconn.vimconnNotSupportedException(
+                    "OpenStack VIM connector only supports "
+                    "SFC-Encapsulated chains")
+            # TODO(igordc): must be changed to NSH in Queens
+            # (MPLS is a workaround)
+            correlation = 'mpls'
+            sfp_dict = {'name': name,
+                        'flow_classifiers': classifications,
+                        'port_pair_groups': sfs,
+                        'chain_parameters': {'correlation': correlation}}
+            if spi:
+                sfp_dict['chain_id'] = spi
+            new_sfp = self.neutron.create_port_chain({'port_chain': sfp_dict})
+            return new_sfp["port_chain"]["id"]
+        except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                neExceptions.NeutronException, ConnectionError) as e:
+            # rollback: remove the chain if it was created before the failure
+            if new_sfp:
+                try:
+                    self.neutron.delete_port_chain(new_sfp['port_chain']['id'])
+                except Exception:
+                    self.logger.error(
+                        'Creation of Service Function Path failed, with '
+                        'subsequent deletion failure as well.')
+            self._format_exception(e)
+
+ def get_sfp(self, sfp_id):
+ self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+ filter_dict = {"id": sfp_id}
+ sfp_list = self.get_sfp_list(filter_dict)
+ if len(sfp_list) == 0:
+ raise vimconn.vimconnNotFoundException(
+ "Service Function Path '{}' not found".format(sfp_id))
+ elif len(sfp_list) > 1:
+ raise vimconn.vimconnConflictException(
+ "Found more than one Service Function Path with this criteria")
+ sfp = sfp_list[0]
+ return sfp
+
+ def get_sfp_list(self, filter_dict={}):
+ self.logger.debug("Getting Service Function Paths from VIM filter: "
+ "'%s'", str(filter_dict))
+ try:
+ self._reload_connection()
+ if self.api_version3 and "tenant_id" in filter_dict:
+ filter_dict['project_id'] = filter_dict.pop('tenant_id')
+ sfp_dict = self.neutron.list_port_chain(**filter_dict)
+ sfp_list = sfp_dict["port_chains"]
+ self.__sfp_os2mano(sfp_list)
+ return sfp_list
+ except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+ neExceptions.NeutronException, ConnectionError) as e:
+ self._format_exception(e)
+
+ def delete_sfp(self, sfp_id):
+ self.logger.debug(
+ "Deleting Service Function Path '%s' from VIM", sfp_id)
+ try:
+ self._reload_connection()
+ self.neutron.delete_port_chain(sfp_id)
+ return sfp_id
+ except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+ ksExceptions.ClientException, neExceptions.NeutronException,
+ ConnectionError) as e:
+ self._format_exception(e)