X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_ro%2Fvimconn_openstack.py;h=319f8c1a0e2a3fbcafb5f1cad7cb7abc9e3a77ca;hb=a92a0eaaf370c626b442863f4127cd11fc64754c;hp=35cffaeac9ba67923722fb31f4d1bc870e760fa4;hpb=05a8b7bc29197345f9718796c110d6cf3c2ad176;p=osm%2FRO.git diff --git a/osm_ro/vimconn_openstack.py b/osm_ro/vimconn_openstack.py index 35cffaea..319f8c1a 100644 --- a/osm_ro/vimconn_openstack.py +++ b/osm_ro/vimconn_openstack.py @@ -35,24 +35,28 @@ import netaddr import time import yaml import random +import sys +import re -from novaclient import client as nClient_v2, exceptions as nvExceptions -from novaclient import api_versions -import keystoneclient.v2_0.client as ksClient_v2 -from novaclient.v2.client import Client as nClient -import keystoneclient.v3.client as ksClient +from novaclient import client as nClient, exceptions as nvExceptions +from keystoneauth1.identity import v2, v3 +from keystoneauth1 import session import keystoneclient.exceptions as ksExceptions -import glanceclient.v2.client as glClient +import keystoneclient.v3.client as ksClient_v3 +import keystoneclient.v2_0.client as ksClient_v2 +from glanceclient import client as glClient import glanceclient.client as gl1Client import glanceclient.exc as gl1Exceptions -import cinderclient.v2.client as cClient_v2 +from cinderclient import client as cClient from httplib import HTTPException -from neutronclient.neutron import client as neClient_v2 -from neutronclient.v2_0 import client as neClient +from neutronclient.neutron import client as neClient from neutronclient.common import exceptions as neExceptions from requests.exceptions import ConnectionError +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText -'''contain the openstack virtual machine status to openmano status''' + +"""contain the openstack virtual machine status to openmano status""" vmStatus2manoFormat={'ACTIVE':'ACTIVE', 'PAUSED':'PAUSED', 'SUSPENDED': 'SUSPENDED', @@ -65,7 +69,7 @@ netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE', #global var to have a timeout creating and deleting volumes volume_timeout = 60 -server_timeout = 60 +server_timeout = 300 class vimconnector(vimconn.vimconnector): def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, @@ -74,137 +78,130 @@ class vimconnector(vimconn.vimconnector): 'url' is the keystone authorization url, 'url_admin' is not use ''' - self.osc_api_version = 'v2.0' - if config.get('APIversion') == 'v3.3': - self.osc_api_version = 'v3.3' - vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config) + api_version = config.get('APIversion') + if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'): + raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. " + "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)) + vim_type = config.get('vim_type') + if vim_type and vim_type not in ('vio', 'VIO'): + raise vimconn.vimconnException("Invalid value '{}' for config:vim_type." 
+ "Allowed values are 'vio' or 'VIO'".format(vim_type)) - self.persistent_info = persistent_info - self.k_creds={} - self.n_creds={} - if self.config.get("insecure"): - self.k_creds["insecure"] = True - self.n_creds["insecure"] = True + if config.get('dataplane_net_vlan_range') is not None: + #validate vlan ranges provided by user + self._validate_vlan_ranges(config.get('dataplane_net_vlan_range')) + + vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, + config) + + self.insecure = self.config.get("insecure", False) if not url: raise TypeError, 'url param can not be NoneType' - self.k_creds['auth_url'] = url - self.n_creds['auth_url'] = url - if tenant_name: - self.k_creds['tenant_name'] = tenant_name - self.n_creds['project_id'] = tenant_name - if tenant_id: - self.k_creds['tenant_id'] = tenant_id - self.n_creds['tenant_id'] = tenant_id - if user: - self.k_creds['username'] = user - self.n_creds['username'] = user - if passwd: - self.k_creds['password'] = passwd - self.n_creds['api_key'] = passwd - if self.osc_api_version == 'v3.3': - self.k_creds['project_name'] = tenant_name - self.k_creds['project_id'] = tenant_id - if config.get('region_name'): - self.k_creds['region_name'] = config.get('region_name') - self.n_creds['region_name'] = config.get('region_name') - - self.reload_client = True + self.persistent_info = persistent_info + self.availability_zone = persistent_info.get('availability_zone', None) + self.session = persistent_info.get('session', {'reload_client': True}) + self.nova = self.session.get('nova') + self.neutron = self.session.get('neutron') + self.cinder = self.session.get('cinder') + self.glance = self.session.get('glance') + self.glancev1 = self.session.get('glancev1') + self.keystone = self.session.get('keystone') + self.api_version3 = self.session.get('api_version3') + self.vim_type = self.config.get("vim_type") + if self.vim_type: + self.vim_type = self.vim_type.upper() + if self.config.get("use_internal_endpoint"): + self.endpoint_type = "internalURL" + else: + self.endpoint_type = None + self.logger = logging.getLogger('openmano.vim.openstack') + + ####### VIO Specific Changes ######### + if self.vim_type == "VIO": + self.logger = logging.getLogger('openmano.vim.vio') + if log_level: - self.logger.setLevel( getattr(logging, log_level) ) - - def __setitem__(self,index, value): - '''Set individuals parameters - Throw TypeError, KeyError - ''' - if index=='tenant_id': - self.reload_client=True - self.tenant_id = value - if self.osc_api_version == 'v3.3': - if value: - self.k_creds['project_id'] = value - self.n_creds['project_id'] = value - else: - del self.k_creds['project_id'] - del self.n_creds['project_id'] - else: - if value: - self.k_creds['tenant_id'] = value - self.n_creds['tenant_id'] = value - else: - del self.k_creds['tenant_id'] - del self.n_creds['tenant_id'] - elif index=='tenant_name': - self.reload_client=True - self.tenant_name = value - if self.osc_api_version == 'v3.3': - if value: - self.k_creds['project_name'] = value - self.n_creds['project_name'] = value - else: - del self.k_creds['project_name'] - del self.n_creds['project_name'] - else: - if value: - self.k_creds['tenant_name'] = value - self.n_creds['project_id'] = value - else: - del self.k_creds['tenant_name'] - del self.n_creds['project_id'] - elif index=='user': - self.reload_client=True - self.user = value - if value: - self.k_creds['username'] = value - self.n_creds['username'] = value - else: - del self.k_creds['username'] 
- del self.n_creds['username'] - elif index=='passwd': - self.reload_client=True - self.passwd = value - if value: - self.k_creds['password'] = value - self.n_creds['api_key'] = value - else: - del self.k_creds['password'] - del self.n_creds['api_key'] - elif index=='url': - self.reload_client=True - self.url = value - if value: - self.k_creds['auth_url'] = value - self.n_creds['auth_url'] = value - else: - raise TypeError, 'url param can not be NoneType' + self.logger.setLevel( getattr(logging, log_level)) + + def __getitem__(self, index): + """Get individuals parameters. + Throw KeyError""" + if index == 'project_domain_id': + return self.config.get("project_domain_id") + elif index == 'user_domain_id': + return self.config.get("user_domain_id") else: - vimconn.vimconnector.__setitem__(self,index, value) - + return vimconn.vimconnector.__getitem__(self, index) + + def __setitem__(self, index, value): + """Set individuals parameters and it is marked as dirty so to force connection reload. + Throw KeyError""" + if index == 'project_domain_id': + self.config["project_domain_id"] = value + elif index == 'user_domain_id': + self.config["user_domain_id"] = value + else: + vimconn.vimconnector.__setitem__(self, index, value) + self.session['reload_client'] = True + def _reload_connection(self): '''Called before any operation, it check if credentials has changed Throw keystoneclient.apiclient.exceptions.AuthorizationFailure ''' #TODO control the timing and possible token timeout, but it seams that python client does this task for us :-) - if self.reload_client: - #test valid params - if len(self.n_creds) <4: - raise ksExceptions.ClientException("Not enough parameters to connect to openstack") - if self.osc_api_version == 'v3.3': - self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds) - #TODO To be updated for v3 - #self.cinder = cClient.Client(**self.n_creds) - self.keystone = ksClient.Client(**self.k_creds) - self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds) + if self.session['reload_client']: + if self.config.get('APIversion'): + self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3' + else: # get from ending auth_url that end with v3 or with v2.0 + self.api_version3 = self.url.split("/")[-1] == "v3" + self.session['api_version3'] = self.api_version3 + if self.api_version3: + auth = v3.Password(auth_url=self.url, + username=self.user, + password=self.passwd, + project_name=self.tenant_name, + project_id=self.tenant_id, + project_domain_id=self.config.get('project_domain_id', 'default'), + user_domain_id=self.config.get('user_domain_id', 'default')) + else: + auth = v2.Password(auth_url=self.url, + username=self.user, + password=self.passwd, + tenant_name=self.tenant_name, + tenant_id=self.tenant_id) + sess = session.Session(auth=auth, verify=not self.insecure) + if self.api_version3: + self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type) else: - self.nova = nClient_v2.Client(version='2', **self.n_creds) - self.cinder = cClient_v2.Client(**self.n_creds) - self.keystone = ksClient_v2.Client(**self.k_creds) - self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - self.neutron = neClient_v2.Client('2.0', 
endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds) - self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL') - self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds - self.reload_client = False + self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type) + self.session['keystone'] = self.keystone + # In order to enable microversion functionality an explicit microversion must be specified in 'config'. + # This implementation approach is due to the warning message in + # https://developer.openstack.org/api-guide/compute/microversions.html + # where it is stated that microversion backwards compatibility is not guaranteed and clients should + # always require an specific microversion. + # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config + version = self.config.get("microversion") + if not version: + version = "2.1" + self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type) + self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type) + self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type) + if self.endpoint_type == "internalURL": + glance_service_id = self.keystone.services.list(name="glance")[0].id + glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url + else: + glance_endpoint = None + self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint) + #using version 1 of glance client in new_image() + self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess, + endpoint=glance_endpoint) + self.session['reload_client'] = False + self.persistent_info['session'] = self.session + # add availablity zone info inside self.persistent_info + self._set_availablity_zones() + self.persistent_info['availability_zone'] = self.availability_zone def __net_os2mano(self, net_list_dict): '''Transform the net openstack format to mano format @@ -220,9 +217,7 @@ class vimconnector(vimconn.vimconnector): net['type']='data' else: net['type']='bridge' - - - + def _format_exception(self, exception): '''Transform a keystone, nova, neutron exception into a vimconn exception''' if isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, @@ -236,7 +231,10 @@ class vimconnector(vimconn.vimconnector): raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception)) elif isinstance(exception, nvExceptions.Conflict): raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception)) - else: # () + elif isinstance(exception, vimconn.vimconnException): + raise + else: # () + self.logger.error("General Exception " + str(exception), exc_info=True) raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception)) def get_tenant_list(self, filter_dict={}): @@ -250,15 +248,17 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() - if self.osc_api_version == 'v3.3': - project_class_list=self.keystone.projects.findall(**filter_dict) + if self.api_version3: + project_class_list = self.keystone.projects.list(name=filter_dict.get("name")) else: - 
project_class_list=self.keystone.tenants.findall(**filter_dict) + project_class_list = self.keystone.tenants.findall(**filter_dict) project_list=[] for project in project_class_list: + if filter_dict.get('id') and filter_dict["id"] != project.id: + continue project_list.append(project.to_dict()) return project_list - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e: + except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e: self._format_exception(e) def new_tenant(self, tenant_name, tenant_description): @@ -266,10 +266,11 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Adding a new tenant name: %s", tenant_name) try: self._reload_connection() - if self.osc_api_version == 'v3.3': - project=self.keystone.projects.create(tenant_name, tenant_description) + if self.api_version3: + project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"), + description=tenant_description, is_domain=False) else: - project=self.keystone.tenants.create(tenant_name, tenant_description) + project = self.keystone.tenants.create(tenant_name, tenant_description) return project.id except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e: self._format_exception(e) @@ -279,7 +280,7 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Deleting tenant %s from VIM", tenant_id) try: self._reload_connection() - if self.osc_api_version == 'v3.3': + if self.api_version3: self.keystone.projects.delete(tenant_id) else: self.keystone.tenants.delete(tenant_id) @@ -302,6 +303,19 @@ class vimconnector(vimconn.vimconnector): network_dict["provider:network_type"] = "vlan" if vlan!=None: network_dict["provider:network_type"] = vlan + + ####### VIO Specific Changes ######### + if self.vim_type == "VIO": + if vlan is not None: + network_dict["provider:segmentation_id"] = vlan + else: + if self.config.get('dataplane_net_vlan_range') is None: + raise vimconn.vimconnConflictException("You must provide "\ + "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\ + "at config value before creating sriov network with vlan tag") + + network_dict["provider:segmentation_id"] = self._genrate_vlanID() + network_dict["shared"]=shared new_net=self.neutron.create_network({'network':network_dict}) #print new_net @@ -314,21 +328,19 @@ class vimconnector(vimconn.vimconnector): ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand) if 'ip_version' not in ip_profile: ip_profile['ip_version'] = "IPv4" - subnet={"name":net_name+"-subnet", + subnet = {"name":net_name+"-subnet", "network_id": new_net["network"]["id"], "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6, "cidr": ip_profile['subnet_address'] } - if 'gateway_address' in ip_profile: - subnet['gateway_ip'] = ip_profile['gateway_address'] + # Gateway should be set to None if not needed. 
Otherwise openstack assigns one by default + subnet['gateway_ip'] = ip_profile.get('gateway_address') if ip_profile.get('dns_address'): - #TODO: manage dns_address as a list of addresses separated by commas - subnet['dns_nameservers'] = [] - subnet['dns_nameservers'].append(ip_profile['dns_address']) + subnet['dns_nameservers'] = ip_profile['dns_address'].split(";") if 'dhcp_enabled' in ip_profile: subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True if 'dhcp_start_address' in ip_profile: - subnet['allocation_pools']=[] + subnet['allocation_pools'] = [] subnet['allocation_pools'].append(dict()) subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address'] if 'dhcp_count' in ip_profile: @@ -360,8 +372,8 @@ class vimconnector(vimconn.vimconnector): self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) try: self._reload_connection() - if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict: - filter_dict['project_id'] = filter_dict.pop('tenant_id') + if self.api_version3 and "tenant_id" in filter_dict: + filter_dict['project_id'] = filter_dict.pop('tenant_id') #TODO check net_dict=self.neutron.list_networks(**filter_dict) net_list=net_dict["networks"] self.__net_os2mano(net_list) @@ -472,11 +484,19 @@ class vimconnector(vimconn.vimconnector): def get_flavor_id_from_data(self, flavor_dict): """Obtain flavor id that match the flavor description Returns the flavor_id or raises a vimconnNotFoundException + flavor_dict: contains the required ram, vcpus, disk + If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus + and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a + vimconnNotFoundException is raised """ + exact_match = False if self.config.get('use_existing_flavors') else True try: self._reload_connection() - numa=None - numas = flavor_dict.get("extended",{}).get("numas") + flavor_candidate_id = None + flavor_candidate_data = (10000, 10000, 10000) + flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]) + # numa=None + numas = flavor_dict.get("extended", {}).get("numas") if numas: #TODO raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted") @@ -488,14 +508,15 @@ class vimconnector(vimconn.vimconnector): epa = flavor.get_keys() if epa: continue - #TODO - if flavor.ram != flavor_dict["ram"]: - continue - if flavor.vcpus != flavor_dict["vcpus"]: - continue - if flavor.disk != flavor_dict["disk"]: - continue - return flavor.id + # TODO + flavor_data = (flavor.ram, flavor.vcpus, flavor.disk) + if flavor_data == flavor_target: + return flavor.id + elif not exact_match and flavor_target < flavor_data < flavor_candidate_data: + flavor_candidate_id = flavor.id + flavor_candidate_data = flavor_data + if not exact_match and flavor_candidate_id: + return flavor_candidate_id raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict))) except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e: self._format_exception(e) @@ -524,7 +545,7 @@ class vimconnector(vimconn.vimconnector): while name in fl_names: name_suffix += 1 name = flavor_data['name']+"-" + str(name_suffix) - + ram = flavor_data.get('ram',64) vcpus = flavor_data.get('vcpus',1) numa_properties=None @@ -540,18 +561,28 @@ class vimconnector(vimconn.vimconnector): numa_properties["hw:mem_page_size"] = "large" numa_properties["hw:cpu_policy"] 
= "dedicated" numa_properties["hw:numa_mempolicy"] = "strict" + if self.vim_type == "VIO": + numa_properties["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}' + numa_properties["vmware:latency_sensitivity_level"] = "high" for numa in numas: #overwrite ram and vcpus ram = numa['memory']*1024 + #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html if 'paired-threads' in numa: vcpus = numa['paired-threads']*2 - numa_properties["hw:cpu_threads_policy"] = "prefer" + #cpu_thread_policy "require" implies that the compute node must have an STM architecture + numa_properties["hw:cpu_thread_policy"] = "require" + numa_properties["hw:cpu_policy"] = "dedicated" elif 'cores' in numa: vcpus = numa['cores'] - #numa_properties["hw:cpu_threads_policy"] = "prefer" + # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated + numa_properties["hw:cpu_thread_policy"] = "isolate" + numa_properties["hw:cpu_policy"] = "dedicated" elif 'threads' in numa: vcpus = numa['threads'] - numa_properties["hw:cpu_policy"] = "isolated" + # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture + numa_properties["hw:cpu_thread_policy"] = "prefer" + numa_properties["hw:cpu_policy"] = "dedicated" # for interface in numa.get("interfaces",() ): # if interface["dedicated"]=="yes": # raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable) @@ -563,7 +594,7 @@ class vimconnector(vimconn.vimconnector): vcpus, flavor_data.get('disk',1), is_public=flavor_data.get('is_public', True) - ) + ) #add metadata if numa_properties: new_flavor.set_keys(numa_properties) @@ -597,8 +628,6 @@ class vimconnector(vimconn.vimconnector): metadata: metadata of the image Returns the image_id ''' - #using version 1 of glance client - glancev1 = gl1Client.Client('1',self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds) #TODO check k_creds vs n_creds retry=0 max_retries=3 while retry= server_timeout: + raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status, + http_code=vimconn.HTTP_Request_Timeout) + + def _get_openstack_availablity_zones(self): + """ + Get from openstack availability zones available + :return: + """ + try: + openstack_availability_zone = self.nova.availability_zones.list() + openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone + if zone.zoneName != 'internal'] + return openstack_availability_zone + except Exception as e: + return None + + def _set_availablity_zones(self): + """ + Set vim availablity zone + :return: + """ + + if 'availability_zone' in self.config: + vim_availability_zones = self.config.get('availability_zone') + if isinstance(vim_availability_zones, str): + self.availability_zone = [vim_availability_zones] + elif isinstance(vim_availability_zones, list): + self.availability_zone = vim_availability_zones + else: + self.availability_zone = self._get_openstack_availablity_zones() + + def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list): + """ + Return thge availability zone to be used by the created VM. 
+ :return: The VIM availability zone to be used or None + """ + if availability_zone_index is None: + if not self.config.get('availability_zone'): + return None + elif isinstance(self.config.get('availability_zone'), str): + return self.config['availability_zone'] + else: + # TODO consider using a different parameter at config for default AV and AV list match + return self.config['availability_zone'][0] + + vim_availability_zones = self.availability_zone + # check if VIM offer enough availability zones describe in the VNFD + if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones): + # check if all the names of NFV AV match VIM AV names + match_by_index = False + for av in availability_zone_list: + if av not in vim_availability_zones: + match_by_index = True + break + if match_by_index: + return vim_availability_zones[availability_zone_index] + else: + return availability_zone_list[availability_zone_index] + else: + raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment") + + def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, + availability_zone_index=None, availability_zone_list=None): '''Adds a VM instance to VIM Params: start: indicates if VM must start or boot in pause mode. Ignored @@ -720,16 +862,39 @@ class vimconnector(vimconn.vimconnector): type: 'virtual', 'PF', 'VF', 'VFnotShared' vim_id: filled/added by this function floating_ip: True/False (or it can be None) + 'cloud_config': (optional) dictionary with: + 'key-pairs': (optional) list of strings with the public key to be inserted to the default user + 'users': (optional) list of users to be inserted, each item is a dict with: + 'name': (mandatory) user name, + 'key-pairs': (optional) list of strings with the public key to be inserted to the user + 'user-data': (optional) string is a text script to be passed directly to cloud-init + 'config-files': (optional). List of files to be transferred. Each item is a dict with: + 'dest': (mandatory) string with the destination absolute path + 'encoding': (optional, by default text). Can be one of: + 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64' + 'content' (mandatory): string with the content of the file + 'permissions': (optional) string with file permissions, typically octal notation '0644' + 'owner': (optional) file owner, string with the format 'owner:group' + 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk) + 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with: + 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted + 'size': (mandatory) string with the size of the disk in GB + availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required + availability_zone_list: list of availability zones given by user in the VNFD descriptor. 
Ignore if + availability_zone_index is None #TODO ip, security groups Returns the instance identifier ''' self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list)) try: + server = None metadata={} net_list_vim=[] - external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip + external_network=[] # list of external networks to be connected to instance, later on used to create floating_ip + no_secured_ports = [] # List of port-is with port-security disabled self._reload_connection() - metadata_vpci={} #For a specific neutron plugin + metadata_vpci={} # For a specific neutron plugin + block_device_mapping = None for net in net_list: if not net.get("net_id"): #skip non connected iface continue @@ -748,7 +913,19 @@ class vimconnector(vimconn.vimconnector): metadata_vpci["VF"]=[] metadata_vpci["VF"].append([ net["vpci"], "" ]) port_dict["binding:vnic_type"]="direct" + ########## VIO specific Changes ####### + if self.vim_type == "VIO": + #Need to create port with port_security_enabled = False and no-security-groups + port_dict["port_security_enabled"]=False + port_dict["provider_security_groups"]=[] + port_dict["security_groups"]=[] else: #For PT + ########## VIO specific Changes ####### + #Current VIO release does not support port with type 'direct-physical' + #So no need to create virtual port in case of PCI-device. + #Will update port_dict code when support gets added in next VIO release + if self.vim_type == "VIO": + raise vimconn.vimconnNotSupportedException("Current VIO release does not support full passthrough (PT)") if "vpci" in net: if "PF" not in metadata_vpci: metadata_vpci["PF"]=[] @@ -758,13 +935,20 @@ class vimconnector(vimconn.vimconnector): port_dict["name"]=name if net.get("mac_address"): port_dict["mac_address"]=net["mac_address"] - if net.get("port_security") == False: - port_dict["port_security_enabled"]=net["port_security"] new_port = self.neutron.create_port({"port": port_dict }) net["mac_adress"] = new_port["port"]["mac_address"] net["vim_id"] = new_port["port"]["id"] - net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address") - net_list_vim.append({"port-id": new_port["port"]["id"]}) + # if try to use a network without subnetwork, it will return a emtpy list + fixed_ips = new_port["port"].get("fixed_ips") + if fixed_ips: + net["ip"] = fixed_ips[0].get("ip_address") + else: + net["ip"] = None + + port = {"port-id": new_port["port"]["id"]} + if float(self.nova.api_version.get_string()) >= 2.32: + port["tag"] = new_port["port"]["name"] + net_list_vim.append(port) if net.get('floating_ip', False): net['exit_on_floating_ip_error'] = True @@ -773,6 +957,11 @@ class vimconnector(vimconn.vimconnector): net['exit_on_floating_ip_error'] = False external_network.append(net) + # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped. 
+ # As a workaround we wait until the VM is active and then disable the port-security + if net.get("port_security") == False: + no_secured_ports.append(new_port["port"]["id"]) + if metadata_vpci: metadata = {"pci_assignement": json.dumps(metadata_vpci)} if len(metadata["pci_assignement"]) >255: @@ -790,14 +979,17 @@ class vimconnector(vimconn.vimconnector): #cloud config userdata=None config_drive = None + userdata_list = [] if isinstance(cloud_config, dict): if cloud_config.get("user-data"): - userdata=cloud_config["user-data"] + if isinstance(cloud_config["user-data"], str): + userdata_list.append(cloud_config["user-data"]) + else: + for u in cloud_config["user-data"]: + userdata_list.append(u) if cloud_config.get("boot-data-drive") != None: config_drive = cloud_config["boot-data-drive"] if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"): - if userdata: - raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'") userdata_dict={} #default user if cloud_config.get("key-pairs"): @@ -831,17 +1023,17 @@ class vimconnector(vimconn.vimconnector): if file.get("owner"): file_info["owner"] = file["owner"] userdata_dict["write_files"].append(file_info) - userdata = "#cloud-config\n" - userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False) + userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4, + default_flow_style=False)) + userdata = self._create_mimemultipart(userdata_list) self.logger.debug("userdata: %s", userdata) elif isinstance(cloud_config, str): userdata = cloud_config #Create additional volumes in case these are present in disk_list - block_device_mapping = None base_disk_index = ord('b') if disk_list != None: - block_device_mapping = dict() + block_device_mapping = {} for disk in disk_list: if 'image_id' in disk: volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' + @@ -877,34 +1069,45 @@ class vimconnector(vimconn.vimconnector): raise vimconn.vimconnException('Timeout creating volumes for instance ' + name, http_code=vimconn.HTTP_Request_Timeout) + # get availability Zone + vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list) + self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, " + "availability_zone={}, key_name={}, userdata={}, config_drive={}, " + "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata, + security_groups, vm_av_zone, self.config.get('keypair'), + userdata, config_drive, block_device_mapping)) server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata, security_groups=security_groups, - availability_zone=self.config.get('availability_zone'), + availability_zone=vm_av_zone, key_name=self.config.get('keypair'), userdata=userdata, - config_drive = config_drive, - block_device_mapping = block_device_mapping + config_drive=config_drive, + block_device_mapping=block_device_mapping ) # , description=description) + + # Previously mentioned workaround to wait until the VM is active and then disable the port-security + if no_secured_ports: + self.__wait_for_vm(server.id, 'ACTIVE') + + for port_id in no_secured_ports: + try: + self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} }) + + except Exception as e: + self.logger.error("It was not possible to disable port security for port {}".format(port_id)) + 
self.delete_vminstance(server.id) + raise + #print "DONE :-)", server pool_id = None floating_ips = self.neutron.list_floatingips().get("floatingips", ()) - for floating_network in external_network: - try: - # wait until vm is active - elapsed_time = 0 - while elapsed_time < server_timeout: - status = self.nova.servers.get(server.id).status - if status == 'ACTIVE': - break - time.sleep(1) - elapsed_time += 1 - #if we exceeded the timeout rollback - if elapsed_time >= server_timeout: - raise vimconn.vimconnException('Timeout creating instance ' + name, - http_code=vimconn.HTTP_Request_Timeout) + if external_network: + self.__wait_for_vm(server.id, 'ACTIVE') + for floating_network in external_network: + try: assigned = False while(assigned == False): if floating_ips: @@ -948,26 +1151,31 @@ class vimconnector(vimconn.vimconnector): if not floating_network['exit_on_floating_ip_error']: self.logger.warn("Cannot create floating_ip. %s", str(e)) continue - self.delete_vminstance(server.id) raise return server.id # except nvExceptions.NotFound as e: # error_value=-vimconn.HTTP_Not_Found # error_text= "vm instance %s not found" % vm_id - except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e: +# except TypeError as e: +# raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request) + + except Exception as e: # delete the volumes we just created - if block_device_mapping != None: + if block_device_mapping: for volume_id in block_device_mapping.itervalues(): self.cinder.volumes.delete(volume_id) - # delete ports we just created - for net_item in net_list_vim: - if 'port-id' in net_item: - self.neutron.delete_port(net_item['port-id']) + # Delete the VM + if server != None: + self.delete_vminstance(server.id) + else: + # delete ports we just created + for net_item in net_list_vim: + if 'port-id' in net_item: + self.neutron.delete_port(net_item['port-id']) + self._format_exception(e) - except TypeError as e: - raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request) def get_vminstance(self,vm_id): '''Returns the VM instance information from VIM''' @@ -1133,15 +1341,22 @@ class vimconnector(vimconn.vimconnector): interface["mac_address"] = port.get("mac_address") interface["vim_net_id"] = port["network_id"] interface["vim_interface_id"] = port["id"] - interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host'] + # check if OS-EXT-SRV-ATTR:host is there, + # in case of non-admin credentials, it will be missing + if vm_vim.get('OS-EXT-SRV-ATTR:host'): + interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host'] interface["pci"] = None - if port['binding:profile'].get('pci_slot'): - # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00 - # TODO: This is just a workaround valid for niantinc. Find a better way to do so - # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic - pci = port['binding:profile']['pci_slot'] - # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2) - interface["pci"] = pci + + # check if binding:profile is there, + # in case of non-admin credentials, it will be missing + if port.get('binding:profile'): + if port['binding:profile'].get('pci_slot'): + # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00 + # TODO: This is just a workaround valid for niantinc. 
Find a better way to do so + # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic + pci = port['binding:profile']['pci_slot'] + # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2) + interface["pci"] = pci interface["vlan"] = None #if network is of type vlan and port is of type direct (sr-iov) then set vlan id network = self.neutron.show_network(port["network_id"]) @@ -1245,6 +1460,67 @@ class vimconnector(vimconn.vimconnector): self._format_exception(e) #TODO insert exception vimconn.HTTP_Unauthorized + ####### VIO Specific Changes ######### + def _genrate_vlanID(self): + """ + Method to get unused vlanID + Args: + None + Returns: + vlanID + """ + #Get used VLAN IDs + usedVlanIDs = [] + networks = self.get_network_list() + for net in networks: + if net.get('provider:segmentation_id'): + usedVlanIDs.append(net.get('provider:segmentation_id')) + used_vlanIDs = set(usedVlanIDs) + + #find unused VLAN ID + for vlanID_range in self.config.get('dataplane_net_vlan_range'): + try: + start_vlanid , end_vlanid = map(int, vlanID_range.replace(" ", "").split("-")) + for vlanID in xrange(start_vlanid, end_vlanid + 1): + if vlanID not in used_vlanIDs: + return vlanID + except Exception as exp: + raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp)) + else: + raise vimconn.vimconnConflictException("Unable to create the SRIOV VLAN network."\ + " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range'))) + + + def _validate_vlan_ranges(self, dataplane_net_vlan_range): + """ + Method to validate user given vlanID ranges + Args: None + Returns: None + """ + for vlanID_range in dataplane_net_vlan_range: + vlan_range = vlanID_range.replace(" ", "") + #validate format + vlanID_pattern = r'(\d)*-(\d)*$' + match_obj = re.match(vlanID_pattern, vlan_range) + if not match_obj: + raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}.You must provide "\ + "'dataplane_net_vlan_range' in format [start_ID - end_ID].".format(vlanID_range)) + + start_vlanid , end_vlanid = map(int,vlan_range.split("-")) + if start_vlanid <= 0 : + raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\ + "Start ID can not be zero. For VLAN "\ + "networks valid IDs are 1 to 4094 ".format(vlanID_range)) + if end_vlanid > 4094 : + raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\ + "End VLAN ID can not be greater than 4094. For VLAN "\ + "networks valid IDs are 1 to 4094 ".format(vlanID_range)) + + if start_vlanid > end_vlanid: + raise vimconn.vimconnConflictException("Invalid dataplane_net_vlan_range {}."\ + "You must provide a 'dataplane_net_vlan_range' in format start_ID - end_ID and "\ + "start_ID < end_ID ".format(vlanID_range)) + #NOT USED FUNCTIONS def new_external_port(self, port_data): @@ -1358,3 +1634,4 @@ class vimconnector(vimconn.vimconnector): return error_value, error_text +