X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=vimconn_openstack.py;h=b501d9da28ca3605d4b7a3600e538936aa28d7f4;hb=06e6c396413630640cafae3488442a0869f1642d;hp=81c6591531ed7578b447c9be3d76b59fd4be0d34;hpb=95baa27ba382b714f18a0fa2ed345af0eac2219f;p=osm%2FRO.git

diff --git a/vimconn_openstack.py b/vimconn_openstack.py
index 81c65915..b501d9da 100644
--- a/vimconn_openstack.py
+++ b/vimconn_openstack.py
@@ -24,16 +24,20 @@
 '''
 osconnector implements all the methods to interact with openstack using the python-client.
 '''
-__author__="Alfonso Tierno, Gerardo Garcia, xFlow Research"
-__date__ ="$24-nov-2016 09:10$"
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research"
+__date__ ="$22-jun-2014 11:19:29$"
 
 import vimconn
 import json
 import yaml
 import logging
 import netaddr
+import time
+import yaml
+import random
 
-from novaclient import client as nClient_v2, exceptions as nvExceptions, api_versions as APIVersion
+from novaclient import client as nClient_v2, exceptions as nvExceptions
+from novaclient import api_versions
 import keystoneclient.v2_0.client as ksClient_v2
 from novaclient.v2.client import Client as nClient
 import keystoneclient.v3.client as ksClient
@@ -41,6 +45,7 @@ import keystoneclient.exceptions as ksExceptions
 import glanceclient.v2.client as glClient
 import glanceclient.client as gl1Client
 import glanceclient.exc as gl1Exceptions
+import cinderclient.v2.client as cClient_v2
 from httplib import HTTPException
 from neutronclient.neutron import client as neClient_v2
 from neutronclient.v2_0 import client as neClient
@@ -58,9 +63,14 @@ vmStatus2manoFormat={'ACTIVE':'ACTIVE',
 netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
         }
 
+#global var to have a timeout creating and deleting volumes
+volume_timeout = 60
+server_timeout = 60
+
 class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, config={}):
-        '''using common constructor parameters. In this case
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                 log_level=None, config={}, persistent_info={}):
+        '''using common constructor parameters. In this case
         'url' is the keystone authorization url,
         'url_admin' is not use
         '''
@@ -68,9 +78,13 @@ class vimconnector(vimconn.vimconnector):
         if config.get('APIversion') == 'v3.3':
             self.osc_api_version = 'v3.3'
         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
-
+
+        self.persistent_info = persistent_info
         self.k_creds={}
         self.n_creds={}
+        if self.config.get("insecure"):
+            self.k_creds["insecure"] = True
+            self.n_creds["insecure"] = True
         if not url:
             raise TypeError, 'url param can not be NoneType'
         self.k_creds['auth_url'] = url
@@ -90,6 +104,10 @@ class vimconnector(vimconn.vimconnector):
         if self.osc_api_version == 'v3.3':
             self.k_creds['project_name'] = tenant_name
             self.k_creds['project_id'] = tenant_id
+        if config.get('region_name'):
+            self.k_creds['region_name'] = config.get('region_name')
+            self.n_creds['region_name'] = config.get('region_name')
+
         self.reload_client = True
         self.logger = logging.getLogger('openmano.vim.openstack')
         if log_level:
@@ -172,12 +190,15 @@ class vimconnector(vimconn.vimconnector):
             if len(self.n_creds) <4:
                 raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
             if self.osc_api_version == 'v3.3':
-                self.nova = nClient(APIVersion(version_str='2'), **self.n_creds)
+                self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds)
+                #TODO To be updated for v3
+                #self.cinder = cClient.Client(**self.n_creds)
                 self.keystone = ksClient.Client(**self.k_creds)
                 self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
-                self.neutron = neClient.Client(APIVersion(version_str='2'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
+                self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
             else:
-                self.nova = nClient_v2.Client('2', **self.n_creds)
+                self.nova = nClient_v2.Client(version='2', **self.n_creds)
+                self.cinder = cClient_v2.Client(**self.n_creds)
                 self.keystone = ksClient_v2.Client(**self.k_creds)
                 self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
                 self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
@@ -229,7 +250,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            if osc_api_version == 'v3.3':
+            if self.osc_api_version == 'v3.3':
                 project_class_list=self.keystone.projects.findall(**filter_dict)
             else:
                 project_class_list=self.keystone.tenants.findall(**filter_dict)
@@ -258,7 +279,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
         try:
             self._reload_connection()
-            if osc_api_version == 'v3.3':
+            if self.osc_api_version == 'v3.3':
                 self.keystone.projects.delete(tenant_id)
             else:
                 self.keystone.tenants.delete(tenant_id)
@@ -288,8 +309,9 @@ class vimconnector(vimconn.vimconnector):
             if not ip_profile:
                 ip_profile = {}
             if 'subnet_address' not in ip_profile:
-                #Fake subnet is required
-                ip_profile['subnet_address'] = "192.168.111.0/24"
+                #Fake subnet is required
+                subnet_rand = random.randint(0, 255)
+                ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
             if 'ip_version' not in ip_profile:
                 ip_profile['ip_version'] = "IPv4"
             subnet={"name":net_name+"-subnet",
@@ -338,7 +360,7 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            if osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
+            if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
                 filter_dict['project_id'] = filter_dict.pop('tenant_id')
             net_dict=self.neutron.list_networks(**filter_dict)
             net_list=net_dict["networks"]
@@ -445,6 +467,38 @@ class vimconnector(vimconn.vimconnector):
         except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
             self._format_exception(e)
 
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that matches the flavor description
+           Returns the flavor_id or raises a vimconnNotFoundException
+        """
+        try:
+            self._reload_connection()
+            numa=None
+            numas = flavor_dict.get("extended",{}).get("numas")
+            if numas:
+                #TODO
+                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
+                # if len(numas) > 1:
+                #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+                # numa=numas[0]
+                # numas = extended.get("numas")
+            for flavor in self.nova.flavors.list():
+                epa = flavor.get_keys()
+                if epa:
+                    continue
+                    #TODO
+                if flavor.ram != flavor_dict["ram"]:
+                    continue
+                if flavor.vcpus != flavor_dict["vcpus"]:
+                    continue
+                if flavor.disk != flavor_dict["disk"]:
+                    continue
+                return flavor.id
+            raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+            self._format_exception(e)
+
+
     def new_flavor(self, flavor_data, change_name_if_used=True):
         '''Adds a tenant flavor to openstack VIM
         if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
@@ -552,7 +606,7 @@ class vimconnector(vimconn.vimconnector):
             #determine format  http://docs.openstack.org/developer/glance/formats.html
             if "disk_format" in image_dict:
                 disk_format=image_dict["disk_format"]
-            else: #autodiscover base on extention
+            else: #autodiscover based on extension
                 if image_dict['location'][-6:]==".qcow2":
                     disk_format="qcow2"
                 elif image_dict['location'][-4:]==".vhd":
@@ -642,14 +696,14 @@ class vimconnector(vimconn.vimconnector):
             #Then we filter by the rest of filter fields: checksum
             filtered_list = []
             for image in image_list:
-                image_dict=self.glance.images.get(image.id)
-                if image_dict['checksum']==filter_dict.get('checksum'):
-                    filtered_list.append(image)
+                image_class=self.glance.images.get(image.id)
+                if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+                    filtered_list.append(image_class.copy())
             return filtered_list
         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
             self._format_exception(e)
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None):
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -663,48 +717,56 @@ class vimconnector(vimconn.vimconnector):
                 use: 'data', 'bridge', 'mgmt'
                 type: 'virtual', 'PF', 'VF', 'VFnotShared'
                 vim_id: filled/added by this function
+                floating_ip: True/False (or it can be None)
             #TODO ip, security groups
         Returns the instance identifier
         '''
-        self.logger.debug("Creating VM image '%s' flavor '%s' nics='%s'",image_id, flavor_id,str(net_list))
+        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
         try:
             metadata={}
             net_list_vim=[]
+            external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
             self._reload_connection()
             metadata_vpci={} #For a specific neutron plugin
             for net in net_list:
                 if not net.get("net_id"): #skip non connected iface
                     continue
-                if net["type"]=="virtual":
-                    net_list_vim.append({'net-id': net["net_id"]})
-                    if "vpci" in net:
-                        metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"]=="PF":
-                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
-                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
-                else: #VF
-                    if "vpci" in net:
-                        if "VF" not in metadata_vpci:
-                            metadata_vpci["VF"]=[]
-                        metadata_vpci["VF"].append([ net["vpci"], "" ])
+                if net["type"]=="virtual" or net["type"]=="VF":
                     port_dict={
-                        "network_id": net["net_id"],
-                        "name": net.get("name"),
-                        "binding:vnic_type": "direct",
-                        "admin_state_up": True
-                    }
+                        "network_id": net["net_id"],
+                        "name": net.get("name"),
+                        "admin_state_up": True
+                    }
+                    if net["type"]=="virtual":
+                        if "vpci" in net:
+                            metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                    else: # for VF
+                        if "vpci" in net:
+                            if "VF" not in metadata_vpci:
+                                metadata_vpci["VF"]=[]
+                            metadata_vpci["VF"].append([ net["vpci"], "" ])
+                        port_dict["binding:vnic_type"]="direct"
                     if not port_dict["name"]:
-                        port_dict["name"] = name
+                        port_dict["name"]=name
                     if net.get("mac_address"):
                         port_dict["mac_address"]=net["mac_address"]
-                    #TODO: manage having SRIOV without vlan tag
-                    #if net["type"] == "VFnotShared"
-                    #    port_dict["vlan"]=0
+                    if net.get("port_security") == False:
+                        port_dict["port_security_enabled"]=net["port_security"]
                     new_port = self.neutron.create_port({"port": port_dict })
                     net["mac_adress"] = new_port["port"]["mac_address"]
                     net["vim_id"] = new_port["port"]["id"]
-                    net["ip"] = new_port["port"].get("fixed_ips",[{}])[0].get("ip_address")
+                    net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
                     net_list_vim.append({"port-id": new_port["port"]["id"]})
+                else:   # for PF
+                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
+                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
+                if net.get('floating_ip', False):
+                    net['exit_on_floating_ip_error'] = True
+                    external_network.append(net)
+                elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
+                    net['exit_on_floating_ip_error'] = False
+                    external_network.append(net)
+
             if metadata_vpci:
                 metadata = {"pci_assignement": json.dumps(metadata_vpci)}
                 if len(metadata["pci_assignement"]) >255:
@@ -719,54 +781,184 @@ class vimconnector(vimconn.vimconnector):
             security_groups = self.config.get('security_groups')
             if type(security_groups) is str:
                 security_groups = ( security_groups, )
+            #cloud config
+            userdata=None
+            config_drive = None
             if isinstance(cloud_config, dict):
-                userdata="#cloud-config\nusers:\n"
-                #default user
-                if "key-pairs" in cloud_config:
-                    userdata += " - default:\n   ssh-authorized-keys:\n"
-                    for key in cloud_config["key-pairs"]:
-                        userdata += "    - '{key}'\n".format(key=key)
-                for user in cloud_config.get("users",[]):
-                    userdata += " - name: {name}\n   sudo: ALL=(ALL) NOPASSWD:ALL\n".format(name=user["name"])
-                    if "user-info" in user:
-                        userdata += "   gecos: {}'\n".format(user["user-info"])
-                    if user.get("key-pairs"):
-                        userdata += "   ssh-authorized-keys:\n"
-                        for key in user["key-pairs"]:
-                            userdata += "    - '{key}'\n".format(key=key)
+                if cloud_config.get("user-data"):
+                    userdata=cloud_config["user-data"]
+                if cloud_config.get("boot-data-drive") != None:
+                    config_drive = cloud_config["boot-data-drive"]
+                if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+                    if userdata:
+                        raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
+                    userdata_dict={}
+                    #default user
+                    if cloud_config.get("key-pairs"):
+                        userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
+                        userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }]
+                    if cloud_config.get("users"):
+                        if "users" not in userdata_dict:
+                            userdata_dict["users"] = [ "default" ]
+                        for user in cloud_config["users"]:
+                            user_info = {
+                                "name" : user["name"],
+                                "sudo": "ALL = (ALL)NOPASSWD:ALL"
+                            }
+                            if "user-info" in user:
+                                user_info["gecos"] = user["user-info"]
+                            if user.get("key-pairs"):
+                                user_info["ssh-authorized-keys"] = user["key-pairs"]
+                            userdata_dict["users"].append(user_info)
+
+                    if cloud_config.get("config-files"):
+                        userdata_dict["write_files"] = []
+                        for file in cloud_config["config-files"]:
+                            file_info = {
+                                "path" : file["dest"],
+                                "content": file["content"]
+                            }
+                            if file.get("encoding"):
+                                file_info["encoding"] = file["encoding"]
+                            if file.get("permissions"):
+                                file_info["permissions"] = file["permissions"]
+                            if file.get("owner"):
+                                file_info["owner"] = file["owner"]
+                            userdata_dict["write_files"].append(file_info)
+                    userdata = "#cloud-config\n"
+                    userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
                 self.logger.debug("userdata: %s", userdata)
             elif isinstance(cloud_config, str):
                 userdata = cloud_config
-            else:
-                userdata=None
-
+
+            #Create additional volumes in case these are present in disk_list
+            block_device_mapping = None
+            base_disk_index = ord('b')
+            if disk_list != None:
+                block_device_mapping = dict()
+                for disk in disk_list:
+                    if 'image_id' in disk:
+                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
+                                    chr(base_disk_index), imageRef = disk['image_id'])
+                    else:
+                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                    chr(base_disk_index))
+                    block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+                    base_disk_index += 1
+
+                #wait until volumes are with status available
+                keep_waiting = True
+                elapsed_time = 0
+                while keep_waiting and elapsed_time < volume_timeout:
+                    keep_waiting = False
+                    for volume_id in block_device_mapping.itervalues():
+                        if self.cinder.volumes.get(volume_id).status != 'available':
+                            keep_waiting = True
+                    if keep_waiting:
+                        time.sleep(1)
+                        elapsed_time += 1
+
+                #if we exceeded the timeout rollback
+                if elapsed_time >= volume_timeout:
+                    #delete the volumes we just created
+                    for volume_id in block_device_mapping.itervalues():
+                        self.cinder.volumes.delete(volume_id)
+
+                    #delete ports we just created
+                    for net_item in net_list_vim:
+                        if 'port-id' in net_item:
+                            self.neutron.delete_port(net_item['port-id'])
+
+                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                                   http_code=vimconn.HTTP_Request_Timeout)
+
             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
-                                              security_groups = security_groups,
-                                              availability_zone = self.config.get('availability_zone'),
-                                              key_name = self.config.get('keypair'),
-                                              userdata=userdata
-                                              ) #, description=description)
-
-
+                                              security_groups=security_groups,
+                                              availability_zone=self.config.get('availability_zone'),
+                                              key_name=self.config.get('keypair'),
+                                              userdata=userdata,
+                                              config_drive = config_drive,
+                                              block_device_mapping = block_device_mapping
+                                              ) # , description=description)
             #print "DONE :-)", server
-
-#             #TODO server.add_floating_ip("10.95.87.209")
-#             #To look for a free floating_ip
-#             free_floating_ip = None
-#             for floating_ip in self.neutron.list_floatingips().get("floatingips", () ):
-#                 if not floating_ip["port_id"]:
-#                     free_floating_ip = floating_ip["floating_ip_address"]
-#                     break
-#             if free_floating_ip:
-#                 server.add_floating_ip(free_floating_ip)
-
-
+            pool_id = None
+            floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+            for floating_network in external_network:
+                try:
+                    # wait until vm is active
+                    elapsed_time = 0
+                    while elapsed_time < server_timeout:
+                        status = self.nova.servers.get(server.id).status
+                        if status == 'ACTIVE':
+                            break
+                        time.sleep(1)
+                        elapsed_time += 1
+
+                    #if we exceeded the timeout rollback
+                    if elapsed_time >= server_timeout:
+                        raise vimconn.vimconnException('Timeout creating instance ' + name,
+                                                       http_code=vimconn.HTTP_Request_Timeout)
+
+                    assigned = False
+                    while(assigned == False):
+                        if floating_ips:
+                            ip = floating_ips.pop(0)
+                            if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id:
+                                free_floating_ip = ip.get("floating_ip_address")
+                                try:
+                                    fix_ip = floating_network.get('ip')
+                                    server.add_floating_ip(free_floating_ip, fix_ip)
+                                    assigned = True
+                                except Exception as e:
+                                    raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict)
+                        else:
+                            #Find the external network
+                            external_nets = list()
+                            for net in self.neutron.list_networks()['networks']:
+                                if net['router:external']:
+                                    external_nets.append(net)
+
+                            if len(external_nets) == 0:
+                                raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
+                                                               "network is present",
+                                                               http_code=vimconn.HTTP_Conflict)
+                            if len(external_nets) > 1:
+                                raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
+                                                               "external networks are present",
+                                                               http_code=vimconn.HTTP_Conflict)
+
+                            pool_id = external_nets[0].get('id')
+                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
+                            try:
+                                #self.logger.debug("Creating floating IP")
+                                new_floating_ip = self.neutron.create_floatingip(param)
+                                free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
+                                fix_ip = floating_network.get('ip')
+                                server.add_floating_ip(free_floating_ip, fix_ip)
+                                assigned=True
+                            except Exception as e:
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot assign floating_ip "+ str(e), http_code=vimconn.HTTP_Conflict)
+                except Exception as e:
+                    if not floating_network['exit_on_floating_ip_error']:
+                        self.logger.warn("Cannot create floating_ip. %s", str(e))
%s", str(e)) + continue + self.delete_vminstance(server.id) + raise + return server.id # except nvExceptions.NotFound as e: # error_value=-vimconn.HTTP_Not_Found # error_text= "vm instance %s not found" % vm_id - except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError - ) as e: + except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e: + # delete the volumes we just created + if block_device_mapping != None: + for volume_id in block_device_mapping.itervalues(): + self.cinder.volumes.delete(volume_id) + + # delete ports we just created + for net_item in net_list_vim: + if 'port-id' in net_item: + self.neutron.delete_port(net_item['port-id']) self._format_exception(e) except TypeError as e: raise vimconn.vimconnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request) @@ -846,7 +1038,32 @@ class vimconnector(vimconn.vimconnector): self.neutron.delete_port(p["id"]) except Exception as e: self.logger.error("Error deleting port: " + type(e).__name__ + ": "+ str(e)) + + #commented because detaching the volumes makes the servers.delete not work properly ?!? + #dettach volumes attached + server = self.nova.servers.get(vm_id) + volumes_attached_dict = server._info['os-extended-volumes:volumes_attached'] + #for volume in volumes_attached_dict: + # self.cinder.volumes.detach(volume['id']) + self.nova.servers.delete(vm_id) + + #delete volumes. + #Although having detached them should have them in active status + #we ensure in this loop + keep_waiting = True + elapsed_time = 0 + while keep_waiting and elapsed_time < volume_timeout: + keep_waiting = False + for volume in volumes_attached_dict: + if self.cinder.volumes.get(volume['id']).status != 'available': + keep_waiting = True + else: + self.cinder.volumes.delete(volume['id']) + if keep_waiting: + time.sleep(1) + elapsed_time += 1 + return vm_id except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e: self._format_exception(e)