X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=3379f1ae3e895e48f7bc7f4fd7eb3b4994a0cdfe;hb=refs%2Fchanges%2F55%2F12855%2F2;hp=a15a53cfb99eca551618599e540a1036c051795d;hpb=ae41880d0de30a63dbb9ee3ca0e66bbf131e5b7e;p=osm%2FRO.git

diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index a15a53cf..3379f1ae 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -1291,9 +1291,6 @@ class vimconnector(vimconn.VimConnector):
                             extra_specs["hw:numa_mempolicy"] = "strict"
 
                             if self.vim_type == "VIO":
-                                extra_specs[
-                                    "vmware:extra_config"
-                                ] = '{"numa.nodeAffinity":"0"}'
                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
 
                             for numa in numas:
@@ -1349,6 +1346,26 @@ class vimconnector(vimconn.VimConnector):
                                 extended.get("disk-io-quota"), "disk_io", extra_specs
                             )
 
+                        # Set the mempage size as specified in the descriptor
+                        if extended.get("mempage-size"):
+                            if extended.get("mempage-size") == "LARGE":
+                                extra_specs["hw:mem_page_size"] = "large"
+                            elif extended.get("mempage-size") == "SMALL":
+                                extra_specs["hw:mem_page_size"] = "small"
+                            elif extended.get("mempage-size") == "SIZE_2MB":
+                                extra_specs["hw:mem_page_size"] = "2MB"
+                            elif extended.get("mempage-size") == "SIZE_1GB":
+                                extra_specs["hw:mem_page_size"] = "1GB"
+                            elif extended.get("mempage-size") == "PREFER_LARGE":
+                                extra_specs["hw:mem_page_size"] = "any"
+                            else:
+                                # NBI validation should make this branch unreachable;
+                                # if this message appears, review the NBI validations.
+                                self.logger.debug(
+                                    "Invalid mempage-size %s. Will be ignored",
+                                    extended.get("mempage-size"),
+                                )
+
                     # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
@@ -1865,8 +1882,14 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # Get the availability zone for the VM
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
             base_disk_index = ord("b")
+            boot_volume_id = None
             if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
@@ -1876,15 +1899,21 @@ class vimconnector(vimconn.VimConnector):
                     if disk.get("vim_id"):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
                     else:
                         if "image_id" in disk:
+                            base_disk_index = ord("a")
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Make sure the volume is in the same AZ as the VM it will be attached to
+                                availability_zone=vm_av_zone,
                             )
+                            boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Make sure the volume is in the same AZ as the VM it will be attached to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
@@ -1912,11 +1941,8 @@ class vimconnector(vimconn.VimConnector):
                         "Timeout creating volumes for instance " + name,
                         http_code=vimconn.HTTP_Request_Timeout,
                     )
-
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+            if boot_volume_id:
+                self.cinder.volumes.set_bootable(boot_volume_id, True)
 
             # Manage affinity groups/server groups
             server_group_id = None
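
Note on the mempage-size hunk: the descriptor's mempage-size value is translated into the standard Nova flavor extra spec hw:mem_page_size, whose accepted values are "small", "large", "any", or an explicit page size such as "2MB" or "1GB". Below is a minimal stand-alone sketch of the same mapping using a lookup table in place of the if/elif chain; the helper name, the table, and the module-level logger are illustrative assumptions, not code from the patch.

import logging

logger = logging.getLogger(__name__)

# Descriptor value -> Nova "hw:mem_page_size" extra spec, as in the hunk above.
MEMPAGE_SIZE_MAP = {
    "LARGE": "large",
    "SMALL": "small",
    "SIZE_2MB": "2MB",
    "SIZE_1GB": "1GB",
    "PREFER_LARGE": "any",
}


def set_mempage_size(extended, extra_specs):
    """Translate the descriptor's mempage-size into a flavor extra spec."""
    mempage_size = extended.get("mempage-size")

    if not mempage_size:
        return

    if mempage_size in MEMPAGE_SIZE_MAP:
        extra_specs["hw:mem_page_size"] = MEMPAGE_SIZE_MAP[mempage_size]
    else:
        # NBI validation should make this branch unreachable.
        logger.debug("Invalid mempage-size %s. Will be ignored", mempage_size)

For example, set_mempage_size({"mempage-size": "SIZE_2MB"}, extra_specs) leaves extra_specs == {"hw:mem_page_size": "2MB"}; keeping the mapping in one dict makes it easy to extend if further page sizes are ever accepted.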
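
Note on the volume hunks: _get_vm_availability_zone() is now called before the Cinder volumes are created, so every volume can be placed in the same availability zone as the VM; the image-backed root volume is named "_vda" (base_disk_index = ord("a")) and, once the volumes are ready, flagged bootable. The sketch below shows the same flow with python-cinderclient. The volumes.create(..., imageRef=..., availability_zone=...) and volumes.set_bootable(...) calls are the ones used in the patch; the client construction, the polling loop, and TimeoutError are simplified assumptions (the connector itself uses its volume_timeout loop and raises vimconn.VimConnException).

import time

from cinderclient import client as cinder_client


def create_boot_volume(session, name, size_gb, image_id, vm_av_zone, timeout=300):
    # Assumes an authenticated keystoneauth1 session.
    cinder = cinder_client.Client("3", session=session)

    volume = cinder.volumes.create(
        size=size_gb,
        name=name + "_vda",
        imageRef=image_id,
        # Keep the volume in the same AZ as the VM it will be attached to;
        # cross-AZ attachment fails on clouds with [cinder]cross_az_attach disabled.
        availability_zone=vm_av_zone,
    )

    # Wait until the volume leaves "creating" before touching it.
    elapsed = 0
    while cinder.volumes.get(volume.id).status != "available":
        if elapsed >= timeout:
            raise TimeoutError("volume %s not available in time" % volume.id)

        time.sleep(5)
        elapsed += 5

    # An image-backed volume must be marked bootable to serve as the root disk.
    cinder.volumes.set_bootable(volume.id, True)

    return volume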