From: Luis Vega
Date: Thu, 5 Oct 2023 23:22:04 +0000 (+0000)
Subject: Feature 11003: AZ for Cinder
X-Git-Tag: release-v15.0-start~3
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=refs%2Fchanges%2F37%2F13937%2F8;p=osm%2FRO.git

Feature 11003: AZ for Cinder

Change-Id: I73b4db09fa8de5ac0640d3b65b97e2a51f8c43c2
Signed-off-by: Luis Vega
---

diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
index c2874935..44b63d20 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
@@ -1302,7 +1302,7 @@ class TestNewVmInstance(unittest.TestCase):
 
     def test_prepare_persistent_root_volumes_vim_using_volume_id(self):
         """Existing persistent root volume with vim_volume_id."""
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"vim_volume_id": volume_id}
         block_device_mapping = {}
@@ -1313,7 +1313,7 @@ class TestNewVmInstance(unittest.TestCase):
         expected_existing_vim_volumes = [{"id": volume_id}]
         boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
             name,
-            vm_av_zone,
+            storage_av_zone,
             disk,
             base_disk_index,
             block_device_mapping,
@@ -1368,7 +1368,7 @@ class TestNewVmInstance(unittest.TestCase):
         self, mock_update_block_device_mapping
     ):
         """Existing persistent non root volume with vim_volume_id."""
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("b")
         disk = {"vim_volume_id": volume_id}
         block_device_mapping = {}
@@ -1379,7 +1379,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.vimconn._prepare_non_root_persistent_volumes(
             name,
             disk,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             base_disk_index,
             existing_vim_volumes,
@@ -1395,7 +1395,7 @@ class TestNewVmInstance(unittest.TestCase):
         self, mock_update_block_device_mapping
     ):
         """Existing persistent root volume with vim_id."""
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"vim_id": volume_id}
         block_device_mapping = {}
@@ -1406,7 +1406,7 @@ class TestNewVmInstance(unittest.TestCase):
         expected_existing_vim_volumes = [{"id": volume_id}]
         boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
             name,
-            vm_av_zone,
+            storage_av_zone,
             disk,
             base_disk_index,
             block_device_mapping,
@@ -1424,7 +1424,7 @@ class TestNewVmInstance(unittest.TestCase):
         self, mock_update_block_device_mapping
     ):
         """Existing persistent root volume with vim_id."""
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("b")
         disk = {"vim_id": volume_id}
         block_device_mapping = {}
@@ -1436,7 +1436,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.vimconn._prepare_non_root_persistent_volumes(
             name,
             disk,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             base_disk_index,
             existing_vim_volumes,
@@ -1454,7 +1454,7 @@ class TestNewVmInstance(unittest.TestCase):
     ):
         """Create persistent root volume."""
         self.vimconn.cinder.volumes.create.return_value.id = volume_id2
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"size": 10, "image_id": image_id}
         block_device_mapping = {}
@@ -1463,7 +1463,7 @@ class TestNewVmInstance(unittest.TestCase):
         expected_boot_vol_id = volume_id2
         boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
             name,
-            vm_av_zone,
+            storage_av_zone,
             disk,
             base_disk_index,
             block_device_mapping,
@@ -1499,7 +1499,7 @@ class TestNewVmInstance(unittest.TestCase):
     ):
         """Create persistent root volume, disk has keep parameter."""
         self.vimconn.cinder.volumes.create.return_value.id = volume_id2
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"size": 10, "image_id": image_id, "keep": True}
         block_device_mapping = {}
@@ -1509,7 +1509,7 @@ class TestNewVmInstance(unittest.TestCase):
         expected_existing_vim_volumes = []
         boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
             name,
-            vm_av_zone,
+            storage_av_zone,
             disk,
             base_disk_index,
             block_device_mapping,
@@ -1547,7 +1547,7 @@ class TestNewVmInstance(unittest.TestCase):
         """Create persistent non-root volume."""
         self.vimconn.cinder = CopyingMock()
         self.vimconn.cinder.volumes.create.return_value.id = volume_id2
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"size": 10}
         block_device_mapping = {}
@@ -1557,7 +1557,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.vimconn._prepare_non_root_persistent_volumes(
             name,
             disk,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             base_disk_index,
             existing_vim_volumes,
@@ -1591,7 +1591,7 @@ class TestNewVmInstance(unittest.TestCase):
         """Create persistent non-root volume."""
         self.vimconn.cinder = CopyingMock()
         self.vimconn.cinder.volumes.create.return_value.id = volume_id2
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"size": 10, "keep": True}
         block_device_mapping = {}
@@ -1601,7 +1601,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.vimconn._prepare_non_root_persistent_volumes(
             name,
             disk,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             base_disk_index,
             existing_vim_volumes,
@@ -1635,12 +1635,17 @@ class TestNewVmInstance(unittest.TestCase):
         class MyVolume:
             name = "my-shared-volume"
             id = volume_id4
+            availability_zone = ["nova"]
 
+        self.vimconn.storage_availability_zone = ["nova"]
         self.vimconn.cinder.volumes.create.return_value = MyVolume()
         shared_volume_data = {"size": 10, "name": "my-shared-volume"}
         result = self.vimconn.new_shared_volumes(shared_volume_data)
         self.vimconn.cinder.volumes.create.assert_called_once_with(
-            size=10, name="my-shared-volume", volume_type="multiattach"
+            size=10,
+            name="my-shared-volume",
+            volume_type="multiattach",
+            availability_zone=["nova"],
         )
         self.assertEqual(result[0], "my-shared-volume")
         self.assertEqual(result[1], volume_id4)
@@ -1651,7 +1656,7 @@ class TestNewVmInstance(unittest.TestCase):
     ):
         """Create persistent root volume raise exception."""
         self.vimconn.cinder.volumes.create.side_effect = Exception
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("a")
         disk = {"size": 10, "image_id": image_id}
         block_device_mapping = {}
@@ -1661,7 +1666,7 @@ class TestNewVmInstance(unittest.TestCase):
         with self.assertRaises(Exception):
             result = self.vimconn._prepare_persistent_root_volumes(
                 name,
-                vm_av_zone,
+                storage_av_zone,
                 disk,
                 base_disk_index,
                 block_device_mapping,
@@ -1688,7 +1693,7 @@ class TestNewVmInstance(unittest.TestCase):
     ):
         """Create persistent non-root volume raise exception."""
         self.vimconn.cinder.volumes.create.side_effect = Exception
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         base_disk_index = ord("b")
         disk = {"size": 10}
         block_device_mapping = {}
@@ -1699,7 +1704,7 @@ class TestNewVmInstance(unittest.TestCase):
             self.vimconn._prepare_non_root_persistent_volumes(
                 name,
                 disk,
-                vm_av_zone,
+                storage_av_zone,
                 block_device_mapping,
                 base_disk_index,
                 existing_vim_volumes,
@@ -1921,7 +1926,7 @@ class TestNewVmInstance(unittest.TestCase):
         existing_vim_volumes = []
         created_items = {}
         block_device_mapping = {}
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
 
         mock_root_volumes.return_value = root_vol_id
         mock_created_vol_availability.return_value = 10
@@ -1931,7 +1936,7 @@ class TestNewVmInstance(unittest.TestCase):
             name,
             existing_vim_volumes,
             created_items,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             disk_list2,
         )
@@ -1944,7 +1949,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.assertEqual(mock_non_root_volumes.call_count, 1)
         mock_root_volumes.assert_called_once_with(
             name="basicvm",
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             disk={"size": 10, "image_id": image_id},
             base_disk_index=97,
             block_device_mapping={},
@@ -1954,7 +1959,7 @@ class TestNewVmInstance(unittest.TestCase):
         mock_non_root_volumes.assert_called_once_with(
             name="basicvm",
             disk={"size": 20},
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             base_disk_index=98,
             block_device_mapping={},
             existing_vim_volumes=[],
@@ -1975,7 +1980,7 @@ class TestNewVmInstance(unittest.TestCase):
         """Timeout exceeded while waiting for disks."""
         existing_vim_volumes = []
         created_items = {}
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         block_device_mapping = {}
 
         mock_root_volumes.return_value = root_vol_id
@@ -1987,7 +1992,7 @@ class TestNewVmInstance(unittest.TestCase):
             name,
             existing_vim_volumes,
             created_items,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             disk_list2,
         )
@@ -2003,7 +2008,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.assertEqual(mock_non_root_volumes.call_count, 1)
         mock_root_volumes.assert_called_once_with(
             name="basicvm",
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             disk={"size": 10, "image_id": image_id},
             base_disk_index=97,
             block_device_mapping={},
@@ -2013,7 +2018,7 @@ class TestNewVmInstance(unittest.TestCase):
         mock_non_root_volumes.assert_called_once_with(
             name="basicvm",
             disk={"size": 20},
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             base_disk_index=98,
             block_device_mapping={},
             existing_vim_volumes=[],
@@ -2035,7 +2040,7 @@ class TestNewVmInstance(unittest.TestCase):
         existing_vim_volumes = []
         created_items = {}
         block_device_mapping = {}
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
 
         mock_created_vol_availability.return_value = 2
         mock_existing_vol_availability.return_value = 3
@@ -2043,7 +2048,7 @@ class TestNewVmInstance(unittest.TestCase):
             name,
             existing_vim_volumes,
             created_items,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             disk_list,
         )
@@ -2067,7 +2072,7 @@ class TestNewVmInstance(unittest.TestCase):
         """Persistent root volumes preparation raises error."""
         existing_vim_volumes = []
         created_items = {}
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         block_device_mapping = {}
 
         mock_root_volumes.side_effect = Exception()
@@ -2079,7 +2084,7 @@ class TestNewVmInstance(unittest.TestCase):
             name,
             existing_vim_volumes,
             created_items,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             disk_list2,
         )
@@ -2088,7 +2093,7 @@ class TestNewVmInstance(unittest.TestCase):
         mock_existing_vol_availability.assert_not_called()
         mock_root_volumes.assert_called_once_with(
             name="basicvm",
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             disk={"size": 10, "image_id": image_id},
             base_disk_index=97,
             block_device_mapping={},
@@ -2111,7 +2116,7 @@ class TestNewVmInstance(unittest.TestCase):
         """Non-root volumes preparation raises error."""
        existing_vim_volumes = []
         created_items = {}
-        vm_av_zone = ["nova"]
+        storage_av_zone = ["nova"]
         block_device_mapping = {}
 
         mock_root_volumes.return_value = root_vol_id
@@ -2122,7 +2127,7 @@ class TestNewVmInstance(unittest.TestCase):
             name,
             existing_vim_volumes,
             created_items,
-            vm_av_zone,
+            storage_av_zone,
             block_device_mapping,
             disk_list2,
         )
@@ -2133,7 +2138,7 @@ class TestNewVmInstance(unittest.TestCase):
         self.assertEqual(mock_non_root_volumes.call_count, 1)
         mock_root_volumes.assert_called_once_with(
             name="basicvm",
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             disk={"size": 10, "image_id": image_id},
             base_disk_index=97,
             block_device_mapping={},
@@ -2143,7 +2148,7 @@ class TestNewVmInstance(unittest.TestCase):
         mock_non_root_volumes.assert_called_once_with(
             name="basicvm",
             disk={"size": 20},
-            vm_av_zone=["nova"],
+            storage_av_zone=["nova"],
             base_disk_index=98,
             block_device_mapping={},
             existing_vim_volumes=[],
@@ -3300,7 +3305,7 @@ class TestNewVmInstance(unittest.TestCase):
             name=name,
             existing_vim_volumes=[],
             created_items={},
-            vm_av_zone="nova",
+            storage_av_zone="nova",
             block_device_mapping={},
             disk_list=disk_list2,
         )
@@ -3477,7 +3482,7 @@ class TestNewVmInstance(unittest.TestCase):
             name=name,
             existing_vim_volumes=[],
             created_items={},
-            vm_av_zone="nova",
+            storage_av_zone="nova",
             block_device_mapping={},
             disk_list=disk_list2,
         )
@@ -3575,7 +3580,7 @@ class TestNewVmInstance(unittest.TestCase):
             name=name,
             existing_vim_volumes=[],
             created_items={},
-            vm_av_zone="nova",
+            storage_av_zone="nova",
             block_device_mapping={},
             disk_list=disk_list2,
         )
@@ -3674,7 +3679,7 @@ class TestNewVmInstance(unittest.TestCase):
             name=name,
             existing_vim_volumes=[],
             created_items={},
-            vm_av_zone="nova",
+            storage_av_zone="nova",
             block_device_mapping={},
             disk_list=disk_list2,
         )
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index bca94792..4a70f55d 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -186,6 +186,7 @@ class vimconnector(vimconn.VimConnector):
         self.persistent_info = persistent_info
         self.availability_zone = persistent_info.get("availability_zone", None)
+        self.storage_availability_zone = None
         self.session = persistent_info.get("session", {"reload_client": True})
         self.my_tenant_id = self.session.get("my_tenant_id")
         self.nova = self.session.get("nova")
@@ -1873,6 +1874,10 @@ class vimconnector(vimconn.VimConnector):
             self.availability_zone = vim_availability_zones
         else:
             self.availability_zone = self._get_openstack_availablity_zones()
+        if "storage_availability_zone" in self.config:
+            self.storage_availability_zone = self.config.get(
+                "storage_availability_zone"
+            )
 
     def _get_vm_availability_zone(
         self, availability_zone_index, availability_zone_list
@@ -2111,7 +2116,7 @@ class vimconnector(vimconn.VimConnector):
     def _prepare_persistent_root_volumes(
         self,
         name: str,
-        vm_av_zone: list,
+        storage_av_zone: list,
         disk: dict,
         base_disk_index: int,
         block_device_mapping: dict,
@@ -2122,7 +2127,7 @@ class vimconnector(vimconn.VimConnector):
 
         Args:
             name (str): Name of VM instance
-            vm_av_zone (list): List of availability zones
+            storage_av_zone (list): Storage availability zones
             disk (dict): Disk details
             base_disk_index (int): Disk index
             block_device_mapping (dict): Block device details
@@ -2136,11 +2141,9 @@ class vimconnector(vimconn.VimConnector):
         # Disk may include only vim_volume_id or only vim_id."
         # Use existing persistent root volume finding with volume_id or vim_id
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
             # Create persistent root volume
             volume = self.cinder.volumes.create(
@@ -2148,7 +2151,7 @@ class vimconnector(vimconn.VimConnector):
                 name=name + "vd" + chr(base_disk_index),
                 imageRef=disk["image_id"],
                 # Make sure volume is in the same AZ as the VM to be attached to
-                availability_zone=vm_av_zone,
+                availability_zone=storage_av_zone,
             )
             boot_volume_id = volume.id
             self.update_block_device_mapping(
@@ -2195,10 +2198,16 @@ class vimconnector(vimconn.VimConnector):
 
     @catch_any_exception
     def new_shared_volumes(self, shared_volume_data) -> (str, str):
+        availability_zone = (
+            self.storage_availability_zone
+            if self.storage_availability_zone
+            else self._get_vm_availability_zone
+        )
         volume = self.cinder.volumes.create(
             size=shared_volume_data["size"],
             name=shared_volume_data["name"],
             volume_type="multiattach",
+            availability_zone=availability_zone,
         )
         return volume.name, volume.id
 
@@ -2241,7 +2250,7 @@ class vimconnector(vimconn.VimConnector):
         self,
         name: str,
         disk: dict,
-        vm_av_zone: list,
+        storage_av_zone: list,
         block_device_mapping: dict,
         base_disk_index: int,
         existing_vim_volumes: list,
@@ -2252,7 +2261,7 @@ class vimconnector(vimconn.VimConnector):
         Args:
             name (str): Name of VM instance
             disk (dict): Disk details
-            vm_av_zone (list): List of availability zones
+            storage_av_zone (list): Storage availability zones
             block_device_mapping (dict): Block device details
             base_disk_index (int): Disk index
             existing_vim_volumes (list): Existing disk details
@@ -2271,7 +2280,7 @@ class vimconnector(vimconn.VimConnector):
                 size=disk["size"],
                 name=volume_name,
                 # Make sure volume is in the same AZ as the VM to be attached to
-                availability_zone=vm_av_zone,
+                availability_zone=storage_av_zone,
             )
             self.update_block_device_mapping(
                 volume=volume,
@@ -2352,7 +2361,7 @@ class vimconnector(vimconn.VimConnector):
         name: str,
         existing_vim_volumes: list,
         created_items: dict,
-        vm_av_zone: list,
+        storage_av_zone: list,
         block_device_mapping: dict,
         disk_list: list = None,
     ) -> None:
@@ -2362,7 +2371,7 @@ class vimconnector(vimconn.VimConnector):
             name (str): Name of Instance
             existing_vim_volumes (list): List of existing volumes
             created_items (dict): All created items belongs to VM
-            vm_av_zone (list): VM availability zone
+            storage_av_zone (list): Storage availability zone
             block_device_mapping (dict): Block devices to be attached to VM
             disk_list (list): List of disks
 
@@ -2377,7 +2386,7 @@ class vimconnector(vimconn.VimConnector):
                 base_disk_index = ord("a")
                 boot_volume_id = self._prepare_persistent_root_volumes(
                     name=name,
-                    vm_av_zone=vm_av_zone,
+                    storage_av_zone=storage_av_zone,
                     disk=disk,
                     base_disk_index=base_disk_index,
                     block_device_mapping=block_device_mapping,
@@ -2398,7 +2407,7 @@ class vimconnector(vimconn.VimConnector):
                 self._prepare_non_root_persistent_volumes(
                     name=name,
                     disk=disk,
-                    vm_av_zone=vm_av_zone,
+                    storage_av_zone=storage_av_zone,
                     block_device_mapping=block_device_mapping,
                     base_disk_index=base_disk_index,
                     existing_vim_volumes=existing_vim_volumes,
@@ -2814,13 +2823,19 @@ class vimconnector(vimconn.VimConnector):
             availability_zone_index, availability_zone_list
         )
 
+        storage_av_zone = (
+            self.storage_availability_zone
+            if self.storage_availability_zone
+            else vm_av_zone
+        )
+
         if disk_list:
             # Prepare disks
             self._prepare_disk_for_vminstance(
                 name=name,
                 existing_vim_volumes=existing_vim_volumes,
                 created_items=created_items,
-                vm_av_zone=vm_av_zone,
+                storage_av_zone=storage_av_zone,
                 block_device_mapping=block_device_mapping,
                 disk_list=disk_list,
             )
diff --git a/releasenotes/notes/feature_11003_AZ_for_Cinder-1d73dc1aad574952.yaml b/releasenotes/notes/feature_11003_AZ_for_Cinder-1d73dc1aad574952.yaml
new file mode 100644
index 00000000..a5b6ddb4
--- /dev/null
+++ b/releasenotes/notes/feature_11003_AZ_for_Cinder-1d73dc1aad574952.yaml
@@ -0,0 +1,23 @@
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+  - |
+    Feature 11003 - AZ for Cinder
+    Currently, the creation of volumes with Cinder uses the same
+    availability zone as Nova. In some cases, a different AZ may
+    be required in order to select a different storage domain.
\ No newline at end of file
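
Editor's note: the standalone sketch below is illustrative only and not part of the patch. It condenses the availability-zone selection the change introduces: a Cinder volume uses the VIM account's "storage_availability_zone" config when it is set, and otherwise falls back to the availability zone already chosen for the VM. The helper name resolve_storage_az and the vim_config dict are assumed names for this example.

    # Illustrative sketch of the AZ fallback added by Feature 11003.
    # resolve_storage_az and vim_config are hypothetical names, not OSM code.
    from typing import Optional


    def resolve_storage_az(vim_config: dict, vm_av_zone: Optional[str]) -> Optional[str]:
        """Prefer the dedicated storage AZ from the VIM config, else reuse the VM's AZ."""
        return (
            vim_config.get("storage_availability_zone")
            if vim_config.get("storage_availability_zone")
            else vm_av_zone
        )


    # VIM account configured with a dedicated storage AZ for Cinder volumes:
    print(resolve_storage_az({"storage_availability_zone": "storage-az"}, "nova"))  # storage-az
    # No storage AZ configured: the volume stays in the same AZ as the VM:
    print(resolve_storage_az({}, "nova"))  # nova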