diff -Nru cinder-17.0.1/ChangeLog cinder-17.1.0/ChangeLog --- cinder-17.0.1/ChangeLog 2020-12-07 10:01:47.000000000 +0000 +++ cinder-17.1.0/ChangeLog 2021-03-08 11:44:59.000000000 +0000 @@ -1,10 +1,29 @@ CHANGES ======= +17.1.0 +------ + +* NetApp SolidFire: Fix duplicate volume when API response is lost +* Log information about the Ceph v2 clone API +* RBD: Pass bytes type for mon\_command inbuf +* Require oslo.serialization 4.0.2 +* Update SolidFire Storage assisted migration in support-matrix +* Pure: Add default value to pure\_host\_personality +* Tests: Fix rbd unit test failure due to ceph keyring file +* Correct group:reset\_group\_snapshot\_status policy +* RBD: Retry delete if VolumeIsBusy in \_copy\_image\_to\_volume +* Add CHAP support to Dell EMC PowerStore driver +* Adjust requirements and lower-constraints +* [SVF]:Reduce slowness by caching pool information +* [SVF]:Fix clone fcmap not being deleted in cleanup +* Fixed an issue with creating a backup from snapshot with NFS volume driver + 17.0.1 ------ * Fix volume rekey during clone +* NetApp SolidFire: Fix error on cluster workload rebalancing * PowerMax Docs - Victoria new features and supported software * Do not fail when depth is greater than rbd\_max\_clone\_depth * NetApp SolidFire: Fix clone and request timeout issues diff -Nru cinder-17.0.1/cinder/policies/group_snapshot_actions.py cinder-17.1.0/cinder/policies/group_snapshot_actions.py --- cinder-17.0.1/cinder/policies/group_snapshot_actions.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/policies/group_snapshot_actions.py 2021-03-08 11:44:16.000000000 +0000 @@ -24,7 +24,7 @@ group_snapshot_actions_policies = [ policy.DocumentedRuleDefault( name=RESET_STATUS, - check_str=base.RULE_ADMIN_OR_OWNER, + check_str=base.RULE_ADMIN_API, description="Reset status of group snapshot.", operations=[ { diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py 2021-03-08 11:44:16.000000000 +0000 @@ -21,8 +21,10 @@ class TestBase(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") - def test_configuration(self, mock_appliance): + def test_configuration(self, mock_appliance, mock_chap): mock_appliance.return_value = "A1" self.driver.check_for_setup_error() @@ -51,10 +53,15 @@ self.assertIn("Failed to query PowerStore appliances.", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_metrics") - def test_update_volume_stats(self, mock_metrics, mock_appliance): + def test_update_volume_stats(self, + mock_metrics, + mock_appliance, + mock_chap): mock_appliance.return_value = "A1" mock_metrics.return_value = { "physical_total": 2147483648, @@ -64,11 +71,14 @@ self.driver._update_volume_stats() @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
+ "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") @mock.patch("requests.request") def test_update_volume_stats_bad_status(self, mock_metrics, - mock_appliance): + mock_appliance, + mock_chap): mock_appliance.return_value = "A1" mock_metrics.return_value = powerstore.MockResponse(rc=400) self.driver.check_for_setup_error() diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py 2021-03-08 11:44:16.000000000 +0000 @@ -23,8 +23,10 @@ class TestSnapshotCreateDelete(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") - def setUp(self, mock_appliance): + def setUp(self, mock_appliance, mock_chap): super(TestSnapshotCreateDelete, self).setUp() mock_appliance.return_value = "A1" self.driver.check_for_setup_error() diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py 2021-03-08 11:44:17.000000000 +0000 @@ -25,10 +25,13 @@ class TestVolumeAttachDetach(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") - def setUp(self, mock_appliance): + def setUp(self, mock_appliance, mock_chap): super(TestVolumeAttachDetach, self).setUp() mock_appliance.return_value = "A1" + mock_chap.return_value = {"mode": "Single"} self.iscsi_driver.check_for_setup_error() self.fc_driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( @@ -50,7 +53,7 @@ attached_host=self.volume.host ) ] - self.fake_iscsi_targets_response = [ + fake_iscsi_targets_response = [ { "address": "1.2.3.4", "ip_port": { @@ -66,7 +69,7 @@ }, }, ] - self.fake_fc_wwns_response = [ + fake_fc_wwns_response = [ { "wwn": "58:cc:f0:98:49:21:07:02" }, @@ -79,18 +82,52 @@ "wwpns": ["58:cc:f0:98:49:21:07:02", "58:cc:f0:98:49:23:07:02"], "initiator": "fake_initiator", } + self.iscsi_targets_mock = self.mock_object( + self.iscsi_driver.adapter.client, + "get_ip_pool_address", + return_value=fake_iscsi_targets_response + ) + self.fc_wwns_mock = self.mock_object( + self.fc_driver.adapter.client, + "get_fc_port", + return_value=fake_fc_wwns_response + ) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
- "PowerStoreClient.get_fc_port") - def test_get_fc_targets(self, mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_fc_wwns_response + def test_initialize_connection_chap_enabled(self): + self.iscsi_driver.adapter.use_chap_auth = True + with mock.patch.object(self.iscsi_driver.adapter, + "_create_host_and_attach", + return_value=( + utils.get_chap_credentials(), + 1 + )): + connection_properties = self.iscsi_driver.initialize_connection( + self.volume, + self.fake_connector + ) + self.assertIn("auth_username", connection_properties["data"]) + self.assertIn("auth_password", connection_properties["data"]) + + def test_initialize_connection_chap_disabled(self): + self.iscsi_driver.adapter.use_chap_auth = False + with mock.patch.object(self.iscsi_driver.adapter, + "_create_host_and_attach", + return_value=( + utils.get_chap_credentials(), + 1 + )): + connection_properties = self.iscsi_driver.initialize_connection( + self.volume, + self.fake_connector + ) + self.assertNotIn("auth_username", connection_properties["data"]) + self.assertNotIn("auth_password", connection_properties["data"]) + + def test_get_fc_targets(self): wwns = self.fc_driver.adapter._get_fc_targets("A1") self.assertEqual(2, len(wwns)) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." - "PowerStoreClient.get_fc_port") - def test_get_fc_targets_filtered(self, mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_fc_wwns_response + def test_get_fc_targets_filtered(self): self.fc_driver.adapter.allowed_ports = ["58:cc:f0:98:49:23:07:02"] wwns = self.fc_driver.adapter._get_fc_targets("A1") self.assertEqual(1, len(wwns)) @@ -98,10 +135,7 @@ utils.fc_wwn_to_string("58:cc:f0:98:49:21:07:02") in wwns ) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." - "PowerStoreClient.get_fc_port") - def test_get_fc_targets_filtered_no_matched_ports(self, mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_fc_wwns_response + def test_get_fc_targets_filtered_no_matched_ports(self): self.fc_driver.adapter.allowed_ports = ["fc_wwn_1", "fc_wwn_2"] error = self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.adapter._get_fc_targets, @@ -109,18 +143,12 @@ self.assertIn("There are no accessible Fibre Channel targets on the " "system.", error.msg) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." - "PowerStoreClient.get_ip_pool_address") - def test_get_iscsi_targets(self, mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_iscsi_targets_response + def test_get_iscsi_targets(self): iqns, portals = self.iscsi_driver.adapter._get_iscsi_targets("A1") self.assertTrue(len(iqns) == len(portals)) self.assertEqual(2, len(portals)) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." - "PowerStoreClient.get_ip_pool_address") - def test_get_iscsi_targets_filtered(self, mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_iscsi_targets_response + def test_get_iscsi_targets_filtered(self): self.iscsi_driver.adapter.allowed_ports = ["1.2.3.4"] iqns, portals = self.iscsi_driver.adapter._get_iscsi_targets("A1") self.assertTrue(len(iqns) == len(portals)) @@ -129,11 +157,7 @@ "iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-2" in iqns ) - @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
- "PowerStoreClient.get_ip_pool_address") - def test_get_iscsi_targets_filtered_no_matched_ports(self, - mock_get_ip_pool): - mock_get_ip_pool.return_value = self.fake_iscsi_targets_response + def test_get_iscsi_targets_filtered_no_matched_ports(self): self.iscsi_driver.adapter.allowed_ports = ["1.1.1.1", "2.2.2.2"] error = self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.adapter._get_iscsi_targets, diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py 2021-03-08 11:44:16.000000000 +0000 @@ -23,8 +23,10 @@ class TestVolumeCreateDeleteExtend(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_appliance_id_by_name") - def setUp(self, mock_appliance): + def setUp(self, mock_appliance, mock_chap): super(TestVolumeCreateDeleteExtend, self).setUp() mock_appliance.return_value = "A1" self.driver.check_for_setup_error() diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py 2021-03-08 11:44:16.000000000 +0000 @@ -23,8 +23,10 @@ class TestVolumeCreateFromSource(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." + "PowerStoreClient.get_chap_config") + @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.get_appliance_id_by_name") - def setUp(self, mock_appliance): + def setUp(self, mock_appliance, mock_chap): super(TestVolumeCreateFromSource, self).setUp() mock_appliance.return_value = "A1" self.driver.check_for_setup_error() diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py cinder-17.1.0/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py 2021-03-08 11:44:17.000000000 +0000 @@ -25,6 +25,7 @@ import ddt from oslo_concurrency import processutils from oslo_config import cfg +from oslo_service import loopingcall from oslo_utils import importutils from oslo_utils import units import paramiko @@ -5537,7 +5538,7 @@ self._set_flag('reserved_percentage', 25) self._set_flag('storwize_svc_multihostmap_enabled', True) self._set_flag('storwize_svc_vol_rsize', rsize) - stats = self.driver.get_volume_stats() + stats = self.driver.get_volume_stats(True) for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['storwize_svc_volpool_name']) @@ -8269,6 +8270,7 @@ list(resp.select('port_id', 'port_status'))) +@ddt.ddt class StorwizeHelpersTestCase(test.TestCase): def setUp(self): super(StorwizeHelpersTestCase, self).setUp() @@ -8494,7 +8496,168 @@ 'status': 'copying', 'target_vdisk_name': 'testvol'} self.storwize_svc_common.pretreatment_before_revert(vol) - stopfcmap.assert_called_once_with('4', split=True) + stopfcmap.assert_called_once_with('4') + + @ddt.data({'copy_rate': '50', 'progress': '3', 'status': 'copying'}, + {'copy_rate': '50', 'progress': '100', 'status': 'copying'}, + {'copy_rate': '0', 'progress': '0', 'status': 'copying'}, + {'copy_rate': '50', 'progress': '0', 'status': 'copying'}, + {'copy_rate': '0', 'progress': '0', 'status': 'idle_or_copied'}) + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_flashcopy_mapping_attributes') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_vdisk_fc_mappings') + def test_check_vdisk_fc_mappings(self, + fc_data, + get_vdisk_fc_mappings, + get_fc_mapping_attributes, + rmfcmap, stopfcmap, chfcmap): + vol = 'testvol' + get_vdisk_fc_mappings.return_value = ['4'] + get_fc_mapping_attributes.return_value = { + 'copy_rate': fc_data['copy_rate'], + 'progress': fc_data['progress'], + 'status': fc_data['status'], + 'target_vdisk_name': 'tar-testvol', + 'rc_controlled': 'no', + 'source_vdisk_name': 'testvol'} + + if(fc_data['copy_rate'] != '0' and fc_data['progress'] == '100' + and fc_data['status'] == 'copying'): + (self.assertRaises(loopingcall.LoopingCallDone, + self.storwize_svc_common._check_vdisk_fc_mappings, vol, True, + False)) + stopfcmap.assert_called_with('4') + self.assertEqual(1, stopfcmap.call_count) + else: + self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, + False) + stopfcmap.assert_not_called() + self.assertEqual(0, stopfcmap.call_count) + + get_vdisk_fc_mappings.assert_called() + get_fc_mapping_attributes.assert_called_with('4') + rmfcmap.assert_not_called() + self.assertEqual(1, get_fc_mapping_attributes.call_count) + self.assertEqual(0, rmfcmap.call_count) + + if(fc_data['copy_rate'] == '0' and fc_data['progress'] == '0' + and 
fc_data['status'] in ['copying', 'idle_or_copied']): + chfcmap.assert_called_with('4', copyrate='50', autodel='on') + self.assertEqual(1, chfcmap.call_count) + else: + chfcmap.assert_not_called() + self.assertEqual(0, chfcmap.call_count) + + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_flashcopy_mapping_attributes') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_vdisk_fc_mappings') + def test_check_vdisk_fc_mappings_tarisvol(self, + get_vdisk_fc_mappings, + get_fc_mapping_attributes, + rmfcmap, stopfcmap, chfcmap): + vol = 'tar-testvol' + get_vdisk_fc_mappings.return_value = ['4'] + get_fc_mapping_attributes.return_value = { + 'copy_rate': '0', + 'progress': '0', + 'status': 'idle_or_copied', + 'target_vdisk_name': 'tar-testvol', + 'rc_controlled': 'no', + 'source_vdisk_name': 'testvol'} + + self.assertRaises(loopingcall.LoopingCallDone, + self.storwize_svc_common._check_vdisk_fc_mappings, + vol, True, False) + + get_vdisk_fc_mappings.assert_called() + get_fc_mapping_attributes.assert_called_with('4') + stopfcmap.assert_not_called() + rmfcmap.assert_called_with('4') + chfcmap.assert_not_called() + self.assertEqual(1, get_fc_mapping_attributes.call_count) + self.assertEqual(0, stopfcmap.call_count) + self.assertEqual(1, rmfcmap.call_count) + self.assertEqual(0, chfcmap.call_count) + + @ddt.data(([{'cp_rate': '0', 'prgs': '0', 'status': 'idle_or_copied', + 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, + {'cp_rate': '50', 'prgs': '100', 'status': 'copying', + 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, + {'cp_rate': '50', 'prgs': '3', 'status': 'copying', + 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}], 1), + ([{'cp_rate': '50', 'prgs': '100', 'status': 'idle_or_copied', + 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, + {'cp_rate': '50', 'prgs': '100', 'status': 'copying', + 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, + {'cp_rate': '50', 'prgs': '100', 'status': 'copying', + 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}], 1), + ([{'cp_rate': '50', 'prgs': '100', 'status': 'idle_or_copied', + 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, + {'cp_rate': '50', 'prgs': '100', 'status': 'copying', + 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, + {'cp_rate': '50', 'prgs': '100', 'status': 'copying', + 'trg_vdisk': 'tar_testvol_1', 'src_vdisk': 'testvol'}], 2), + ([{'cp_rate': '0', 'prgs': '0', 'status': 'copying', + 'trg_vdisk': 'testvol', 'src_vdisk': 'snap_testvol'}, + {'cp_rate': '50', 'prgs': '0', 'status': 'copying', + 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, + {'cp_rate': '50', 'prgs': '0', 'status': 'copying', + 'trg_vdisk': 'tar_testvol_1', 'src_vdisk': 'testvol'}], 0)) + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') + @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_flashcopy_mapping_attributes') + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + '_get_vdisk_fc_mappings') + @ddt.unpack + def test_check_vdisk_fc_mappings_mul_fcs(self, + fc_data, stopfc_count, + get_vdisk_fc_mappings, + get_fc_mapping_attributes, + rmfcmap, stopfcmap, chfcmap): + vol = 'testvol' + get_vdisk_fc_mappings.return_value = ['4', '5', '7'] + 
get_fc_mapping_attributes.side_effect = [ + { + 'copy_rate': fc_data[0]['cp_rate'], + 'progress': fc_data[0]['prgs'], + 'status': fc_data[0]['status'], + 'target_vdisk_name': fc_data[0]['trg_vdisk'], + 'rc_controlled': 'no', + 'source_vdisk_name': fc_data[0]['src_vdisk']}, + { + 'copy_rate': fc_data[1]['cp_rate'], + 'progress': fc_data[1]['prgs'], + 'status': fc_data[1]['status'], + 'target_vdisk_name': fc_data[1]['trg_vdisk'], + 'rc_controlled': 'no', + 'source_vdisk_name': fc_data[1]['src_vdisk']}, + { + 'copy_rate': fc_data[2]['cp_rate'], + 'progress': fc_data[2]['prgs'], + 'status': fc_data[2]['status'], + 'target_vdisk_name': fc_data[2]['trg_vdisk'], + 'rc_controlled': 'no', + 'source_vdisk_name': fc_data[2]['src_vdisk']}] + + self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, True) + get_vdisk_fc_mappings.assert_called() + get_fc_mapping_attributes.assert_called() + rmfcmap.assert_not_called() + chfcmap.assert_not_called() + self.assertEqual(3, get_fc_mapping_attributes.call_count) + self.assertEqual(stopfc_count, stopfcmap.call_count) + self.assertEqual(0, rmfcmap.call_count) + self.assertEqual(0, chfcmap.call_count) def test_storwize_check_flashcopy_rate_invalid1(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, @@ -8522,6 +8685,35 @@ self.storwize_svc_common.check_flashcopy_rate, flashcopy_rate) + @ddt.data(({'mirror_pool': 'openstack2', + 'volume_topology': None, + 'peer_pool': None}, True, 1), + ({'mirror_pool': 'openstack2', + 'volume_topology': None, + 'peer_pool': None}, False, 2), + ({'mirror_pool': None, + 'volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'}, True, 1), + ({'mirror_pool': None, + 'volume_topology': 'hyperswap', + 'peer_pool': 'openstack1'}, False, 2)) + @mock.patch.object(storwize_svc_common.StorwizeHelpers, + 'is_data_reduction_pool') + @ddt.unpack + def test_is_volume_type_dr_pools_dr_pool(self, opts, is_drp, call_count, + is_data_reduction_pool): + is_data_reduction_pool.return_value = is_drp + pool = 'openstack' + rep_type = None + rep_target_pool = None + + isdrpool = (self.storwize_svc_common. 
+ is_volume_type_dr_pools(pool, opts, rep_type, + rep_target_pool)) + self.assertEqual(is_drp, isdrpool) + is_data_reduction_pool.assert_called() + self.assertEqual(call_count, is_data_reduction_pool.call_count) + @ddt.ddt class StorwizeSSHTestCase(test.TestCase): diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py cinder-17.1.0/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py 2021-03-08 11:44:17.000000000 +0000 @@ -370,11 +370,14 @@ 'attributes': {'uuid': f_uuid[1]}, 'qos': None, 'iqn': test_name}]}} - if params and params['startVolumeID']: + if params and params.get('startVolumeID', None): volumes = result['result']['volumes'] - selected_volumes = [v for v in volumes if v.get('volumeID') - != params['startVolumeID']] + selected_volumes = [v for v in volumes if v.get('volumeID') != + params['startVolumeID']] result['result']['volumes'] = selected_volumes + else: + result = {'result': {'volumes': []}} + return result elif method == 'DeleteSnapshot': return {'result': {}} diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/test_nfs.py cinder-17.1.0/cinder/tests/unit/volume/drivers/test_nfs.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/test_nfs.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/test_nfs.py 2021-03-08 11:44:16.000000000 +0000 @@ -1354,12 +1354,13 @@ run_as_root=True) mock_permission.assert_called_once_with(dest_vol_path) - @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG2, QEMU_IMG_INFO_OUT4], - [NFS_CONFIG3, QEMU_IMG_INFO_OUT3], - [NFS_CONFIG4, QEMU_IMG_INFO_OUT4]) + @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3, 'available'], + [NFS_CONFIG2, QEMU_IMG_INFO_OUT4, 'backing-up'], + [NFS_CONFIG3, QEMU_IMG_INFO_OUT3, 'available'], + [NFS_CONFIG4, QEMU_IMG_INFO_OUT4, 'backing-up']) @ddt.unpack - def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info): + def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info, + snap_status): self._set_driver(extra_confs=nfs_conf) drv = self._driver @@ -1376,7 +1377,7 @@ # Fake snapshot based in the previous created volume snap_file = src_volume.name + '.' 
+ fake_snap.id fake_snap.volume = src_volume - fake_snap.status = 'available' + fake_snap.status = snap_status fake_snap.size = 10 # New fake volume where the snap will be copied @@ -1419,7 +1420,9 @@ mock_ensure.assert_called_once() mock_find_share.assert_called_once_with(new_volume) - def test_create_volume_from_snapshot_status_not_available(self): + @ddt.data('error', 'creating', 'deleting', 'deleted', 'updating', + 'error_deleting', 'unmanaging', 'restoring') + def test_create_volume_from_snapshot_invalid_status(self, snap_status): """Expect an error when the snapshot's status is not 'available'.""" self._set_driver() drv = self._driver @@ -1428,6 +1431,7 @@ fake_snap = fake_snapshot.fake_snapshot_obj(self.context) fake_snap.volume = src_volume + fake_snap.status = snap_status new_volume = self._simple_volume() new_volume['size'] = fake_snap['volume_size'] diff -Nru cinder-17.0.1/cinder/tests/unit/volume/drivers/test_rbd.py cinder-17.1.0/cinder/tests/unit/volume/drivers/test_rbd.py --- cinder-17.0.1/cinder/tests/unit/volume/drivers/test_rbd.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/tests/unit/volume/drivers/test_rbd.py 2021-03-08 11:44:17.000000000 +0000 @@ -881,6 +881,47 @@ @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') + @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) + def test_log_create_vol_from_snap_w_v2_clone_api(self, volume_get_by_id): + volume_get_by_id.return_value = self.volume_a + + self.mock_proxy().__enter__().volume.op_features.return_value = 1 + self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1 + + snapshot = mock.Mock() + self.cfg.rbd_flatten_volume_from_snapshot = False + + with mock.patch.object(driver, 'LOG') as \ + mock_log: + + self.driver.create_volume_from_snapshot(self.volume_a, snapshot) + + mock_log.info.assert_called_once() + self.assertTrue(self.driver._clone_v2_api_checked) + + @common_mocks + @mock.patch('cinder.objects.Volume.get_by_id') + @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) + def test_log_create_vol_from_snap_without_v2_clone_api(self, + volume_get_by_id): + volume_get_by_id.return_value = self.volume_a + + self.mock_proxy().__enter__().volume.op_features.return_value = 0 + self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1 + + snapshot = mock.Mock() + self.cfg.rbd_flatten_volume_from_snapshot = False + + with mock.patch.object(driver, 'LOG') as \ + mock_log: + + self.driver.create_volume_from_snapshot(self.volume_a, snapshot) + + mock_log.warning.assert_called_once() + self.assertTrue(self.driver._clone_v2_api_checked) + + @common_mocks + @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value @@ -1313,17 +1354,29 @@ self.driver._is_cloneable(location, {'disk_format': f})) self.assertTrue(mock_get_fsid.called) - def _copy_image(self): + def _copy_image(self, volume_busy=False): with mock.patch.object(tempfile, 'NamedTemporaryFile'): with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = True with mock.patch.object(image_utils, 'fetch_to_raw'): - with mock.patch.object(self.driver, 'delete_volume'): + with mock.patch.object(self.driver, 'delete_volume') \ + as mock_dv: with mock.patch.object(self.driver, '_resize'): mock_image_service = mock.MagicMock() args = [None, self.volume_a, mock_image_service, None] - self.driver.copy_image_to_volume(*args) + if volume_busy: + mock_dv.side_effect = ( + exception.VolumeIsBusy("doh")) 
+ self.assertRaises( + exception.VolumeIsBusy, + self.driver.copy_image_to_volume, + *args) + self.assertEqual( + self.cfg.rados_connection_retries, + mock_dv.call_count) + else: + self.driver.copy_image_to_volume(*args) @mock.patch('cinder.volume.drivers.rbd.fileutils.delete_if_exists') @mock.patch('cinder.volume.volume_utils.check_encryption_provider', @@ -1373,6 +1426,11 @@ self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image_encrypted() + @common_mocks + def test_copy_image_busy_volume(self): + self.cfg.image_conversion_dir = '/var/run/cinder/tmp' + self._copy_image(volume_busy=True) + @ddt.data(True, False) @common_mocks @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info') @@ -1527,9 +1585,9 @@ return_value=dynamic_total): result = self.driver._get_pool_stats() client.cluster.mon_command.assert_has_calls([ - mock.call('{"prefix":"df", "format":"json"}', ''), + mock.call('{"prefix":"df", "format":"json"}', b''), mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",' - ' "format":"json"}', ''), + ' "format":"json"}', b''), ]) self.assertEqual((free_capacity, total_capacity), result) @@ -1550,9 +1608,9 @@ ] result = self.driver._get_pool_stats() client.cluster.mon_command.assert_has_calls([ - mock.call('{"prefix":"df", "format":"json"}', ''), + mock.call('{"prefix":"df", "format":"json"}', b''), mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",' - ' "format":"json"}', ''), + ' "format":"json"}', b''), ]) free_capacity = 1.56 total_capacity = 3.0 @@ -1640,7 +1698,8 @@ # OpenStack usage doesn't have the rbd_keyring_conf Oslo Config option cfg_file = '/etc/ceph/ceph.client.admin.keyring' self.driver.configuration.rbd_keyring_conf = cfg_file - self.driver._set_keyring_attributes() + with mock.patch('os.path.isfile', return_value=False): + self.driver._set_keyring_attributes() self.assertEqual(cfg_file, self.driver.keyring_file) self.assertIsNone(self.driver.keyring_data) diff -Nru cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/adapter.py cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/adapter.py --- cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/adapter.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/adapter.py 2021-03-08 11:44:16.000000000 +0000 @@ -16,6 +16,7 @@ """Adapter for Dell EMC PowerStore Cinder driver.""" from oslo_log import log as logging +from oslo_utils import strutils from cinder import coordination from cinder import exception @@ -30,6 +31,7 @@ LOG = logging.getLogger(__name__) PROTOCOL_FC = "FC" PROTOCOL_ISCSI = "iSCSI" +CHAP_MODE_SINGLE = "Single" class CommonAdapter(object): @@ -41,6 +43,7 @@ self.configuration = configuration self.storage_protocol = None self.allowed_ports = None + self.use_chap_auth = None @staticmethod def initiators(connector): @@ -83,13 +86,20 @@ self.appliances_to_ids_map[appliance_name] = ( self.client.get_appliance_id_by_name(appliance_name) ) + self.use_chap_auth = False + if self.storage_protocol == PROTOCOL_ISCSI: + chap_config = self.client.get_chap_config() + if chap_config.get("mode") == CHAP_MODE_SINGLE: + self.use_chap_auth = True LOG.debug("Successfully initialized PowerStore %(protocol)s adapter. " "PowerStore appliances: %(appliances)s. " - "Allowed ports: %(allowed_ports)s.", + "Allowed ports: %(allowed_ports)s. 
" + "Use CHAP authentication: %(use_chap_auth)s.", { "protocol": self.storage_protocol, "appliances": self.appliances, "allowed_ports": self.allowed_ports, + "use_chap_auth": self.use_chap_auth, }) def create_volume(self, volume): @@ -314,7 +324,9 @@ { "volume_name": volume.name, "volume_id": volume.id, - "connection_properties": connection_properties, + "connection_properties": strutils.mask_password( + connection_properties + ), }) return connection_properties @@ -429,13 +441,17 @@ """Create PowerStore host if it does not exist. :param connector: connection properties - :return: PowerStore host object + :return: PowerStore host object, iSCSI CHAP credentials """ initiators = self.initiators(connector) host = self._filter_hosts_by_initiators(initiators) + if self.use_chap_auth: + chap_credentials = utils.get_chap_credentials() + else: + chap_credentials = {} if host: - self._modify_host_initiators(host, initiators) + self._modify_host_initiators(host, chap_credentials, initiators) else: host_name = utils.powerstore_host_name( connector, @@ -451,6 +467,7 @@ { "port_name": initiator, "port_type": self.storage_protocol, + **chap_credentials, } for initiator in initiators ] host = self.client.create_host(host_name, ports) @@ -463,12 +480,13 @@ "initiators": initiators, "host_provider_id": host["id"], }) - return host + return host, chap_credentials - def _modify_host_initiators(self, host, initiators): + def _modify_host_initiators(self, host, chap_credentials, initiators): """Update PowerStore host initiators if needed. :param host: PowerStore host object + :param chap_credentials: iSCSI CHAP credentials :param initiators: list of initiators :return: None """ @@ -476,17 +494,22 @@ initiators_added = [ initiator["port_name"] for initiator in host["host_initiators"] ] + initiators_to_add = [] + initiators_to_modify = [] initiators_to_remove = [ initiator for initiator in initiators_added if initiator not in initiators ] - initiators_to_add = [ - { + for initiator in initiators: + initiator_add_modify = { "port_name": initiator, - "port_type": self.storage_protocol, - } for initiator in initiators - if initiator not in initiators_added - ] + **chap_credentials, + } + if initiator not in initiators_added: + initiator_add_modify["port_type"] = self.storage_protocol + initiators_to_add.append(initiator_add_modify) + elif self.use_chap_auth: + initiators_to_modify.append(initiator_add_modify) if initiators_to_remove: LOG.debug("Remove initiators from PowerStore host %(host_name)s. " "Initiators: %(initiators_to_remove)s. " @@ -514,7 +537,9 @@ "%(host_provider_id)s.", { "host_name": host["name"], - "initiators_to_add": initiators_to_add, + "initiators_to_add": strutils.mask_password( + initiators_to_add + ), "host_provider_id": host["id"], }) self.client.modify_host_initiators( @@ -526,7 +551,34 @@ "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], - "initiators_to_add": initiators_to_add, + "initiators_to_add": strutils.mask_password( + initiators_to_add + ), + "host_provider_id": host["id"], + }) + if initiators_to_modify: + LOG.debug("Modify initiators of PowerStore host %(host_name)s. " + "Initiators: %(initiators_to_modify)s. 
" + "PowerStore host id: %(host_provider_id)s.", + { + "host_name": host["name"], + "initiators_to_modify": strutils.mask_password( + initiators_to_modify + ), + "host_provider_id": host["id"], + }) + self.client.modify_host_initiators( + host["id"], + modify_initiators=initiators_to_modify + ) + LOG.debug("Successfully modified initiators of PowerStore host " + "%(host_name)s. Initiators: %(initiators_to_modify)s. " + "PowerStore host id: %(host_provider_id)s.", + { + "host_name": host["name"], + "initiators_to_modify": strutils.mask_password( + initiators_to_modify + ), "host_provider_id": host["id"], }) @@ -572,11 +624,11 @@ :param connector: connection properties :param volume: OpenStack volume object - :return: attached volume logical number + :return: iSCSI CHAP credentials, volume logical number """ - host = self._create_host_if_not_exist(connector) - return self._attach_volume_to_host(host, volume) + host, chap_credentials = self._create_host_if_not_exist(connector) + return chap_credentials, self._attach_volume_to_host(host, volume) def _connect_volume(self, volume, connector): """Attach PowerStore volume and return it's connection properties. @@ -588,12 +640,21 @@ appliance_name = volume_utils.extract_host(volume.host, "pool") appliance_id = self.appliances_to_ids_map[appliance_name] - volume_lun = self._create_host_and_attach( + chap_credentials, volume_lun = self._create_host_and_attach( connector, volume ) - return self._get_connection_properties(appliance_id, - volume_lun) + connection_properties = self._get_connection_properties(appliance_id, + volume_lun) + if self.use_chap_auth: + connection_properties["data"]["auth_method"] = "CHAP" + connection_properties["data"]["auth_username"] = ( + chap_credentials.get("chap_single_username") + ) + connection_properties["data"]["auth_password"] = ( + chap_credentials.get("chap_single_password") + ) + return connection_properties def _detach_volume_from_hosts(self, volume, hosts_to_detach=None): """Detach volume from PowerStore hosts. 
@@ -731,7 +792,7 @@ return { "driver_volume_type": self.driver_volume_type, "data": { - "target_discovered": True, + "target_discovered": False, "target_lun": volume_lun, "target_wwn": target_wwns, } @@ -782,7 +843,10 @@ return { "driver_volume_type": self.driver_volume_type, "data": { - "target_discovered": True, + "target_discovered": False, + "target_portal": portals[0], + "target_iqn": iqns[0], + "target_lun": volume_lun, "target_portals": portals, "target_iqns": iqns, "target_luns": [volume_lun] * len(portals), diff -Nru cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/client.py cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/client.py --- cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/client.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/client.py 2021-03-08 11:44:16.000000000 +0000 @@ -19,6 +19,7 @@ import json from oslo_log import log as logging +from oslo_utils import strutils import requests from cinder import exception @@ -83,7 +84,12 @@ "verify_cert": self._verify_cert, }) - def _send_request(self, method, url, payload=None, params=None): + def _send_request(self, + method, + url, + payload=None, + params=None, + log_response_data=True): if not payload: payload = {} if not params: @@ -106,11 +112,12 @@ "REST Request: %s %s with body %s", r.request.method, r.request.url, - r.request.body) - LOG.log(log_level, - "REST Response: %s with data %s", - r.status_code, - r.text) + strutils.mask_password(r.request.body)) + if log_response_data or log_level == logging.ERROR: + msg = "REST Response: %s with data %s" % (r.status_code, r.text) + else: + msg = "REST Response: %s" % r.status_code + LOG.log(log_level, msg) try: response = r.json() @@ -123,6 +130,19 @@ _send_patch_request = functools.partialmethod(_send_request, "PATCH") _send_delete_request = functools.partialmethod(_send_request, "DELETE") + def get_chap_config(self): + r, response = self._send_get_request( + "/chap_config/0", + params={ + "select": "mode" + } + ) + if r.status_code not in self.ok_codes: + msg = _("Failed to query PowerStore CHAP configuration.") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return response + def get_appliance_id_by_name(self, appliance_name): r, response = self._send_get_request( "/appliance", @@ -148,7 +168,8 @@ payload={ "entity": "space_metrics_by_appliance", "entity_id": appliance_id, - } + }, + log_response_data=False ) if r.status_code not in self.ok_codes: msg = (_("Failed to query metrics for " diff -Nru cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/driver.py cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/driver.py --- cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/driver.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/driver.py 2021-03-08 11:44:16.000000000 +0000 @@ -37,9 +37,10 @@ Version history: 1.0.0 - Initial version + 1.0.1 - Add CHAP support """ - VERSION = "1.0.0" + VERSION = "1.0.1" VENDOR = "Dell EMC" # ThirdPartySystems wiki page diff -Nru cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/utils.py cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/utils.py --- cinder-17.0.1/cinder/volume/drivers/dell_emc/powerstore/utils.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/dell_emc/powerstore/utils.py 2021-03-08 11:44:16.000000000 +0000 @@ -23,9 +23,12 @@ from cinder import exception from cinder.i18n import _ from cinder.objects import fields +from 
cinder.volume import volume_utils LOG = logging.getLogger(__name__) +CHAP_DEFAULT_USERNAME = "PowerStore_iSCSI_CHAP_Username" +CHAP_DEFAULT_SECRET_LENGTH = 60 def bytes_to_gib(size_in_bytes): @@ -134,3 +137,17 @@ attachment.attached_host == host_name) ] return len(attachments) > 1 + + +def get_chap_credentials(): + """Generate CHAP credentials. + + :return: CHAP username and secret + """ + + return { + "chap_single_username": CHAP_DEFAULT_USERNAME, + "chap_single_password": volume_utils.generate_password( + CHAP_DEFAULT_SECRET_LENGTH + ) + } diff -Nru cinder-17.0.1/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py cinder-17.1.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py --- cinder-17.0.1/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py 2021-03-08 11:44:17.000000000 +0000 @@ -756,6 +756,7 @@ self.ssh = StorwizeSSH(run_ssh) self.check_fcmapping_interval = 3 self.code_level = None + self.stats = {} @staticmethod def handle_keyerror(cmd, out): @@ -825,6 +826,12 @@ def is_data_reduction_pool(self, pool_name): """Check if pool is data reduction pool.""" + # Check pool is data reduction pool or not from pool information + # saved in stats. + for pool in self.stats.get('pools', []): + if pool['pool_name'] == pool_name: + return pool['data_reduction'] + pool_data = self.get_pool_attrs(pool_name) if (pool_data and 'data_reduction' in pool_data and pool_data['data_reduction'] == 'yes'): @@ -2069,10 +2076,10 @@ return None return resp[0] - def _check_vdisk_fc_mappings(self, name, - allow_snaps=True, allow_fctgt=False): + @cinder_utils.trace + def _check_delete_vdisk_fc_mappings(self, name, allow_snaps=True, + allow_fctgt=False): """FlashCopy mapping check helper.""" - LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name) mapping_ids = self._get_vdisk_fc_mappings(name) wait_for_copy = False for map_id in mapping_ids: @@ -2085,10 +2092,23 @@ target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] status = attrs['status'] + progress = attrs['progress'] + LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, ' + 'status: %s, progress: %s, mapid: %s', source, target, + copy_rate, status, progress, map_id) if allow_fctgt and target == name and status == 'copying': - self.ssh.stopfcmap(map_id) - attrs = self._get_flashcopy_mapping_attributes(map_id) + try: + self.ssh.stopfcmap(map_id) + except exception.VolumeBackendAPIException as ex: + LOG.warning(ex) + wait_for_copy = True + try: + attrs = self._get_flashcopy_mapping_attributes(map_id) + except exception.VolumeBackendAPIException as ex: + LOG.warning(ex) + wait_for_copy = True + continue if attrs: status = attrs['status'] else: @@ -2110,28 +2130,80 @@ {'name': name, 'src': source, 'tgt': target}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) - if status in ['copying', 'prepared']: - self.ssh.stopfcmap(map_id) - # Need to wait for the fcmap to change to - # stopped state before remove fcmap - wait_for_copy = True - elif status in ['stopping', 'preparing']: + try: + if status in ['copying', 'prepared']: + self.ssh.stopfcmap(map_id) + # Need to wait for the fcmap to change to + # stopped state before remove fcmap + wait_for_copy = True + elif status in ['stopping', 'preparing']: + wait_for_copy = True + else: + self.ssh.rmfcmap(map_id) + except exception.VolumeBackendAPIException as ex: + LOG.warning(ex) wait_for_copy = True - else: - 
self.ssh.rmfcmap(map_id) # Case 4: Copy in progress - wait and will autodelete else: - if status == 'prepared': - self.ssh.stopfcmap(map_id) - self.ssh.rmfcmap(map_id) - elif status in ['idle_or_copied', 'stopped']: - # Prepare failed or stopped - self.ssh.rmfcmap(map_id) - else: + try: + if status == 'prepared': + self.ssh.stopfcmap(map_id) + self.ssh.rmfcmap(map_id) + elif status in ['idle_or_copied', 'stopped']: + # Prepare failed or stopped + self.ssh.rmfcmap(map_id) + elif (status in ['copying', 'prepared'] and + progress == '100'): + self.ssh.stopfcmap(map_id) + else: + wait_for_copy = True + except exception.VolumeBackendAPIException as ex: + LOG.warning(ex) wait_for_copy = True + if not wait_for_copy or not len(mapping_ids): raise loopingcall.LoopingCallDone(retvalue=True) + @cinder_utils.trace + def _check_vdisk_fc_mappings(self, name, allow_snaps=True, + allow_fctgt=False): + """FlashCopy mapping check helper.""" + # if this is a remove disk we need to be down to one fc clone + mapping_ids = self._get_vdisk_fc_mappings(name) + if len(mapping_ids) > 1 and allow_fctgt: + LOG.debug('Loopcall: vdisk %s has ' + 'more than one fc map. Waiting.', name) + for map_id in mapping_ids: + attrs = self._get_flashcopy_mapping_attributes(map_id) + if not attrs: + continue + source = attrs['source_vdisk_name'] + target = attrs['target_vdisk_name'] + copy_rate = attrs['copy_rate'] + status = attrs['status'] + progress = attrs['progress'] + LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, ' + 'status: %s, progress: %s, mapid: %s', + source, target, copy_rate, status, progress, map_id) + + if copy_rate != '0' and source == name: + try: + if status in ['copying'] and progress == '100': + self.ssh.stopfcmap(map_id) + elif status == 'idle_or_copied' and progress == '100': + # wait for auto-delete of fcmap. + continue + elif status in ['idle_or_copied', 'stopped']: + # Prepare failed or stopped + self.ssh.rmfcmap(map_id) + # handle VolumeBackendAPIException to let it go through + # next attempts in case of any cli exception. + except exception.VolumeBackendAPIException as ex: + LOG.warning(ex) + return + return self._check_delete_vdisk_fc_mappings( + name, allow_snaps=allow_snaps, allow_fctgt=allow_fctgt) + def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False): """Ensure vdisk has no flashcopy mappings.""" @@ -2584,7 +2656,7 @@ elif copy_rate != '0' and progress == '100': LOG.debug('Split completed clone map_id=%(map_id)s fcmap', {'map_id': map_id}) - self.ssh.stopfcmap(map_id, split=True) + self.ssh.stopfcmap(map_id) class CLIResponse(object): @@ -2778,6 +2850,9 @@ # This is used to save the available pools in failed-over status self._secondary_pools = None + # This dictionary is used to save pools information. + self._stats = {} + # Storwize has the limitation that can not burst more than 3 new ssh # connections within 1 second. So slow down the initialization. time.sleep(1) @@ -2795,6 +2870,12 @@ # Get list of all volumes self._get_all_volumes() + # Update the pool stats + self._update_volume_stats() + + # Save the pool stats information in helpers class. 
+ self._master_backend_helpers.stats = self._stats + # Build the list of in-progress vdisk copy operations if ctxt is None: admin_context = context.get_admin_context() @@ -3583,6 +3664,8 @@ self._state = self._master_state self._update_volume_stats() + self._master_backend_helpers.stats = self._stats + return storwize_const.FAILBACK_VALUE, volumes_update, groups_update def _failback_replica_volumes(self, ctxt, rep_volumes): @@ -3832,6 +3915,8 @@ self._state = self._aux_state self._update_volume_stats() + self._aux_backend_helpers.stats = self._stats + return self._active_backend_id, volumes_update, groups_update def _failover_replica_volumes(self, ctxt, rep_volumes): @@ -5632,6 +5717,7 @@ """Build pool status""" QoS_support = True pool_stats = {} + is_dr_pool = False pool_data = self._helpers.get_pool_attrs(pool) if pool_data: easy_tier = pool_data['easy_tier'] in ['on', 'auto'] @@ -5655,6 +5741,14 @@ storwize_svc_multihostmap_enabled) backend_state = ('up' if pool_data['status'] == 'online' else 'down') + + # Get the data_reduction information for pool and set + # is_dr_pool flag. + if pool_data.get('data_reduction') == 'Yes': + is_dr_pool = True + elif pool_data.get('data_reduction') == 'No': + is_dr_pool = False + pool_stats = { 'pool_name': pool_data['name'], 'total_capacity_gb': total_capacity_gb, @@ -5674,6 +5768,7 @@ 'max_over_subscription_ratio': over_sub_ratio, 'consistent_group_snapshot_enabled': True, 'backend_state': backend_state, + 'data_reduction': is_dr_pool, } if self._replica_enabled: pool_stats.update({ @@ -5695,6 +5790,7 @@ 'thick_provisioning_support': False, 'max_over_subscription_ratio': 0, 'reserved_percentage': 0, + 'data_reduction': is_dr_pool, 'backend_state': 'down'} return pool_stats diff -Nru cinder-17.0.1/cinder/volume/drivers/pure.py cinder-17.1.0/cinder/volume/drivers/pure.py --- cinder-17.0.1/cinder/volume/drivers/pure.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/pure.py 2021-03-08 11:44:17.000000000 +0000 @@ -59,8 +59,9 @@ "this calculated value will override the " "max_over_subscription_ratio config option."), cfg.StrOpt("pure_host_personality", + default=None, choices=['aix', 'esxi', 'hitachi-vsp', 'hpux', - 'oracle-vm-server', 'solaris', 'vms'], + 'oracle-vm-server', 'solaris', 'vms', None], help="Determines how the Purity system tunes the protocol used " "between the array and the initiator."), # These are used as default settings. 
In future these can be overridden diff -Nru cinder-17.0.1/cinder/volume/drivers/rbd.py cinder-17.1.0/cinder/volume/drivers/rbd.py --- cinder-17.0.1/cinder/volume/drivers/rbd.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/rbd.py 2021-03-08 11:44:17.000000000 +0000 @@ -248,6 +248,7 @@ self._is_replication_enabled = False self._replication_targets = [] self._target_names = [] + self._clone_v2_api_checked = False if self.rbd is not None: self.RBD_FEATURE_LAYERING = self.rbd.RBD_FEATURE_LAYERING @@ -294,6 +295,22 @@ 'max_over_subscription_ratio', 'volume_dd_blocksize') return RBD_OPTS + additional_opts + def _show_msg_check_clone_v2_api(self, volume_name): + if not self._clone_v2_api_checked: + self._clone_v2_api_checked = True + with RBDVolumeProxy(self, volume_name) as volume: + try: + if (volume.volume.op_features() & + self.rbd.RBD_OPERATION_FEATURE_CLONE_PARENT): + LOG.info('Using v2 Clone API') + return + except AttributeError: + pass + LOG.warning('Not using v2 clone API, please upgrade to' + ' mimic+ and set the OSD minimum client' + ' compat version to mimic for better' + ' performance, fewer deletion issues') + def _get_target_config(self, target_id): """Get a replication target from known replication targets.""" for target in self._replication_targets: @@ -558,14 +575,14 @@ with RADOSClient(self) as client: ret, df_outbuf, __ = client.cluster.mon_command( - '{"prefix":"df", "format":"json"}', '') + '{"prefix":"df", "format":"json"}', b'') if ret: LOG.warning('Unable to get rados pool stats.') return 'unknown', 'unknown' ret, quota_outbuf, __ = client.cluster.mon_command( '{"prefix":"osd pool get-quota", "pool": "%s",' - ' "format":"json"}' % pool_name, '') + ' "format":"json"}' % pool_name, b'') if ret: LOG.warning('Unable to get rados pool quotas.') return 'unknown', 'unknown' @@ -1028,6 +1045,8 @@ self._flatten(self.configuration.rbd_pool, volume.name) if int(volume.size): self._resize(volume) + + self._show_msg_check_clone_v2_api(snapshot.volume_name) return volume_update def _delete_backup_snaps(self, rbd_image): @@ -1604,7 +1623,13 @@ if encrypted: self._encrypt_image(context, volume, tmp_dir, tmp.name) - self.delete_volume(volume) + @utils.retry(exception.VolumeIsBusy, + self.configuration.rados_connection_interval, + self.configuration.rados_connection_retries) + def _delete_volume(volume): + self.delete_volume(volume) + + _delete_volume(volume) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) diff -Nru cinder-17.0.1/cinder/volume/drivers/remotefs.py cinder-17.1.0/cinder/volume/drivers/remotefs.py --- cinder-17.0.1/cinder/volume/drivers/remotefs.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/remotefs.py 2021-03-08 11:44:17.000000000 +0000 @@ -1377,11 +1377,11 @@ LOG.debug('Creating volume %(vol)s from snapshot %(snap)s', {'vol': volume.id, 'snap': snapshot.id}) - if snapshot.status != 'available': - msg = _('Snapshot status must be "available" to clone. 
' - 'But is: %(status)s') % {'status': snapshot.status} - - raise exception.InvalidSnapshot(msg) + status = snapshot.status + acceptable_states = ['available', 'backing-up'] + self._validate_state(status, acceptable_states, + obj_description='snapshot', + invalid_exc=exception.InvalidSnapshot) self._ensure_shares_mounted() diff -Nru cinder-17.0.1/cinder/volume/drivers/solidfire.py cinder-17.1.0/cinder/volume/drivers/solidfire.py --- cinder-17.0.1/cinder/volume/drivers/solidfire.py 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/cinder/volume/drivers/solidfire.py 2021-03-08 11:44:17.000000000 +0000 @@ -101,17 +101,26 @@ min=30, help='Sets time in seconds to wait for a migrating volume to ' 'complete pairing and sync.'), + cfg.IntOpt('sf_api_request_timeout', default=30, min=30, help='Sets time in seconds to wait for an api request to ' 'complete.'), + cfg.IntOpt('sf_volume_clone_timeout', default=600, min=60, help='Sets time in seconds to wait for a clone of a volume or ' 'snapshot to complete.' - )] + ), + + cfg.IntOpt('sf_volume_create_timeout', + default=60, + min=30, + help='Sets time in seconds to wait for a create volume ' + 'operation to complete.')] + CONF = cfg.CONF CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP) @@ -262,9 +271,13 @@ - Enable Active/Active support flag - Implement Active/Active replication support 2.2.0 - Add storage assisted volume migration support + 2.2.1 - Fix bug #1891914 fix error on cluster workload rebalancing + by adding xNotPrimary to the retryable exception list + 2.2.2 - Fix bug #1896112 SolidFire Driver creates duplicate volume + when API response is lost """ - VERSION = '2.2.0' + VERSION = '2.2.2' SUPPORTS_ACTIVE_ACTIVE = True @@ -302,7 +315,8 @@ 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xSliceNotRegistered', - 'xNotReadyForIO'] + 'xNotReadyForIO', + 'xNotPrimary'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) @@ -1000,10 +1014,62 @@ params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) + def _list_volumes_by_name(self, sf_volume_name): + params = {'volumeName': sf_volume_name} + return self._issue_api_request( + 'ListVolumes', params, version='8.0')['result']['volumes'] + + def _wait_volume_is_active(self, sf_volume_name): + + def _wait(): + volumes = self._list_volumes_by_name(sf_volume_name) + if volumes: + LOG.debug("Found Volume [%s] in SolidFire backend. " + "Current status is [%s].", + sf_volume_name, volumes[0]['status']) + if volumes[0]['status'] == 'active': + raise loopingcall.LoopingCallDone(volumes[0]) + + try: + timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( + _wait) + sf_volume = (timer.start( + interval=1, + timeout=self.configuration.sf_volume_create_timeout).wait()) + + return sf_volume + except loopingcall.LoopingCallTimeOut: + msg = ("Timeout while waiting volume [%s] " + "to be in active state." 
% sf_volume_name) + LOG.error(msg) + raise SolidFireAPIException(msg) + def _do_volume_create(self, sf_account, params, endpoint=None): - params['accountID'] = sf_account['accountID'] - sf_volid = self._issue_api_request( - 'CreateVolume', params, endpoint=endpoint)['result']['volumeID'] + + sf_volume_name = params['name'] + volumes_found = self._list_volumes_by_name(sf_volume_name) + if volumes_found: + msg = ('Volume name [%s] already exists ' + 'in SolidFire backend.') % sf_volume_name + LOG.error(msg) + raise DuplicateSfVolumeNames(message=msg) + + sf_volid = None + try: + params['accountID'] = sf_account['accountID'] + response = self._issue_api_request( + 'CreateVolume', params, endpoint=endpoint) + sf_volid = response['result']['volumeID'] + + except requests.exceptions.ReadTimeout: + LOG.debug("Read Timeout exception caught while creating " + "volume [%s].", sf_volume_name) + # Check if volume was created for the given name, + # in case the backend has processed the request but failed + # to deliver the response before api request timeout. + volume_created = self._wait_volume_is_active(sf_volume_name) + sf_volid = volume_created['volumeID'] + return self._get_model_info(sf_account, sf_volid, endpoint=endpoint) def _do_snapshot_create(self, params): diff -Nru cinder-17.0.1/cinder.egg-info/pbr.json cinder-17.1.0/cinder.egg-info/pbr.json --- cinder-17.0.1/cinder.egg-info/pbr.json 2020-12-07 10:01:47.000000000 +0000 +++ cinder-17.1.0/cinder.egg-info/pbr.json 2021-03-08 11:45:00.000000000 +0000 @@ -1 +1 @@ -{"git_version": "cdc586631", "is_release": true} \ No newline at end of file +{"git_version": "deb31a0c4", "is_release": true} \ No newline at end of file diff -Nru cinder-17.0.1/cinder.egg-info/PKG-INFO cinder-17.1.0/cinder.egg-info/PKG-INFO --- cinder-17.0.1/cinder.egg-info/PKG-INFO 2020-12-07 10:01:47.000000000 +0000 +++ cinder-17.1.0/cinder.egg-info/PKG-INFO 2021-03-08 11:45:00.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: cinder -Version: 17.0.1 +Version: 17.1.0 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack diff -Nru cinder-17.0.1/cinder.egg-info/requires.txt cinder-17.1.0/cinder.egg-info/requires.txt --- cinder-17.0.1/cinder.egg-info/requires.txt 2020-12-07 10:01:47.000000000 +0000 +++ cinder-17.1.0/cinder.egg-info/requires.txt 2021-03-08 11:45:00.000000000 +0000 @@ -4,7 +4,7 @@ SQLAlchemy>=1.3.0 WebOb>=1.7.1 castellan>=1.3.0 -cryptography>=2.1.4 +cryptography>=2.5 cursive>=0.2.1 decorator>=4.1.0 eventlet>=0.26.0 @@ -31,7 +31,7 @@ oslo.privsep>=2.3.0 oslo.reports>=1.18.0 oslo.rootwrap>=5.8.0 -oslo.serialization>=2.25.0 +oslo.serialization>=4.0.2 oslo.service>=2.0.0 oslo.upgradecheck>=0.1.0 oslo.utils>=3.40.2 diff -Nru cinder-17.0.1/cinder.egg-info/SOURCES.txt cinder-17.1.0/cinder.egg-info/SOURCES.txt --- cinder-17.0.1/cinder.egg-info/SOURCES.txt 2020-12-07 10:01:48.000000000 +0000 +++ cinder-17.1.0/cinder.egg-info/SOURCES.txt 2021-03-08 11:45:00.000000000 +0000 @@ -2117,14 +2117,20 @@ releasenotes/notes/bug-1887885-nec-fix-snapshot-detach-error-fff3012e0e9a2d2b.yaml releasenotes/notes/bug-1887908-nec-live-migration-failure-withfc-3128fff7c48e739f.yaml releasenotes/notes/bug-1887962-643379faf20f01cf.yaml +releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml releasenotes/notes/bug-1890241-strowize-delete_group_snapshot_fix-2e491e74e1f73ba7.yaml +releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml 
releasenotes/notes/bug-1890586-storwize-check_flashcopy_rate-fix-571e6e182b604725.yaml releasenotes/notes/bug-1890588-storwize-select_io_group-fix-7200f2e00140ab34.yaml +releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml releasenotes/notes/bug-1895510-REST-API-issue-to-get-bundle-198a3b89255759bb.yaml releasenotes/notes/bug-1896087-rollback-volume-status-bd04951f929bb88d.yaml releasenotes/notes/bug-1898918-b24a93d7d5aff238.yaml +releasenotes/notes/bug-1900979-powerstore-chap-support.yaml releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml +releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml +releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml releasenotes/notes/bug-fix-1866871-f9d61defc00f4007.yaml releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml releasenotes/notes/bug-invalid-content-type-1715094-8yu8i9w425ua08f3.yaml @@ -2618,6 +2624,8 @@ releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml releasenotes/notes/sf-add-migration-support-691ace064d7576e9.yaml releasenotes/notes/sf-fix-clone-and-request-timeout-issues-56f7a7659c7ec775.yaml +releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml +releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml releasenotes/notes/sheepdog-driver-removal-b63d12460e886c33.yaml releasenotes/notes/sheepdog-mark-unsupported-648b2458d4a198de.yaml diff -Nru cinder-17.0.1/debian/changelog cinder-17.1.0/debian/changelog --- cinder-17.0.1/debian/changelog 2021-01-26 20:12:48.000000000 +0000 +++ cinder-17.1.0/debian/changelog 2021-04-08 13:53:43.000000000 +0000 @@ -1,3 +1,10 @@ +cinder (2:17.1.0-0ubuntu1) groovy; urgency=medium + + * New stable point release for OpenStack Victoria (LP: #1923035). + * d/p/rbd-retry-delete.patch: Removed after patch landed upstream. + + -- Chris MacNaughton Thu, 08 Apr 2021 13:53:43 +0000 + cinder (2:17.0.1-0ubuntu2) groovy; urgency=medium * d/p/rbd-retry-delete.patch: Fix RBD mirroring race by retrying volume delete diff -Nru cinder-17.0.1/debian/patches/rbd-retry-delete.patch cinder-17.1.0/debian/patches/rbd-retry-delete.patch --- cinder-17.0.1/debian/patches/rbd-retry-delete.patch 2021-01-26 20:12:48.000000000 +0000 +++ cinder-17.1.0/debian/patches/rbd-retry-delete.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -From e1ed30838c3d72e70e3e06d5e7bc6f18d6bc4aad Mon Sep 17 00:00:00 2001 -From: Corey Bryant -Date: Thu, 22 Oct 2020 10:55:33 -0400 -Subject: [PATCH] RBD: Retry delete if VolumeIsBusy in _copy_image_to_volume - -Cinder can fail to create an image-based volume if RBD mirroring -is enabled. With the journaling-based approach to RBD mirroring, -ceph will still create a snapshot as a result of volume creation. -The volume create in _create_from_image_download() results in -a snapshot getting created, resulting in a race where delete_volume() -gets a VolumeIsBusy exception.
- -Change-Id: Ib80e04512ec34a390e9e17af2f3544e18cad8598 -Closes-Bug: #1900775 -(cherry picked from commit 6231d26667508e34eb0a985f1f7ced1dbe623e1a) ---- - cinder/tests/unit/volume/drivers/test_rbd.py | 23 +++++++++++++++++--- - cinder/volume/drivers/rbd.py | 8 ++++++- - 2 files changed, 27 insertions(+), 4 deletions(-) - -diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py -index be6c2d936..b1017945e 100644 ---- a/cinder/tests/unit/volume/drivers/test_rbd.py -+++ b/cinder/tests/unit/volume/drivers/test_rbd.py -@@ -1313,17 +1313,29 @@ class RBDTestCase(test.TestCase): - self.driver._is_cloneable(location, {'disk_format': f})) - self.assertTrue(mock_get_fsid.called) - -- def _copy_image(self): -+ def _copy_image(self, volume_busy=False): - with mock.patch.object(tempfile, 'NamedTemporaryFile'): - with mock.patch.object(os.path, 'exists') as mock_exists: - mock_exists.return_value = True - with mock.patch.object(image_utils, 'fetch_to_raw'): -- with mock.patch.object(self.driver, 'delete_volume'): -+ with mock.patch.object(self.driver, 'delete_volume') \ -+ as mock_dv: - with mock.patch.object(self.driver, '_resize'): - mock_image_service = mock.MagicMock() - args = [None, self.volume_a, - mock_image_service, None] -- self.driver.copy_image_to_volume(*args) -+ if volume_busy: -+ mock_dv.side_effect = ( -+ exception.VolumeIsBusy("doh")) -+ self.assertRaises( -+ exception.VolumeIsBusy, -+ self.driver.copy_image_to_volume, -+ *args) -+ self.assertEqual( -+ self.cfg.rados_connection_retries, -+ mock_dv.call_count) -+ else: -+ self.driver.copy_image_to_volume(*args) - - @mock.patch('cinder.volume.drivers.rbd.fileutils.delete_if_exists') - @mock.patch('cinder.volume.volume_utils.check_encryption_provider', -@@ -1373,6 +1385,11 @@ class RBDTestCase(test.TestCase): - self.cfg.image_conversion_dir = '/var/run/cinder/tmp' - self._copy_image_encrypted() - -+ @common_mocks -+ def test_copy_image_busy_volume(self): -+ self.cfg.image_conversion_dir = '/var/run/cinder/tmp' -+ self._copy_image(volume_busy=True) -+ - @ddt.data(True, False) - @common_mocks - @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info') -diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py -index 9e312f2b4..068821c1c 100644 ---- a/cinder/volume/drivers/rbd.py -+++ b/cinder/volume/drivers/rbd.py -@@ -1604,7 +1604,13 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD, - if encrypted: - self._encrypt_image(context, volume, tmp_dir, tmp.name) - -- self.delete_volume(volume) -+ @utils.retry(exception.VolumeIsBusy, -+ self.configuration.rados_connection_interval, -+ self.configuration.rados_connection_retries) -+ def _delete_volume(volume): -+ self.delete_volume(volume) -+ -+ _delete_volume(volume) - - chunk_size = self.configuration.rbd_store_chunk_size * units.Mi - order = int(math.log(chunk_size, 2)) --- -2.29.2 - diff -Nru cinder-17.0.1/debian/patches/series cinder-17.1.0/debian/patches/series --- cinder-17.0.1/debian/patches/series 2021-01-26 20:12:48.000000000 +0000 +++ cinder-17.1.0/debian/patches/series 2021-04-08 13:53:43.000000000 +0000 @@ -1,3 +1,2 @@ skip-victoria-failures.patch -rbd-retry-delete.patch add-mock-psutil-in-quobyte-tests.patch diff -Nru cinder-17.0.1/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst cinder-17.1.0/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst --- cinder-17.0.1/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst 2020-12-07 
10:01:08.000000000 +0000 +++ cinder-17.1.0/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst 2021-03-08 11:44:16.000000000 +0000 @@ -77,3 +77,19 @@ The driver creates thin provisioned compressed volumes by default. Thick provisioning is not supported. + +CHAP authentication support +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The driver supports one-way (Single mode) CHAP authentication. +To use CHAP authentication, CHAP Single mode has to be enabled on the storage +side. + +.. note:: When enabling CHAP, any previously added hosts will need to be updated + with CHAP configuration, which will cause I/O disruption for those hosts. + It is recommended to decide what type of CHAP configuration is required, + if any, before adding hosts to the cluster. + +CHAP configuration is retrieved from the storage during driver initialization; +no additional configuration is needed. +Secrets are generated automatically. diff -Nru cinder-17.0.1/doc/source/reference/support-matrix.ini cinder-17.1.0/doc/source/reference/support-matrix.ini --- cinder-17.0.1/doc/source/reference/support-matrix.ini 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/doc/source/reference/support-matrix.ini 2021-03-08 11:44:17.000000000 +0000 @@ -698,7 +698,7 @@ driver.macrosan=complete driver.nec=complete driver.netapp_ontap=missing -driver.netapp_solidfire=missing +driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.nimble=missing diff -Nru cinder-17.0.1/lower-constraints.txt cinder-17.1.0/lower-constraints.txt --- cinder-17.0.1/lower-constraints.txt 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/lower-constraints.txt 2021-03-08 11:44:17.000000000 +0000 @@ -1,6 +1,6 @@ alabaster==0.7.10 alembic==1.0.0 -amqp==2.2.2 +amqp==2.6.0 appdirs==1.4.3 asn1crypto==0.24.0 automaton==1.17.0 @@ -14,7 +14,7 @@ cmd2==0.8.1 contextlib2==0.5.5 coverage==4.1 -cryptography==2.1.4 +cryptography==2.5 cursive==0.2.1 ddt==1.2.1 debtcollector==1.22.0 @@ -51,7 +51,7 @@ Mako==1.0.7 MarkupSafe==1.1.0 mock==2.0.0 -msgpack==0.5.6 +msgpack==0.6.0 netaddr==0.7.19 netifaces==0.10.7 networkx==2.1.0 @@ -73,7 +73,7 @@ oslo.privsep==2.3.0 oslo.reports==1.18.0 oslo.rootwrap==5.8.0 -oslo.serialization==2.25.0 +oslo.serialization==4.0.2 oslo.service==2.0.0 oslo.utils==3.40.2 oslo.versionedobjects==1.31.2 diff -Nru cinder-17.0.1/PKG-INFO cinder-17.1.0/PKG-INFO --- cinder-17.0.1/PKG-INFO 2020-12-07 10:01:48.393640300 +0000 +++ cinder-17.1.0/PKG-INFO 2021-03-08 11:45:01.211460400 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: cinder -Version: 17.0.1 +Version: 17.1.0 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack diff -Nru cinder-17.0.1/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml cinder-17.1.0/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml --- cinder-17.0.1/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + `Bug #1888951 `_: + Fixed an issue with creating a backup from a snapshot with the NFS volume + driver.
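Relating back to the PowerStore CHAP documentation added above: when one-way CHAP is in effect, the iSCSI connection properties a Cinder driver hands to the connector carry the generated credentials alongside the target details. The snippet below is only an illustrative sketch of that general shape with made-up values; it is not actual PowerStore driver output, and the exact field set may differ::

    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            # Target details come from the backend; these values are placeholders.
            'target_iqn': 'iqn.2021-01.com.example:fake-target',
            'target_portal': '203.0.113.5:3260',
            'target_lun': 1,
            # One-way (Single mode) CHAP: the initiator authenticates to the
            # target with credentials the driver generated automatically.
            'auth_method': 'CHAP',
            'auth_username': 'fake-chap-user',
            'auth_password': 'fake-chap-secret',
        },
    }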
diff -Nru cinder-17.0.1/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml cinder-17.1.0/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml --- cinder-17.0.1/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + IBM Spectrum Virtualize driver `Bug #1890254 + `_: + Fix check_vdisk_fc_mappings not deleting all flashcopy + mappings when deleting a source volume from which multiple clones + and snapshots have been created. diff -Nru cinder-17.0.1/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml cinder-17.1.0/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml --- cinder-17.0.1/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + `Bug #1890591 `_: + IBM Spectrum Virtualize Family: Fixed an issue in do_setup of + StorwizeSVCCommonDriver so that pool information is saved in stats + during initialisation. diff -Nru cinder-17.0.1/releasenotes/notes/bug-1900979-powerstore-chap-support.yaml cinder-17.1.0/releasenotes/notes/bug-1900979-powerstore-chap-support.yaml --- cinder-17.0.1/releasenotes/notes/bug-1900979-powerstore-chap-support.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1900979-powerstore-chap-support.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + `Bug #1900979 `_: + Fix a bug when using PowerStore with CHAP enabled as a storage backend. diff -Nru cinder-17.0.1/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml cinder-17.1.0/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml --- cinder-17.0.1/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,38 @@ +--- +upgrade: + - | + This release contains a fix for `Bug #1908315 + `_, which changes the + default value of the policy governing the Block Storage API action + `Reset group snapshot status + `_ + to make the action administrator-only. This policy was inadvertently + changed to be admin-or-owner during the Queens development cycle. + + The policy is named ``group:reset_group_snapshot_status``. + + * If you have a custom value for this policy in your cinder policy + configuration file, this change to the default value will not affect + you. + * If you have been aware of this regression and like the current + (incorrect) behavior, you may add the following line to your cinder + policy configuration file to restore that behavior:: + + "group:reset_group_snapshot_status": "rule:admin_or_owner" + + This setting is *not recommended* by the Cinder project team, as it + may allow end users to put a group snapshot into an invalid status with + indeterminate consequences. + + For more information about the cinder policy configuration file, see the + `policy.yaml + `_ + section of the Cinder Configuration Guide.
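The upgrade note above shows the line that restores the old, not recommended behavior. For comparison, explicitly pinning the new administrator-only default in the cinder policy configuration file would look roughly like the following, assuming the stock ``admin_api`` base rule has not been modified (this is only a sketch; omitting the policy from the file yields the same default)::

    "group:reset_group_snapshot_status": "rule:admin_api"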
+fixes: + - | + `Bug #1908315 `_: Corrected + the default check string for the ``group:reset_group_snapshot_status`` + policy to make it admin-only. This policy governs the Block Storage API + action `Reset group snapshot status + `_, + which by default is supposed to be an administrator-only action. diff -Nru cinder-17.0.1/releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml cinder-17.1.0/releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml --- cinder-17.0.1/releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + `Bug 1913449 `_: + Fix RBD driver _update_volume_stats() failing when using Ceph + Pacific python rados libraries. This failed because we + were passing a str instead of bytes to cluster.mon_command(). diff -Nru cinder-17.0.1/releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml cinder-17.1.0/releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml --- cinder-17.0.1/releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,14 @@ +--- +fixes: + - | + NetApp SolidFire driver `Bug #1896112 + `_: + Fixes an issue that may duplicate volumes during creation, in case + the SolidFire backend successfully processes a request and creates + the volume, but fails to deliver the result back to the driver (the + response is lost). When this scenario occurs, the SolidFire driver + will retry the operation, which previously resulted in the creation + of a duplicate volume. This fix adds the ``sf_volume_create_timeout`` + configuration option (default value: 60 seconds) which specifies an + additional length of time that the driver will wait for the volume to + become active on the backend before raising an exception. diff -Nru cinder-17.0.1/releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml cinder-17.1.0/releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml --- cinder-17.0.1/releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-17.1.0/releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml 2021-03-08 11:44:16.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + NetApp SolidFire driver `Bug #1891914 + `_: + Fix an error that might occur on cluster workload rebalancing or + system upgrade, when an operation is made to a volume at the same + time its connection is being moved to a secondary node. diff -Nru cinder-17.0.1/requirements.txt cinder-17.1.0/requirements.txt --- cinder-17.0.1/requirements.txt 2020-12-07 10:01:08.000000000 +0000 +++ cinder-17.1.0/requirements.txt 2021-03-08 11:44:17.000000000 +0000 @@ -25,7 +25,7 @@ oslo.privsep>=2.3.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 -oslo.serialization>=2.25.0 # Apache-2.0 +oslo.serialization>=4.0.2 # Apache-2.0 oslo.service>=2.0.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.40.2 # Apache-2.0 @@ -61,6 +61,6 @@ tooz>=1.58.0 # Apache-2.0 google-api-python-client>=1.4.2 # Apache-2.0 castellan>=1.3.0 # Apache-2.0 -cryptography>=2.1.4 # BSD/Apache-2.0 +cryptography>=2.5 # BSD/Apache-2.0 cursive>=0.2.1 # Apache-2.0 zstd>=1.4.5.0 # BSD
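To make the SolidFire release notes above concrete: the new ``sf_volume_create_timeout`` option (and the other SolidFire timeouts visible in the solidfire.py hunk earlier in this diff) are set per backend in ``cinder.conf``. The sketch below uses a hypothetical backend section name and placeholder credentials; the values shown are the driver defaults except where commented::

    [solidfire-1]
    # Hypothetical backend section; connection details are placeholders.
    volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver
    san_ip = 203.0.113.10
    san_login = admin
    san_password = fake-password
    # Timeout options from the solidfire.py hunk above.
    sf_api_request_timeout = 30
    sf_volume_clone_timeout = 600
    # Raised from the default of 60 seconds as an example, giving the backend
    # more time to report a volume as active when an API response is lost.
    sf_volume_create_timeout = 120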