diff -Nru cinder-14.0.3.dev24.202001031945.disco/AUTHORS cinder-14.0.5.dev1.202004081945.disco/AUTHORS
--- cinder-14.0.3.dev24.202001031945.disco/AUTHORS 2020-01-03 19:48:23.000000000 +0000
+++ cinder-14.0.5.dev1.202004081945.disco/AUTHORS 2020-04-08 19:51:58.000000000 +0000
@@ -643,6 +643,7 @@
 Rafi Khardalian
 Rahul Verma
 Raildo Mascena
+Rajat Dhasmana
 Rajesh Tailor
 Rakesh H S
 Rakesh Jain
@@ -876,6 +877,7 @@
 Xinli Guan
 Xinyuan Huang
 Xu Ao
+Xuchu Jiang
 XueChendi
 YAMADA Hideki
 Yaguang Tang
diff -Nru cinder-14.0.3.dev24.202001031945.disco/ChangeLog cinder-14.0.5.dev1.202004081945.disco/ChangeLog
--- cinder-14.0.3.dev24.202001031945.disco/ChangeLog 2020-01-03 19:48:22.000000000 +0000
+++ cinder-14.0.5.dev1.202004081945.disco/ChangeLog 2020-04-08 19:51:56.000000000 +0000
@@ -1,6 +1,28 @@
 CHANGES
 =======
 
+* PowerMax Driver - Legacy volume not found
+
+14.0.4
+------
+
+* RBD: fix volume reference handling in clone logic
+* [Unity] Fix TypeError for test case test\_delete\_host\_wo\_lock
+* Add new license scheme for Flashsystem9000 series
+* NEC driver: fix an undefined variable
+* ChunkedBackupDriver: Freeing memory on restore
+* Cinder backup export broken
+* PowerMax Docs - corrections and improvements
+* Tell reno to ignore the kilo branch
+* Fix: Create new cache entry when xtremio reaches snap limit
+* Support Incremental Backup Completion In RBD
+* Make volume soft delete more thorough
+* Cap sphinx for py2 to match global reqs
+* PowerMax Driver - Volume group delete failure
+
+14.0.3
+------
+
 * Fix service\_uuid migration for volumes with no host
 * Pure Storage - Fix disconnect error in clustered environments
 * PowerMax Driver - PowerMax Formatted Vols Fix
diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/backup/api.py cinder-14.0.5.dev1.202004081945.disco/cinder/backup/api.py
--- cinder-14.0.3.dev24.202001031945.disco/cinder/backup/api.py 2020-01-03 19:45:34.000000000 +0000
+++ cinder-14.0.5.dev1.202004081945.disco/cinder/backup/api.py 2020-04-08 19:45:39.000000000 +0000
@@ -278,7 +278,10 @@
             raise exception.InvalidBackup(reason=msg)
 
         parent_id = None
+        parent = None
+
         if latest_backup:
+            parent = latest_backup
             parent_id = latest_backup.id
             if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                 msg = _('The parent backup must be available for '
@@ -313,6 +316,7 @@
             'availability_zone': availability_zone,
             'snapshot_id': snapshot_id,
             'data_timestamp': data_timestamp,
+            'parent': parent,
             'metadata': metadata or {}
         }
         backup = objects.Backup(context=context, **kwargs)
diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/backup/chunkeddriver.py cinder-14.0.5.dev1.202004081945.disco/cinder/backup/chunkeddriver.py
--- cinder-14.0.3.dev24.202001031945.disco/cinder/backup/chunkeddriver.py 2020-01-03 19:45:34.000000000 +0000
+++ cinder-14.0.5.dev1.202004081945.disco/cinder/backup/chunkeddriver.py 2020-04-08 19:45:39.000000000 +0000
@@ -717,9 +717,12 @@
                 LOG.debug('decompressing data using %s algorithm',
                           compression_algorithm)
                 decompressed = decompressor.decompress(body)
+                body = None  # Allow Python to free it
                 volume_file.write(decompressed)
+                decompressed = None  # Allow Python to free it
             else:
                 volume_file.write(body)
+                body = None  # Allow Python to free it
 
             # force flush every write to avoid long blocking write on close
             volume_file.flush()
diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/backup/drivers/ceph.py cinder-14.0.5.dev1.202004081945.disco/cinder/backup/drivers/ceph.py
--- cinder-14.0.3.dev24.202001031945.disco/cinder/backup/drivers/ceph.py 2020-01-03 19:45:34.000000000
+0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/backup/drivers/ceph.py 2020-04-08 19:45:39.000000000 +0000 @@ -43,6 +43,7 @@ """ import fcntl +import json import os import re import subprocess @@ -314,22 +315,39 @@ ioctx.close() client.shutdown() - def _get_backup_base_name(self, volume_id, backup_id=None, - diff_format=False): + def _format_base_name(self, service_metadata): + base_name = json.loads(service_metadata)["base"] + return utils.convert_str(base_name) + + def _get_backup_base_name(self, volume_id, backup=None): """Return name of base image used for backup. Incremental backups use a new base name so we support old and new style format. """ # Ensure no unicode - if diff_format: + if not backup: return utils.convert_str("volume-%s.backup.base" % volume_id) - else: - if backup_id is None: - msg = _("Backup id required") - raise exception.InvalidParameterValue(msg) - return utils.convert_str("volume-%s.backup.%s" - % (volume_id, backup_id)) + + if backup.service_metadata: + return self._format_base_name(backup.service_metadata) + + # 'parent' field will only be present in incremental backups. This is + # filled by cinder-api + if backup.parent: + # Old backups don't have the base name in the service_metadata, + # so we use the default RBD backup base + if backup.parent.service_metadata: + service_metadata = backup.parent.service_metadata + base_name = self._format_base_name(service_metadata) + else: + base_name = utils.convert_str("volume-%s.backup.base" + % volume_id) + + return base_name + + return utils.convert_str("volume-%s.backup.%s" + % (volume_id, backup.id)) def _discard_bytes(self, volume, offset, length): """Trim length bytes from offset. @@ -479,7 +497,7 @@ if base_name is None: try_diff_format = True - base_name = self._get_backup_base_name(volume_id, backup.id) + base_name = self._get_backup_base_name(volume_id, backup=backup) LOG.debug("Trying diff format basename='%(basename)s' for " "backup base image of volume %(volume)s.", {'basename': base_name, 'volume': volume_id}) @@ -630,7 +648,7 @@ if name not in rbds: LOG.debug("Image '%s' not found - trying diff format name", name) if try_diff_format: - name = self._get_backup_base_name(volume_id, diff_format=True) + name = self._get_backup_base_name(volume_id) if name not in rbds: LOG.debug("Diff format image '%s' not found", name) return False, name @@ -657,50 +675,79 @@ return False + def _full_rbd_backup(self, container, base_name, length): + """Create the base_image for a full RBD backup.""" + with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, + container)) as client: + self._create_base_image(base_name, length, client) + # Now we just need to return from_snap=None and image_created=True, if + # there is some exception in making backup snapshot, will clean up the + # base image. 
+ return None, True + + def _incremental_rbd_backup(self, backup, base_name, length, + source_rbd_image, volume_id): + """Select the last snapshot for a RBD incremental backup.""" + + container = backup.container + last_incr = backup.parent_id + LOG.debug("Trying to perform an incremental backup with container: " + "%(container)s, base_name: %(base)s, source RBD image: " + "%(source)s, volume ID %(volume)s and last incremental " + "backup ID: %(incr)s.", + {'container': container, + 'base': base_name, + 'source': source_rbd_image, + 'volume': volume_id, + 'incr': last_incr, + }) + + with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, + container)) as client: + base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, + base_name, + read_only=True)) + try: + from_snap = self._get_backup_snap_name(base_rbd, + base_name, + last_incr) + if from_snap is None: + msg = (_( + "Can't find snapshot from parent %(incr)s and " + "base name image %(base)s.") % + {'incr': last_incr, 'base': base_name}) + LOG.error(msg) + raise exception.BackupRBDOperationFailed(msg) + finally: + base_rbd.close() + + return from_snap, False + def _backup_rbd(self, backup, volume_file, volume_name, length): - """Create an incremental backup from an RBD image.""" + """Create an incremental or full backup from an RBD image.""" rbd_user = volume_file.rbd_user rbd_pool = volume_file.rbd_pool rbd_conf = volume_file.rbd_conf source_rbd_image = eventlet.tpool.Proxy(volume_file.rbd_image) volume_id = backup.volume_id - updates = {} - base_name = self._get_backup_base_name(volume_id, diff_format=True) - image_created = False - with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, - backup.container)) as client: - # If from_snap does not exist at the destination (and the - # destination exists), this implies a previous backup has failed. - # In this case we will force a full backup. - # - # TODO(dosaboy): find a way to repair the broken backup - # - if base_name not in eventlet.tpool.Proxy(self.rbd.RBD()).list( - ioctx=client.ioctx): - src_vol_snapshots = self.get_backup_snaps(source_rbd_image) - if src_vol_snapshots: - # If there are source volume snapshots but base does not - # exist then we delete it and set from_snap to None - LOG.debug("Volume '%(volume)s' has stale source " - "snapshots so deleting them.", - {'volume': volume_id}) - for snap in src_vol_snapshots: - from_snap = snap['name'] - source_rbd_image.remove_snap(from_snap) - from_snap = None - - # Create new base image - self._create_base_image(base_name, length, client) - image_created = True - else: - # If a from_snap is defined and is present in the source volume - # image but does not exist in the backup base then we look down - # the list of source volume snapshots and find the latest one - # for which a backup snapshot exist in the backup base. Until - # that snapshot is reached, we delete all the other snapshots - # for which backup snapshot does not exist. 
- from_snap = self._get_most_recent_snap(source_rbd_image, - base_name, client) + base_name = None + + # If backup.parent_id is None performs full RBD backup + if backup.parent_id is None: + base_name = self._get_backup_base_name(volume_id, backup=backup) + from_snap, image_created = self._full_rbd_backup(backup.container, + base_name, + length) + # Otherwise performs incremental rbd backup + else: + # Find the base name from the parent backup's service_metadata + base_name = self._get_backup_base_name(volume_id, backup=backup) + rbd_img = source_rbd_image + from_snap, image_created = self._incremental_rbd_backup(backup, + base_name, + length, + rbd_img, + volume_id) LOG.debug("Using --from-snap '%(snap)s' for incremental backup of " "volume %(volume)s.", @@ -744,14 +791,8 @@ "source volume='%(volume)s'.", {'snapshot': new_snap, 'volume': volume_id}) source_rbd_image.remove_snap(new_snap) - # We update the parent_id here. The from_snap is of the format: - # backup.BACKUP_ID.snap.TIMESTAMP. So we need to extract the - # backup_id of the parent only from from_snap and set it as - # parent_id - if from_snap: - parent_id = from_snap.split('.') - updates = {'parent_id': parent_id[1]} - return updates + + return {'service_metadata': '{"base": "%s"}' % base_name} def _file_is_rbd(self, volume_file): """Returns True if the volume_file is actually an RBD image.""" @@ -765,7 +806,7 @@ image. """ volume_id = backup.volume_id - backup_name = self._get_backup_base_name(volume_id, backup.id) + backup_name = self._get_backup_base_name(volume_id, backup=backup) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: @@ -868,23 +909,6 @@ LOG.debug("Found snapshot '%s'", snaps[0]) return snaps[0] - def _get_most_recent_snap(self, rbd_image, base_name, client): - """Get the most recent backup snapshot of the provided image. - - Returns name of most recent backup snapshot or None if there are no - backup snapshots. - """ - src_vol_backup_snaps = self.get_backup_snaps(rbd_image, sort=True) - from_snap = None - - for snap in src_vol_backup_snaps: - if self._snap_exists(base_name, snap['name'], client): - from_snap = snap['name'] - break - rbd_image.remove_snap(snap['name']) - - return from_snap - def _get_volume_size_gb(self, volume): """Return the size in gigabytes of the given volume. @@ -938,17 +962,23 @@ volume_file.seek(0) length = self._get_volume_size_gb(volume) - do_full_backup = False - if self._file_is_rbd(volume_file): - # If volume an RBD, attempt incremental backup. - LOG.debug("Volume file is RBD: attempting incremental backup.") + if backup.snapshot_id: + do_full_backup = True + elif self._file_is_rbd(volume_file): + # If volume an RBD, attempt incremental or full backup. 
+ do_full_backup = False + LOG.debug("Volume file is RBD: attempting optimized backup") try: - updates = self._backup_rbd(backup, volume_file, - volume.name, length) + updates = self._backup_rbd(backup, volume_file, volume.name, + length) except exception.BackupRBDOperationFailed: - LOG.debug("Forcing full backup of volume %s.", volume.id) - do_full_backup = True + with excutils.save_and_reraise_exception(): + self.delete_backup(backup) else: + if backup.parent_id: + LOG.debug("Volume file is NOT RBD: can't perform" + "incremental backup.") + raise exception.BackupRBDOperationFailed LOG.debug("Volume file is NOT RBD: will do full backup.") do_full_backup = True @@ -970,11 +1000,6 @@ LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.", {'backup_id': backup.id, 'volume_id': volume.id}) - # If updates is empty then set parent_id to None. This will - # take care if --incremental flag is used in CLI but a full - # backup is performed instead - if not updates and backup.parent_id: - updates = {'parent_id': None} return updates def _full_restore(self, backup, dest_file, dest_name, length, @@ -989,13 +1014,10 @@ # If a source snapshot is provided we assume the base is diff # format. if src_snap: - diff_format = True + backup_name = self._get_backup_base_name(backup.volume_id, + backup=backup) else: - diff_format = False - - backup_name = self._get_backup_base_name(backup.volume_id, - backup_id=backup.id, - diff_format=diff_format) + backup_name = self._get_backup_base_name(backup.volume_id) # Retrieve backup volume src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, @@ -1022,7 +1044,7 @@ post-process and resize it back to its expected size. """ backup_base = self._get_backup_base_name(backup.volume_id, - diff_format=True) + backup=backup) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: @@ -1047,7 +1069,7 @@ rbd_pool = restore_file.rbd_pool rbd_conf = restore_file.rbd_conf base_name = self._get_backup_base_name(backup.volume_id, - diff_format=True) + backup=backup) LOG.debug("Attempting incremental restore from base='%(base)s' " "snap='%(snap)s'", @@ -1179,8 +1201,10 @@ """ length = int(volume.size) * units.Gi - base_name = self._get_backup_base_name(backup.volume_id, - diff_format=True) + if backup.service_metadata: + base_name = self._get_backup_base_name(backup.volume_id, backup) + else: + base_name = self._get_backup_base_name(backup.volume_id) with eventlet.tpool.Proxy(rbd_driver.RADOSClient( self, backup.container)) as client: diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/db/sqlalchemy/api.py cinder-14.0.5.dev1.202004081945.disco/cinder/db/sqlalchemy/api.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/db/sqlalchemy/api.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/db/sqlalchemy/api.py 2020-04-08 19:45:39.000000000 +0000 @@ -1712,35 +1712,33 @@ volume_type_id, host=host) +VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata, + models.VolumeAdminMetadata, + models.Transfer, + models.VolumeGlanceMetadata, + models.VolumeAttachment]) + + @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def volume_destroy(context, volume_id): session = get_session() now = timeutils.utcnow() + updated_values = {'status': 'deleted', + 'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at'), + 'migration_status': None} with session.begin(): - updated_values = {'status': 'deleted', - 'deleted': True, - 'deleted_at': now, - 
'updated_at': literal_column('updated_at'), - 'migration_status': None} model_query(context, models.Volume, session=session).\ filter_by(id=volume_id).\ update(updated_values) - model_query(context, models.VolumeMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.VolumeAdminMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.Transfer, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) + for model in VOLUME_DEPENDENT_MODELS: + model_query(context, model, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) del updated_values['updated_at'] return updated_values diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/exception.py cinder-14.0.5.dev1.202004081945.disco/cinder/exception.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/exception.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/exception.py 2020-04-08 19:45:39.000000000 +0000 @@ -1394,3 +1394,8 @@ class NVMETTargetDeleteError(CinderException): message = "Failed to delete subsystem: %(subsystem)s" + + +class SnapshotLimitReached(CinderException): + message = _("Exceeded the configured limit of " + "%(set_limit)s snapshots per volume.") diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/objects/backup.py cinder-14.0.5.dev1.202004081945.disco/cinder/objects/backup.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/objects/backup.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/objects/backup.py 2020-04-08 19:45:39.000000000 +0000 @@ -40,9 +40,10 @@ # Version 1.4: Add restore_volume_id # Version 1.5: Add metadata # Version 1.6: Add encryption_key_id - VERSION = '1.6' + # Version 1.7: Add parent + VERSION = '1.7' - OPTIONAL_FIELDS = ('metadata',) + OPTIONAL_FIELDS = ('metadata', 'parent') fields = { 'id': fields.UUIDField(), @@ -55,6 +56,7 @@ 'availability_zone': fields.StringField(nullable=True), 'container': fields.StringField(nullable=True), 'parent_id': fields.StringField(nullable=True), + 'parent': fields.ObjectField('Backup', nullable=True), 'status': c_fields.BackupStatusField(nullable=True), 'fail_reason': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), @@ -110,8 +112,14 @@ def obj_make_compatible(self, primitive, target_version): """Make an object representation compatible with a target version.""" + added_fields = (((1, 7), ('parent',)),) + super(Backup, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) + for version, remove_fields in added_fields: + if target_version < version: + for obj_field in remove_fields: + primitive.pop(obj_field, None) @classmethod def _from_db_object(cls, context, backup, db_backup, expected_attrs=None): @@ -148,6 +156,11 @@ if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) + if attrname == 'parent': + if self.parent_id: + self.parent = self.get_by_id(self._context, self.parent_id) + else: + self.parent = None self.obj_reset_changes(fields=[attrname]) def obj_what_changed(self): @@ -174,6 
+187,7 @@ self.metadata = db.backup_metadata_update(self._context, self.id, metadata, True) + updates.pop('parent', None) db.backup_update(self._context, self.id, updates) self.obj_reset_changes() @@ -203,7 +217,7 @@ # We don't want to export extra fields and we want to force lazy # loading, so we can't use dict(self) or self.obj_to_primitive record = {name: field.to_primitive(self, name, getattr(self, name)) - for name, field in self.fields.items()} + for name, field in self.fields.items() if name != 'parent'} # We must update kwargs instead of record to ensure we don't overwrite # "real" data from the backup kwargs.update(record) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/objects/base.py cinder-14.0.5.dev1.202004081945.disco/cinder/objects/base.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/objects/base.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/objects/base.py 2020-04-08 19:45:39.000000000 +0000 @@ -146,6 +146,7 @@ OBJ_VERSIONS.add('1.35', {'Backup': '1.6', 'BackupImport': '1.6'}) OBJ_VERSIONS.add('1.36', {'RequestSpec': '1.4'}) OBJ_VERSIONS.add('1.37', {'RequestSpec': '1.5'}) +OBJ_VERSIONS.add('1.38', {'Backup': '1.7', 'BackupImport': '1.7'}) class CinderObjectRegistry(base.VersionedObjectRegistry): diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/backup/drivers/test_backup_ceph.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/backup/drivers/test_backup_ceph.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/backup/drivers/test_backup_ceph.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/backup/drivers/test_backup_ceph.py 2020-04-08 19:45:39.000000000 +0000 @@ -15,6 +15,7 @@ """ Tests for Ceph backup service.""" import hashlib +import json import os import tempfile import threading @@ -39,6 +40,7 @@ from cinder import objects from cinder import test from cinder.tests.unit import fake_constants as fake +import cinder.volume.drivers.rbd as rbd_driver # This is used to collect raised exceptions so that tests may check what was # raised. @@ -119,6 +121,14 @@ 'user_id': userid, 'project_id': projectid} return db.backup_create(self.ctxt, backup)['id'] + def _create_parent_backup_object(self): + tmp_backup_id = fake.BACKUP3_ID + self._create_backup_db_entry(tmp_backup_id, self.volume_id, + self.volume_size) + tmp_backup = objects.Backup.get_by_id(self.ctxt, tmp_backup_id) + tmp_backup.service_metadata = 'mock_base_name' + return tmp_backup + def time_inc(self): self.counter += 1 return self.counter @@ -170,6 +180,22 @@ self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) self.backup.container = "backups" + # Create parent backup of volume + self.parent_backup = self._create_parent_backup_object() + + # Create alternate backup with parent + self.alt_backup_id = fake.BACKUP2_ID + self._create_backup_db_entry(self.alt_backup_id, self.volume_id, + self.volume_size) + + self.alt_backup = objects.Backup.get_by_id(self.ctxt, + self.alt_backup_id) + + base_name = "volume-%s.backup.%s" % (self.volume_id, self.backup_id) + self.alt_backup.container = "backups" + self.alt_backup.parent = self.backup + self.alt_backup.parent.service_metadata = '{"base": "%s"}' % base_name + # Create alternate volume. 
self.alt_volume_id = str(uuid.uuid4()) self._create_volume_db_entry(self.alt_volume_id, self.volume_size) @@ -256,24 +282,6 @@ self.assertEqual(1 | 2 | 4 | 64, features) @common_mocks - def test_get_most_recent_snap(self): - last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4()) - - image = self.mock_rbd.Image.return_value - with mock.patch.object(self.service, '_snap_exists') as \ - mock_snap_exists: - mock_snap_exists.return_value = True - image.list_snaps.return_value = \ - [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())}, - {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())}, - {'name': last}, - {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}] - base_name = "mock_base" - client = mock.Mock() - snap = self.service._get_most_recent_snap(image, base_name, client) - self.assertEqual(last, snap) - - @common_mocks def test_get_backup_snap_name(self): snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4()) @@ -415,7 +423,7 @@ with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, '_discard_bytes'): with tempfile.NamedTemporaryFile() as test_file: - self.service.backup(self.backup, self.volume_file) + self.service.backup(self.alt_backup, self.volume_file) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @@ -424,25 +432,34 @@ self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @common_mocks - def test_get_backup_base_name(self): - name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + def test_get_backup_base_name_without_backup_param(self): + """Test _get_backup_base_name without backup.""" + name = self.service._get_backup_base_name(self.volume_id) self.assertEqual("volume-%s.backup.base" % (self.volume_id), name) - self.assertRaises(exception.InvalidParameterValue, - self.service._get_backup_base_name, - self.volume_id) - - name = self.service._get_backup_base_name(self.volume_id, '1234') - self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'), - name) + @common_mocks + def test_get_backup_base_name_w_backup_and_no_parent(self): + """Test _get_backup_base_name with backup and no parent.""" + name = self.service._get_backup_base_name(self.volume_id, + self.backup) + self.assertEqual("volume-%s.backup.%s" % + (self.volume_id, self.backup.id), name) + + @common_mocks + def test_get_backup_base_name_w_backup_and_parent(self): + """Test _get_backup_base_name with backup and parent.""" + name = self.service._get_backup_base_name(self.volume_id, + self.alt_backup) + base_name = json.loads(self.alt_backup.parent.service_metadata) + self.assertEqual(base_name["base"], name) @common_mocks @mock.patch('fcntl.fcntl', spec=True) @mock.patch('subprocess.Popen', spec=True) def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl): + """Test full RBD backup generated successfully.""" backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + self.alt_backup) def mock_write_data(): self.volume_file.seek(0) @@ -483,8 +500,11 @@ {'name': 'backup.mock.snap.15341241.90'}, {'name': 'backup.mock.snap.199994362.10'}]) - output = self.service.backup(self.backup, rbdio) - self.assertDictEqual({}, output) + output = self.service.backup(self.alt_backup, + rbdio) + base_name = '{"base": "%s"}' % backup_name + service_meta = {'service_metadata': base_name} + self.assertDictEqual(service_meta, output) self.assertEqual(['popen_init', 'read', @@ -494,7 +514,7 @@ 'communicate'], self.callstack) self.assertFalse(mock_full_backup.called) 
- self.assertTrue(mock_get_backup_snaps.called) + self.assertFalse(mock_get_backup_snaps.called) # Ensure the files are equal self.assertEqual(checksum.digest(), @@ -505,7 +525,7 @@ with mock.patch.object(self.service, '_backup_rbd') as \ mock_backup_rbd, mock.patch.object(self.service, '_backup_metadata'): - mock_backup_rbd.return_value = {'parent_id': 'mock'} + mock_backup_rbd.return_value = {'service_metadata': 'base_name'} image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', @@ -513,15 +533,14 @@ 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) output = self.service.backup(self.backup, rbdio) - self.assertDictEqual({'parent_id': 'mock'}, output) + self.assertDictEqual({'service_metadata': 'base_name'}, output) @common_mocks - def test_backup_volume_from_rbd_set_parent_id_none(self): - backup_name = self.service._get_backup_base_name( - self.volume_id, diff_format=True) + def test_backup_volume_from_rbd_got_exception(self): + base_name = self.service._get_backup_base_name(self.volume_id, + self.alt_backup) - self.mock_rbd.RBD().list.return_value = [backup_name] - self.backup.parent_id = 'mock_parent_id' + self.mock_rbd.RBD().list.return_value = [base_name] with mock.patch.object(self.service, 'get_backup_snaps'), \ mock.patch.object(self.service, '_rbd_diff_transfer') as \ @@ -550,28 +569,54 @@ 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) mock_get_backup_snaps.return_value = ( - [{'name': 'backup.mock.snap.153464362.12'}, - {'name': 'backup.mock.snap.199994362.10'}]) - output = self.service.backup(self.backup, rbdio) - self.assertIsNone(output['parent_id']) + [{'name': 'backup.mock.snap.153464362.12', + 'backup_id': 'mock_parent_id'}, + {'name': 'backup.mock.snap.199994362.10', + 'backup_id': 'mock'}]) + self.assertRaises(exception.BackupRBDOperationFailed, + self.service.backup, + self.alt_backup, rbdio) @common_mocks def test_backup_rbd_set_parent_id(self): - backup_name = self.service._get_backup_base_name( - self.volume_id, diff_format=True) + base_name = self.service._get_backup_base_name(self.volume_id, + self.alt_backup) vol_name = self.volume.name vol_length = self.volume.size - self.mock_rbd.RBD().list.return_value = [backup_name] + self.mock_rbd.RBD().list.return_value = [base_name] with mock.patch.object(self.service, '_snap_exists'), \ - mock.patch.object(self.service, '_get_backup_base_name') as \ - mock_get_backup_base_name, mock.patch.object( - self.service, '_get_most_recent_snap') as mock_get_most_recent_snap, \ + mock.patch.object(self.service, '_get_backup_snap_name') as \ + mock_get_backup_snap_name, \ mock.patch.object(self.service, '_rbd_diff_transfer'): - mock_get_backup_base_name.return_value = backup_name - mock_get_most_recent_snap.return_value = ( - 'backup.mock.snap.153464362.12') + image = self.service.rbd.Image() + mock_get_backup_snap_name.return_value = 'mock_snap_name' + meta = linuxrbd.RBDImageMetadata(image, + 'pool_foo', + 'user_foo', + 'conf_foo') + rbdio = linuxrbd.RBDVolumeIOWrapper(meta) + rbdio.seek(0) + output = self.service._backup_rbd(self.alt_backup, rbdio, + vol_name, vol_length) + base_name = '{"base": "%s"}' % base_name + self.assertEqual({'service_metadata': base_name}, output) + self.backup.parent_id = None + + @common_mocks + def test_backup_rbd_without_parent_id(self): + full_backup_name = self.service._get_backup_base_name(self.volume_id, + self.alt_backup) + vol_name = self.volume.name + vol_length = self.volume.size + + with mock.patch.object(self.service, '_rbd_diff_transfer'), \ + 
mock.patch.object(self.service, '_create_base_image') as \ + mock_create_base_image, mock.patch.object( + rbd_driver, 'RADOSClient') as mock_rados_client: + client = mock.Mock() + mock_rados_client.return_value.__enter__.return_value = client image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', @@ -579,9 +624,12 @@ 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) - output = self.service._backup_rbd(self.backup, rbdio, + output = self.service._backup_rbd(self.alt_backup, rbdio, vol_name, vol_length) - self.assertDictEqual({'parent_id': 'mock'}, output) + mock_create_base_image.assert_called_with(full_backup_name, + vol_length, client) + base_name = '{"base": "%s"}' % full_backup_name + self.assertEqual({'service_metadata': base_name}, output) @common_mocks @mock.patch('fcntl.fcntl', spec=True) @@ -595,7 +643,7 @@ self._try_delete_base_image(). """ backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + self.alt_backup) def mock_write_data(): self.volume_file.seek(0) @@ -659,7 +707,7 @@ self.assertRaises( self.service.rbd.ImageNotFound, self.service.backup, - self.backup, rbdio) + self.alt_backup, rbdio) @common_mocks @mock.patch('fcntl.fcntl', spec=True) @@ -672,7 +720,7 @@ second exception occurs in self.delete_backup(). """ backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + self.alt_backup) def mock_write_data(): self.volume_file.seek(0) @@ -730,12 +778,11 @@ self.assertRaises( self.service.rbd.ImageBusy, self.service.backup, - self.backup, rbdio) + self.alt_backup, rbdio) @common_mocks def test_backup_rbd_from_snap(self): - backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + backup_name = self.service._get_backup_base_name(self.volume_id) vol_name = self.volume['name'] vol_length = self.service._get_volume_size_gb(self.volume) @@ -776,43 +823,36 @@ @common_mocks def test_backup_rbd_from_snap2(self): - backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + base_name = self.service._get_backup_base_name(self.volume_id, + self.alt_backup) vol_name = self.volume['name'] vol_length = self.service._get_volume_size_gb(self.volume) self.mock_rbd.RBD().list = mock.Mock() - self.mock_rbd.RBD().list.return_value = [backup_name] + self.mock_rbd.RBD().list.return_value = [base_name] - with mock.patch.object(self.service, '_get_most_recent_snap') as \ - mock_get_most_recent_snap: - with mock.patch.object(self.service, '_get_backup_base_name') as \ - mock_get_backup_base_name: - with mock.patch.object(self.service, '_rbd_diff_transfer') as \ - mock_rbd_diff_transfer: - with mock.patch.object(self.service, '_get_new_snap_name') as \ - mock_get_new_snap_name: - mock_get_backup_base_name.return_value = ( - backup_name) - mock_get_most_recent_snap.return_value = ( - 'backup.mock.snap.153464362.12') - mock_get_new_snap_name.return_value = 'new_snap' - image = self.service.rbd.Image() - meta = linuxrbd.RBDImageMetadata(image, - 'pool_foo', - 'user_foo', - 'conf_foo') - rbdio = linuxrbd.RBDVolumeIOWrapper(meta) - rbdio.seek(0) - self.service._backup_rbd(self.backup, rbdio, - vol_name, vol_length) - mock_rbd_diff_transfer.assert_called_with( - vol_name, 'pool_foo', backup_name, - self.backup.container, src_user='user_foo', - src_conf='conf_foo', - dest_conf='/etc/ceph/ceph.conf', - dest_user='cinder', src_snap='new_snap', - from_snap='backup.mock.snap.153464362.12') + with mock.patch.object(self.service, '_get_backup_base_name') 
as \ + mock_get_backup_base_name: + with mock.patch.object(self.service, '_rbd_diff_transfer') as \ + mock_rbd_diff_transfer: + with mock.patch.object(self.service, '_get_new_snap_name') as \ + mock_get_new_snap_name: + mock_get_backup_base_name.return_value = base_name + mock_get_new_snap_name.return_value = 'new_snap' + image = self.service.rbd.Image() + meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', + 'user_foo', 'conf_foo') + rbdio = linuxrbd.RBDVolumeIOWrapper(meta) + rbdio.seek(0) + self.service._backup_rbd(self.alt_backup, rbdio, vol_name, + vol_length) + mock_rbd_diff_transfer.assert_called_with( + vol_name, 'pool_foo', base_name, + self.backup.container, src_user='user_foo', + src_conf='conf_foo', + dest_conf='/etc/ceph/ceph.conf', + dest_user='cinder', src_snap='new_snap', + from_snap=None) @common_mocks def test_backup_vol_length_0(self): @@ -843,7 +883,7 @@ @common_mocks def test_restore(self): backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + self.alt_backup) self.mock_rbd.RBD.return_value.list.return_value = [backup_name] @@ -865,7 +905,7 @@ with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) - self.service.restore(self.backup, self.volume_id, + self.service.restore(self.alt_backup, self.volume_id, test_file) checksum = hashlib.sha256() @@ -960,8 +1000,7 @@ @common_mocks def test_delete_backup_snapshot(self): snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4()) - base_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + base_name = self.service._get_backup_base_name(self.volume_id) self.mock_rbd.RBD.remove_snap = mock.Mock() thread_dict = {} @@ -991,16 +1030,16 @@ @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_try_delete_base_image_diff_format(self, mock_meta_backup): backup_name = self.service._get_backup_base_name(self.volume_id, - diff_format=True) + self.alt_backup) self.mock_rbd.RBD.return_value.list.return_value = [backup_name] with mock.patch.object(self.service, '_delete_backup_snapshot') as \ mock_del_backup_snap: - snap_name = self.service._get_new_snap_name(self.backup_id) + snap_name = self.service._get_new_snap_name(self.alt_backup_id) mock_del_backup_snap.return_value = (snap_name, 0) - self.service.delete_backup(self.backup) + self.service.delete_backup(self.alt_backup) self.assertTrue(mock_del_backup_snap.called) self.assertTrue(self.mock_rbd.RBD.return_value.list.called) @@ -1010,7 +1049,7 @@ @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_try_delete_base_image(self, mock_meta_backup): backup_name = self.service._get_backup_base_name(self.volume_id, - self.backup_id) + self.alt_backup) thread_dict = {} def mock_side_effect(ioctx, base_name): @@ -1019,7 +1058,7 @@ self.mock_rbd.RBD.return_value.list.return_value = [backup_name] self.mock_rbd.RBD.return_value.remove.side_effect = mock_side_effect with mock.patch.object(self.service, 'get_backup_snaps'): - self.service.delete_backup(self.backup) + self.service.delete_backup(self.alt_backup) self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @@ -1028,7 +1067,7 @@ def test_try_delete_base_image_busy(self): """This should induce retries then raise rbd.ImageBusy.""" backup_name = self.service._get_backup_base_name(self.volume_id, - self.backup_id) + self.alt_backup) rbd = self.mock_rbd.RBD.return_value rbd.list.return_value = [backup_name] @@ -1038,7 +1077,7 @@ 
mock_get_backup_snaps: self.assertRaises(self.mock_rbd.ImageBusy, self.service._try_delete_base_image, - self.backup) + self.alt_backup) self.assertTrue(mock_get_backup_snaps.called) self.assertTrue(rbd.list.called) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/db/test_orm_relationships.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/db/test_orm_relationships.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/db/test_orm_relationships.py 1970-01-01 00:00:00.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/db/test_orm_relationships.py 2020-04-08 19:45:39.000000000 +0000 @@ -0,0 +1,46 @@ +# Copyright 2020 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for code that makes assumptions about ORM relationships.""" + +from sqlalchemy_utils import functions as saf + +from cinder.db.sqlalchemy import api as db_api +from cinder.db.sqlalchemy import models +from cinder import test + + +class VolumeRelationshipsTestCase(test.TestCase): + """Test cases for Volume ORM model relationshps.""" + + def test_volume_dependent_models_list(self): + """Make sure the volume dependent tables list is accurate.""" + # Addresses LP Bug #1542169 + + volume_declarative_base = saf.get_declarative_base(models.Volume) + volume_fks = saf.get_referencing_foreign_keys(models.Volume) + + dependent_tables = [] + for table, fks in saf.group_foreign_keys(volume_fks): + dependent_tables.append(table) + + found_dependent_models = [] + for table in dependent_tables: + found_dependent_models.append(saf.get_class_by_table( + volume_declarative_base, table)) + + self.assertEqual(len(found_dependent_models), + len(db_api.VOLUME_DEPENDENT_MODELS)) + for model in found_dependent_models: + self.assertIn(model, db_api.VOLUME_DEPENDENT_MODELS) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/objects/test_backup.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/objects/test_backup.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/objects/test_backup.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/objects/test_backup.py 2020-04-08 19:45:39.000000000 +0000 @@ -143,6 +143,20 @@ metadata={'test_key': 'test_value'}) self.assertEqual({'test_key': 'test_value'}, backup.metadata) + @mock.patch('cinder.objects.backup.Backup.get_by_id', + return_value=None) + def test_obj_field_parent(self, mock_lzy_ld): + backup = objects.Backup(context=self.context, + parent_id=None) + self.assertIsNone(backup.parent) + + # Bug #1862635: should trigger a lazy load + backup = objects.Backup(context=self.context, + parent_id=fake.UUID5) + # need noqa here because of pyflakes issue #202 + _ = backup.parent # noqa + mock_lzy_ld.assert_called_once() + def test_import_record(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, @@ -154,6 +168,24 @@ # Make sure we don't lose data when converting from string 
self.assertDictEqual(self._expected_backup(backup), imported_backup) + @mock.patch('cinder.db.get_by_id', return_value=fake_backup) + def test_import_record_w_parent(self, backup_get): + full_backup = objects.Backup.get_by_id(self.context, fake.USER_ID) + self._compare(self, fake_backup, full_backup) + + utils.replace_obj_loader(self, objects.Backup) + incr_backup = objects.Backup(context=self.context, + id=fake.BACKUP2_ID, + parent=full_backup, + parent_id=full_backup['id'], + num_dependent_backups=0) + export_string = incr_backup.encode_record() + imported_backup = objects.Backup.decode_record(export_string) + + # Make sure we don't lose data when converting from string + self.assertDictEqual(self._expected_backup(incr_backup), + imported_backup) + def test_import_record_additional_info(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, @@ -175,7 +207,7 @@ def _expected_backup(self, backup): record = {name: field.to_primitive(backup, name, getattr(backup, name)) - for name, field in backup.fields.items()} + for name, field in backup.fields.items() if name != 'parent'} return record def test_import_record_additional_info_cant_overwrite(self): diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/objects/test_objects.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/objects/test_objects.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/objects/test_objects.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/objects/test_objects.py 2020-04-08 19:45:39.000000000 +0000 @@ -23,9 +23,9 @@ # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. 
object_data = { - 'Backup': '1.6-c7ede487ba6fbcdd2a4711343cd972be', + 'Backup': '1.7-fffdbcd5da3c30750916fa2cc0e8ffb5', 'BackupDeviceInfo': '1.0-74b3950676c690538f4bc6796bd0042e', - 'BackupImport': '1.6-c7ede487ba6fbcdd2a4711343cd972be', + 'BackupImport': '1.7-fffdbcd5da3c30750916fa2cc0e8ffb5', 'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'CleanupRequest': '1.0-e7c688b893e1d5537ccf65cc3eb10a28', 'Cluster': '1.1-e2c533eb8cdd8d229b6c45c6cf3a9e2c', diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/test_db_api.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/test_db_api.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/test_db_api.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/test_db_api.py 2020-04-08 19:45:39.000000000 +0000 @@ -768,6 +768,13 @@ self.assertRaises(exception.VolumeNotFound, db.volume_get, self.ctxt, volume['id']) + @mock.patch('cinder.db.sqlalchemy.api.model_query') + def test_volume_destroy_deletes_dependent_data(self, mock_model_query): + """Addresses LP Bug #1542169.""" + db.volume_destroy(self.ctxt, fake.VOLUME_ID) + expected_call_count = 1 + len(sqlalchemy_api.VOLUME_DEPENDENT_MODELS) + self.assertEqual(expected_call_count, mock_model_query.call_count) + def test_volume_get_all(self): volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i}) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py 2020-04-08 19:45:39.000000000 +0000 @@ -602,6 +602,15 @@ 'snapvx_source': 'false', 'storageGroupId': []} + volume_details_legacy = {'cap_gb': 2, + 'num_of_storage_groups': 1, + 'volumeId': device_id, + 'volume_identifier': test_volume.id, + 'wwn': volume_wwn, + 'snapvx_target': 'false', + 'snapvx_source': 'false', + 'storageGroupId': []} + volume_list = [ {'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa', 'count': 2, diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py 2020-04-08 19:45:39.000000000 +0000 @@ -442,15 +442,39 @@ array = self.data.array snap_name = self.data.group_snapshot_name source_group_name = self.data.storagegroup_name_source - extra_specs = self.data.extra_specs - src_dev_ids = [self.data.device_id] with mock.patch.object( self.provision, 'delete_group_replica') as mock_delete_replica: self.provision.delete_group_replica( - array, snap_name, source_group_name, src_dev_ids, extra_specs) + array, snap_name, source_group_name) mock_delete_replica.assert_called_once_with( - array, snap_name, source_group_name, src_dev_ids, extra_specs) + array, snap_name, source_group_name) + + @mock.patch.object(rest.PowerMaxRest, + 'get_storagegroup_snap_generation_list', + side_effect=[['0', 
'3', '1', '2'], + ['0', '1'], ['0'], list()]) + def test_delete_group_replica_side_effect(self, mock_list): + array = self.data.array + snap_name = self.data.group_snapshot_name + source_group_name = self.data.storagegroup_name_source + with mock.patch.object( + self.rest, 'delete_storagegroup_snap') as mock_del: + self.provision.delete_group_replica( + array, snap_name, source_group_name) + self.assertEqual(4, mock_del.call_count) + mock_del.reset_mock() + self.provision.delete_group_replica( + array, snap_name, source_group_name) + self.assertEqual(2, mock_del.call_count) + mock_del.reset_mock() + self.provision.delete_group_replica( + array, snap_name, source_group_name) + self.assertEqual(1, mock_del.call_count) + mock_del.reset_mock() + self.provision.delete_group_replica( + array, snap_name, source_group_name) + mock_del.assert_not_called() def test_link_and_break_replica(self): array = self.data.array diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py 2020-04-08 19:45:39.000000000 +0000 @@ -610,6 +610,26 @@ name_id=self.data.test_clone_volume._name_id) self.assertEqual(self.data.device_id, found_dev_id) + def test_check_volume_device_id_legacy_case(self): + element_name = self.utils.get_volume_element_name( + self.data.test_volume.id) + with mock.patch.object(self.rest, 'get_volume', + return_value=self.data.volume_details_legacy): + found_dev_id = self.rest.check_volume_device_id( + self.data.array, self.data.device_id, element_name) + self.assertEqual(self.data.device_id, found_dev_id) + + def test_check_volume_device_id_legacy_case_no_match(self): + element_name = self.utils.get_volume_element_name( + self.data.test_volume.id) + volume_details_no_match = deepcopy(self.data.volume_details_legacy) + volume_details_no_match['volume_identifier'] = 'no_match' + with mock.patch.object(self.rest, 'get_volume', + return_value=volume_details_no_match): + found_dev_id = self.rest.check_volume_device_id( + self.data.array, self.data.device_id, element_name) + self.assertIsNone(found_dev_id) + def test_find_mv_connections_for_vol(self): device_id = self.data.device_id ref_lun_id = int( @@ -1497,6 +1517,27 @@ mock_create.assert_called_once_with( array, source_group, snap_name, extra_specs) + def test_delete_storagegroup_snap(self): + array = self.data.array + source_group = self.data.storagegroup_name_source + snap_name = self.data.group_snapshot_name + with mock.patch.object( + self.rest, 'delete_storagegroup_snap') as mock_delete: + self.rest.delete_storagegroup_snap( + array, source_group, snap_name, '0') + mock_delete.assert_called_once_with( + array, source_group, snap_name, '0') + + @mock.patch.object(rest.PowerMaxRest, 'get_resource', + return_value={'generations': ['0', '1']}) + def test_get_storagegroup_snap_generation_list(self, mock_list): + array = self.data.array + source_group = self.data.storagegroup_name_source + snap_name = self.data.group_snapshot_name + ret_list = self.rest.get_storagegroup_snap_generation_list( + array, source_group, snap_name) + self.assertEqual(['0', '1'], ret_list) + def test_get_storagegroup_rdf_details(self): 
details = self.rest.get_storagegroup_rdf_details( self.data.array, self.data.test_vol_grp_name, diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py 2020-04-08 19:45:39.000000000 +0000 @@ -65,7 +65,7 @@ raise ex.UnityResourceNotFoundError() elif self.get_id() == 'snap_in_use': raise ex.UnityDeleteAttachedSnapError() - elif self.name == 'empty_host': + elif self.name == 'empty-host': raise ex.HostDeleteIsCalled() @property @@ -584,7 +584,8 @@ host = MockResource(name='empty-host') self.client.host_cache['empty-host'] = host self.assertRaises(ex.HostDeleteIsCalled, - self.client.delete_host_wo_lock(host)) + self.client.delete_host_wo_lock, + host) def test_delete_host_wo_lock_remove_from_cache(self): host = MockResource(name='empty-host-in-cache') diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py 2020-04-08 19:45:39.000000000 +0000 @@ -8129,17 +8129,20 @@ fake_license_scheme = { 'license_scheme': '9846' } + fake_9100_license_scheme = { + 'license_scheme': 'flex' + } fake_license_invalid_scheme = { 'license_scheme': '0000' } - lslicense.side_effect = [fake_license_without_keys, fake_license_without_keys, fake_license, fake_license_without_keys] lsguicapabilities.side_effect = [fake_license_without_keys, fake_license_invalid_scheme, - fake_license_scheme] + fake_license_scheme, + fake_9100_license_scheme] self.assertFalse(self.storwize_svc_common.compression_enabled()) self.assertFalse(self.storwize_svc_common.compression_enabled()) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/nec/test_volume.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/nec/test_volume.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/nec/test_volume.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/nec/test_volume.py 2020-04-08 19:45:39.000000000 +0000 @@ -671,6 +671,13 @@ self.vol, self.vol.size, None, self._convert_id2name, self._select_leastused_poolnumber) + self.mock_object(self._cli, 'get_pair_lds', + return_value={'lds1', 'lds2', 'lds3'}) + with self.assertRaisesRegex(exception.VolumeBackendAPIException, + 'Cannot create clone volume. ' + 'number of pairs reached 3. 
' + 'ldname=LX:287RbQoP7VdwR1WsPC2fZT'): + self.create_cloned_volume(self.vol, self.src) def test_bindld_CreateCloneWaitingInterval(self): self.assertEqual(10, cli.get_sleep_time_for_clone(0)) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/test_rbd.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/test_rbd.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/drivers/test_rbd.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/drivers/test_rbd.py 2020-04-08 19:45:39.000000000 +0000 @@ -1045,10 +1045,13 @@ (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) + # We expect clone() to be called exactly once. self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) + # Without flattening, only the source volume is opened, + # so only one call to close() should occur. self.assertEqual( - 2, self.mock_rbd.Image.return_value.close.call_count) + 1, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) mock_resize.assert_not_called() mock_enable_repl.assert_not_called() @@ -1084,7 +1087,7 @@ image.protect_snap.assert_called_once_with(name + '.clone_snap') self.assertEqual(1, self.mock_rbd.RBD.return_value.clone.call_count) self.assertEqual( - 2, self.mock_rbd.Image.return_value.close.call_count) + 1, self.mock_rbd.Image.return_value.close.call_count) mock_get_clone_depth.assert_called_once_with( self.mock_client().__enter__(), self.volume_a.name) mock_resize.assert_not_called() @@ -1114,7 +1117,7 @@ self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.assertEqual( - 2, self.mock_rbd.Image.return_value.close.call_count) + 1, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 1, mock_resize.call_count) @@ -1171,7 +1174,7 @@ # We expect the driver to close both volumes, so 2 is expected self.assertEqual( - 3, self.mock_rbd.Image.return_value.close.call_count) + 2, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) mock_enable_repl.assert_not_called() diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/flows/test_create_volume_flow.py cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/flows/test_create_volume_flow.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/tests/unit/volume/flows/test_create_volume_flow.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/tests/unit/volume/flows/test_create_volume_flow.py 2020-04-08 19:45:40.000000000 +0000 @@ -1559,53 +1559,52 @@ image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = mock.MagicMock() - image_info = imageutils.QemuImgInfo() - image_info.virtual_size = '1073741824' - mock_qemu_info.return_value = image_info - volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') - image_volume = fake_volume.fake_db_volume(size=2) - self.mock_db.volume_create.return_value = image_volume - + self.mock_driver.clone_image.return_value = (None, False) self.flags(verify_glance_signatures='disabled') - if cloning_supported: - mock_create_from_src.side_effect = exception.CinderException( - 'Error during cloning') - else: - mock_create_from_src.side_effect = NotImplementedError( - 'Driver does not support clone') - manager = create_volume_manager.CreateVolumeFromSpecTask( 
self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) - - model_update = manager._create_from_image_cache_or_download( - self.ctxt, - volume, - image_location, - image_id, - image_meta, - self.mock_image_service, - update_cache=False) + if cloning_supported: + mock_create_from_src.side_effect = exception.SnapshotLimitReached( + 'Error during cloning') + self.assertRaises( + exception.SnapshotLimitReached, + manager._create_from_image, + self.ctxt, + volume, + image_location, + image_id, + image_meta, + self.mock_image_service) + else: + mock_create_from_src.side_effect = NotImplementedError( + 'Driver does not support clone') + model_update = manager._create_from_image( + self.ctxt, + volume, + image_location, + image_id, + image_meta, + self.mock_image_service) + mock_create_from_img_dl.assert_called_once() + self.assertEqual(mock_create_from_img_dl.return_value, + model_update) # Ensure cloning was attempted and that it failed mock_create_from_src.assert_called_once() - mock_create_from_img_dl.assert_called_once() - self.assertEqual(mock_create_from_img_dl.return_value, model_update) - - # Ensure a new cache entry is created when cloning fails, but - # only when the driver supports cloning. - if cloning_supported: - (self.mock_volume_manager. - _create_image_cache_volume_entry.assert_called_once()) - else: - (self.mock_volume_manager. - _create_image_cache_volume_entry.assert_not_called()) + with mock.patch( + 'cinder.volume.flows.manager.create_volume.' + 'CreateVolumeFromSpecTask') as volume_manager: + (volume_manager.CreateVolumeFromSpecTask. + _create_from_image_cache_or_download.called_once()) + (volume_manager.CreateVolumeFromSpecTask. + _create_from_image_cache.called_once()) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/common.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/common.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/common.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/common.py 2020-04-08 19:45:40.000000000 +0000 @@ -4468,16 +4468,10 @@ {'group_id': source_group.id}) raise exception.VolumeBackendAPIException( message=exception_message) - # Check if the snapshot exists - if 'snapVXSnapshots' in volume_group: - if snap_name in volume_group['snapVXSnapshots']: - src_devs = self._get_snap_src_dev_list(array, snapshots) - self.provision.delete_group_replica( - array, snap_name, vol_grp_name, src_devs, extra_specs) - else: - # Snapshot has been already deleted, return successfully - LOG.error("Cannot find group snapshot %(snapId)s.", - {'snapId': group_snapshot.id}) + + self.provision.delete_group_replica( + array, snap_name, vol_grp_name) + model_update = {'status': fields.GroupSnapshotStatus.DELETED} for snapshot in snapshots: snapshots_model_update.append( @@ -4855,12 +4849,9 @@ # Delete the snapshot if required if rollback_dict.get("snap_name"): try: - src_dev_ids = [ - a for a, b in rollback_dict['list_volume_pairs']] self.provision.delete_group_replica( array, rollback_dict["snap_name"], - rollback_dict["source_group_name"], - src_dev_ids, rollback_dict['interval_retries_dict']) + rollback_dict["source_group_name"]) except Exception as e: LOG.debug("Failed to delete group snapshot. Attempting " "further rollback. 
Exception received: %(e)s.", diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/fc.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/fc.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/fc.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/fc.py 2020-04-08 19:45:40.000000000 +0000 @@ -109,9 +109,11 @@ - Support for storage-assisted in-use retype (bp/powermax-storage-assisted-inuse-retype) 4.0.1 - PowerMax OS Metro formatted volumes fix (bug #1829876) + 4.0.2 - Volume group delete failure (bug #1853589) + 4.0.3 - Legacy volume not found fix (#1867163) """ - VERSION = "4.0.1" + VERSION = "4.0.3" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/iscsi.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/iscsi.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/iscsi.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/iscsi.py 2020-04-08 19:45:40.000000000 +0000 @@ -114,9 +114,11 @@ - Support for storage-assisted in-use retype (bp/powermax-storage-assisted-inuse-retype) 4.0.1 - PowerMax OS Metro formatted volumes fix (bug #1829876) + 4.0.2 - Volume group delete failure (bug #1853589) + 4.0.3 - Legacy volume not found fix (#1867163) """ - VERSION = "4.0.1" + VERSION = "4.0.3" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/provision.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/provision.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/provision.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/provision.py 2020-04-08 19:45:40.000000000 +0000 @@ -382,8 +382,9 @@ self._unlink_volume(array, "", "", snap_name, extra_specs, list_volume_pairs=list_device_pairs, generation=generation) - self.delete_volume_snap(array, snap_name, source_devices, - restored=False, generation=generation) + if source_devices: + self.delete_volume_snap(array, snap_name, source_devices, + restored=False, generation=generation) def extend_volume(self, array, device_id, new_size, extra_specs, rdf_group=None): @@ -691,8 +692,7 @@ self.rest.create_storagegroup_snap( array, source_group, snap_name, extra_specs) - def delete_group_replica(self, array, snap_name, source_group_name, - src_dev_ids, extra_specs): + def delete_group_replica(self, array, snap_name, source_group_name): """Delete the snapshot. 
:param array: the array serial number @@ -701,12 +701,19 @@ :param src_dev_ids: the list of source device ids :param extra_specs: extra specifications """ - # Delete snapvx snapshot LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " "snapshot: %(snap_name)s.", {'srcGroup': source_group_name, 'snap_name': snap_name}) - self.delete_volume_snap_check_for_links( - array, snap_name, src_dev_ids, extra_specs) + gen_list = self.rest.get_storagegroup_snap_generation_list( + array, source_group_name, snap_name) + if gen_list: + gen_list.sort(reverse=True) + for gen in gen_list: + self.rest.delete_storagegroup_snap( + array, source_group_name, snap_name, gen) + else: + LOG.debug("Unable to get generation number(s) for: %(srcGroup)s.", + {'srcGroup': source_group_name}) def link_and_break_replica(self, array, source_group_name, target_group_name, snap_name, extra_specs, diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/rest.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/rest.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/powermax/rest.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/powermax/rest.py 2020-04-08 19:45:40.000000000 +0000 @@ -920,13 +920,18 @@ 'Device id = %(di)s', {'en': element_name, 'vi': vol_identifier, 'di': device_id}) - if vol_identifier == element_name: - found_device_id = device_id - elif name_id: - # This may be host-assisted migration case - element_name = self.utils.get_volume_element_name(name_id) - if vol_identifier == element_name: + if vol_identifier: + if vol_identifier in element_name: found_device_id = device_id + if vol_identifier != element_name: + LOG.debug("Device %(di)s is a legacy volume created " + "using SMI-S.", + {'di': device_id}) + elif name_id: + # This may be host-assisted migration case + element_name = self.utils.get_volume_element_name(name_id) + if vol_identifier == element_name: + found_device_id = device_id return found_device_id def add_vol_to_sg(self, array, storagegroup_name, device_id, extra_specs): @@ -2441,6 +2446,43 @@ self.wait_for_job('Create storage group snapVx', status_code, job, extra_specs) + def delete_storagegroup_snap(self, array, source_group, + snap_name, generation='0'): + """Delete a snapVx snapshot of a storage group. + + :param array: the array serial number + :param source_group: the source group name + :param snap_name: the name of the snapshot + :param generation: the generation number of the SnapVX + """ + resource_name = ("%(sg_name)s/snapshot/%(snap_name)s" + "/generation/%(generation)s" + % {'sg_name': source_group, 'snap_name': snap_name, + 'generation': generation}) + + self.delete_resource( + array, REPLICATION, 'storagegroup', resource_name=resource_name) + + def get_storagegroup_snap_generation_list( + self, array, source_group, snap_name): + """Get a snapshot and its generation count information for an sg. + + The most recent snapshot will have a gen number of 0. The oldest + snapshot will have a gen number = genCount - 1 (i.e. if there are 4 + generations of particular snapshot, the oldest will have a gen num of + 3). 
+ + :param array: name of the array -- str + :param source_group: name of the storage group -- str + :param snap_name: the name of the snapshot -- str + :returns: generation numbers -- list + """ + resource_name = ("%(sg_name)s/snapshot/%(snap_name)s/generation" + % {'sg_name': source_group, 'snap_name': snap_name}) + response = self.get_resource(array, REPLICATION, 'storagegroup', + resource_name=resource_name) + return response.get('generations', list()) if response else list() + def get_storagegroup_rdf_details(self, array, storagegroup_name, rdf_group_num): """Get the remote replication details of a storage group. diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/xtremio.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/xtremio.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/dell_emc/xtremio.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/dell_emc/xtremio.py 2020-04-08 19:45:40.000000000 +0000 @@ -539,8 +539,7 @@ src_vref['id']) limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache') if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']: - raise exception.CinderException('Exceeded the configured limit of ' - '%d snapshots per volume' % limit) + raise exception.SnapshotLimitReached(set_limit=limit) try: self.client.create_snapshot(src_vref['id'], volume['id']) except exception.XtremIOSnapshotsLimitExceeded as e: diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py 2020-04-08 19:45:40.000000000 +0000 @@ -762,6 +762,8 @@ resp = self.ssh.lsguicapabilities() if resp.get('license_scheme', '0') == '9846': return True + if resp.get('license_scheme', '0') == 'flex': + return True except exception.VolumeBackendAPIException: LOG.exception("Failed to fetch licensing scheme.") return False diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/nec/volume_helper.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/nec/volume_helper.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/nec/volume_helper.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/nec/volume_helper.py 2020-04-08 19:45:40.000000000 +0000 @@ -475,8 +475,7 @@ if len(pair_lds) == 3: msg = (_('Cannot create clone volume. ' 'number of pairs reached 3. ' - '%(msg)s. 
ldname=%(ldname)s') % - {'msg': msg, 'ldname': source_name}) + 'ldname=%s') % source_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/rbd.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/rbd.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/drivers/rbd.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/drivers/rbd.py 2020-04-08 19:45:40.000000000 +0000 @@ -669,6 +669,7 @@ except Exception as e: src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) + src_volume.close() msg = (_("Failed to clone '%(src_vol)s@%(src_snap)s' to " "'%(dest)s', error: %(error)s") % {'src_vol': src_name, @@ -677,8 +678,6 @@ 'error': e}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) - finally: - src_volume.close() depth = self._get_clone_depth(client, src_name) # If dest volume is a clone and rbd_max_clone_depth reached, diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/flows/manager/create_volume.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/flows/manager/create_volume.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/flows/manager/create_volume.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/flows/manager/create_volume.py 2020-04-08 19:45:40.000000000 +0000 @@ -619,6 +619,14 @@ cache_entry['volume_id'] ) return model_update, True + except exception.SnapshotLimitReached: + # If this exception occurred when cloning the image-volume, + # it is because the image-volume reached its snapshot limit. + # Delete current cache entry and create a "fresh" entry + # NOTE: This will not delete the existing image-volume and + # only delete the cache entry + with excutils.save_and_reraise_exception(): + self.image_volume_cache.evict(context, cache_entry) except NotImplementedError: LOG.warning('Backend does not support creating image-volume ' 'clone. Image will be downloaded from Glance.') @@ -702,17 +710,18 @@ image_id, image_meta ) + except exception.SnapshotLimitReached: + # This exception will be handled by the caller's + # (_create_from_image) retry decorator + with excutils.save_and_reraise_exception(): + LOG.debug("Snapshot limit reached. Creating new " + "image-volume.") except exception.CinderException as e: LOG.warning('Failed to create volume from image-volume ' 'cache, image will be downloaded from Glance. ' 'Error: %(exception)s', {'exception': e}) - # If an exception occurred when cloning the image-volume, - # it may be the image-volume reached its snapshot limit. - # Create another "fresh" cache entry. - update_cache = True - # Don't cache unless directed. 
if not cloned and update_cache: should_create_cache_entry = True @@ -804,6 +813,7 @@ return model_update + @utils.retry(exception.SnapshotLimitReached, retries=1) def _create_from_image(self, context, volume, image_location, image_id, image_meta, image_service, **kwargs): diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder/volume/manager.py cinder-14.0.5.dev1.202004081945.disco/cinder/volume/manager.py --- cinder-14.0.3.dev24.202001031945.disco/cinder/volume/manager.py 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder/volume/manager.py 2020-04-08 19:45:40.000000000 +0000 @@ -936,9 +936,6 @@ LOG.exception("Failed to update usages deleting volume.", resource=volume) - # Delete glance metadata if it exists - self.db.volume_glance_metadata_delete_by_volume(context, volume.id) - volume.destroy() # If deleting source/destination volume in a migration or a temp @@ -3534,9 +3531,6 @@ resource={'type': 'group', 'id': group.id}) - # Delete glance metadata if it exists - self.db.volume_glance_metadata_delete_by_volume(context, vol.id) - vol.destroy() # Commit the reservations diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/pbr.json cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/pbr.json --- cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/pbr.json 2020-01-03 19:48:23.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/pbr.json 2020-04-08 19:51:58.000000000 +0000 @@ -1 +1 @@ -{"git_version": "a59c01e82", "is_release": false} \ No newline at end of file +{"git_version": "db58c6c90", "is_release": false} \ No newline at end of file diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/PKG-INFO cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/PKG-INFO --- cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/PKG-INFO 2020-01-03 19:48:23.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/PKG-INFO 2020-04-08 19:51:58.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cinder -Version: 14.0.3.dev24 +Version: 14.0.5.dev1 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack diff -Nru cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/SOURCES.txt cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/SOURCES.txt --- cinder-14.0.3.dev24.202001031945.disco/cinder.egg-info/SOURCES.txt 2020-01-03 19:48:24.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/cinder.egg-info/SOURCES.txt 2020-04-08 19:51:58.000000000 +0000 @@ -12,6 +12,7 @@ bindep.txt driver-requirements.txt lower-constraints.txt +reno.yaml requirements.txt setup.cfg setup.py @@ -1040,6 +1041,7 @@ cinder/tests/unit/db/test_cluster.py cinder/tests/unit/db/test_migrations.py cinder/tests/unit/db/test_name_id.py +cinder/tests/unit/db/test_orm_relationships.py cinder/tests/unit/db/test_purge.py cinder/tests/unit/db/test_qos_specs.py cinder/tests/unit/db/test_reset_backend.py @@ -2016,6 +2018,7 @@ releasenotes/notes/bug-1805550-default-policy-file-db15eaa76fefa115.yaml releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml releasenotes/notes/bug-1833115-fix-netapp-ontap-python3-failures-dd869e602f9539e1.yaml +releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml releasenotes/notes/bug-invalid-content-type-1715094-8yu8i9w425ua08f3.yaml releasenotes/notes/bug-reno-69539ecb9b0b5464.yaml releasenotes/notes/bugfix-1744692-5aebd0c97ae66407.yaml @@ -2280,6 +2283,7 @@ releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml 
releasenotes/notes/policy-for-type-list-and-show-apis-rt56uy78crt5e378.yaml releasenotes/notes/policy-in-code-226f71562ab28195.yaml +releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml releasenotes/notes/powermax-inuse-retype-support-64bd35adab17420d.yaml releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml @@ -2440,6 +2444,7 @@ releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml releasenotes/notes/support-image-signature-verification-yu8qub7286et9dh4.yaml +releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml releasenotes/notes/support-metadata-for-backup-3d8753f67e2934fa.yaml releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml diff -Nru cinder-14.0.3.dev24.202001031945.disco/debian/changelog cinder-14.0.5.dev1.202004081945.disco/debian/changelog --- cinder-14.0.3.dev24.202001031945.disco/debian/changelog 2020-01-03 19:48:59.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/debian/changelog 2020-04-08 19:53:05.000000000 +0000 @@ -1,10 +1,44 @@ -cinder (2:14.0.3.dev24.202001031945.disco-0ubuntu1) disco; urgency=medium +cinder (2:14.0.5.dev1.202004081945.disco-0ubuntu1) disco; urgency=medium * Automated Ubuntu testing build: - * [c9efc9e] Rollback the volume_types table when failed to update - quota_usages + * [db58c6c] PowerMax Driver - Legacy volume not found + * [47caae4] RBD: fix volume reference handling in clone logic + * [25a43b4] [Unity] Fix TypeError for test case + test_delete_host_wo_lock + * [56a874f] Add new license scheme for Flashsystem9000 series + * [16f9c55] NEC driver: fix an undefined variable + * [f4aa814] ChunkedBackupDriver: Freeing memory on restore + * [d906d81] Cinder backup export broken + * [24cac8d] PowerMax Docs - corrections and improvements + * [b21dc07] Tell reno to ignore the kilo branch + * [8c7861c] Fix: Create new cache entry when xtremio reaches snap + limit + * [bef9086] Support Incremental Backup Completion In RBD + * [3b3e0b2] Make volume soft delete more thorough + * [bffb7d5] Cap sphinx for py2 to match global reqs + * [0702d3b] PowerMax Driver - Volume group delete failure - -- Openstack Ubuntu Testing Bot Fri, 03 Jan 2020 19:48:59 +0000 + -- Openstack Ubuntu Testing Bot Wed, 08 Apr 2020 19:53:05 +0000 + +cinder (2:14.0.4-0ubuntu1~cloud0) bionic-stein; urgency=medium + + * d/watch: Update tarball URL to opendev.org. + * New stable point release for OpenStack Stein (LP: #1871126). + * d/control: Add python3-sqlalchemy-utils BD. + + -- Corey Bryant Mon, 06 Apr 2020 12:39:00 -0400 + +cinder (2:14.0.3-0ubuntu1~cloud0) bionic-stein; urgency=medium + + * New stable point release for OpenStack Stein (LP: #1858934). + + -- Corey Bryant Thu, 09 Jan 2020 09:07:36 -0500 + +cinder (2:14.0.2-0ubuntu1~cloud0) bionic-stein; urgency=medium + + * New upstream release for the Ubuntu Cloud Archive. 
+ + -- Openstack Ubuntu Testing Bot Mon, 28 Oct 2019 13:21:31 +0000 cinder (2:14.0.2-0ubuntu1) disco; urgency=medium diff -Nru cinder-14.0.3.dev24.202001031945.disco/debian/control cinder-14.0.5.dev1.202004081945.disco/debian/control --- cinder-14.0.3.dev24.202001031945.disco/debian/control 2020-01-03 19:48:43.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/debian/control 2020-04-08 19:52:38.000000000 +0000 @@ -82,6 +82,7 @@ python3-rtslib-fb (>= 2.1.65), python3-six (>= 1.10.0), python3-sqlalchemy (>= 1.0.10), + python3-sqlalchemy-utils, python3-stestr, python3-stevedore (>= 1:1.20.0), python3-suds (>= 0.6), diff -Nru cinder-14.0.3.dev24.202001031945.disco/debian/watch cinder-14.0.5.dev1.202004081945.disco/debian/watch --- cinder-14.0.3.dev24.202001031945.disco/debian/watch 2020-01-03 19:48:43.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/debian/watch 2020-04-08 19:52:38.000000000 +0000 @@ -1,3 +1,3 @@ version=3 opts="uversionmangle=s/\.([a-zA-Z])/~$1/;s/%7E/~/;s/\.0b/~b/;s/\.0rc/~rc/" \ - https://tarballs.openstack.org/cinder/ cinder-(14\.\d.*)\.tar\.gz + https://tarballs.opendev.org/openstack/cinder/ cinder-(14\.\d.*)\.tar\.gz diff -Nru cinder-14.0.3.dev24.202001031945.disco/doc/requirements.txt cinder-14.0.5.dev1.202004081945.disco/doc/requirements.txt --- cinder-14.0.3.dev24.202001031945.disco/doc/requirements.txt 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/doc/requirements.txt 2020-04-08 19:45:40.000000000 +0000 @@ -4,7 +4,8 @@ openstackdocstheme>=1.18.1 # Apache-2.0 reno>=2.5.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 -sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD +sphinx!=1.6.6,!=1.6.7,>=1.6.3,<2.0.0;python_version=='2.7' # BSD +sphinx!=1.6.6,!=1.6.7,>=1.6.3;python_version>='3.4' # BSD mock>=2.0.0 # BSD os-api-ref>=1.4.0 # Apache-2.0 ddt>=1.0.1 # MIT diff -Nru cinder-14.0.3.dev24.202001031945.disco/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst cinder-14.0.5.dev1.202004081945.disco/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst --- cinder-14.0.3.dev24.202001031945.disco/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst 2020-04-08 19:45:40.000000000 +0000 @@ -1,5 +1,5 @@ ====================================== -Dell EMC POWERMAX iSCSI and FC drivers +Dell EMC PowerMax iSCSI and FC drivers ====================================== The Dell EMC PowerMax drivers, ``PowerMaxISCSIDriver`` and @@ -35,9 +35,50 @@ and Configuration Guide`` and ``Dell EMC Unisphere for PowerMax Installation Guide`` at ``support.emc.com``. +.. note:: + + While it is not explicitly documented which OS versions should be + installed on a particular array, it is recommended to install the latest + PowerMax OS as supported by Unisphere for PowerMax, that the PowerMax + driver supports for a given OpenStack release. + + +-----------+------------------------+-------------+ + | OpenStack | Unisphere for PowerMax | PowerMax OS | + +===========+========================+=============+ + | Stein | 9.0.x | 5978.221 | + +-----------+------------------------+-------------+ + | Rocky | 9.0.x | 5978.221 | + +-----------+------------------------+-------------+ + + However, a Hybrid array can only run HyperMax OS 5977, and is still + supported until further notice. Some functionality will not be available + in older versions of the OS. 
If in any doubt, please contact your customer + representative. + + + Required PowerMax software suites for OpenStack ----------------------------------------------- +The storage system requires a Unisphere for PowerMax (SMC) eLicense. + +PowerMax +~~~~~~~~ +There are two licenses for the PowerMax 2000 and 8000: + +- Essentials software package +- Pro software package + +The Dell EMC PowerMax cinder driver requires the Pro software package. + +All Flash +~~~~~~~~~ +For full functionality including SRDF for the VMAX All Flash, the FX package, +or the F package plus the SRDF a la carte add on is required. + +Hybrid +~~~~~~ + There are five Dell EMC Software Suites sold with the VMAX Hybrid arrays: - Base Suite @@ -53,13 +94,11 @@ Using PowerMax Remote Replication functionality will also require the Remote Replication Suite. -For full functionality including SRDF for the VMAX All Flash, the FX package, -or the F package plus the SRDF ``a la carte`` add on is required. -The storage system also requires a Unisphere for PowerMax (SMC) eLicence. +.. note:: -Each are licensed separately. For further details on how to get the -relevant license(s), reference eLicensing Support below. + Each are licensed separately. For further details on how to get the + relevant license(s), reference eLicensing Support below. eLicensing support @@ -127,25 +166,49 @@ - Extending attached volume - Replicated volume retype support - Retyping attached(in-use) volume -- Unisphere high availability(HA) support +- Unisphere High Availability(HA) support + + +PowerMax naming conventions +=========================== .. note:: - VMAX All Flash array with Solutions Enabler 8.3.0.11 or later have - compression enabled by default when associated with Diamond Service Level. - This means volumes added to any newly created storage groups will be - compressed. + shortHostName will be altered using the following formula, if its length + exceeds 16 characters. This is because the storage group and masking view + names cannot exceed 64 characters: + .. code-block:: text + + if len(shortHostName) > 16: + 1. Perform md5 hash on the shortHostName + 2. Convert output of 1. to hex + 3. Take last 6 characters of shortHostName and append output of 2. + 4. If the length of output of 3. exceeds 16 characters, join the + first 8 characters and last 8 characters. + +.. note:: + + portgroup_name will be altered using the following formula, if its length + exceeds 12 characters. This is because the storage group and masking view + names cannot exceed 64 characters: + + .. code-block:: text + + if len(portgroup_name) > 12: + 1. Perform md5 hash on the portgroup_name + 2. Convert output of 1. to hex + 3. Take last 6 characters of portgroup_name and append output of 2. + 4. If the length of output of 3. exceeds 12 characters, join the + first 6 characters and last 6 characters. -PowerMax naming conventions -=========================== Masking view names ------------------ Masking views are dynamically created by the PowerMax FC and iSCSI drivers using the following naming conventions. ``[protocol]`` is either ``I`` for -volumes attached over iSCSI or ``F`` for volumes attached over Fiber Channel. +volumes attached over iSCSI or ``F`` for volumes attached over Fibre Channel. .. code-block:: text @@ -161,7 +224,7 @@ (either WWNNs or IQNs) from OpenStack and adds or updates the contents of the Initiator Group as required. Names are of the following format. 
``[protocol]`` is either ``I`` for volumes attached over iSCSI or ``F`` for volumes attached -over Fiber Channel. +over Fibre Channel. .. code-block:: console @@ -210,15 +273,16 @@ .. note:: CD and RE are only set if compression is explicitly disabled or replication - explicitly enabled. See the compression and replication sections below. + explicitly enabled. See the compression `11. All Flash compression support`_ + and replication `Volume replication support`_ sections below. .. note:: For PowerMax and any All Flash with PowerMax OS (5978) or greater, workload - is NONE + if set will be ignored and set to NONE -PowerMax Driver Integration +PowerMax driver integration =========================== 1. Prerequisites @@ -251,7 +315,7 @@ Configuration Guide`` at ``support.emc.com``. -2. FC Zoning with PowerMax +2. FC zoning with PowerMax -------------------------- Zone Manager is required when there is a fabric between the host and array. @@ -261,8 +325,8 @@ 3. iSCSI with PowerMax ---------------------- -- Make sure the ``iscsi-initiator-utils`` package is installed on all Compute - nodes. +- Make sure the ``open-iscsi`` package (or distro equivalent) is installed + on all Compute nodes. .. note:: @@ -270,15 +334,16 @@ masking view. An attach operation creates this masking view. -4. Configure Block Storage in cinder.conf + +4. Configure block storage in cinder.conf ----------------------------------------- .. note:: VMAX driver was rebranded to PowerMax in Stein, so some of the driver - specific tags have also changed. Legacy tags like vmax_srp, vmax_array, - vmax_service_level and vmax_port_group, as well as the old driver - location, will continue to work until the 'V' release. + specific tags have also changed. Legacy tags like ``vmax_srp``, + ``vmax_array``, ``vmax_service_level`` and ``vmax_port_group``, as well + as the old driver location, will continue to work until the 'V' release. .. config-table:: @@ -289,13 +354,6 @@ .. note:: - For security and backend uniformity, the use of the XML file for PowerMax - backend configuration was deprecated in Queens and removed entirely - in Rocky. - - -.. note:: - ``san_api_port`` is ``8443`` by default but can be changed if necessary. For the purposes of this documentation the default is assumed so the tag will not appear in any of the ``cinder.conf`` @@ -330,7 +388,7 @@ +--------------------+----------------------------+---------+----------+ -Configure Block Storage in cinder.conf +Configure block storage in cinder.conf Add the following entries to ``/etc/cinder/cinder.conf``: @@ -426,80 +484,82 @@ ``cinder.conf`` backend stanza. -6. Create Volume Types +6. Create volume types ---------------------- - Once the ``cinder.conf`` has been updated, :command:`openstack` commands - need to be issued in order to create and associate OpenStack volume types - with the declared ``volume_backend_names``. - - Additionally, each volume type will need an associated ``pool_name`` - an - extra specification indicating the service level/ workload combination to - be used for that volume type. +Once the ``cinder.conf`` has been updated, :command:`openstack` commands +need to be issued in order to create and associate OpenStack volume types +with the declared ``volume_backend_names``. - There is also the option to assign a port group to a volume type by - setting the ``storagetype:portgroupname`` extra specification. 
+Additionally, each volume type will need an associated ``pool_name`` - an +extra specification indicating the service level/ workload combination to +be used for that volume type. - .. note:: +There is also the option to assign a port group to a volume type by +setting the ``storagetype:portgroupname`` extra specification. - It is possible to create as many volume types as the number of Service - Level and Workload(available) combination for provisioning volumes. The - pool_name is the additional property which has to be set and is of the - format: ``+++``. - This can be obtained from the output of the ``cinder get-pools--detail``. - Workload is NONE for PowerMax or any All Flash with PowerMax OS (5978) - or greater. +.. note:: - .. code-block:: console + It is possible to create as many volume types as the number of Service + Level and Workload(available) combination for provisioning volumes. The + pool_name is the additional property which has to be set and is of the + format: ``+++``. + This can be obtained from the output of the ``cinder get-pools--detail``. + Workload is NONE for PowerMax or any All Flash with PowerMax OS (5978) + or greater. - $ openstack volume type create POWERMAX_ISCSI_SILVER_OLTP - $ openstack volume type set --property volume_backend_name=ISCSI_backend \ - --property pool_name=Silver+OLTP+SRP_1+000123456789 \ - --property storagetype:portgroupname=OS-PG2 \ - POWERMAX_ISCSI_SILVER_OLTP - $ openstack volume type create POWERMAX_FC_DIAMOND_DSS - $ openstack volume type set --property volume_backend_name=FC_backend \ - --property pool_name=Diamond+DSS+SRP_1+000123456789 \ - --property storagetype:portgroupname=OS-PG1 \ - POWERMAX_FC_DIAMOND_DSS +.. code-block:: console + $ openstack volume type create POWERMAX_ISCSI_SILVER_OLTP + $ openstack volume type set --property volume_backend_name=ISCSI_backend \ + --property pool_name=Silver+OLTP+SRP_1+000123456789 \ + --property storagetype:portgroupname=OS-PG2 \ + POWERMAX_ISCSI_SILVER_OLTP + $ openstack volume type create POWERMAX_FC_DIAMOND_DSS + $ openstack volume type set --property volume_backend_name=FC_backend \ + --property pool_name=Diamond+DSS+SRP_1+000123456789 \ + --property storagetype:portgroupname=OS-PG1 \ + POWERMAX_FC_DIAMOND_DSS - By issuing these commands, the Block Storage volume type - ``POWERMAX_ISCSI_SILVER_OLTP`` is associated with the ``ISCSI_backend``, - a Silver Service Level, and an OLTP workload. - The type ``POWERMAX_FC_DIAMOND_DSS`` is associated with the ``FC_backend``, - a Diamond Service Level, and a DSS workload. +By issuing these commands, the Block Storage volume type +``POWERMAX_ISCSI_SILVER_OLTP`` is associated with the ``ISCSI_backend``, +a Silver Service Level, and an OLTP workload. - The ``ServiceLevel`` manages the underlying storage to provide expected - performance. Setting the ``ServiceLevel`` to ``None`` means that non-FAST - managed storage groups will be created instead (storage groups not - associated with any service level). If ``ServiceLevel`` is ``None`` then - ``Workload`` must be ``None``. +The type ``POWERMAX_FC_DIAMOND_DSS`` is associated with the ``FC_backend``, +a Diamond Service Level, and a DSS workload. - .. code-block:: console +The ``ServiceLevel`` manages the underlying storage to provide expected +performance. Setting the ``ServiceLevel`` to ``None`` means that non-FAST +managed storage groups will be created instead (storage groups not +associated with any service level). If ``ServiceLevel`` is ``None`` then +``Workload`` must be ``None``. 
- openstack volume type set --property pool_name=None+None+SRP_1+000123456789 +.. code-block:: console - When a ``Workload`` is added, the latency range is reduced due to the - added information. Setting the ``Workload`` to ``None`` means the latency - range will be the widest for its Service Level type. Please note that you - cannot set a Workload without a Service Level. + openstack volume type set --property pool_name=None+None+SRP_1+000123456789 - .. code-block:: console +When a ``Workload`` is added, the latency range is reduced due to the +added information. Setting the ``Workload`` to ``None`` means the latency +range will be the widest for its Service Level type. Please note that you +cannot set a Workload without a Service Level. - openstack volume type set --property pool_name=Diamond+None+SRP_1+000123456789 +.. code-block:: console - .. note:: + openstack volume type set --property pool_name=Diamond+None+SRP_1+000123456789 + +.. note:: - PowerMax and Hybrid support Optimized, Diamond, Platinum, Gold, Silver, - Bronze, and NONE service levels. VMAX All Flash supports Diamond and - None. Hybrid and All Flash support DSS_REP, DSS, OLTP_REP, OLTP, and None - workloads, the latter up until ucode 5977. There is no support for - workloads in PowerMax OS (5978) or greater. + PowerMax and Hybrid support ``Optimized``, ``Diamond``, ``Platinum``, + ``Gold``, ``Silver``, ``Bronze``, and ``NONE`` service levels. VMAX + All Flash supports ``Diamond`` and `None. Hybrid and All Flash support + ``DSS_REP``, ``DSS``, ``OLTP_REP``, ``OLTP``, and None workloads, the + latter up until ucode 5977. Please refer to Stein PowerMax online + documentation if you wish to use ``workload``. There is no support + for workloads in PowerMax OS (5978) or greater. -7. Interval and Retries +7. Interval and retries ----------------------- By default, ``interval`` and ``retries`` are ``3`` seconds and ``200`` retries @@ -530,32 +590,32 @@ interval = 1 retries = 700 -8. CHAP Authentication Support +8. CHAP authentication support ------------------------------ -This supports one way initiator CHAP authentication functionality into the +This supports one-way initiator CHAP authentication functionality into the PowerMax backend. With CHAP one-way authentication, the storage array challenges the host during the initial link negotiation process and expects to receive a valid credential and CHAP secret in response. When challenged, the host transmits a CHAP credential and CHAP secret to the storage array. -The storagearray looks for this credential and CHAP secret which stored in +The storage array looks for this credential and CHAP secret which stored in the host initiator's initiator group (IG) information in the ACLX database. Once a positive authentication occurs, the storage array sends an acceptance message to the host. However, if the storage array fails to find any record of the credential/secret pair, it sends a rejection message, and the link is closed. -Assumptions, Restrictions and Pre-Requisites +Assumptions, restrictions and prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. The host initiator IQN is required along with the credentials the host initiator will use to log into the storage array with. The same credentials should be used in a multi node system if connecting to the same array. -#. Enable one way CHAP authentication for the iscsi initiator on the storage +#. Enable one-way CHAP authentication for the iSCSI initiator on the storage array using SYMCLI. Template and example shown below. 
For the purpose of this setup, the credential/secret used would be my_username/my_password - with iscsi initiator of iqn.1991-05.com.company.lcseb130 + with iSCSI initiator of iqn.1991-05.com.company.lcseb130 .. code-block:: console @@ -569,7 +629,7 @@ -Settings and Configuration +Settings and configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Set the configuration in the PowerMax backend group in cinder.conf using the @@ -741,8 +801,8 @@ Volume is created against volume type and QoS is enforced with the parameters above. -USE CASE 2 - Preset limits -~~~~~~~~~~~~~~~~~~~~~~~~~~ +USE CASE 2 - Pre-set limits +~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax @@ -865,8 +925,8 @@ -USE CASE 3 - Preset limits -~~~~~~~~~~~~~~~~~~~~~~~~~~ +USE CASE 3 - Pre-set limits +~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax @@ -1121,7 +1181,10 @@ .. note:: This feature is only applicable for All Flash arrays, 250F, 450F, 850F - and 950F and PowerMax 2000 and 8000. + and 950F and PowerMax 2000 and 8000. It was first introduced Solutions + Enabler 8.3.0.11 or later and is enabled by default when associated with + a Service Level. This means volumes added to any newly created storage + groups will be compressed. Use case 1 - Compression disabled create, attach, detach, and delete volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1171,7 +1234,7 @@ https://docs.openstack.org/cinder/latest/admin/blockstorage-over-subscription.html -13. Live Migration support +13. Live migration support -------------------------- Non-live migration (sometimes referred to simply as 'migration'). The instance @@ -1198,7 +1261,7 @@ In PowerMax, A volume cannot belong to two or more FAST storage groups at the same time. To get around this limitation we leverage both cascaded storage -groups and a temporary non FAST storage group. +groups and a temporary non-FAST storage group. A volume can remain 'live' if moved between masking views that have the same initiator group and port groups which preserves the host path. @@ -1212,7 +1275,7 @@ #. The volume is added to the FAST storage group within the destination parent storage group of the destination masking view. At this point the volume belongs to two storage groups. -#. One of two things happens: +#. One of two things happen: - If the connection to the destination instance is successful, the volume is removed from the non-FAST storage group in the originating masking @@ -1227,11 +1290,12 @@ Please refer to the following for more information: -https://docs.openstack.org/nova/latest/admin/live-migration-usage.html +https://docs.openstack.org/nova/latest/admin/configuring-migrations.html and -https://docs.openstack.org/nova/latest/admin/configuring-migrations.html +https://docs.openstack.org/nova/latest/admin/live-migration-usage.html + .. note:: @@ -1302,7 +1366,7 @@ and HostC. HostA is the compute node while HostB and HostC are the compute nodes. The following were also used in live migration. -- 2 gb bootable volume using the cirros image. +- 2 gb bootable volume using the CirrOS image. - Instance created using the 2gb volume above with a flavor m1.small using 2048 RAM, 20GB of Disk and 1 VCPU. @@ -1366,7 +1430,7 @@ https://docs.openstack.org/cinder/latest/admin/blockstorage-volume-multiattach.html for configuration information. 
-Multi-attach Architecture +Multi-attach architecture ~~~~~~~~~~~~~~~~~~~~~~~~~ In PowerMax, a volume cannot belong to two or more FAST storage groups at the @@ -1381,7 +1445,7 @@ backend is required – the volume is attached to and detached from each host as normal. -Example Use Case +Example use case ~~~~~~~~~~~~~~~~ Volume ``Multi-attach-Vol-1`` (with a multi-attach capable volume type, and @@ -1411,21 +1475,22 @@ storage group. The non-FAST managed storage group is cleaned up, if required. -.. note:: - Known issue - the multi-attach flag is still false after a retype. This - is being addressed in https://bugs.launchpad.net/cinder/+bug/1790840 - - -15. Volume Encryption support +15. Volume encryption support ----------------------------- Please refer to the following: https://docs.openstack.org/cinder/latest/configuration/block-storage/volume-encryption.html -16. Volume metadata in logs ---------------------------- +16. Volume metadata +------------------- + +Volume metadata is returned to the user in both the Cinder Volume logs and +with volumes and snapshots created in Cinder via the UI or CLI. + +16.1 Volume metadata in logs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If debug is enabled in the default section of the cinder.conf, PowerMax Cinder driver will log additional volume information in the Cinder volume log, @@ -1461,7 +1526,36 @@ | serial_number | 000123456789 | +------------------------------------+---------------------------------------------------------+ -17. Unisphere high availability(HA) support +16.2 Metadata in the UI and CLI +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default metadata will be set on all volume and snapshot objects created in +Cinder. This information represents the state of the object on the backend +PowerMax and will change when volume attributes are changed by performing +actions on them such as re-type or attaching to an instance. + +.. code-block:: console + + demo@openstack-controller:~$ cinder show powermax-volume + + +--------------------------------+------------------------------------------------------------+ + | Property | Value | + +--------------------------------+------------------------------------------------------------+ + | metadata | ArrayID : 000123456789 | + | | ArrayModel : PowerMax_8000 | + | | CompressionDisabled : False | + | | Configuration : TDEV | + | | DeviceID : 0012F | + | | DeviceLabel : OS-d87edb98-60fd-49dd-bb0f-cc388cf6f3f4 | + | | Emulation : FBA | + | | ReplicationEnabled : False | + | | ServiceLevel : Diamond | + | | Workload : None | + | name | powermax-volume | + +--------------------------------+------------------------------------------------------------+ + + +17. Unisphere High Availability(HA) support ------------------------------------------- This feature facilitates high availability of Unisphere for PowerMax servers, @@ -1620,7 +1714,7 @@ * ``remote_port_group`` is the name of a PowerMax port group that has been pre-configured to expose volumes managed by this backend in the event - of a failover. Make sure that this portgroup contains either all FC or + of a failover. Make sure that this port group contains either all FC or all iSCSI port groups (for a given back end), as appropriate for the configured driver (iSCSI or FC). @@ -1675,7 +1769,7 @@ ``replication_device`` parameter has been entered in the PowerMax backend entry in the ``cinder.conf``, a corresponding volume type needs to be created ``replication_enabled`` property set. See - above ``Setup PowerMax drivers`` for details. + above `6. 
Create volume types`_ for details. .. code-block:: console @@ -1720,6 +1814,11 @@ Failover host ~~~~~~~~~~~~~ +.. note:: + + Failover and Failback operations are not applicable in Metro + configurations. + In the event of a disaster, or where there is required downtime, upgrade of the primary array for example, the administrator can issue the failover host command to failover to the configured target: @@ -1728,22 +1827,38 @@ # cinder failover-host cinder_host@POWERMAX_FC_REPLICATION -If the primary array becomes available again, you can initiate a failback -using the same command and specifying ``--backend_id default``: +After issuing Cinder failover-host command Cinder will set the R2 array as the +target array for Cinder, however to get existing instances to use this new +array and paths to volumes it is necessary to first shelve Nova instances and +then unshelve them, this will effectively restart the Nova instance and +re-establish data paths between Nova instances and the volumes on the R2 array. .. code-block:: console - # cinder failover-host cinder_host@POWERMAX_FC_REPLICATION --backend_id default + # nova shelve + # nova unshelve [--availability-zone ] + +When a host is in failover mode performing normal volume or snapshot +provisioning will not be possible, failover-host mode simply provides access +to replicated volumes to minimise environment down-time. The primary objective +whilst in failover mode should be to get the R1 array back online. When the +primary array becomes available again, you can initiate a failback using the +same failover command and specifying --backend_id default: -.. note:: +.. code-block:: console - Failover and Failback operations are not applicable in Metro configurations. + # cinder failover-host cinder_host@POWERMAX_FC_REPLICATION --backend_id default +After issuing the failover command to revert to the default backend host it is +necessary to re-issue the Nova shelve and unshelve commands to restore the +data paths between Nova instances and their corresponding back end volumes. +Once reverted to the default backend volume and snapshot provisioning +operations can continue as normal. -Asynchronous and Metro replication management groups +Asynchronous and metro replication management groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Asynchronous and Metro volumes in an RDF session, i.e. belonging to an SRDF +Asynchronous and metro volumes in an RDF session, i.e. belonging to an SRDF group, must be managed together for RDF operations (although there is a ``consistency exempt`` option for creating and deleting pairs in an Async group). To facilitate this management, we create an internal RDF management @@ -1756,7 +1871,7 @@ Metro support ~~~~~~~~~~~~~ -SRDF/Metro is a High Availabilty solution. It works by masking both sides of +SRDF/Metro is a high availability solution. It works by masking both sides of the RDF relationship to the host, and presenting all paths to the host, appearing that they all point to the one device. In order to do this, there needs to be multipath software running to manage writing to the @@ -1775,7 +1890,7 @@ replication volume type. -Volume retype - storage assisted volume migration +Volume retype - storage assisted volume migration -------------------------------------------------- Volume retype with storage assisted migration is supported now for @@ -1787,7 +1902,7 @@ another, use volume retype with the migration-policy to on-demand. 
The target volume type should have the same volume_backend_name configured and should have the desired pool_name to which you are trying to retype to - (please refer to ``Setup PowerMax Drivers`` for details). + (please refer to `6. Create volume types`_ for details). .. code-block:: console @@ -1805,7 +1920,7 @@ .. note:: - With the Stein release, In Use (attached) volume retype is supported + With the Stein release, in-use (attached) volume retype is supported Generic volume group support @@ -1865,7 +1980,7 @@ A generic volume group can be both consistent group snapshot enabled and consistent group replication enabled. -Storage Group Names +Storage group names ~~~~~~~~~~~~~~~~~~~ Storage groups are created on the PowerMax as a result of creation of generic @@ -1877,113 +1992,22 @@ TruncatedGroupName_GroupUUID or GroupUUID -Group type operations -~~~~~~~~~~~~~~~~~~~~~ +Group type, group and group snapshot operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Create a group type - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-create GROUP_TYPE - -- Show a group type - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-show GROUP_TYPE - -- List group types - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-list - -- Delete group type - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-delete GROUP_TYPE - -- Set/unset a group spec - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-key GROUP_TYPE set consistent_group_snapshot_enabled=" True" - -- List group types and group specs: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-specs-list - -Group operations -~~~~~~~~~~~~~~~~ - -- Create a group: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-create --name GROUP GROUP_TYPE VOLUME_TYPE1,VOLUME_TYPE2 - -- Show a group: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-show GROUP - -- List all groups: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-list - -- Create a volume and add it to a group at the time of creation: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 create --volume-type VOLUME_TYPE1 --group-id GROUP_ID 1 - -- Modify a group to add or remove volumes: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-update --add-volumes UUID1,UUID2 --remove-volumes UUID3,UUID4 GROUP - -- Delete a group - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-delete --delete-volumes GROUP - -Group snapshot operations -~~~~~~~~~~~~~~~~~~~~~~~~~ - -- Create a group snapshot: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-create --name GROUP_SNAPSHOT GROUP - -- Delete group snapshot(s): - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-delete GROUP_SNAPSHOT - -- Create a group from a group snapshot: - -.. code-block:: console - - $ cinder --os-volume-api-version 3.14 group-create-from-src --group-snapshot GROUP_SNAPSHOT --name GROUP - -- Create a group from a source snapshot: - -.. 
code-block:: console - - $ cinder --os-volume-api-version 3.14 group-create-from-src --source-group SOURCE_GROUP --name GROUP +Please refer to the following section for the most up to date group type +group and group replication operations https://docs.openstack.org/cinder/latest/admin/blockstorage-groups.html Group replication operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Generic volume group operations no longer require the user to specify the +Cinder CLI version, however, performing generic volume group replication +operations still require this setting. When running generic volume group +commands set the value --os-volume-api-version to 3.38. +These commands are not listed in the latest Cinder CLI documentation so +will remain here until added to the latest Cinder CLI version or +deprecated from Cinder. + - Enable group replication @@ -2011,7 +2035,7 @@ --secondary-backend-id default -Manage and Unmanage Volumes +Manage and unmanage Volumes --------------------------- Managing volumes in OpenStack is the process whereby a volume which exists @@ -2031,7 +2055,7 @@ - The volume must a whole GB e.g. 5.5GB is not a valid size -- The volume cannot be a snapvx target +- The volume cannot be a SnapVX target For a volume to exist in a Cinder managed pool, it must reside in the same @@ -2061,7 +2085,7 @@ - The PowerMax serial number (12 digit numerical) -Manage Volumes +Manage volumes ~~~~~~~~~~~~~~ With your pool name defined you can now manage the volume into OpenStack, this @@ -2095,7 +2119,7 @@ PowerMax driver on a manage operation. -Managing Volumes with Replication Enabled +Managing volumes with replication enabled ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Whilst it is not possible to manage volumes into OpenStack that are part of a @@ -2107,7 +2131,7 @@ enabled for that volume. -Unmanage Volume +Unmanage volume ~~~~~~~~~~~~~~~ Unmanaging a volume is not the same as deleting a volume. When a volume is @@ -2136,7 +2160,7 @@ the volume is no longer managed by OpenStack. -Manage/Unmanage Snapshots +Manage/unmanage snapshots ------------------------- Users can manage PowerMax SnapVX snapshots into OpenStack if the source volume @@ -2155,7 +2179,7 @@ #. It is only possible to manage or unmanage one snapshot at a time in Cinder. -Manage SnapVX Snapshot +Manage SnapVX snapshot ~~~~~~~~~~~~~~~~~~~~~~ It is possible to manage PowerMax SnapVX snapshots into OpenStack, where the @@ -2171,7 +2195,7 @@ or volumes which exist in a replication session. -Requirements/Restrictions: +Requirements/restrictions: #. The SnapVX source volume must be present in and managed by Cinder. @@ -2183,7 +2207,7 @@ linked target volumes. -Command Structure: +Command structure: #. Identify your SnapVX snapshot for management on the PowerMax, note the name. @@ -2245,7 +2269,7 @@ managed by Cinder will be present for use under the name ``SnapshotManaged``. -Unmanage Cinder Snapshot +Unmanage cinder snapshot ~~~~~~~~~~~~~~~~~~~~~~~~ Unmanaging a snapshot in Cinder is the process whereby the snapshot is removed @@ -2310,7 +2334,7 @@ - Volume should not be ``encapsulated`` - Volume should not be ``reserved`` - Volume should not be a part of an RDF session -- Volume should not be a snapVX Target +- Volume should not be a SnapVX Target - Volume identifier should not begin with ``OS-``. Manageable snaphots @@ -2335,7 +2359,7 @@ There is some delay in the syncing of the Unisphere for PowerMax database when the state/properties of a volume is modified using ``symcli``. 
To - prevent this it is preferrable to modify state/properties of volumes within + prevent this it is preferable to modify state/properties of volumes within Unisphere. @@ -2354,10 +2378,17 @@ Seamless upgrades from an SMI-S based driver to RESTAPI based driver, following the setup instructions above, are supported with a few exceptions: -#. Live migration functionality will not work on already attached/in-use - legacy volumes. These volumes will first need to be detached and reattached - using the RESTAPI based driver. This is because we have changed the masking - view architecture from Pike to better support this functionality. +#. OpenStack's ``live migration`` functionality will not work on already + attached/in-use legacy volumes without first migrating the volumes to + the new REST masking view structure. This can be done by running the + migrate.py script in PyU4V. Please refer to the Tools Guide in PyU4V_. + + .. code-block:: text + + $ pip install PyU4V + #. Consistency groups are deprecated in Pike. Generic Volume Groups are supported from Pike onwards. + +.. _PyU4V: https://pyu4v.readthedocs.io/en/latest/ diff -Nru cinder-14.0.3.dev24.202001031945.disco/lower-constraints.txt cinder-14.0.5.dev1.202004081945.disco/lower-constraints.txt --- cinder-14.0.3.dev24.202001031945.disco/lower-constraints.txt 2020-01-03 19:45:34.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/lower-constraints.txt 2020-04-08 19:45:40.000000000 +0000 @@ -138,6 +138,7 @@ sphinxcontrib-websupport==1.0.1 sqlalchemy-migrate==0.11.0 SQLAlchemy==1.0.10 +SQLAlchemy-Utils==0.36.1 sqlparse==0.2.4 statsd==3.2.2 stestr==2.2.0 diff -Nru cinder-14.0.3.dev24.202001031945.disco/PKG-INFO cinder-14.0.5.dev1.202004081945.disco/PKG-INFO --- cinder-14.0.3.dev24.202001031945.disco/PKG-INFO 2020-01-03 19:48:24.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/PKG-INFO 2020-04-08 19:51:59.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cinder -Version: 14.0.3.dev24 +Version: 14.0.5.dev1 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack diff -Nru cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml --- cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml 2020-04-08 19:45:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. + The device is not found when trying to snapshot a legacy volume. diff -Nru cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml --- cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml 2020-04-08 19:45:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + PowerMax driver - fix to eliminate 'cannot use the device for the function + because it is in a Copy Session' when attempting to delete a volume group + that previously had a group snapshot created on and deleted from it. 
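The provision.py and rest.py hunks earlier in this diff replace the per-device SnapVX cleanup with a generation-based delete, which is the change the powermax-bug-1853589 release note above describes. A minimal sketch of that flow, assuming a ``rest`` object exposing the two helpers added in this diff (``get_storagegroup_snap_generation_list`` and ``delete_storagegroup_snap``) and placeholder array/group/snapshot names:

.. code-block:: python

    def delete_all_snap_generations(rest, array, source_group_name, snap_name):
        """Delete every generation of a storage group SnapVX snapshot."""
        gen_list = rest.get_storagegroup_snap_generation_list(
            array, source_group_name, snap_name)
        if not gen_list:
            # Nothing to delete; the snapshot may already be gone.
            return
        # Work from the highest generation number (oldest snapshot) down to
        # generation 0 (newest), matching the reverse-sorted order used by
        # delete_group_replica() in the hunk above.
        for gen in sorted(gen_list, reverse=True):
            rest.delete_storagegroup_snap(
                array, source_group_name, snap_name, gen)

Removing every generation leaves no lingering copy session on the group, which is how the fix avoids the 'cannot use the device for the function because it is in a Copy Session' error when the volume group is later deleted.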
diff -Nru cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml --- cinder-14.0.3.dev24.202001031945.disco/releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml 2020-04-08 19:45:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - Fixed issue where all Ceph RBD backups would be incremental after the + first one. The driver now honors whether ``--incremental`` is specified or + not. diff -Nru cinder-14.0.3.dev24.202001031945.disco/reno.yaml cinder-14.0.5.dev1.202004081945.disco/reno.yaml --- cinder-14.0.3.dev24.202001031945.disco/reno.yaml 1970-01-01 00:00:00.000000000 +0000 +++ cinder-14.0.5.dev1.202004081945.disco/reno.yaml 2020-04-08 19:45:40.000000000 +0000 @@ -0,0 +1,4 @@ +--- +# Ignore the kilo-eol tag because that branch does not work with reno +# and contains no release notes. +closed_branch_tag_re: "(.+)(?<!kilo)-eol" diff -Nru cinder-14.0.3.dev24.202001031945.disco/test-requirements.txt cinder-14.0.5.dev1.202004081945.disco/test-requirements.txt oslotest>=3.2.0 # Apache-2.0 PyMySQL>=0.7.6 # MIT License psycopg2>=2.7 # LGPL/ZPL +SQLAlchemy-Utils>=0.36.1 # BSD License testtools>=2.2.0 # MIT testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD
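The ``closed_branch_tag_re`` added to ``reno.yaml`` uses a negative lookbehind so that every ``*-eol`` tag is treated as a closed branch except ``kilo-eol``, which reno cannot process for cinder. A small, self-contained check of that pattern (not part of the package; the tag names are examples only):

.. code-block:: python

    import re

    # Same pattern as reno.yaml; kilo-eol must not be reported as closed.
    CLOSED_BRANCH_TAG_RE = re.compile(r"(.+)(?<!kilo)-eol")

    for tag in ("juno-eol", "kilo-eol", "liberty-eol", "14.0.4"):
        closed = CLOSED_BRANCH_TAG_RE.fullmatch(tag) is not None
        print("%s -> %s" % (tag, "closed branch" if closed else "ignored"))

    # Expected output:
    #   juno-eol -> closed branch
    #   kilo-eol -> ignored
    #   liberty-eol -> closed branch
    #   14.0.4 -> ignored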