diff -Nru nova-17.0.7/AUTHORS nova-17.0.9/AUTHORS --- nova-17.0.7/AUTHORS 2018-10-08 21:59:15.000000000 +0000 +++ nova-17.0.9/AUTHORS 2018-12-19 20:58:55.000000000 +0000 @@ -234,6 +234,7 @@ Chris Yeoh Christian Berendt Christine Wang +Christoph Manns Christoph Thiel Christopher Lefelhocz Christopher Lefelhocz @@ -302,6 +303,7 @@ David Bingham David Edery David Hill +David Hill David Kang David McNally David Medberry @@ -520,6 +522,7 @@ Ivo Vasev J. Daniel Schmidt JC Martin +Jack Ding Jackie Truong Jacob Cherkas Jake Dahn @@ -559,6 +562,7 @@ Jenkins Jennifer Mulsow Jenny Oshima +Jens Harbott Jens Jorritsma Jens Rosenboom Jeremy Liu diff -Nru nova-17.0.7/ChangeLog nova-17.0.9/ChangeLog --- nova-17.0.7/ChangeLog 2018-10-08 21:59:12.000000000 +0000 +++ nova-17.0.9/ChangeLog 2018-12-19 20:58:52.000000000 +0000 @@ -1,10 +1,38 @@ CHANGES ======= -17.0.7 +17.0.9 ------ +* Add secret=true to fixed\_key configuration parameter +* Create BDMs/tags in cell with instance when over-quota +* Make the nova-next job voting and gating +* Add functional regression test for bug 1806064 +* compute: Ensure pre-migrating instances are destroyed during init\_host +* Add regression test for bug #1764883 +* Test websocketproxy with TLS in the nova-next job +* Move the nova-next job in-tree and update it +* Default embedded instance.flavor.is\_public attribute +* Make supports\_direct\_io work on 4096b sector size +* De-dupe subnet IDs when calling neutron /subnets API +* Fix NoneType error in \_notify\_volume\_usage\_detach +* Ensure attachment cleanup on failure in driver.pre\_live\_migration +* Move live\_migration.pre.start to the start of the method +* Fix up compute rpcapi version for pike release +* conductor: Recreate volume attachments during a reschedule +* Add regression test for bug#1784353 +* fixtures: Track volume attachments within CinderFixtureNewAttachFlow +* Add regression test for bug 1797580 +* Handle volume API failure in \_post\_live\_migration +* Handle missing marker during online data migration + +17.0.8 +------ + +* Not set instance to ERROR if set\_admin\_password failed +* Ignore VirtDriverNotReady in \_sync\_power\_states periodic task * stable-only: fix typo in IVS related privsep method +* Fix stacktraces with redis caching backend * nova-manage - fix online\_data\_migrations counts * Skip more rebuild tests for cells v1 job * Skip ServerShowV247Test.test\_update\_rebuild\_list\_server in nova-cells-v1 job @@ -25,6 +53,7 @@ * Fix soft deleting vm fails after "nova resize" vm * Fix service list for disabled compute using MC driver * Set default of oslo.privsep.daemon logging to INFO level +* Refix disk size during live migration with disk over-commit * Update RequestSpec.flavor on resize\_revert * cover migration cases with functional tests * Follow devstack-plugin-ceph job rename diff -Nru nova-17.0.7/debian/changelog nova-17.0.9/debian/changelog --- nova-17.0.7/debian/changelog 2019-01-07 19:54:42.000000000 +0000 +++ nova-17.0.9/debian/changelog 2019-03-06 09:30:05.000000000 +0000 @@ -1,3 +1,11 @@ +nova (2:17.0.9-0ubuntu1) bionic; urgency=medium + + * New stable point release for OpenStack Queens (LP: #1818069). + * Remove patch disk-size-live-migration-overcommit, already + on tree. 
+ + -- Sahid Orentino Ferdjaoui Wed, 06 Mar 2019 10:30:05 +0100 + nova (2:17.0.7-0ubuntu2) bionic; urgency=medium * d/p/ensure-rbd-auth-fallback-uses-matching-credentials.patch: Cherry- diff -Nru nova-17.0.7/debian/patches/disk-size-live-migration-overcommit.patch nova-17.0.9/debian/patches/disk-size-live-migration-overcommit.patch --- nova-17.0.7/debian/patches/disk-size-live-migration-overcommit.patch 2019-01-07 19:54:42.000000000 +0000 +++ nova-17.0.9/debian/patches/disk-size-live-migration-overcommit.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -From 9f8a31f7e15261d6e3d26cfa36ed1fa5ab005091 Mon Sep 17 00:00:00 2001 -From: int32bit -Date: Mon, 22 Jan 2018 20:15:21 +0800 -Subject: [PATCH] Refix disk size during live migration with disk over-commit - -Currently available disk of targer host is calculated based on -local_gb, not available disk(free_disk_gb). This check can be -negative if the target host has no free disk. - -Change-Id: Iec50269ef31dfe090f0cd4db95a37909661bd910 -closes-bug: 1744079 -(cherry picked from commit e2cc275063658b23ed88824100919a6dfccb760d) ---- - nova/tests/unit/virt/libvirt/test_driver.py | 4 ++-- - nova/virt/libvirt/driver.py | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py -index 451babad03..77101d784a 100644 ---- a/nova/tests/unit/virt/libvirt/test_driver.py -+++ b/nova/tests/unit/virt/libvirt/test_driver.py -@@ -8294,7 +8294,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, - instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel - drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - compute_info = {'disk_available_least': -1000, -- 'local_gb': 100, -+ 'free_disk_gb': 50, - 'cpu_info': 'asdf', - } - filename = "file" -@@ -8311,7 +8311,7 @@ class LibvirtConnTestCase(test.NoDBTestCase, - return_value.is_volume_backed = False - self.assertThat({"filename": "file", - 'image_type': 'default', -- 'disk_available_mb': 102400, -+ 'disk_available_mb': 51200, - "disk_over_commit": True, - "block_migration": True, - "is_volume_backed": False}, -diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py -index 712c36da0b..2b0e21e245 100644 ---- a/nova/virt/libvirt/driver.py -+++ b/nova/virt/libvirt/driver.py -@@ -6508,7 +6508,7 @@ class LibvirtDriver(driver.ComputeDriver): - :returns: a LibvirtLiveMigrateData object - """ - if disk_over_commit: -- disk_available_gb = dst_compute_info['local_gb'] -+ disk_available_gb = dst_compute_info['free_disk_gb'] - else: - disk_available_gb = dst_compute_info['disk_available_least'] - disk_available_mb = ( --- -2.19.1 - diff -Nru nova-17.0.7/debian/patches/series nova-17.0.9/debian/patches/series --- nova-17.0.7/debian/patches/series 2019-01-07 19:54:42.000000000 +0000 +++ nova-17.0.9/debian/patches/series 2019-03-06 09:30:05.000000000 +0000 @@ -2,5 +2,4 @@ skip-ssl-tests.patch arm-console-patch.patch revert-generalize-db-conf-group-copying.patch -disk-size-live-migration-overcommit.patch ensure-rbd-auth-fallback-uses-matching-credentials.patch diff -Nru nova-17.0.7/nova/compute/manager.py nova-17.0.9/nova/compute/manager.py --- nova-17.0.7/nova/compute/manager.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/compute/manager.py 2018-12-19 20:57:15.000000000 +0000 @@ -632,10 +632,11 @@ While nova-compute was down, the instances running on it could be evacuated to another host. 
This method looks for evacuation migration records where this is the source host and which were either started - (accepted) or complete (done). From those migration records, local - instances reported by the hypervisor are compared to the instances - for the migration records and those local guests are destroyed, along - with instance allocation records in Placement for this node. + (accepted), in-progress (pre-migrating) or migrated (done). From those + migration records, local instances reported by the hypervisor are + compared to the instances for the migration records and those local + guests are destroyed, along with instance allocation records in + Placement for this node. """ filters = { 'source_compute': self.host, @@ -643,7 +644,13 @@ # included in case the source node comes back up while instances # are being evacuated to another host. We don't want the same # instance being reported from multiple hosts. - 'status': ['accepted', 'done'], + # NOTE(lyarwood): pre-migrating is also included here as the + # source compute can come back online shortly after the RT + # claims on the destination that in-turn moves the migration to + # pre-migrating. If the evacuate fails on the destination host, + # the user can rebuild the instance (in ERROR state) on the source + # host. + 'status': ['accepted', 'pre-migrating', 'done'], 'migration_type': 'evacuation', } with utils.temporary_mutation(context, read_deleted='yes'): @@ -706,6 +713,7 @@ migration.status = 'completed' migration.save() + return evacuations def _is_instance_storage_shared(self, context, instance, host=None): shared_storage = True @@ -1150,9 +1158,14 @@ try: # checking that instance was not already evacuated to other host - self._destroy_evacuated_instances(context) + evacuated_instances = self._destroy_evacuated_instances(context) + + # Initialise instances on the host that are not evacuating for instance in instances: - self._init_instance(context, instance) + if (not evacuated_instances or + instance.uuid not in evacuated_instances): + self._init_instance(context, instance) + finally: if CONF.defer_iptables_apply: self.driver.filter_defer_apply_off() @@ -3512,7 +3525,6 @@ except Exception: # Catch all here because this could be anything. LOG.exception('set_admin_password failed', instance=instance) - self._set_instance_obj_error_state(context, instance) # We create a new exception here so that we won't # potentially reveal password information to the # API caller. 
The real exception is logged above @@ -5335,13 +5347,14 @@ if CONF.volume_usage_poll_interval <= 0: return - vol_stats = [] mp = bdm.device_name # Handle bootable volumes which will not contain /dev/ if '/dev/' in mp: mp = mp[5:] try: vol_stats = self.driver.block_stats(instance, mp) + if vol_stats is None: + return except NotImplementedError: return @@ -5929,8 +5942,17 @@ migrate_data.old_vol_attachment_ids = {} bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) + network_info = self.network_api.get_instance_nw_info(context, instance) + self._notify_about_instance_usage( + context, instance, "live_migration.pre.start", + network_info=network_info) + compute_utils.notify_about_instance_action( + context, instance, self.host, + action=fields.NotificationAction.LIVE_MIGRATION_PRE, + phase=fields.NotificationPhase.START, bdms=bdms) + + connector = self.driver.get_volume_connector(instance) try: - connector = self.driver.get_volume_connector(instance) for bdm in bdms: if bdm.is_volume and bdm.attachment_id is not None: # This bdm uses the new cinder v3.44 API. @@ -5955,6 +5977,31 @@ # update the bdm with the new attachment_id. bdm.attachment_id = attach_ref['id'] bdm.save() + + block_device_info = self._get_instance_block_device_info( + context, instance, refresh_conn_info=True, + bdms=bdms) + + migrate_data = self.driver.pre_live_migration(context, + instance, + block_device_info, + network_info, + disk, + migrate_data) + LOG.debug('driver pre_live_migration data is %s', migrate_data) + + # NOTE(tr3buchet): setup networks on destination host + self.network_api.setup_networks_on_host(context, instance, + self.host) + + # Creating filters to hypervisors and firewalls. + # An example is that nova-instance-instance-xxx, + # which is written to libvirt.xml(Check "virsh nwfilter-list") + # This nwfilter is necessary on the destination host. + # In addition, this method is creating filtering rule + # onto destination host. + self.driver.ensure_filtering_rules_for_instance(instance, + network_info) except Exception: # If we raise, migrate_data with the updated attachment ids # will not be returned to the source host for rollback. @@ -5969,27 +6016,6 @@ bdm.attachment_id = old_attachments[bdm.volume_id] bdm.save() - block_device_info = self._get_instance_block_device_info( - context, instance, refresh_conn_info=True, - bdms=bdms) - - network_info = self.network_api.get_instance_nw_info(context, instance) - self._notify_about_instance_usage( - context, instance, "live_migration.pre.start", - network_info=network_info) - compute_utils.notify_about_instance_action( - context, instance, self.host, - action=fields.NotificationAction.LIVE_MIGRATION_PRE, - phase=fields.NotificationPhase.START) - - migrate_data = self.driver.pre_live_migration(context, - instance, - block_device_info, - network_info, - disk, - migrate_data) - LOG.debug('driver pre_live_migration data is %s', migrate_data) - # Volume connections are complete, tell cinder that all the # attachments have completed. for bdm in bdms: @@ -5997,26 +6023,13 @@ self.volume_api.attachment_complete(context, bdm.attachment_id) - # NOTE(tr3buchet): setup networks on destination host - self.network_api.setup_networks_on_host(context, instance, - self.host) - - # Creating filters to hypervisors and firewalls. - # An example is that nova-instance-instance-xxx, - # which is written to libvirt.xml(Check "virsh nwfilter-list") - # This nwfilter is necessary on the destination host. 
- # In addition, this method is creating filtering rule - # onto destination host. - self.driver.ensure_filtering_rules_for_instance(instance, - network_info) - self._notify_about_instance_usage( context, instance, "live_migration.pre.end", network_info=network_info) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_PRE, - phase=fields.NotificationPhase.END) + phase=fields.NotificationPhase.END, bdms=bdms) LOG.debug('pre_live_migration result data is %s', migrate_data) return migrate_data @@ -6289,23 +6302,44 @@ connector = self.driver.get_volume_connector(instance) for bdm in bdms: if bdm.is_volume: - if bdm.attachment_id is None: - # Prior to cinder v3.44: - # We don't want to actually mark the volume detached, or - # delete the bdm, just remove the connection from this - # host. - # - # remove the volume connection without detaching from - # hypervisor because the instance is not running anymore - # on the current host - self.volume_api.terminate_connection(ctxt, bdm.volume_id, - connector) - else: - # cinder v3.44 api flow - delete the old attachment - # for the source host - old_attachment_id = \ - migrate_data.old_vol_attachment_ids[bdm.volume_id] - self.volume_api.attachment_delete(ctxt, old_attachment_id) + # Detaching volumes is a call to an external API that can fail. + # If it does, we need to handle it gracefully so that the call + # to post_live_migration_at_destination - where we set instance + # host and task state - still happens. We need to rethink the + # current approach of setting instance host and task state + # AFTER a whole bunch of things that could fail in unhandled + # ways, but that is left as a TODO(artom). + try: + if bdm.attachment_id is None: + # Prior to cinder v3.44: + # We don't want to actually mark the volume detached, + # or delete the bdm, just remove the connection from + # this host. + # + # remove the volume connection without detaching from + # hypervisor because the instance is not running + # anymore on the current host + self.volume_api.terminate_connection(ctxt, + bdm.volume_id, + connector) + else: + # cinder v3.44 api flow - delete the old attachment + # for the source host + old_attachment_id = \ + migrate_data.old_vol_attachment_ids[bdm.volume_id] + self.volume_api.attachment_delete(ctxt, + old_attachment_id) + except Exception as e: + if bdm.attachment_id is None: + LOG.error('Connection for volume %s not terminated on ' + 'source host %s during post_live_migration: ' + '%s', bdm.volume_id, self.host, + six.text_type(e), instance=instance) + else: + LOG.error('Volume attachment %s not deleted on source ' + 'host %s during post_live_migration: %s', + old_attachment_id, self.host, + six.text_type(e), instance=instance) # Releasing vlan. # (not necessary in current implementation?) @@ -7079,7 +7113,14 @@ expected_attrs=[], use_slave=True) - num_vm_instances = self.driver.get_num_instances() + try: + num_vm_instances = self.driver.get_num_instances() + except exception.VirtDriverNotReady as e: + # If the virt driver is not ready, like ironic-api not being up + # yet in the case of ironic, just log it and exit. 
+ LOG.info('Skipping _sync_power_states periodic task due to: %s', e) + return + num_db_instances = len(db_instances) if num_vm_instances != num_db_instances: diff -Nru nova-17.0.7/nova/compute/rpcapi.py nova-17.0.9/nova/compute/rpcapi.py --- nova-17.0.7/nova/compute/rpcapi.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/compute/rpcapi.py 2018-12-19 20:57:15.000000000 +0000 @@ -328,12 +328,12 @@ * 4.15 - Add tag argument to reserve_block_device_name() * 4.16 - Add tag argument to attach_interface() * 4.17 - Add new_attachment_id to swap_volume. - * 4.18 - Add migration to prep_resize() - ... Pike supports messaging version 4.18. So any changes to existing + ... Pike supports messaging version 4.17. So any changes to existing methods in 4.x after that point should be done so that they can handle - the version_cap being set to 4.18. + the version_cap being set to 4.17. + * 4.18 - Add migration to prep_resize() * 4.19 - build_and_run_instance() now gets a 'host_list' parameter representing potential alternate hosts for retries within a cell. @@ -358,7 +358,7 @@ 'mitaka': '4.11', 'newton': '4.13', 'ocata': '4.13', - 'pike': '4.18', + 'pike': '4.17', 'queens': '5.0', } diff -Nru nova-17.0.7/nova/conductor/manager.py nova-17.0.9/nova/conductor/manager.py --- nova-17.0.7/nova/conductor/manager.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/conductor/manager.py 2018-12-19 20:57:15.000000000 +0000 @@ -52,6 +52,7 @@ from nova.scheduler import utils as scheduler_utils from nova import servicegroup from nova import utils +from nova.volume import cinder LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -225,6 +226,7 @@ def __init__(self): super(ComputeTaskManager, self).__init__() self.compute_rpcapi = compute_rpcapi.ComputeAPI() + self.volume_api = cinder.API() self.image_api = image.API() self.network_api = network.API() self.servicegroup_api = servicegroup.API() @@ -514,6 +516,24 @@ inst_mapping.save() return inst_mapping + def _validate_existing_attachment_ids(self, context, instance, bdms): + """Ensure any attachment ids referenced by the bdms exist. + + New attachments will only be created if the attachment ids referenced + by the bdms no longer exist. This can happen when an instance is + rescheduled after a failure to spawn as cleanup code on the previous + host will delete attachments before rescheduling. + """ + for bdm in bdms: + if bdm.is_volume and bdm.attachment_id: + try: + self.volume_api.attachment_get(context, bdm.attachment_id) + except exception.VolumeAttachmentNotFound: + attachment = self.volume_api.attachment_create( + context, bdm.volume_id, instance.uuid) + bdm.attachment_id = attachment['id'] + bdm.save() + # NOTE(danms): This is never cell-targeted because it is only used for # cellsv1 (which does not target cells directly) and n-cpu reschedules # (which go to the cell conductor and thus are always cell-specific). @@ -693,6 +713,11 @@ if inst_mapping: inst_mapping.destroy() return + else: + # NOTE(lyarwood): If this is a reschedule then recreate any + # attachments that were previously removed when cleaning up + # after failures to spawn etc. 
+ self._validate_existing_attachment_ids(context, instance, bdms) alts = [(alt.service_host, alt.nodename) for alt in host_list] LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s", @@ -1120,6 +1145,8 @@ instance_uuids, return_alternates=True) except Exception as exc: LOG.exception('Failed to schedule instances') + # FIXME(mriedem): If the tags are not persisted with the instance + # in cell0 then the API will not show them. self._bury_in_cell0(context, request_specs[0], exc, build_requests=build_requests, block_device_mapping=block_device_mapping) @@ -1145,6 +1172,8 @@ LOG.error('No host-to-cell mapping found for selected ' 'host %(host)s. Setup is incomplete.', {'host': host.service_host}) + # FIXME(mriedem): If the tags are not persisted with the + # instance in cell0 then the API will not show them. self._bury_in_cell0( context, request_spec, exc, build_requests=[build_request], instances=[instance], @@ -1196,6 +1225,7 @@ self._cleanup_build_artifacts(context, exc, instances, build_requests, request_specs, + block_device_mapping, tags, cell_mapping_cache) zipped = six.moves.zip(build_requests, request_specs, host_lists, @@ -1272,7 +1302,8 @@ limits=host.limits, host_list=host_list) def _cleanup_build_artifacts(self, context, exc, instances, build_requests, - request_specs, cell_mapping_cache): + request_specs, block_device_mappings, tags, + cell_mapping_cache): for (instance, build_request, request_spec) in six.moves.zip( instances, build_requests, request_specs): # Skip placeholders that were buried in cell0 or had their @@ -1293,6 +1324,21 @@ inst_mapping.cell_mapping = cell inst_mapping.save() + # In order to properly clean-up volumes when deleting a server in + # ERROR status with no host, we need to store BDMs in the same + # cell. + if block_device_mappings: + self._create_block_device_mapping( + cell, instance.flavor, instance.uuid, + block_device_mappings) + + # Like BDMs, the server tags provided by the user when creating the + # server should be persisted in the same cell so they can be shown + # from the API. + if tags: + with nova_context.target_cell(context, cell) as cctxt: + self._create_tags(cctxt, instance.uuid, tags) + # Be paranoid about artifacts being deleted underneath us. try: build_request.destroy() diff -Nru nova-17.0.7/nova/conf/key_manager.py nova-17.0.9/nova/conf/key_manager.py --- nova-17.0.7/nova/conf/key_manager.py 2018-10-08 21:57:09.000000000 +0000 +++ nova-17.0.9/nova/conf/key_manager.py 2018-12-19 20:57:10.000000000 +0000 @@ -27,6 +27,7 @@ cfg.StrOpt( 'fixed_key', deprecated_group='keymgr', + secret=True, help=""" Fixed key returned by key manager, specified in hex. 
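[Editorial aside, not part of the upstream package diff.] The hunk above adds secret=True to the [key_manager]/fixed_key option. As a hedged illustration of what that flag does in oslo.config (assuming a stock oslo.config install; the logger setup below is just for the demo), options registered with secret=True have their values masked as '****' when configuration values are dumped to the logs, so the raw hex key no longer leaks into debug output:

    # Minimal sketch: register a secret StrOpt the same way the hunk above
    # does for [key_manager]/fixed_key, then dump option values to the log.
    import logging

    from oslo_config import cfg

    CONF = cfg.ConfigOpts()
    CONF.register_opts(
        [cfg.StrOpt('fixed_key', secret=True,
                    help='Fixed key returned by key manager, '
                         'specified in hex.')],
        group='key_manager')

    logging.basicConfig(level=logging.DEBUG)
    CONF.set_override('fixed_key', 'deadbeefcafe', group='key_manager')

    # For secret options, log_opt_values() prints 'fixed_key = ****'
    # instead of the configured hex string.
    CONF.log_opt_values(logging.getLogger(__name__), logging.DEBUG)
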
diff -Nru nova-17.0.7/nova/consoleauth/manager.py nova-17.0.9/nova/consoleauth/manager.py --- nova-17.0.7/nova/consoleauth/manager.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/consoleauth/manager.py 2018-12-19 20:57:10.000000000 +0000 @@ -139,6 +139,7 @@ def delete_tokens_for_instance(self, context, instance_uuid): tokens = self._get_tokens_for_instance(instance_uuid) - self.mc.delete_multi( + if tokens: + self.mc.delete_multi( [tok.encode('UTF-8') for tok in tokens]) self.mc_instance.delete(instance_uuid.encode('UTF-8')) diff -Nru nova-17.0.7/nova/network/neutronv2/api.py nova-17.0.9/nova/network/neutronv2/api.py --- nova-17.0.7/nova/network/neutronv2/api.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/network/neutronv2/api.py 2018-12-19 20:57:15.000000000 +0000 @@ -2484,7 +2484,7 @@ return [] if not client: client = get_client(context) - search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]} + search_opts = {'id': list(set(ip['subnet_id'] for ip in fixed_ips))} data = client.list_subnets(**search_opts) ipam_subnets = data.get('subnets', []) subnets = [] diff -Nru nova-17.0.7/nova/objects/instance.py nova-17.0.9/nova/objects/instance.py --- nova-17.0.7/nova/objects/instance.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/objects/instance.py 2018-12-19 20:57:15.000000000 +0000 @@ -321,29 +321,31 @@ # Before we stored flavors in instance_extra, certain fields, defined # in nova.compute.flavors.system_metadata_flavor_props, were stored # in the instance.system_metadata for the embedded instance.flavor. - # The "disabled" field wasn't one of those keys, however, so really - # old instances that had their embedded flavor converted to the - # serialized instance_extra form won't have the disabled attribute - # set and we need to default those here so callers don't explode trying - # to load instance.flavor.disabled. - def _default_disabled(flavor): + # The "disabled" and "is_public" fields weren't one of those keys, + # however, so really old instances that had their embedded flavor + # converted to the serialized instance_extra form won't have the + # disabled attribute set and we need to default those here so callers + # don't explode trying to load instance.flavor.disabled. + def _default_flavor_values(flavor): if 'disabled' not in flavor: flavor.disabled = False + if 'is_public' not in flavor: + flavor.is_public = True flavor_info = jsonutils.loads(db_flavor) self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur']) - _default_disabled(self.flavor) + _default_flavor_values(self.flavor) if flavor_info['old']: self.old_flavor = objects.Flavor.obj_from_primitive( flavor_info['old']) - _default_disabled(self.old_flavor) + _default_flavor_values(self.old_flavor) else: self.old_flavor = None if flavor_info['new']: self.new_flavor = objects.Flavor.obj_from_primitive( flavor_info['new']) - _default_disabled(self.new_flavor) + _default_flavor_values(self.new_flavor) else: self.new_flavor = None self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor']) diff -Nru nova-17.0.7/nova/objects/request_spec.py nova-17.0.9/nova/objects/request_spec.py --- nova-17.0.7/nova/objects/request_spec.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/objects/request_spec.py 2018-12-19 20:57:15.000000000 +0000 @@ -645,13 +645,17 @@ # Prevent lazy-load of those fields for every instance later. 
attrs = ['system_metadata', 'flavor', 'pci_requests', 'numa_topology', 'availability_zone'] - instances = objects.InstanceList.get_by_filters(context, - filters={'deleted': False}, - sort_key='created_at', - sort_dir='asc', - limit=max_count, - marker=marker, - expected_attrs=attrs) + try: + instances = objects.InstanceList.get_by_filters( + context, filters={'deleted': False}, sort_key='created_at', + sort_dir='asc', limit=max_count, marker=marker, + expected_attrs=attrs) + except exception.MarkerNotFound: + # Instance referenced by marker may have been purged. + # Try again but get all instances. + instances = objects.InstanceList.get_by_filters( + context, filters={'deleted': False}, sort_key='created_at', + sort_dir='asc', limit=max_count, expected_attrs=attrs) count_all = len(instances) count_hit = 0 for instance in instances: diff -Nru nova-17.0.7/nova/tests/fixtures.py nova-17.0.9/nova/tests/fixtures.py --- nova-17.0.7/nova/tests/fixtures.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/fixtures.py 2018-12-19 20:57:15.000000000 +0000 @@ -1329,6 +1329,9 @@ # This map gets updated on attach/detach operations. self.attachments = collections.defaultdict(list) + def volume_ids_for_instance(self, instance_uuid): + return self.attachments.get(instance_uuid) + def setUp(self): super(CinderFixture, self).setUp() @@ -1515,15 +1518,28 @@ self.swap_volume_instance_uuid = None self.swap_volume_instance_error_uuid = None self.attachment_error_id = None - # This is a map of instance UUIDs mapped to a list of volume IDs. - # This map gets updated on attach/detach operations. - self.attachments = collections.defaultdict(list) + # A map of volumes to a list of (attachment_id, instance_uuid). + # Note that a volume can have multiple attachments even without + # multi-attach, as some flows create a blank 'reservation' attachment + # before deleting another attachment. + self.volume_to_attachment = collections.defaultdict(list) + + def volume_ids_for_instance(self, instance_uuid): + for volume_id, attachments in self.volume_to_attachment.items(): + for _, _instance_uuid in attachments: + if _instance_uuid == instance_uuid: + # we might have multiple volumes attached to this instance + # so yield rather than return + yield volume_id + break def setUp(self): super(CinderFixtureNewAttachFlow, self).setUp() def fake_get(self_api, context, volume_id, microversion=None): # Check for the special swap volumes. + attachments = self.volume_to_attachment[volume_id] + if volume_id in (CinderFixture.SWAP_OLD_VOL, CinderFixture.SWAP_ERR_OLD_VOL): volume = { @@ -1542,37 +1558,39 @@ if volume_id == CinderFixture.SWAP_OLD_VOL else self.swap_volume_instance_error_uuid) - volume.update({ - 'status': 'in-use', - 'attachments': { - instance_uuid: { - 'mountpoint': '/dev/vdb', - 'attachment_id': volume_id - } - }, - 'attach_status': 'attached' - }) + if attachments: + attachment_id, instance_uuid = attachments[0] + + volume.update({ + 'status': 'in-use', + 'attachments': { + instance_uuid: { + 'mountpoint': '/dev/vdb', + 'attachment_id': attachment_id + } + }, + 'attach_status': 'attached' + }) return volume # Check to see if the volume is attached. - for instance_uuid, volumes in self.attachments.items(): - if volume_id in volumes: - # The volume is attached. 
- volume = { - 'status': 'in-use', - 'display_name': volume_id, - 'attach_status': 'attached', - 'id': volume_id, - 'multiattach': volume_id == self.MULTIATTACH_VOL, - 'size': 1, - 'attachments': { - instance_uuid: { - 'attachment_id': volume_id, - 'mountpoint': '/dev/vdb' - } + if attachments: + # The volume is attached. + attachment_id, instance_uuid = attachments[0] + volume = { + 'status': 'in-use', + 'display_name': volume_id, + 'attach_status': 'attached', + 'id': volume_id, + 'multiattach': volume_id == self.MULTIATTACH_VOL, + 'size': 1, + 'attachments': { + instance_uuid: { + 'attachment_id': attachment_id, + 'mountpoint': '/dev/vdb' } } - break + } else: # This is a test that does not care about the actual details. volume = { @@ -1600,26 +1618,45 @@ new_volume_id, error): return {'save_volume_id': new_volume_id} + def _find_attachment(attachment_id): + """Find attachment corresponding to ``attachment_id``. + + Returns: + A tuple of the volume ID, an attachment-instance mapping tuple + for the given attachment ID, and a list of attachment-instance + mapping tuples for the volume. + """ + for volume_id, attachments in self.volume_to_attachment.items(): + for attachment in attachments: + _attachment_id, instance_uuid = attachment + if attachment_id == _attachment_id: + return volume_id, attachment, attachments + raise exception.VolumeAttachmentNotFound( + attachment_id=attachment_id) + def fake_attachment_create(_self, context, volume_id, instance_uuid, connector=None, mountpoint=None): attachment_id = uuidutils.generate_uuid() if self.attachment_error_id is not None: attachment_id = self.attachment_error_id attachment = {'id': attachment_id, 'connection_info': {'data': {}}} - self.attachments['instance_uuid'].append(instance_uuid) - self.attachments[instance_uuid].append(volume_id) + self.volume_to_attachment[volume_id].append( + (attachment_id, instance_uuid)) return attachment def fake_attachment_delete(_self, context, attachment_id): - instance_uuid = self.attachments['instance_uuid'][0] - del self.attachments[instance_uuid][0] - del self.attachments['instance_uuid'][0] + # 'attachment' is a tuple defining a attachment-instance mapping + _, attachment, attachments = _find_attachment(attachment_id) + attachments.remove(attachment) + if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID: self.swap_error = True def fake_attachment_update(_self, context, attachment_id, connector, mountpoint=None): + # Ensure the attachment exists + _find_attachment(attachment_id) attachment_ref = {'driver_volume_type': 'fake_type', 'id': attachment_id, 'connection_info': {'data': @@ -1630,6 +1667,8 @@ return attachment_ref def fake_attachment_get(_self, context, attachment_id): + # Ensure the attachment exists + _find_attachment(attachment_id) attachment_ref = {'driver_volume_type': 'fake_type', 'id': attachment_id, 'connection_info': {'data': diff -Nru nova-17.0.7/nova/tests/functional/compute/test_live_migration.py nova-17.0.9/nova/tests/functional/compute/test_live_migration.py --- nova-17.0.7/nova/tests/functional/compute/test_live_migration.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/compute/test_live_migration.py 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,148 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.compute import api as compute_api +from nova import exception +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit import fake_notifier +from nova.tests import uuidsentinel as uuids +from nova.virt import fake + + +class FakeCinderError(object): + """Poor man's Mock because we're stubbing out and not mock.patching. Stubs + out both terminate_connection and attachment_delete. We keep a raise and + call count to simulate a single volume error while being able to assert + that we still got called for all of an instance's volumes. + """ + + def __init__(self): + self.raise_count = 0 + self.call_count = 0 + + def __call__(self, *args, **kwargs): + self.call_count += 1 + if self.raise_count == 0: + self.raise_count += 1 + raise exception.CinderConnectionFailed(reason='Fake Cinder error') + + +class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase, + integrated_helpers.InstanceHelperMixin): + api_major_version = 'v2.1' + microversion = 'latest' + + def setUp(self): + super(LiveMigrationCinderFailure, self).setUp() + fake_notifier.stub_notifier(self) + self.addCleanup(fake_notifier.reset) + # Start a second compte node (the first one was started for us by + # _IntegratedTestBase. set_nodes() is needed to avoid duplicate + # nodenames. See comments in test_bug_1702454.py. + fake.set_nodes(['host2']) + self.addCleanup(fake.restore_nodes) + self.compute2 = self.start_service('compute', host='host2') + + # To get the old Cinder flow we need to hack the service version, otherwise + # the new flow is attempted and CinderFixture complains about auth because + # it's not stubbing out the new flow methods. + @mock.patch( + 'nova.objects.service.get_minimum_version_all_cells', + return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1) + def test_live_migrate_terminate_connection_fails(self, _): + self.useFixture(nova_fixtures.CinderFixture(self)) + server = self.api.post_server({ + 'server': { + 'flavorRef': 1, + 'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'name': 'live-migrate-terminate-connection-fail-test', + 'networks': 'none', + 'block_device_mapping_v2': [ + {'boot_index': 0, + 'uuid': uuids.broken_volume, + 'source_type': 'volume', + 'destination_type': 'volume'}, + {'boot_index': 1, + 'uuid': uuids.working_volume, + 'source_type': 'volume', + 'destination_type': 'volume'}]}}) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + + source = server['OS-EXT-SRV-ATTR:host'] + if source == self.compute.host: + dest = self.compute2.host + else: + dest = self.compute.host + + post = { + 'os-migrateLive': { + 'host': dest, + 'block_migration': False, + } + } + stub_terminate_connection = FakeCinderError() + self.stub_out('nova.volume.cinder.API.terminate_connection', + stub_terminate_connection) + self.api.post_server_action(server['id'], post) + # Live migration should complete despite a volume failing to detach. + # Waiting for ACTIVE on dest is essentially an assert for just that. 
+ self._wait_for_server_parameter(self.api, server, + {'OS-EXT-SRV-ATTR:host': dest, + 'status': 'ACTIVE'}) + self.assertEqual(2, stub_terminate_connection.call_count) + self.assertEqual(1, stub_terminate_connection.raise_count) + + def test_live_migrate_attachment_delete_fails(self): + self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self)) + server = self.api.post_server({ + 'server': { + 'flavorRef': 1, + 'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6', + 'name': 'live-migrate-attachment-delete-fail-test', + 'networks': 'none', + 'block_device_mapping_v2': [ + {'boot_index': 0, + 'uuid': uuids.broken_volume, + 'source_type': 'volume', + 'destination_type': 'volume'}, + {'boot_index': 1, + 'uuid': uuids.working_volume, + 'source_type': 'volume', + 'destination_type': 'volume'}]}}) + server = self._wait_for_state_change(self.api, server, 'ACTIVE') + + source = server['OS-EXT-SRV-ATTR:host'] + if source == self.compute.host: + dest = self.compute2.host + else: + dest = self.compute.host + + post = { + 'os-migrateLive': { + 'host': dest, + 'block_migration': False, + } + } + stub_attachment_delete = FakeCinderError() + self.stub_out('nova.volume.cinder.API.attachment_delete', + stub_attachment_delete) + self.api.post_server_action(server['id'], post) + self._wait_for_server_parameter(self.api, server, + {'OS-EXT-SRV-ATTR:host': dest, + 'status': 'ACTIVE'}) + self.assertEqual(2, stub_attachment_delete.call_count) + self.assertEqual(1, stub_attachment_delete.raise_count) diff -Nru nova-17.0.7/nova/tests/functional/db/test_request_spec.py nova-17.0.9/nova/tests/functional/db/test_request_spec.py --- nova-17.0.7/nova/tests/functional/db/test_request_spec.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/db/test_request_spec.py 2018-12-19 20:57:15.000000000 +0000 @@ -167,3 +167,45 @@ self.context, 50) self.assertEqual(5, match) self.assertEqual(0, done) + + def test_migration_with_missing_marker(self): + self._create_instances(old=2, total=5) + + # Start with 2 old (without request_spec) and 3 new instances: + # [old, old, new, new, new] + match, done = request_spec.migrate_instances_add_request_spec( + self.context, 2) + # Instance list after session 1: + # [upgraded, upgraded, new, new, new] + self.assertEqual(2, match) + self.assertEqual(2, done) + + # Delete and remove the marker instance from api table while leaving + # the spec in request_specs table. This triggers MarkerNotFound + # exception in the latter session. 
+ self.api.delete_server(self.instances[1].uuid) + db.archive_deleted_rows(max_rows=100) + # Instance list after deletion: [upgraded, new, new, new] + + # This session of migration hits MarkerNotFound exception and then + # starts from the beginning of the list + match, done = request_spec.migrate_instances_add_request_spec( + self.context, 50) + self.assertEqual(4, match) + self.assertEqual(0, done) + + # Make sure we ran over all the instances + match, done = request_spec.migrate_instances_add_request_spec( + self.context, 50) + self.assertEqual(0, match) + self.assertEqual(0, done) + + # Make sure all instances have now a related RequestSpec + for instance in self.instances: + uuid = instance.uuid + try: + spec = objects.RequestSpec.get_by_instance_uuid( + self.context, uuid) + self.assertEqual(instance.project_id, spec.project_id) + except exception.RequestSpecNotFound: + self.fail("RequestSpec not found for instance UUID :%s ", uuid) diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1404867.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1404867.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1404867.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1404867.py 2018-12-19 20:57:15.000000000 +0000 @@ -96,7 +96,8 @@ # There should now exist an attachment to the volume as it was created # by Nova. - self.assertIn(volume_id, self.cinder.attachments[server_id]) + self.assertIn(volume_id, + self.cinder.volume_ids_for_instance(server_id)) # Delete this server, which should delete BDMs and remove the # reservation on the instances. @@ -104,4 +105,5 @@ # The volume should no longer have any attachments as instance delete # should have removed them. - self.assertNotIn(volume_id, self.cinder.attachments[server_id]) + self.assertNotIn(volume_id, + self.cinder.volume_ids_for_instance(server_id)) diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1675570.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1675570.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1675570.py 2018-10-08 21:57:09.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1675570.py 2018-12-19 20:57:15.000000000 +0000 @@ -160,7 +160,8 @@ self._wait_for_volume_attach(server_id, volume_id) # Check to see that the fixture is tracking the server and volume # attachment. - self.assertIn(volume_id, self.cinder.attachments[server_id]) + self.assertIn(volume_id, + self.cinder.volume_ids_for_instance(server_id)) # At this point the instance.host is no longer set, so deleting # the server will take the local delete path in the API. @@ -172,7 +173,8 @@ LOG.info('Validating that volume %s was detached from server %s.', volume_id, server_id) # Now that the bug is fixed, assert the volume was detached. - self.assertNotIn(volume_id, self.cinder.attachments[server_id]) + self.assertNotIn(volume_id, + self.cinder.volume_ids_for_instance(server_id)) @mock.patch('nova.objects.Service.get_minimum_version', diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1764883.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1764883.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1764883.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1764883.py 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,123 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit import fake_network +from nova.tests.unit import fake_notifier +import nova.tests.unit.image.fake +from nova.tests.unit import policy_fixture +from nova.virt import fake + + +class TestEvacuationWithSourceReturningDuringRebuild( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Assert the behaviour of evacuating instances when the src returns early. + + This test asserts that evacuating instances end up in an ACTIVE state on + the destination even when the source host comes back online during an + evacuation while the migration record is in a pre-migrating state. + """ + + def setUp(self): + super(TestEvacuationWithSourceReturningDuringRebuild, self).setUp() + + self.useFixture(policy_fixture.RealPolicyFixture()) + + # The NeutronFixture is needed to stub out validate_networks in API. + self.useFixture(nova_fixtures.NeutronFixture(self)) + + # This stubs out the network allocation in compute. + fake_network.set_stub_network_methods(self) + + # We need the computes reporting into placement for the filter + # scheduler to pick a host. + self.useFixture(nova_fixtures.PlacementFixture()) + + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + self.api = api_fixture.admin_api + # 2.11 is needed for force_down + # 2.14 is needed for evacuate without onSharedStorage flag + self.api.microversion = '2.14' + + # the image fake backend needed for image discovery + nova.tests.unit.image.fake.stub_out_image_service(self) + self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) + + self.start_service('conductor') + self.start_service('scheduler') + + # Start two computes + self.computes = {} + + fake.set_nodes(['host1']) + self.addCleanup(fake.restore_nodes) + self.computes['host1'] = self.start_service('compute', host='host1') + + fake.set_nodes(['host2']) + self.addCleanup(fake.restore_nodes) + self.computes['host2'] = self.start_service('compute', host='host2') + + self.image_id = self.api.get_images()[0]['id'] + self.flavor_id = self.api.get_flavors()[0]['id'] + + self.addCleanup(fake_notifier.reset) + + # Stub out rebuild with a slower method allowing the src compute to be + # restarted once the migration hits pre-migrating after claiming + # resources on the dest. + manager_class = nova.compute.manager.ComputeManager + original_rebuild = manager_class._do_rebuild_instance + + def start_src_rebuild(self_, context, instance, *args, **kwargs): + server = self.api.get_server(instance.uuid) + # Start the src compute once the migration is pre-migrating. + self._wait_for_migration_status(server, ['pre-migrating']) + self.computes.get(self.source_compute).start() + original_rebuild(self_, context, instance, *args, **kwargs) + + self.stub_out('nova.compute.manager.ComputeManager.' 
+ '_do_rebuild_instance', start_src_rebuild) + + def test_evacuation_with_source_compute_returning_during_rebuild(self): + + # Launch an instance + server_request = {'name': 'server', + 'imageRef': self.image_id, + 'flavorRef': self.flavor_id} + server_response = self.api.post_server({'server': server_request}) + server = self._wait_for_state_change(self.api, server_response, + 'ACTIVE') + + # Record where the instance is running before forcing the service down + self.source_compute = server['OS-EXT-SRV-ATTR:host'] + self.computes.get(self.source_compute).stop() + self.api.force_down_service(self.source_compute, 'nova-compute', True) + + # Start evacuating the instance from the source_host + self.api.post_server_action(server['id'], {'evacuate': {}}) + + # Wait for the instance to go into an ACTIVE state + self._wait_for_state_change(self.api, server, 'ACTIVE') + server = self.api.get_server(server['id']) + host = server['OS-EXT-SRV-ATTR:host'] + migrations = self.api.get_migrations() + + # Assert that we have a single `done` migration record after the evac + self.assertEqual(1, len(migrations)) + self.assertEqual('done', migrations[0]['status']) + + # Assert that the instance is now on the dest + self.assertNotEqual(self.source_compute, host) diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1784353.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1784353.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1784353.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1784353.py 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit import fake_network +import nova.tests.unit.image.fake +from nova.tests.unit import policy_fixture +from nova.virt import fake + + +class TestRescheduleWithVolumesAttached( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Regression test for bug 1784353 introduced in Queens. + + This regression test asserts that volume backed instances fail to start + when rescheduled due to their volume attachments being deleted by cleanup + code within the compute layer after an initial failure to spawn. 
+ """ + + def setUp(self): + super(TestRescheduleWithVolumesAttached, self).setUp() + + # Use the new attach flow fixture for cinder + cinder_fixture = nova_fixtures.CinderFixtureNewAttachFlow(self) + self.cinder = self.useFixture(cinder_fixture) + self.useFixture(policy_fixture.RealPolicyFixture()) + self.useFixture(nova_fixtures.NeutronFixture(self)) + + fake_network.set_stub_network_methods(self) + + self.useFixture(nova_fixtures.PlacementFixture()) + + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + self.api = api_fixture.admin_api + + nova.tests.unit.image.fake.stub_out_image_service(self) + self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) + + self.flags(compute_driver='fake.FakeRescheduleDriver') + + self.start_service('conductor') + self.start_service('scheduler') + + # Start two computes to allow the instance to be rescheduled + fake.set_nodes(['host1']) + self.addCleanup(fake.restore_nodes) + self.host1 = self.start_service('compute', host='host1') + + fake.set_nodes(['host2']) + self.addCleanup(fake.restore_nodes) + self.host2 = self.start_service('compute', host='host2') + + self.image_id = self.api.get_images()[0]['id'] + self.flavor_id = self.api.get_flavors()[0]['id'] + + def test_reschedule_with_volume_attached(self): + # Boot a volume backed instance + volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL + server_request = { + 'name': 'server', + 'flavorRef': self.flavor_id, + 'block_device_mapping_v2': [{ + 'boot_index': 0, + 'uuid': volume_id, + 'source_type': 'volume', + 'destination_type': 'volume'}], + } + server_response = self.api.post_server({'server': server_request}) + server_id = server_response['id'] + + self._wait_for_state_change(self.api, server_response, 'ACTIVE') + attached_volume_ids = self.cinder.volume_ids_for_instance(server_id) + self.assertIn(volume_id, attached_volume_ids) + self.assertEqual(1, len(self.cinder.volume_to_attachment)) + # There should only be one attachment record for the volume and + # instance because the original would have been deleted before + # rescheduling off the first host. + self.assertEqual(1, len(self.cinder.volume_to_attachment[volume_id])) diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1797580.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1797580.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1797580.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1797580.py 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit.image import fake as image_fake +from nova.tests.unit import policy_fixture +from nova.virt import fake + + +class ColdMigrateTargetHostThenLiveMigrateTest( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Regression test for bug 1797580 introduced in Queens. 
+ + Microversion 2.56 allows cold migrating to a specified target host. The + compute API sets the requested destination on the request spec with the + specified target host and then conductor sends that request spec to the + scheduler to validate the host. Conductor later persists the changes to + the request spec because it's the resize flow and the flavor could change + (even though in this case it won't since it's a cold migrate). After + confirming the resize, if the server is live migrated it will fail during + scheduling because of the persisted RequestSpec.requested_destination + from the cold migration, and you can't live migrate to the same host on + which the instance is currently running. + + This test reproduces the regression and will validate the fix. + """ + + def setUp(self): + super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp() + self.useFixture(policy_fixture.RealPolicyFixture()) + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(nova_fixtures.PlacementFixture()) + + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + # The admin API is used to get the server details to verify the + # host on which the server was built and cold/live migrate it. + self.admin_api = api_fixture.admin_api + self.api = api_fixture.api + # Use the latest microversion available to make sure something does + # not regress in new microversions; cap as necessary. + self.admin_api.microversion = 'latest' + self.api.microversion = 'latest' + + image_fake.stub_out_image_service(self) + self.addCleanup(image_fake.FakeImageService_reset) + + self.start_service('conductor') + self.start_service('scheduler') + + for host in ('host1', 'host2'): + fake.set_nodes([host]) + self.addCleanup(fake.restore_nodes) + self.start_service('compute', host=host) + + def test_cold_migrate_target_host_then_live_migrate(self): + # Create a server, it doesn't matter on which host it builds. + server = self._build_minimal_create_server_request( + self.api, 'test_cold_migrate_target_host_then_live_migrate', + image_uuid=image_fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID, + networks='none') + server = self.api.post_server({'server': server}) + server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + original_host = server['OS-EXT-SRV-ATTR:host'] + target_host = 'host1' if original_host == 'host2' else 'host2' + + # Cold migrate the server to the specific target host. + migrate_req = {'migrate': {'host': target_host}} + self.admin_api.post_server_action(server['id'], migrate_req) + server = self._wait_for_state_change( + self.admin_api, server, 'VERIFY_RESIZE') + + # Confirm the resize so the server stays on the target host. + confim_req = {'confirmResize': None} + self.admin_api.post_server_action(server['id'], confim_req) + server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + + # Attempt to live migrate the server but don't specify a host so the + # scheduler has to pick one. + live_migrate_req = { + 'os-migrateLive': {'host': None, 'block_migration': 'auto'}} + self.admin_api.post_server_action(server['id'], live_migrate_req) + server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE') + + # FIXME(mriedem): Until bug 1797580 is resolved the migration will + # fail during scheduling. 
+ migration = self._wait_for_migration_status(server, ['error']) + self.assertEqual('live-migration', migration['migration_type']) diff -Nru nova-17.0.7/nova/tests/functional/regressions/test_bug_1806064.py nova-17.0.9/nova/tests/functional/regressions/test_bug_1806064.py --- nova-17.0.7/nova/tests/functional/regressions/test_bug_1806064.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/regressions/test_bug_1806064.py 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,140 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.compute import utils as compute_utils +from nova import context as nova_context +from nova import exception +from nova import objects +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit import policy_fixture + + +class BootFromVolumeOverQuotaRaceDeleteTest( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Test for regression bug 1806064 introduced in Pike. + + This is very similar to regression bug 1404867 where reserved/attached + volumes during a boot from volume request are not detached while deleting + a server that failed to schedule. + + In this case, scheduling is successful but the late quota check in + ComputeTaskManager.schedule_and_build_instances fails. In the case of a + scheduling failure, the instance record along with the associated BDMs + are created in the cell0 database and that is where the "local delete" + code in the API finds them to detach the related volumes. In the case of + the quota fail race, the instance has already been created in a selected + cell but the BDMs records have not been and are thus not "seen" during + the API local delete and the volumes are left attached to a deleted server. + + An additional issue, covered in the test here, is that tags provided when + creating the server are not retrievable from the API after the late quota + check fails. + """ + + def setUp(self): + super(BootFromVolumeOverQuotaRaceDeleteTest, self).setUp() + # We need the cinder fixture for boot from volume testing. + self.cinder_fixture = self.useFixture( + nova_fixtures.CinderFixtureNewAttachFlow(self)) + # Use the standard fixtures. + self.useFixture(policy_fixture.RealPolicyFixture()) + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(nova_fixtures.PlacementFixture()) + self.api = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')).api + # Use microversion 2.52 which allows creating a server with tags. + self.api.microversion = '2.52' + + self.start_service('conductor') + self.start_service('scheduler') + self.start_service('compute') + + def test_bfv_quota_race_local_delete(self): + # Setup a boot-from-volume request where the API will create a + # volume attachment record for the given pre-existing volume. + # We also tag the server since tags, like BDMs, should be created in + # the cell database along with the instance. 
+ volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL + server = { + 'server': { + 'name': 'test_bfv_quota_race_local_delete', + 'flavorRef': self.api.get_flavors()[0]['id'], + 'imageRef': '', + 'block_device_mapping_v2': [{ + 'boot_index': 0, + 'source_type': 'volume', + 'destination_type': 'volume', + 'uuid': volume_id + }], + 'networks': 'auto', + 'tags': ['bfv'] + } + } + + # Now we need to stub out the quota check routine so that we can + # simulate the race where the initial quota check in the API passes + # but fails in conductor once the instance has been created in cell1. + original_quota_check = compute_utils.check_num_instances_quota + + def stub_check_num_instances_quota(_self, context, instance_type, + min_count, *args, **kwargs): + # Determine where we are in the flow based on whether or not the + # min_count is 0 (API will pass 1, conductor will pass 0). + if min_count == 0: + raise exception.TooManyInstances( + 'test_bfv_quota_race_local_delete') + # We're checking from the API so perform the original quota check. + return original_quota_check( + _self, context, instance_type, min_count, *args, **kwargs) + + self.stub_out('nova.compute.utils.check_num_instances_quota', + stub_check_num_instances_quota) + + server = self.api.post_server(server) + server = self._wait_for_state_change(self.api, server, 'ERROR') + # At this point, the build request should be gone and the instance + # should have been created in cell1. + context = nova_context.get_admin_context() + self.assertRaises(exception.BuildRequestNotFound, + objects.BuildRequest.get_by_instance_uuid, + context, server['id']) + # The default cell in the functional tests is cell1 but we want to + # specifically target cell1 to make sure the instance exists there + # and we're not just getting lucky somehow due to the fixture. + cell1 = self.cell_mappings[test.CELL1_NAME] + with nova_context.target_cell(context, cell1) as cctxt: + # This would raise InstanceNotFound if the instance isn't in cell1. + instance = objects.Instance.get_by_uuid(cctxt, server['id']) + self.assertIsNone(instance.host, 'instance.host should not be set') + # Make sure the BDMs and tags also exist in cell1. + bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( + cctxt, instance.uuid) + self.assertEqual(1, len(bdms), 'BDMs were not created in cell1') + tags = objects.TagList.get_by_resource_id(cctxt, instance.uuid) + self.assertEqual(1, len(tags), 'Tags were not created in cell1') + + # Make sure we can still view the tags on the server before it is + # deleted. + self.assertEqual(['bfv'], server['tags']) + + # Now delete the server which, since it does not have a host, will be + # deleted "locally" from the API. + self.api.delete_server(server['id']) + self._wait_until_deleted(server) + + # The volume should have been detached by the API. + attached_volumes = self.cinder_fixture.volume_ids_for_instance( + server['id']) + # volume_ids_for_instance is a generator so listify + self.assertEqual(0, len(list(attached_volumes))) diff -Nru nova-17.0.7/nova/tests/functional/wsgi/test_servers.py nova-17.0.9/nova/tests/functional/wsgi/test_servers.py --- nova-17.0.7/nova/tests/functional/wsgi/test_servers.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/functional/wsgi/test_servers.py 2018-12-19 20:57:15.000000000 +0000 @@ -308,7 +308,8 @@ # Since _IntegratedTestBase uses the CastAsCall fixture, when we # get the server back we know all of the volume stuff should be done. 
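Both the regression test above and the functional test change that follows rely on CinderFixtureNewAttachFlow exposing volume_ids_for_instance, a generator reporting which volumes are attached to a given server. An assumed shape for that helper; the fixture's internal bookkeeping structure (volume_to_attachment) is an assumption here:

    def volume_ids_for_instance(self, instance_uuid):
        # Assumed bookkeeping: a dict mapping volume_id -> {attachment_id:
        # attachment info} maintained as attachments are created and deleted.
        for volume_id, attachments in self.volume_to_attachment.items():
            for attachment in attachments.values():
                if attachment['instance_uuid'] == instance_uuid:
                    yield volume_id
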
- self.assertIn(volume_id, cinder.attachments[server['id']]) + self.assertIn(volume_id, + cinder.volume_ids_for_instance(server['id'])) # Now delete the server, which should go through the "local delete" # code in the API, find the build request and delete it along with @@ -317,7 +318,8 @@ # The volume should no longer have any attachments as instance delete # should have removed them. - self.assertNotIn(volume_id, cinder.attachments[server['id']]) + self.assertNotIn(volume_id, + cinder.volume_ids_for_instance(server['id'])) def test_instance_list_build_request_marker_ip_filter(self): """Tests listing instances with a marker that is in the build_requests diff -Nru nova-17.0.7/nova/tests/unit/compute/test_compute_mgr.py nova-17.0.9/nova/tests/unit/compute/test_compute_mgr.py --- nova-17.0.7/nova/tests/unit/compute/test_compute_mgr.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/compute/test_compute_mgr.py 2018-12-19 20:57:15.000000000 +0000 @@ -767,6 +767,36 @@ mock.ANY, mock.ANY, mock.ANY) mock_save.assert_called_once_with() + @mock.patch.object(context, 'get_admin_context') + @mock.patch.object(objects.InstanceList, 'get_by_host') + @mock.patch.object(fake_driver.FakeDriver, 'init_host') + @mock.patch('nova.compute.manager.ComputeManager._init_instance') + @mock.patch('nova.compute.manager.ComputeManager.' + '_destroy_evacuated_instances') + def test_init_host_with_in_progress_evacuations(self, mock_destroy_evac, + mock_init_instance, mock_init_host, mock_host_get, + mock_admin_ctxt): + """Assert that init_instance is not called for instances that are + evacuating from the host during init_host. + """ + active_instance = fake_instance.fake_instance_obj( + self.context, host=self.compute.host, uuid=uuids.active_instance) + evacuating_instance = fake_instance.fake_instance_obj( + self.context, host=self.compute.host, uuid=uuids.evac_instance) + instance_list = objects.InstanceList(self.context, + objects=[active_instance, evacuating_instance]) + + mock_host_get.return_value = instance_list + mock_admin_ctxt.return_value = self.context + mock_destroy_evac.return_value = { + uuids.evac_instance: evacuating_instance + } + + self.compute.init_host() + + mock_init_instance.assert_called_once_with( + self.context, active_instance) + def test_init_instance_with_binding_failed_vif_type(self): # this instance will plug a 'binding_failed' vif instance = fake_instance.fake_instance_obj( @@ -1600,6 +1630,20 @@ use_slave=True) mock_spawn.assert_called_once_with(mock.ANY, instance) + @mock.patch('nova.objects.InstanceList.get_by_host', new=mock.Mock()) + @mock.patch('nova.compute.manager.ComputeManager.' + '_query_driver_power_state_and_sync', + new_callable=mock.NonCallableMock) + def test_sync_power_states_virt_driver_not_ready(self, _mock_sync): + """"Tests that the periodic task exits early if the driver raises + VirtDriverNotReady. 
+ """ + with mock.patch.object( + self.compute.driver, 'get_num_instances', + side_effect=exception.VirtDriverNotReady) as gni: + self.compute._sync_power_states(mock.sentinel.context) + gni.assert_called_once_with() + def _get_sync_instance(self, power_state, vm_state, task_state=None, shutdown_terminate=False): instance = objects.Instance() @@ -3515,14 +3559,9 @@ instance=instance, new_pass=None) - if (expected_exception == exception.SetAdminPasswdNotSupported or - expected_exception == exception.InstanceAgentNotEnabled or - expected_exception == NotImplementedError): + if expected_exception != exception.InstancePasswordSetFailed: instance_save_mock.assert_called_once_with( expected_task_state=task_states.UPDATING_PASSWORD) - else: - # setting the instance to error state - instance_save_mock.assert_called_once_with() self.assertEqual(expected_vm_state, instance.vm_state) # check revert_task_state decorator @@ -3540,7 +3579,7 @@ exc = exception.Forbidden('Internal error') expected_exception = exception.InstancePasswordSetFailed self._do_test_set_admin_password_driver_error( - exc, vm_states.ERROR, None, expected_exception) + exc, vm_states.ACTIVE, None, expected_exception) def test_set_admin_password_driver_not_implemented(self): # Ensure expected exception is raised if set_admin_password not @@ -4622,6 +4661,23 @@ expected_reqspec_hints, self.compute._get_scheduler_hints( filter_properties, reqspec)) + def test_notify_volume_usage_detach_no_block_stats(self): + """Tests the case that the virt driver returns None from the + block_stats() method and no notification is sent, similar to the + virt driver raising NotImplementedError. + """ + self.flags(volume_usage_poll_interval=60) + fake_instance = objects.Instance() + fake_bdm = objects.BlockDeviceMapping(device_name='/dev/vda') + with mock.patch.object(self.compute.driver, 'block_stats', + return_value=None) as block_stats: + # Assert a notification isn't sent. 
+ with mock.patch.object(self.compute.notifier, 'info', + new_callable=mock.NonCallableMock): + self.compute._notify_volume_usage_detach( + self.context, fake_instance, fake_bdm) + block_stats.assert_called_once_with(fake_instance, 'vda') + class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase): def setUp(self): @@ -6933,9 +6989,11 @@ mock_notify_about_inst.assert_has_calls([ mock.call(self.context, instance, 'fake-mini', - action='live_migration_pre', phase='start'), + action='live_migration_pre', phase='start', + bdms=mock_get_bdms.return_value), mock.call(self.context, instance, 'fake-mini', - action='live_migration_pre', phase='end')]) + action='live_migration_pre', phase='end', + bdms=mock_get_bdms.return_value)]) self.assertIsInstance(r, migrate_data_obj.LiveMigrateData) self.assertIsInstance(mock_plm.call_args_list[0][0][5], migrate_data_obj.LiveMigrateData) @@ -6985,22 +7043,19 @@ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') @mock.patch.object(vol1_bdm, 'save') @mock.patch.object(compute, '_notify_about_instance_usage') + @mock.patch('nova.compute.utils.notify_about_instance_action') @mock.patch.object(compute, 'network_api') - @mock.patch.object(compute.driver, 'pre_live_migration') - @mock.patch.object(compute, '_get_instance_block_device_info') - @mock.patch.object(compute_utils, 'is_volume_backed_instance') @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') @mock.patch.object(compute.volume_api, 'attachment_delete') @mock.patch.object(compute.volume_api, 'attachment_create') def _test(mock_attach_create, mock_attach_delete, mock_get_bdms, - mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify, - mock_bdm_save, mock_exception): + mock_nwapi, mock_ver_notify, mock_notify, mock_bdm_save, + mock_exception): new_attachment_id = uuids.attachment3 mock_attach_create.side_effect = [{'id': new_attachment_id}, test.TestingException] mock_get_bdms.return_value = [vol1_bdm, vol2_bdm] - mock_plm.return_value = migrate_data self.assertRaises(test.TestingException, compute.pre_live_migration, @@ -7011,6 +7066,80 @@ self.assertEqual(mock_attach_create.call_count, 2) mock_attach_delete.assert_called_once_with(self.context, new_attachment_id) + + # Meta: ensure un-asserted mocks are still required + for m in (mock_nwapi, mock_get_bdms, mock_ver_notify, mock_notify, + mock_bdm_save, mock_exception): + # NOTE(artom) This is different from assert_called() because + # mock_calls contains the calls to a mock's method as well + # (which is what we want for network_api.get_instance_nw_info + # for example), whereas assert_called() only asserts + # calls to the mock itself. + self.assertGreater(len(m.mock_calls), 0) + _test() + + def test_pre_live_migration_exceptions_delete_attachments(self): + # The instance in this test has 2 attachments. The call to + # driver.pre_live_migration will raise an exception. This will test + # that the attachments are restored after the exception is thrown. 
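The new test introduced above exercises a cleanup pattern: pre_live_migration creates fresh volume attachments for the destination host, and if the driver call fails those attachments must be deleted and the BDMs rolled back to their original attachment IDs. A hedged sketch of that rollback pattern, simplified and not the literal compute manager code:

    def switch_attachments(volume_api, context, instance, bdms, do_migration):
        # Remember the original attachment IDs, create new ones, and on any
        # failure delete what was created and restore the BDMs.
        original = {bdm.volume_id: bdm.attachment_id for bdm in bdms}
        created = []
        try:
            for bdm in bdms:
                attachment = volume_api.attachment_create(
                    context, bdm.volume_id, instance.uuid)
                created.append(attachment['id'])
                bdm.attachment_id = attachment['id']
            do_migration()
        except Exception:
            for bdm in bdms:
                bdm.attachment_id = original[bdm.volume_id]
                bdm.save()
            for attachment_id in created:
                volume_api.attachment_delete(context, attachment_id)
            raise

Keeping the rollback next to the creation is what the regression test checks: after the exception, each BDM again points at its original attachment and the newly created attachments are gone.
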
+ compute = manager.ComputeManager() + + instance = fake_instance.fake_instance_obj(self.context, + uuid=uuids.instance) + vol1_bdm = fake_block_device.fake_bdm_object( + self.context, + {'source_type': 'volume', 'destination_type': 'volume', + 'volume_id': uuids.vol1, 'device_name': '/dev/vdb', + 'instance_uuid': instance.uuid, + 'connection_info': '{"test": "test"}'}) + vol1_bdm.attachment_id = uuids.vol1_attach_orig + + vol2_bdm = fake_block_device.fake_bdm_object( + self.context, + {'source_type': 'volume', 'destination_type': 'volume', + 'volume_id': uuids.vol2, 'device_name': '/dev/vdc', + 'instance_uuid': instance.uuid, + 'connection_info': '{"test": "test"}'}) + vol2_bdm.attachment_id = uuids.vol2_attach_orig + + migrate_data = migrate_data_obj.LiveMigrateData() + migrate_data.old_vol_attachment_ids = {} + + @mock.patch.object(manager, 'compute_utils', autospec=True) + @mock.patch.object(compute, 'network_api', autospec=True) + @mock.patch.object(compute, 'volume_api', autospec=True) + @mock.patch.object(objects.BlockDeviceMapping, 'save') + @mock.patch.object(objects.BlockDeviceMappingList, + 'get_by_instance_uuid') + @mock.patch.object(compute.driver, 'pre_live_migration', autospec=True) + def _test(mock_plm, mock_bdms_get, mock_bdm_save, mock_vol_api, + mock_net_api, mock_compute_utils): + mock_vol_api.attachment_create.side_effect = [ + {'id': uuids.vol1_attach_new}, + {'id': uuids.vol2_attach_new}] + mock_bdms_get.return_value = [vol1_bdm, vol2_bdm] + mock_plm.side_effect = test.TestingException + + self.assertRaises(test.TestingException, + compute.pre_live_migration, + self.context, instance, False, {}, migrate_data) + + self.assertEqual(2, mock_vol_api.attachment_create.call_count) + + # Assert BDMs have original attachments restored + self.assertEqual(uuids.vol1_attach_orig, vol1_bdm.attachment_id) + self.assertEqual(uuids.vol2_attach_orig, vol2_bdm.attachment_id) + + # Assert attachment cleanup + self.assertEqual(2, mock_vol_api.attachment_delete.call_count) + mock_vol_api.attachment_delete.assert_has_calls( + [mock.call(self.context, uuids.vol1_attach_new), + mock.call(self.context, uuids.vol2_attach_new)], + any_order=True) + + # Meta: ensure un-asserted mocks are still required + for m in (mock_net_api, mock_compute_utils): + self.assertGreater(len(m.mock_calls), 0) _test() def test_get_neutron_events_for_live_migration_empty(self): diff -Nru nova-17.0.7/nova/tests/unit/compute/test_compute.py nova-17.0.9/nova/tests/unit/compute/test_compute.py --- nova-17.0.7/nova/tests/unit/compute/test_compute.py 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/compute/test_compute.py 2018-12-19 20:57:15.000000000 +0000 @@ -6098,7 +6098,10 @@ @mock.patch.object(fake.FakeDriver, 'ensure_filtering_rules_for_instance') @mock.patch.object(fake.FakeDriver, 'pre_live_migration') @mock.patch('nova.compute.utils.notify_about_instance_action') - def test_pre_live_migration_works_correctly(self, mock_notify, + @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid', + return_value=objects.BlockDeviceMappingList()) + def test_pre_live_migration_works_correctly(self, mock_get_bdms, + mock_notify, mock_pre, mock_ensure): # Confirm setup_compute_volume is called when volume is mounted. 
def stupid(*args, **kwargs): @@ -6136,9 +6139,11 @@ mock_notify.assert_has_calls([ mock.call(c, instance, 'fake-mini', - action='live_migration_pre', phase='start'), + action='live_migration_pre', phase='start', + bdms=mock_get_bdms.return_value), mock.call(c, instance, 'fake-mini', - action='live_migration_pre', phase='end')]) + action='live_migration_pre', phase='end', + bdms=mock_get_bdms.return_value)]) mock_pre.assert_called_once_with( test.MatchType(nova.context.RequestContext), @@ -7454,7 +7459,10 @@ mock_get_filter.assert_called_once_with(fake_context, {'source_compute': self.compute.host, - 'status': ['accepted', 'done'], + 'status': [ + 'accepted', + 'pre-migrating', + 'done'], 'migration_type': 'evacuation'}) mock_get_inst.assert_called_once_with(fake_context) mock_get_nw.assert_called_once_with(fake_context, evacuated_instance) diff -Nru nova-17.0.7/nova/tests/unit/conductor/test_conductor.py nova-17.0.9/nova/tests/unit/conductor/test_conductor.py --- nova-17.0.7/nova/tests/unit/conductor/test_conductor.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/conductor/test_conductor.py 2018-12-19 20:57:15.000000000 +0000 @@ -44,6 +44,7 @@ from nova.image import api as image_api from nova import objects from nova.objects import base as obj_base +from nova.objects import block_device as block_device_obj from nova.objects import fields from nova import rpc from nova.scheduler import client as scheduler_client @@ -61,6 +62,7 @@ from nova.tests.unit import utils as test_utils from nova.tests import uuidsentinel as uuids from nova import utils +from nova.volume import cinder CONF = conf.CONF @@ -961,6 +963,88 @@ do_test() + @mock.patch.object(cinder.API, 'attachment_get') + @mock.patch.object(cinder.API, 'attachment_create') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_validate_existing_attachment_ids_with_missing_attachments(self, + mock_bdm_save, mock_attachment_create, mock_attachment_get): + instance = self._create_fake_instance_obj() + bdms = [ + block_device.BlockDeviceDict({ + 'boot_index': 0, + 'guest_format': None, + 'connection_info': None, + 'device_type': u'disk', + 'source_type': 'image', + 'destination_type': 'volume', + 'volume_size': 1, + 'image_id': 1, + 'device_name': '/dev/vdb', + 'attachment_id': uuids.attachment, + 'volume_id': uuids.volume + })] + bdms = block_device_obj.block_device_make_list_from_dicts( + self.context, bdms) + mock_attachment_get.side_effect = exc.VolumeAttachmentNotFound( + attachment_id=uuids.attachment) + mock_attachment_create.return_value = {'id': uuids.new_attachment} + + self.assertEqual(uuids.attachment, bdms[0].attachment_id) + self.conductor_manager._validate_existing_attachment_ids(self.context, + instance, + bdms) + mock_attachment_get.assert_called_once_with(self.context, + uuids.attachment) + mock_attachment_create.assert_called_once_with(self.context, + uuids.volume, + instance.uuid) + mock_bdm_save.assert_called_once() + self.assertEqual(uuids.new_attachment, bdms[0].attachment_id) + + @mock.patch.object(cinder.API, 'attachment_get') + @mock.patch.object(cinder.API, 'attachment_create') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_validate_existing_attachment_ids_with_attachments_present(self, + mock_bdm_save, mock_attachment_create, mock_attachment_get): + instance = self._create_fake_instance_obj() + bdms = [ + block_device.BlockDeviceDict({ + 'boot_index': 0, + 'guest_format': None, + 'connection_info': None, + 'device_type': u'disk', + 
'source_type': 'image', + 'destination_type': 'volume', + 'volume_size': 1, + 'image_id': 1, + 'device_name': '/dev/vdb', + 'attachment_id': uuids.attachment, + 'volume_id': uuids.volume + })] + bdms = block_device_obj.block_device_make_list_from_dicts( + self.context, bdms) + mock_attachment_get.return_value = { + "attachment": { + "status": "attaching", + "detached_at": "2015-09-16T09:28:52.000000", + "connection_info": {}, + "attached_at": "2015-09-16T09:28:52.000000", + "attach_mode": "ro", + "instance": instance.uuid, + "volume_id": uuids.volume, + "id": uuids.attachment + }} + + self.assertEqual(uuids.attachment, bdms[0].attachment_id) + self.conductor_manager._validate_existing_attachment_ids(self.context, + instance, + bdms) + mock_attachment_get.assert_called_once_with(self.context, + uuids.attachment) + mock_attachment_create.assert_not_called() + mock_bdm_save.assert_not_called() + self.assertEqual(uuids.attachment, bdms[0].attachment_id) + def test_unshelve_instance_on_host(self): instance = self._create_fake_instance_obj() instance.vm_state = vm_states.SHELVED diff -Nru nova-17.0.7/nova/tests/unit/consoleauth/test_consoleauth.py nova-17.0.9/nova/tests/unit/consoleauth/test_consoleauth.py --- nova-17.0.7/nova/tests/unit/consoleauth/test_consoleauth.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/consoleauth/test_consoleauth.py 2018-12-19 20:57:15.000000000 +0000 @@ -122,6 +122,23 @@ self.assertIsNone( self.manager_api.check_token(self.context, token)) + def test_delete_tokens_for_instance_no_tokens(self): + with test.nested( + mock.patch.object(self.manager, '_get_tokens_for_instance', + return_value=[]), + mock.patch.object(self.manager.mc, 'delete_multi'), + mock.patch.object(self.manager.mc_instance, 'delete') + ) as ( + mock_get_tokens, mock_delete_multi, mock_delete + ): + self.manager.delete_tokens_for_instance( + self.context, self.instance_uuid) + # Since here were no tokens, we didn't try to clear anything + # from the cache. + mock_delete_multi.assert_not_called() + mock_delete.assert_called_once_with( + self.instance_uuid.encode('UTF-8')) + @mock.patch('nova.objects.instance.Instance.get_by_uuid') def test_wrong_token_has_port(self, mock_get): mock_get.return_value = None diff -Nru nova-17.0.7/nova/tests/unit/network/test_neutronv2.py nova-17.0.9/nova/tests/unit/network/test_neutronv2.py --- nova-17.0.7/nova/tests/unit/network/test_neutronv2.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/network/test_neutronv2.py 2018-12-19 20:57:15.000000000 +0000 @@ -3086,6 +3086,9 @@ api = neutronapi.API() port_data = copy.copy(self.port_data1[0]) + # add another IP on the same subnet and verify the subnet is deduped + port_data['fixed_ips'].append({'ip_address': '10.0.1.3', + 'subnet_id': 'my_subid1'}) subnet_data1 = copy.copy(self.subnet_data1) subnet_data1[0]['host_routes'] = [ {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'} diff -Nru nova-17.0.7/nova/tests/unit/objects/test_instance.py nova-17.0.9/nova/tests/unit/objects/test_instance.py --- nova-17.0.7/nova/tests/unit/objects/test_instance.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/objects/test_instance.py 2018-12-19 20:57:15.000000000 +0000 @@ -336,8 +336,10 @@ # make sure we default the "new" flavor's disabled value to False on # load from the database. 
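Booleans missing from an embedded flavor blob are given their field defaults when the instance is loaded, which is what the new is_public assertions below rely on, mirroring the existing handling of disabled. A minimal sketch of that defaulting using the oslo.versionedobjects helpers; the function is illustrative and not the exact load path in nova/objects/instance.py:

    from nova import objects

    def load_embedded_flavor(primitive):
        # Default unset boolean fields on a flavor deserialized from the
        # instance_extra blob.
        flavor = objects.Flavor.obj_from_primitive(primitive)
        for attr in ('disabled', 'is_public'):
            if not flavor.obj_attr_is_set(attr):
                # Flavor.disabled defaults to False, Flavor.is_public to True.
                flavor.obj_set_defaults(attr)
        return flavor
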
fake_flavor = jsonutils.dumps( - {'cur': objects.Flavor(disabled=False).obj_to_primitive(), - 'old': objects.Flavor(disabled=True).obj_to_primitive(), + {'cur': objects.Flavor(disabled=False, + is_public=True).obj_to_primitive(), + 'old': objects.Flavor(disabled=True, + is_public=False).obj_to_primitive(), 'new': objects.Flavor().obj_to_primitive()}) fake_inst = dict(self.fake_instance, extra={'flavor': fake_flavor}) mock_get.return_value = fake_inst @@ -345,6 +347,10 @@ self.assertFalse(inst.flavor.disabled) self.assertTrue(inst.old_flavor.disabled) self.assertFalse(inst.new_flavor.disabled) + # Assert the is_public values on the flavors + self.assertTrue(inst.flavor.is_public) + self.assertFalse(inst.old_flavor.is_public) + self.assertTrue(inst.new_flavor.is_public) @mock.patch.object(db, 'instance_get_by_uuid') def test_get_remote(self, mock_get): diff -Nru nova-17.0.7/nova/tests/unit/virt/libvirt/test_driver.py nova-17.0.9/nova/tests/unit/virt/libvirt/test_driver.py --- nova-17.0.7/nova/tests/unit/virt/libvirt/test_driver.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/tests/unit/virt/libvirt/test_driver.py 2018-12-19 20:57:15.000000000 +0000 @@ -8294,7 +8294,7 @@ instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'disk_available_least': -1000, - 'local_gb': 100, + 'free_disk_gb': 50, 'cpu_info': 'asdf', } filename = "file" @@ -8311,7 +8311,7 @@ return_value.is_volume_backed = False self.assertThat({"filename": "file", 'image_type': 'default', - 'disk_available_mb': 102400, + 'disk_available_mb': 51200, "disk_over_commit": True, "block_migration": True, "is_volume_backed": False}, diff -Nru nova-17.0.7/nova/utils.py nova-17.0.9/nova/utils.py --- nova-17.0.7/nova/utils.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/utils.py 2018-12-19 20:57:15.000000000 +0000 @@ -1385,8 +1385,8 @@ fd = None try: fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT) - # Check is the write allowed with 512 byte alignment - align_size = 512 + # Check is the write allowed with 4096 byte alignment + align_size = 4096 m = mmap.mmap(-1, align_size) m.write(b"x" * align_size) os.write(fd, m) diff -Nru nova-17.0.7/nova/virt/driver.py nova-17.0.9/nova/virt/driver.py --- nova-17.0.7/nova/virt/driver.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/virt/driver.py 2018-12-19 20:57:15.000000000 +0000 @@ -1337,6 +1337,13 @@ unused. Note that this function takes an instance ID. + + :param instance: nova.objects.Instance to get block storage statistics + :param disk_id: mountpoint name, e.g. 
"vda" + :returns: None if block statistics could not be retrieved, otherwise a + list of the form: [rd_req, rd_bytes, wr_req, wr_bytes, errs] + :raises: NotImplementedError if the driver does not implement this + method """ raise NotImplementedError() diff -Nru nova-17.0.7/nova/virt/libvirt/driver.py nova-17.0.9/nova/virt/libvirt/driver.py --- nova-17.0.7/nova/virt/libvirt/driver.py 2018-10-08 21:57:15.000000000 +0000 +++ nova-17.0.9/nova/virt/libvirt/driver.py 2018-12-19 20:57:15.000000000 +0000 @@ -6508,7 +6508,7 @@ :returns: a LibvirtLiveMigrateData object """ if disk_over_commit: - disk_available_gb = dst_compute_info['local_gb'] + disk_available_gb = dst_compute_info['free_disk_gb'] else: disk_available_gb = dst_compute_info['disk_available_least'] disk_available_mb = ( diff -Nru nova-17.0.7/nova.egg-info/pbr.json nova-17.0.9/nova.egg-info/pbr.json --- nova-17.0.7/nova.egg-info/pbr.json 2018-10-08 21:59:15.000000000 +0000 +++ nova-17.0.9/nova.egg-info/pbr.json 2018-12-19 20:58:55.000000000 +0000 @@ -1 +1 @@ -{"git_version": "a48c112", "is_release": true} \ No newline at end of file +{"git_version": "33dc9f7", "is_release": true} \ No newline at end of file diff -Nru nova-17.0.7/nova.egg-info/PKG-INFO nova-17.0.9/nova.egg-info/PKG-INFO --- nova-17.0.7/nova.egg-info/PKG-INFO 2018-10-08 21:59:15.000000000 +0000 +++ nova-17.0.9/nova.egg-info/PKG-INFO 2018-12-19 20:58:55.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: nova -Version: 17.0.7 +Version: 17.0.9 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack diff -Nru nova-17.0.7/nova.egg-info/SOURCES.txt nova-17.0.9/nova.egg-info/SOURCES.txt --- nova-17.0.7/nova.egg-info/SOURCES.txt 2018-10-08 21:59:15.000000000 +0000 +++ nova-17.0.9/nova.egg-info/SOURCES.txt 2018-12-19 20:58:56.000000000 +0000 @@ -2464,6 +2464,7 @@ nova/tests/functional/compute/__init__.py nova/tests/functional/compute/test_host_api.py nova/tests/functional/compute/test_instance_list.py +nova/tests/functional/compute/test_live_migration.py nova/tests/functional/compute/test_migration_list.py nova/tests/functional/compute/test_resource_tracker.py nova/tests/functional/db/__init__.py @@ -2537,7 +2538,11 @@ nova/tests/functional/regressions/test_bug_1741307.py nova/tests/functional/regressions/test_bug_1746483.py nova/tests/functional/regressions/test_bug_1746509.py +nova/tests/functional/regressions/test_bug_1764883.py nova/tests/functional/regressions/test_bug_1780373.py +nova/tests/functional/regressions/test_bug_1784353.py +nova/tests/functional/regressions/test_bug_1797580.py +nova/tests/functional/regressions/test_bug_1806064.py nova/tests/functional/wsgi/__init__.py nova/tests/functional/wsgi/test_flavor_manage.py nova/tests/functional/wsgi/test_interfaces.py @@ -3315,6 +3320,8 @@ playbooks/legacy/nova-lvm/run.yaml playbooks/legacy/nova-multiattach/post.yaml playbooks/legacy/nova-multiattach/run.yaml +playbooks/legacy/nova-next/post.yaml +playbooks/legacy/nova-next/run.yaml releasenotes/notes/.placeholder releasenotes/notes/13.0.0-cve-bugs-fe43ef267a82f304.yaml releasenotes/notes/1516578-628b417b372f4f0f.yaml @@ -3447,6 +3454,7 @@ releasenotes/notes/bug-1759316-nova-status-api-version-check-183fac0525bfd68c.yaml releasenotes/notes/bug-1763183-service-delete-with-instances-d7c5c47e4ce31239.yaml releasenotes/notes/bug-1778044-f498ee2f2cfb35ea.yaml +releasenotes/notes/bug-1801702-c8203d3d55007deb.yaml releasenotes/notes/bug-hyperv-1629040-e1eb35a7b31d9af8.yaml 
releasenotes/notes/bug-volume-attach-policy-1635358-671ce4d4ee8c211b.yaml releasenotes/notes/bug_1659328-73686be497f5f85a.yaml diff -Nru nova-17.0.7/PKG-INFO nova-17.0.9/PKG-INFO --- nova-17.0.7/PKG-INFO 2018-10-08 21:59:17.000000000 +0000 +++ nova-17.0.9/PKG-INFO 2018-12-19 20:58:57.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: nova -Version: 17.0.7 +Version: 17.0.9 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack diff -Nru nova-17.0.7/playbooks/legacy/nova-next/post.yaml nova-17.0.9/playbooks/legacy/nova-next/post.yaml --- nova-17.0.7/playbooks/legacy/nova-next/post.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/playbooks/legacy/nova-next/post.yaml 2018-12-19 20:57:10.000000000 +0000 @@ -0,0 +1,15 @@ +- hosts: primary + tasks: + + - name: Copy files from {{ ansible_user_dir }}/workspace/ on node + synchronize: + src: '{{ ansible_user_dir }}/workspace/' + dest: '{{ zuul.executor.log_root }}' + mode: pull + copy_links: true + verify_host: true + rsync_opts: + - --include=/logs/** + - --include=*/ + - --exclude=* + - --prune-empty-dirs diff -Nru nova-17.0.7/playbooks/legacy/nova-next/run.yaml nova-17.0.9/playbooks/legacy/nova-next/run.yaml --- nova-17.0.7/playbooks/legacy/nova-next/run.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/playbooks/legacy/nova-next/run.yaml 2018-12-19 20:57:15.000000000 +0000 @@ -0,0 +1,69 @@ +- hosts: all + name: nova-next + tasks: + + - name: Ensure workspace directory + file: + path: '{{ ansible_user_dir }}/workspace' + state: directory + + - shell: + cmd: | + set -e + set -x + cat > clonemap.yaml << EOF + clonemap: + - name: openstack-infra/devstack-gate + dest: devstack-gate + EOF + /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ + git://git.openstack.org \ + openstack-infra/devstack-gate + executable: /bin/bash + chdir: '{{ ansible_user_dir }}/workspace' + environment: '{{ zuul | zuul_legacy_vars }}' + + - shell: + # TODO(mriedem): Consider setting USE_PYTHON3=True here to make this + # job run under python 3.5 which is a "next" type thing. + + # Enable TLS between the noVNC proxy & compute nodes; this requires + # the tls-proxy service to be enabled. + cmd: | + set -e + set -x + cat << 'EOF' >>"/tmp/dg-local.conf" + [[local|localrc]] + NOVA_USE_SERVICE_TOKEN=True + NOVA_CONSOLE_PROXY_COMPUTE_TLS=True + + EOF + executable: /bin/bash + chdir: '{{ ansible_user_dir }}/workspace' + environment: '{{ zuul | zuul_legacy_vars }}' + + - shell: + cmd: | + set -e + set -x + # tls-proxy is needed to initialize the CA and cert. + export ENABLED_SERVICES=tls-proxy + export PYTHONUNBUFFERED=true + # Yes we want to run Tempest. + export DEVSTACK_GATE_TEMPEST=1 + # Run non-slow tempest API tests (concurrently) and scenario + # tests (serially). + export DEVSTACK_GATE_TEMPEST_FULL=1 + # The post_test_hook runs some post-test CLIs for things that + # Tempest does not test, like archiving deleted records. 
+ function post_test_hook { + if [ -f $BASE/new/nova/gate/post_test_hook.sh ]; then + $BASE/new/nova/gate/post_test_hook.sh + fi + } + export -f post_test_hook + cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh + ./safe-devstack-vm-gate-wrap.sh + executable: /bin/bash + chdir: '{{ ansible_user_dir }}/workspace' + environment: '{{ zuul | zuul_legacy_vars }}' diff -Nru nova-17.0.7/releasenotes/notes/bug-1801702-c8203d3d55007deb.yaml nova-17.0.9/releasenotes/notes/bug-1801702-c8203d3d55007deb.yaml --- nova-17.0.7/releasenotes/notes/bug-1801702-c8203d3d55007deb.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-17.0.9/releasenotes/notes/bug-1801702-c8203d3d55007deb.yaml 2018-12-19 20:57:10.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + When testing whether direct IO is possible on the backing storage + for an instance, Nova now uses a block size of 4096 bytes instead + of 512 bytes, avoiding issues when the underlying block device has + sectors larger than 512 bytes. See bug + https://launchpad.net/bugs/1801702 for details. diff -Nru nova-17.0.7/.zuul.yaml nova-17.0.9/.zuul.yaml --- nova-17.0.7/.zuul.yaml 2018-10-08 21:57:14.000000000 +0000 +++ nova-17.0.9/.zuul.yaml 2018-12-19 20:57:15.000000000 +0000 @@ -125,6 +125,20 @@ run: playbooks/legacy/nova-multiattach/run.yaml post-run: playbooks/legacy/nova-multiattach/post.yaml +- job: + name: nova-next + parent: nova-dsvm-base + description: | + This job was added in Newton when placement and cellsv2 + were optional. Placement and cellsv2 are required starting in Ocata. In + Pike, the service user token functionality was added. This job is also + unique in that it runs the post_test_hook from the nova repo, which runs + post-test scripts to ensure those scripts are still working, + e.g. archive_deleted_rows. In Queens, this job started testing the + TLS console proxy code in the libvirt driver. + run: playbooks/legacy/nova-next/run.yaml + post-run: playbooks/legacy/nova-next/post.yaml + - project: # Please try to keep the list of job names sorted alphabetically. templates:
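The bug 1801702 release note above pairs with the nova/utils.py change earlier in this diff: the O_DIRECT probe now writes a 4096-byte aligned buffer, so hosts whose block devices expose 4 KiB sectors are no longer wrongly reported as lacking direct I/O support. A standalone sketch of that probe, simplified from the shipped supports_direct_io helper with the detailed errno handling trimmed:

    import mmap
    import os

    def supports_direct_io(dirpath):
        """Probe whether dirpath's backing storage accepts O_DIRECT writes."""
        testfile = os.path.join(dirpath, '.directio.test')
        align_size = 4096  # covers devices with 512-byte or 4096-byte sectors
        fd = None
        try:
            fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # O_DIRECT requires an aligned buffer; mmap memory is
            # page-aligned, which satisfies 4096-byte alignment.
            m = mmap.mmap(-1, align_size)
            m.write(b'x' * align_size)
            os.write(fd, m)
            return True
        except OSError:
            return False
        finally:
            if fd is not None:
                os.close(fd)
            if os.path.exists(testfile):
                os.unlink(testfile)

With a 512-byte buffer the write fails with EINVAL on 4 KiB-sector devices even though direct I/O is supported, which is exactly the misdetection the backported fix avoids.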