diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/accelerator-support.rst nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/accelerator-support.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/accelerator-support.rst 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/accelerator-support.rst 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,56 @@ +============================== +Using accelerators with Cyborg +============================== + +Starting from microversion 2.82, nova supports creating servers with +accelerators provisioned with the Cyborg service, which provides lifecycle +management for accelerators. + +To launch servers with accelerators, the administrator (or a user with +appropriate privileges) must do the following: + + * Create a device profile in Cyborg, which specifies what accelerator + resources need to be provisioned. (See `Cyborg device profiles API + `_.) + + * Set the device profile name as an extra spec in a chosen flavor, + with this syntax: + + .. code:: + + accel:device_profile=$device_profile_name + + The chosen flavor may be a newly created one or an existing one. + + * Use that flavor to create a server: + + .. code:: + + openstack server create --flavor $myflavor --image $myimage $servername + +As of 21.0.0 (Ussuri), nova supports only specific operations for instances +with accelerators. The supported and unsupported operations are listed +below: + + * Supported operations: + + * Creation and deletion. + * Reboots (soft and hard). + * Pause and unpause. + * Stop and start. + * Take a snapshot. + * Backup. + * Rescue and unrescue. + + * Unsupported operations: + + * Rebuild. + * Resize. + * Evacuate. + * Suspend and resume. + * Shelve and unshelve. + * Cold migration. + * Live migration. + +Some operations, such as lock and unlock, work because they are effectively +no-ops for accelerators. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/index.rst nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/index.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/index.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/index.rst 2020-04-10 17:57:57.000000000 +0000 @@ -88,3 +88,4 @@ request_and_response_formats down_cells port_with_resource_request + accelerator-support diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/port_with_resource_request.rst nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/port_with_resource_request.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/port_with_resource_request.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/port_with_resource_request.rst 2020-04-10 17:57:57.000000000 +0000 @@ -31,8 +31,8 @@ ``[upgrade_levels]/compute`` configuration does not prevent the computes from using the latest RPC version. -As of 21.0.0 (Ussuri), nova supports evacuating and live migrating servers -with neutron ports having resource requests. +As of 21.0.0 (Ussuri), nova supports evacuating, live migrating and unshelving +servers with neutron ports having resource requests. See :nova-doc:`the admin guide ` for administrative details.
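The accelerator guide added above stops at the flavor extra spec and the boot command. As a minimal end-to-end sketch of that flow, assuming a Cyborg deployment exposing an FPGA resource class and the ``openstack accelerator`` commands from python-cyborgclient (the profile, flavor, and image names here are hypothetical):

.. code:: console

    # Create a device profile in Cyborg requesting one FPGA.
    $ openstack accelerator device profile create my-fpga-profile \
        '[{"resources:FPGA": "1"}]'

    # Record the device profile name in the flavor extra spec.
    $ openstack flavor set --property accel:device_profile=my-fpga-profile myflavor

    # Boot with that flavor; microversion 2.82 or later is required.
    $ openstack --os-compute-api-version 2.82 server create \
        --flavor myflavor --image myimage accel-server
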
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/server_concepts.rst nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/server_concepts.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-guide/source/server_concepts.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-guide/source/server_concepts.rst 2020-04-10 17:57:57.000000000 +0000 @@ -159,6 +159,17 @@ - ``tags-any`` (New in version 2.26) - ``changes-before`` (New in version 2.66) - ``locked`` (New in version 2.73) + - ``availability_zone`` (New in version 2.83) + - ``config_drive`` (New in version 2.83) + - ``key_name`` (New in version 2.83) + - ``created_at`` (New in version 2.83) + - ``launched_at`` (New in version 2.83) + - ``terminated_at`` (New in version 2.83) + - ``power_state`` (New in version 2.83) + - ``task_state`` (New in version 2.83) + - ``vm_state`` (New in version 2.83) + - ``progress`` (New in version 2.83) + - ``user_id`` (New in version 2.83) Other options will be ignored by nova silently. @@ -177,12 +188,12 @@ "servers": [ { "name": "t1", - "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:host": "devstack1", ... }, { "name": "t2", - "OS-EXT-STS:vm_state": "stopped", + "OS-EXT-SRV-ATTR:host": "devstack2", ... } ] @@ -190,13 +201,13 @@ **Example: General user query server with administrator only options** -Request with non-administrator context: ``GET /servers/detail?vm_state=active`` +Request with non-administrator context: ``GET /servers/detail?host=devstack1`` .. note:: - The ``vm_state`` query parameter is only for administrator users and + The ``host`` query parameter is only for administrator users and the query parameter is ignored if specified by non-administrator users. - Thus the API returns servers of both ``active`` and ``stopped`` + Thus the API returns servers of both ``devstack1`` and ``devstack2`` in this example. Response:: @@ -216,7 +227,7 @@ **Example: Administrator query server with administrator only options** -Request with administrator context: ``GET /servers/detail?vm_state=active`` +Request with administrator context: ``GET /servers/detail?host=devstack1`` Response:: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-instance-actions.inc nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-instance-actions.inc --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-instance-actions.inc 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-instance-actions.inc 2020-04-10 17:57:57.000000000 +0000 @@ -114,6 +114,7 @@ - events.traceback: event_traceback - events.hostId: event_hostId - events.host: event_host + - events.details: event_details - updated_at: updated_instance_action **Example Show Server Action Details For Admin (v2.62)** @@ -125,3 +126,8 @@ .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.62/instance-action-get-non-admin-resp.json :language: javascript + +**Example Show Server Action Details For System Reader (v2.84)** + +.. 
literalinclude:: ../../doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json + :language: javascript diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-keypairs.inc nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-keypairs.inc --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-keypairs.inc 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-keypairs.inc 2020-04-10 17:57:57.000000000 +0000 @@ -41,7 +41,7 @@ **Example List Keypairs (v2.35): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.35/keypairs-list-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json :language: javascript Create Or Import Keypair @@ -72,7 +72,7 @@ **Example Create Or Import Keypair (v2.10): JSON request** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json :language: javascript Response @@ -90,7 +90,7 @@ **Example Create Or Import Keypair (v2.10): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json :language: javascript Show Keypair Details @@ -131,7 +131,7 @@ **Example Show Keypair Details (v2.10): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-get-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json :language: javascript Delete Keypair diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-volume-attachments.inc nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-volume-attachments.inc --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/os-volume-attachments.inc 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/os-volume-attachments.inc 2020-04-10 17:57:57.000000000 +0000 @@ -34,10 +34,10 @@ .. rest_parameters:: parameters.yaml - volumeAttachments: volumeAttachments - - device: device_resp - id: attachment_id_required - serverId: server_id - volumeId: volumeId_resp + - device: attachment_device_resp - tag: device_tag_bdm_attachment_resp - delete_on_termination: delete_on_termination_attachments_resp @@ -46,9 +46,9 @@ .. literalinclude:: ../../doc/api_samples/os-volumes/list-volume-attachments-resp.json :language: javascript -**Example List tagged volume attachments for an instance (v2.70): JSON response** +**Example List tagged volume attachments for an instance (v2.79): JSON response** -.. literalinclude:: ../../doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json :language: javascript Attach a volume to an instance @@ -150,10 +150,10 @@ .. rest_parameters:: parameters.yaml - volumeAttachment: volumeAttachment - - device: device_resp - id: attachment_id_required - serverId: server_id - volumeId: volumeId_resp + - device: attachment_device_resp - tag: device_tag_bdm_attachment_resp - delete_on_termination: delete_on_termination_attachments_resp @@ -162,9 +162,9 @@ .. literalinclude:: ../../doc/api_samples/os-volumes/volume-attachment-detail-resp.json :language: javascript -**Example Show a detail of a tagged volume attachment (v2.70): JSON response** +**Example Show a detail of a tagged volume attachment (v2.79): JSON response** -.. 
literalinclude:: ../../doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json :language: javascript Update a volume attachment @@ -177,19 +177,25 @@ .. note:: This action only valid when the server is in ACTIVE, PAUSED and RESIZED state, or a conflict(409) error will be returned. -.. warning:: This API is typically meant to only be used as part of a larger - orchestrated volume migration operation initiated in the block - storage service via the ``os-retype`` or ``os-migrate_volume`` - volume actions. Direct usage of this API is not recommended and - may result in needing to hard reboot the server to update details - within the guest such as block storage serial IDs. Furthermore, - this API is only implemented by `certain compute drivers`_. +.. warning:: When updating volumeId, this API is typically meant to + only be used as part of a larger orchestrated volume + migration operation initiated in the block storage + service via the ``os-retype`` or ``os-migrate_volume`` + volume actions. Direct usage of this API to update + volumeId is not recommended and may result in needing to + hard reboot the server to update details within the guest + such as block storage serial IDs. Furthermore, updating + volumeId via this API is only implemented by `certain + compute drivers`_. .. _certain compute drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_swap_volume -Policy defaults enable only users with the administrative role to perform -this operation. Cloud providers can change these permissions through the -``policy.json`` file. +The policy default role is 'rule:system_admin_or_owner', with scope +[system, project], which allows project members or system admins to +change the fields of an attached volume of a server. Policy defaults +enable only users with the administrative role to change ``volumeId`` +via this operation. Cloud providers can change these permissions +through the ``policy.json`` file. Updating, or what is commonly referred to as "swapping", volume attachments with volumes that have more than one read/write attachment, is not supported. @@ -207,10 +213,19 @@ - volume_id: volume_id_swap_src - volumeAttachment: volumeAttachment_put - volumeId: volumeId_swap + - delete_on_termination: delete_on_termination_put_req + - device: attachment_device_put_req + - serverId: attachment_server_id_put_req + - tag: device_tag_bdm_attachment_put_req + - id: attachment_id_put_req + +.. note:: Other than ``volumeId``, as of v2.85 only + ``delete_on_termination`` may be changed from the current + value. -**Example Update a volume attachment: JSON request** +**Example Update a volume attachment (v2.85): JSON request** -.. literalinclude:: ../../doc/api_samples/os-volumes/update-volume-req.json +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json :language: javascript Response diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/parameters.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/parameters.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/parameters.yaml 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/parameters.yaml 2020-04-10 17:57:57.000000000 +0000 @@ -417,8 +417,9 @@ description: | Filter the server list result by server availability zone. - This parameter is only valid when specified by administrators. 
- If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -579,8 +580,9 @@ description: | Filter the server list result by the config drive setting of the server. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -597,8 +599,9 @@ For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -888,8 +891,9 @@ description: | Filter the server list result by keypair name. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -942,8 +946,9 @@ For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1121,15 +1126,17 @@ 6: CRASHED 7: SUSPENDED - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. progress_query_server: description: | Filter the server list result by the progress of the server. The value could be from 0 to 100 as integer. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: integer @@ -1371,8 +1378,9 @@ description: | Filter the server list result by task state. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. tenant_id_query: description: | Specify the project ID (tenant ID) to show the rate and absolute limits. @@ -1392,8 +1400,9 @@ For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. 
- This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1439,8 +1448,9 @@ description: | Filter the list of servers by the given user ID. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1469,8 +1479,9 @@ - ``STOPPED`` - ``SUSPENDED`` - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1765,12 +1776,26 @@ in: body required: true type: string +attachment_device_put_req: + description: | + Name of the device in the attachment object, such as, ``/dev/vdb``. + in: body + required: false + type: string + min_version: 2.85 attachment_device_resp: description: | Name of the device in the attachment object, such as, ``/dev/vdb``. in: body required: false type: string +attachment_id_put_req: + description: | + The UUID of the attachment. + in: body + required: false + type: string + min_version: 2.85 attachment_id_required: description: | The UUID of the attachment. @@ -1783,6 +1808,13 @@ in: body required: false type: string +attachment_server_id_put_req: + description: | + The UUID of the server. + in: body + required: false + type: string + min_version: 2.85 attachment_server_id_resp: description: | The UUID of the server. @@ -2283,6 +2315,14 @@ required: true type: boolean min_version: 2.79 +delete_on_termination_put_req: + description: | + A flag indicating if the attached volume will be deleted when the server is + deleted. + in: body + required: false + type: boolean + min_version: 2.85 deleted: description: | A boolean indicates whether this aggregate is deleted or not, if it has @@ -2373,6 +2413,13 @@ required: false type: string min_version: 2.49 +device_tag_bdm_attachment_put_req: + description: | + The device tag applied to the volume block device or ``null``. + in: body + required: true + type: string + min_version: 2.85 device_tag_bdm_attachment_resp: description: | The device tag applied to the volume block device or ``null``. @@ -2600,6 +2647,13 @@ in: body required: true type: string +event_details: + min_version: 2.84 + description: | + Details of the event. May be ``null``. + in: body + required: true + type: string event_finish_time: description: | The date and time when the event was finished. The date and time @@ -2648,6 +2702,7 @@ - ``network-vif-deleted`` - ``volume-extended`` (since microversion ``2.51``) - ``power-update`` (since microversion ``2.76``) + - ``accelerator-request-bound`` (since microversion ``2.82``) in: body required: true @@ -2685,6 +2740,8 @@ A string value that identifies the event. Certain types of events require specific tags: + - For the ``accelerator-request-bound`` event, the tag must be + the accelerator request UUID. 
- For the ``power-update`` event the tag must be either be ``POWER_ON`` or ``POWER_OFF``. - For the ``volume-extended`` event the tag must be the volume id. @@ -7349,7 +7406,8 @@ volumeAttachment_put: description: | A dictionary representation of a volume attachment containing the field - ``volumeId`` which is the UUID of the replacement volume. + ``volumeId`` which is the UUID of the replacement volume, and other fields + to update in the attachment. in: body required: true type: object diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/servers.inc nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/servers.inc --- nova-21.0.0~b2~git2020021008.1fcd74730d/api-ref/source/servers.inc 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/api-ref/source/servers.inc 2020-04-10 17:57:57.000000000 +0000 @@ -173,7 +173,17 @@ - ``tags-any`` (New in version 2.26) - ``changes-before`` (New in version 2.66) - ``locked`` (New in version 2.73) - + - ``availability_zone`` (New in version 2.83) + - ``config_drive`` (New in version 2.83) + - ``key_name`` (New in version 2.83) + - ``created_at`` (New in version 2.83) + - ``launched_at`` (New in version 2.83) + - ``terminated_at`` (New in version 2.83) + - ``power_state`` (New in version 2.83) + - ``task_state`` (New in version 2.83) + - ``vm_state`` (New in version 2.83) + - ``progress`` (New in version 2.83) + - ``user_id`` (New in version 2.83) - For admin user, whitelist includes all filter keys mentioned in :ref:`list-server-request` Section. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/AUTHORS nova-21.0.0~b3~git2020041013.57ff308d6d/AUTHORS --- nova-21.0.0~b2~git2020021008.1fcd74730d/AUTHORS 2020-02-10 08:50:37.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/AUTHORS 2020-04-10 17:58:17.000000000 +0000 @@ -206,6 +206,7 @@ Brian Lamar Brian Moss Brian Rosmaita +Brian Rosmaita Brian Schott Brian Waldon Brianna Poulos @@ -545,6 +546,7 @@ Ildiko Vancsa Ildiko Vancsa Ilya Alekseyev +Ilya Etingof Ilya Pekelny Ilya Popov Ilya Shakhat @@ -583,6 +585,7 @@ Janis Gengeris Jared Culp Jared Winborne +Jason Anderson Jason Cannavale Jason Dillaman Jason Koelker @@ -640,6 +643,7 @@ Johannes Erdfelt Johannes Erdfelt Johannes Kulik +Johannes Kulik John John Bresnahan John Dewey @@ -923,6 +927,7 @@ NTT PF Lab. 
Nachi Ueno Nam Nguyen Hoai +Nathan Kinder Naveed Massjouni Navneet Kumar Neha Alhat @@ -1192,6 +1197,7 @@ Stephanie Reese Stephen Finucane Stephen Finucane +Stephen Finucane Stephen Finucane Stephen Gran StephenSun @@ -1214,6 +1220,7 @@ Sumanth Nagadavalli Sumedh Degaonkar Sumit Naiksatam +Sundar Nadathur Sunil Thaha Surojit Pathak Surya @@ -1308,6 +1315,7 @@ Venkateswarlu Pallamala Vern Hart Vic Howard +Victor Coutellier Victor Morales Victor Sergeyev Victor Stinner @@ -1517,6 +1525,7 @@ guillaume-thouvenin guohliu gustavo panizzo +hackertron hartsocks heha heijlong @@ -1699,6 +1708,7 @@ wanghao wanghongtaozz wanghongxu +wangjiajing wangqi wangxiyuan warewang diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/ChangeLog nova-21.0.0~b3~git2020041013.57ff308d6d/ChangeLog --- nova-21.0.0~b2~git2020021008.1fcd74730d/ChangeLog 2020-02-10 08:50:36.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/ChangeLog 2020-04-10 17:58:15.000000000 +0000 @@ -1,18 +1,301 @@ CHANGES ======= +* [Trivial] FUP: addressed comments in support non-admin filter instances +* Add new default roles in shelve server policies +* Introduce scope\_types in shelve server +* Add test coverage of existing shelve policies +* Functional test with pGPUs +* Support different vGPU types per pGPU +* libvirt: Calculate disk\_over\_committed for raw instances +* Temporarily skip TestNovaMigrationsMySQL +* libvirt: Add support for stable device rescue +* virt: Provide block\_device\_info during rescue +* Pass the actual target in os-console-auth-tokens policy +* Add new default roles in os-console-auth-tokens policies +* FUP: add missing test for PUT volume attachments API +* Reset the cell cache for database access in Service +* Add new default roles in server password policies +* Follow-up for flavor-extra-spec-validators series +* docs: Add documentation for flavor extra specs +* api: Add microversion for extra spec validation +* Drop concept of '?validation' parameter +* api: Add support for new cyborg extra specs +* api: Add framework for extra spec validation +* Convert delete\_on\_termination from string to boolean +* Separate update and swap volume policies +* Provide the parent pGPU when creating a new vGPU +* Support live migration with vpmem +* partial support for live migration with specific resources +* Correct server topology policy check\_str +* Correct server shelve policy check\_str +* Add new default roles in server tags policies +* Introduce scope\_types in server tags policy +* Add test coverage of existing server tags policies +* Fix server tags policy to be admin\_or\_owner +* Fix new context comparison workaround in base tests class +* Disable the policy warning temporary +* Pass the actual target in os-flavor-manage policy +* Add new default roles in os-flavor\_manage policies +* Introduce scope\_types in os-flavor-manage +* Pass the actual target in server migration policy +* Add new default roles in server migration policies +* Introduce scope\_types in server migration +* Add test coverage of existing server migrations policies +* Add test coverage of existing flavor\_manage policies +* Introduce scope\_types in simple tenant usage +* Add new default roles in suspend server policies +* Introduce scope\_types in suspend server +* Add test coverage of existing suspend server policies +* Fix resume server policy to be admin\_or\_owner +* Add test coverage of existing simple tenant usage policies +* Introduce scope\_types in server password policy +* Add test coverage of existing server password policies 
+* Add new default roles in server metadata policies +* Introduce scope\_types in server metadata +* Add test coverage of existing server metadata policies +* Fix server metadata policy to be admin\_or\_owner +* Fix server password policy to be admin\_or\_owner +* Add new default roles in security group policies +* Allow versioned discovery unauthenticated +* Repro bug 1845530: versioned discovery is authed +* Stabilize functional tests +* Add release notes for Cyborg-Nova integration +* Introduce scope\_types in server group policy +* Add test coverage of existing server group policies +* Introduce scope\_types in server external events +* Pass the actual target in limits policy +* Add new default roles in limits policies +* Introduce scope\_types in limits policy +* Add test coverage of existing server external events policies +* Introduce scope\_types in security groups policy +* Add test coverage of existing security groups policies +* Correct security groups policy check\_str +* Pass the actual target in server diagnostics policy +* Add test coverage of existing limits policies +* Support for nova-manage placement heal\_allocations --cell +* Allow PUT volume attachments API to modify delete\_on\_termination +* Fix assertEqual param order in Accelerator tests +* Add new default roles in server diagnostics policies +* Introduce scope\_types in server diagnostics +* Add test coverage of existing server diagnostics policies +* Add new default roles in remote console policies +* Combine the limits policies in single place +* libvirt: Remove QEMU\_VERSION\_REQ\_SHARED +* images: Remove Libvirt specific configurable use from qemu\_img\_info +* libvirt: Always provide the size in bytes when calling virDomainBlockResize +* Add new default roles in rescue server policies +* Introduce scope\_types in rescue server policy +* Add test coverage of existing rescue policies +* Introduce scope\_types in remote consoles policy +* Add test coverage of existing remote console policies +* Pass the actual target in unlock override policy +* Pass the actual target in migrate server policy +* Add new default roles in migrate server policies +* Introduce scope\_types in migrate server +* Add info about affinity requests to the troubleshooting doc +* Add new default roles in lock server policies +* Pass the actual target in migrations policy +* Add new default roles in migrations policies +* Add new default roles in pause server policies +* Introduce scope\_types in pause server policy +* Add test coverage of existing pause server policies +* Add test coverage of existing lock server policies +* Add cyborg tempest job +* Block unsupported instance operations with accelerators +* Bump compute rpcapi version and reduce Cyborg calls +* Fix unpause server policy to be admin\_or\_owner +* Introduce scope\_types in list migrations +* Add test coverage of existing migrations policies +* Add test coverage of existing migrate server policies +* Correct limits policy check\_str +* Pass the actual target in os-hypervisors policy +* Introduce scope\_types in os-hypervisors +* Add test coverage of existing hypervisors policies +* Pass the actual target in os-agents policy +* Add new default roles in os-hypervisors policies +* Add new default roles in os-agents policies +* Fix unlock server policy to be admin\_or\_owner +* Pass the actual target in os-instance-usage-audit-log policy +* Add new default roles in os-instance-usage-audit-log policies +* FUP for Add a placement audit command +* Add instance actions v284 samples 
test +* Add new default roles in os-ips policies +* Introduce scope\_types in os-ips +* Add test coverage of existing ips policies +* Fix os-ips policy to be admin\_or\_owner +* Enable and use COMPUTE\_ACCELERATORS trait +* Expose instance action event details out of the API +* Add default cpu model for AArch64 +* Introduce scope\_types in os-instance-usage-audit-log +* Add test coverage of existing instance usage log policies +* libvirt: Use virDomainBlockCopy to swap volumes when using -blockdev +* [Community goal] Update contributor documentation +* Enable start/stop of instances with accelerators +* Enable hard/soft reboot with accelerators +* Delete ARQs for an instance when the instance is deleted +* Add transform\_image\_metadata request filter +* libvirt: Use domain capabilities to get supported device models +* tests: work around malformed serial XML +* func tests: move \_run\_periodics() into base class +* [Trivial] fixing some nits in instance actions policy tests +* Compose accelerator PCI devices into domain XML in libvirt driver +* Pass accelerator requests to each virt driver from compute manager +* Create and bind Cyborg ARQs +* Add Cyborg device profile groups to request spec +* ksa auth conf and client for Cyborg access +* nova-live-migration: Only stop n-cpu and q-agt during evacuation testing +* Store instance action event exc\_val fault details +* Make serialize\_args handle exception messages safely +* libvirt: Fix unit test error block info on non x86 architecture +* Add config option for neutron client retries +* nova-live-migration: Ensure subnode is fenced during evacuation testing +* Add new default roles in os-instance-actions policies +* Add new default roles in os-flavor-access policies +* Add service version check for evacuate with qos +* Add service version check for live migrate with qos +* Enable unshelve with qos ports +* Support unshelve with qos ports +* Bump python-subunit minimum to 1.4.0 +* Introduce scope\_types in os-flavor-access +* Add test coverage of existing flavor\_access policies +* Switching new default roles in os-volumes-attachments policies +* bug-fix: Reject live migration with vpmem +* Refine and introduce correct parameters for test\_get\_guest\_config\_numa\_host\_instance\_topo\_cpu\_pinning +* Ensures that COMPUTE\_RESOURCE\_SEMAPHORE usage is fair +* Fix intermittently failing regression case +* nova-live-migration: Wait for n-cpu services to come up after configuring Ceph +* libvirt: Use oslo.utils >= 4.1.0 to fetch format-specific image data +* libvirt: Correctly resize encrypted LUKSv1 volumes +* virt: Pass request context to extend\_volume +* images: Allow the output format of qemu-img info to be controlled +* images: Move qemu-img info calls into privsep +* Non-Admin user can filter their instances by more filters +* Cleanup test for system reader and reader\_or\_owner rules +* Run sdk functional tests on nova changes +* Deprecate the vmwareapi driver +* Use fair locks in resource tracker +* trivial: Use 'from foo import bar' +* libvirt: don't log error if guest gone during interface detach +* [Trivial] Fix code comment of admin password tests +* nit: Fix NOTE error of fatal=False +* Lowercase ironic driver hash ring and ignore case in cache +* Add new default roles in os-atttach-inerfaces policies +* trivial: Rename directory for os-keypairs samples +* Fix os-keypairs pagination links +* Introduce scope\_types in os-instance-action policy +* Validate id as integer for os-aggregates +* Introduce scope\_types in os-aggregates 
policy +* Introduce scope\_types in os-volumes-attachments policy +* Add test coverage of existing os-volumes-attachments policies +* Fix os-volumes-attachments policy to be admin\_or\_owner +* Catch exception when use invalid architecture of image +* Introduce scope\_types in os-create-backup +* Add test coverage of existing create\_backup policies +* Fix os-create-backup policy to be admin\_or\_owner +* Introduce scope\_types in os-console-output +* Add test coverage of existing console\_output policies +* Introduce scope\_types in os-deferred\_delete +* Add a tests to check when legacy access is removed +* Add new default roles in os-admin-password policies +* Introduce scope\_types in os-admin-password +* Add test coverage of existing os-instance-actions policies +* Correct the actual target in os-instance-actions policy +* Add new default roles in os-create-backup policies +* Add new default roles in os-console-output policies +* Add new default roles in os-deferred\_delete policies +* Fix os-console-output policy to be admin\_or\_owner +* Stop using PlacementDirect +* Introduce scope\_types in os-attach-interfaces +* Add test coverage of existing attach\_interfaces policies +* Introduce scope\_types in os-console-auth-tokens +* Remove oslo\_db.sqlalchemy.compat reference +* libvirt: Remove native LUKS compat code +* hyper-v: update support matrix +* functional: Avoid race and fix use of self.api within test\_bug\_1831771 +* Add test coverage of existing deferred\_delete policies +* Fix os-os-deferred-delete policy to be admin\_or\_owner +* Remove old policy enforcement in attach\_interfaces +* Introduce scope\_types in os-agents policy +* Add test coverage of existing os-console-auth-tokens policies +* Pass the actual target in os-availability-zone policy +* Ensure we pass a target in admin actions +* Fix two test cases that use side effects in comprehensions +* Add new default roles in Admin Action API policies +* Pass the actual target in os-assisted\_volume\_snapshots policy +* Add new default roles in os-assisted\_volume\_snapshots policies +* Introduce scope\_types in os-assisted\_volume\_snapshots policy +* Add test coverage of existing os-assisted\_volume\_snapshots policies +* Fix os-attach-interfaces policy to be admin\_or\_owner +* Add test coverage of existing os-agents policies +* Define Cyborg ARQ binding notification event +* Fix H702 pep8 error with latest hacking +* libvirt: Provide the backing file format when creating qcow2 disks +* Unplug VIFs as part of cleanup of networks +* Name Enums +* Remove unnecessary parentheses +* Functional test for UnexpectedDeletingTaskStateError +* Avoid allocation leak when deleting instance stuck in BUILD +* Fix hypervisors paginted collection\_name +* Enforce os-traits/SUPPORTED\_STORAGE\_BUSES sync +* libvirt: Report storage bus traits +* trivial: Update '\_get\_foo\_traits' docstrings +* Follow-up: Add delete\_on\_termination to volume-attach API +* libvirt: Check the guest support UEFI +* Avoid PlacementFixture silently swallowing kwargs +* trivial: Use recognized extra specs in tests +* Use tempest-full-py3 as base job +* docs: Improve documentation on writing custom scheduler filters +* conf: Deprecate '[scheduler] driver' +* trivial: Remove FakeScheduler +* nova-net: Remove unused parameters +* nova-net: Remove unused nova-network objects +* nova-net: Remove unnecessary exception handling, mocks +* Remove 'nova.image.api' module +* Introduce scope\_types in os-evacuate +* Add test coverage of existing evacuate policies +* 
Reject boot request for unsupported images +* Absolutely-non-inheritable image properties +* Add JSON schema and test for network\_data.json +* Support large network queries towards neutron +* Add new default roles in os-availability-zone policies +* Introduce scope\_types in os-availability-zone +* Add test coverage of existing availability-zone policies +* Correct os-availability-zone policy check\_str +* Monkey patch original current\_thread \_active +* Allow TLS ciphers/protocols to be configurable for console proxies +* Skip to run all integration jobs for policy-only changes +* set default value to 0 instead of '' +* Clean up allocation if unshelve fails due to neutron +* Add test coverage of existing os-aggregates policies +* Reproduce bug 1862633 +* Add test coverage of existing admin\_password policies * Fix instance.hidden migration and querying +* Remove universal wheel configuration +* trivial: Remove 'run\_once' helper +* trivial: Merge unnecessary 'NovaProxyRequestHandlerBase' separation * libvirt: Rename \_is\_storage\_shared\_with to \_is\_path\_shared\_with * Don't error out on floating IPs without associated ports +* Deprecate base rules in favor of new rules +* trivial: Bump minimum version of websockify +* trivial: Fetch 'Service' objects once when building AZs +* trivial: Remove unused 'cache\_utils' APIs +* remove DISTINCT ON SQL instruction that does nothing on MySQL * Minor improvements to cell commands * Avoid calling neutron for N networks * Handle neutron without the fip-port-details extension +* Handle unset 'connection\_info' * Enable live migration with qos ports * Use common server create function for qos func tests * Remove extra instance.save() calls related to qos SRIOV ports * docs: Fix the monkeypatching of blockdiag +* tests: Validate huge pages +* Recalculate 'RequestSpec.numa\_topology' on resize +* Add a placement audit command * Use COMPUTE\_SAME\_HOST\_COLD\_MIGRATE trait during migrate +* Make RBD imagebackend flatten method idempotent * Avoid fetching metadata when no subnets found +* zuul: Add Fedora based jobs to the experimental queue * libvirt: Add a default VirtIO-RNG device to guests * Remove remaining Python 2.7-only dependencies * nova-net: Update API reference guide @@ -21,6 +304,7 @@ * Remove unused code * functional: Add '\_create\_server' helper * Make removal of host from aggregate consistent +* Clarify fitting hugepages log message * Add ironic hypervisor doc * Fix typos for update\_available\_resource reference * nova-net: Remove layer of indirection in 'nova.network' @@ -55,6 +339,7 @@ * [Trivial]Fix typo instnace * Handle cell failures in get\_compute\_nodes\_by\_host\_or\_node * Fix an invalid assertIsNotNone statement +* Add description of live\_migration\_timeout\_action option * [api-ref] Fix the incorrect link * FUP to Iff8194c868580facb1cc81b5567d66d4093c5274 * FUP for docs nits in cross-cell-resize series @@ -82,15 +367,18 @@ * Support live migration with qos ports * Zuul v3: use devstack-plugin-nfs-tempest-full * Add recreate test for bug 1855927 +* FUP: Remove noqa and tone down an exception * nova-net: Correct some broken VIF tests * nova-net: Remove nova-network security group driver * nova-net: Remove 'is\_neutron\_security\_groups' function * nova-net: Convert remaining unit tests to neutron +* Use reasonable name for provider mapping * DRY: Build ImageMetaPropsPayload from ImageMetaProps * api-ref: avoid mushy wording around server.image description * Sync ImageMetaPropsPayload fields * Move 
\_update\_pci\_request\_spec\_with\_allocated\_interface\_name * Revert "(Temporarily) readd bare support for py27" +* db: Remove unused ec2 DB APIs * Create instance action when burying in cell0 * Do not reschedule on ExternalNetworkAttachForbidden * libvirt: flatten rbd image during cross-cell move spawn at dest @@ -265,6 +553,7 @@ * Add functional recreate test for bug 1852610 * Convert legacy nova-live-migration and nova-multinode-grenade to py3 * docs: update SUSPENDED server status wrt supported drivers +* api-ref: mark device response param as optional for list/show vol attachments * doc: add troubleshooting guide for cleaning up orphaned allocations * Remove functional test specific nova code * "SUSPENDED" description changed in server\_concepts guide and API REF @@ -347,6 +636,7 @@ * Require Migration object arg to migrate\_instance\_finish method * Add image precaching docs for aggregates * Remove fixed sqlalchemy-migrate deprecation warning filters +* doc: note the need to configure cinder auth in reclaim\_instance\_interval * Fix listing deleted servers with a marker * Add functional regression test for bug 1849409 * Added openssh-client into bindep @@ -483,6 +773,7 @@ * docs: Document how to revert, confirm a cold migration * docs: Update CPU topologies guide to reflect the new PCPU world * docs: Clarify everything CPU pinning +* VMware VMDK detach: get adapter type from instance VM * Add a prelude for the Train release * Correct link to placement upgrade notes * Move HostNameWeigher to a common fixture @@ -1789,6 +2080,7 @@ * SIGHUP n-cpu to clear provider tree cache * libvirt: Refactor handling of PCIe root ports * Fix misuse of assertTrue +* Workaround a race initialising version control in db\_version() * Make [cinder]/catalog\_info no longer require a service\_name * Remove get\_node\_uuid * Restore nova-consoleauth to install docs @@ -2262,6 +2554,7 @@ * Reload oslo\_context after calling monkey\_patch() * Fix comments in \_anchors\_for\_sharing\_providers and related test * Ensure the order of AllocationRequestResources +* Don't overwrite greenthread-local context in host manager * libvirt: Remove usage of migrateToURI{2} APIs * Remove unnecessary PlacementFixture setups * Don't poison Host.\_init\_events if it's already mocked diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/CONTRIBUTING.rst nova-21.0.0~b3~git2020041013.57ff308d6d/CONTRIBUTING.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/CONTRIBUTING.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/CONTRIBUTING.rst 2020-04-10 17:57:57.000000000 +0000 @@ -1,16 +1,19 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: +The source repository for this project can be found at: - https://docs.openstack.org/infra/manual/developers.html + https://opendev.org/openstack/nova -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: +Pull requests submitted through GitHub are not monitored. - https://docs.openstack.org/infra/manual/developers.html#development-workflow +To start contributing to OpenStack, follow the steps in the contribution guide +to set up and use Gerrit: -Pull requests submitted through GitHub will be ignored. 
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html -Bugs should be filed on Launchpad, not GitHub: +Bugs should be filed on Launchpad: https://bugs.launchpad.net/nova + +For more specific information about contributing to this repository, see the +Nova contributor guide: + + https://docs.openstack.org/nova/latest/contributor/contributing.html diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/changelog nova-21.0.0~b3~git2020041013.57ff308d6d/debian/changelog --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/changelog 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/changelog 2020-04-17 08:48:19.000000000 +0000 @@ -1,3 +1,48 @@ +nova (2:21.0.0~b3~git2020041013.57ff308d6d-0ubuntu2) focal; urgency=medium + + * d/tests/nova-daemons: Skip validation that nova-scheduler is + running; this service requires configuration of both keystone and + the placement service, which is beyond the scope of a single unit + autopkgtest. + * d/tests/control: Install nova-spiceproxy instead of nova-novncproxy + during testing as SPICE is the console option supported in Ubuntu + main. + + -- James Page Fri, 17 Apr 2020 09:48:19 +0100 + +nova (2:21.0.0~b3~git2020041013.57ff308d6d-0ubuntu1) focal; urgency=medium + + * New upstream snapshot for OpenStack Ussuri. + * d/p/revert-generalize-db-conf-group-copying.patch: Dropped. Not needed + with python3-oslo.db > 4.34.0. + * d/p/drop-sphinxcontrib-rsvgconverter.patch: Rebased. + * d/p/skip-ssl-tests.patch: Dropped. No longer needed. + * d/p/arm-console-patch.patch: Rebased. + * d/p/monkey-patch-original-current-thread-active.patch: Dropped. Fixed + in snapshot. + + -- Corey Bryant Fri, 10 Apr 2020 14:00:26 -0400 + +nova (2:21.0.0~b3~git2020032515.35240b0d8c-0ubuntu2) focal; urgency=medium + + * d/nova-common.postinst: Set ownership and permissions for all /var/lib + files and directories. + * d/nova-compute-libvirt.postinst: Add libvirt-qemu user to nova group + to ensure access to /var/lib/ (LP: #1870415). + + -- Corey Bryant Thu, 02 Apr 2020 15:08:40 -0400 + +nova (2:21.0.0~b3~git2020032515.35240b0d8c-0ubuntu1) focal; urgency=medium + + * New upstream snapshot for OpenStack Ussuri. + * d/control: Align (Build-)Depends with upstream. + * d/p/libvirt-provide-backing-file-format-creating-qcow2.patch: Drop, + included upstream. + * d/nova-common.postinst: Refactor to standardise /etc, /var/lib and + /var/log ownership and permissions (LP: #1859422). 
+ + -- James Page Thu, 26 Mar 2020 12:05:22 +0000 + nova (2:21.0.0~b2~git2020021008.1fcd74730d-0ubuntu5) focal; urgency=medium * No change rebuild with new version of openstack-pkg-tools to resolve diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/control nova-21.0.0~b3~git2020041013.57ff308d6d/debian/control --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/control 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/control 2020-04-17 08:48:19.000000000 +0000 @@ -69,7 +69,7 @@ python3-os-win (>= 3.0.0), python3-os-xenapi (>= 0.3.3), python3-oslo.cache (>= 1.26.0), - python3-oslo.concurrency (>= 3.26.0), + python3-oslo.concurrency (>= 3.29.0), python3-oslo.config (>= 1:6.1.0), python3-oslo.context (>= 1:2.21.0), python3-oslo.db (>= 4.44.0), @@ -121,7 +121,7 @@ python3-testtools (>= 2.2.0), python3-tooz (>= 1.58.0), python3-webob (>= 1:1.8.2), - python3-websockify (>= 0.8.0), + python3-websockify (>= 0.9.0), python3-wsgi-intercept (>= 1.7.0), python3-zvmcloudconnector (>= 1.3.0), Build-Conflicts: @@ -728,7 +728,7 @@ python3-os-win (>= 3.0.0), python3-os-xenapi (>= 0.3.3), python3-oslo.cache (>= 1.26.0), - python3-oslo.concurrency (>= 3.26.0), + python3-oslo.concurrency (>= 3.29.0), python3-oslo.config (>= 1:6.1.0), python3-oslo.context (>= 1:2.21.0), python3-oslo.db (>= 4.44.0), @@ -766,7 +766,7 @@ python3-taskflow (>= 2.16.0), python3-tooz (>= 1.58.0), python3-webob (>= 1:1.8.2), - python3-websockify (>= 0.8.0), + python3-websockify (>= 0.9.0), python3-zvmcloudconnector (>= 1.3.0), ${misc:Depends}, ${python3:Depends}, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/nova-common.postinst nova-21.0.0~b3~git2020041013.57ff308d6d/debian/nova-common.postinst --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/nova-common.postinst 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/nova-common.postinst 2020-04-17 08:48:19.000000000 +0000 @@ -21,52 +21,20 @@ if [ -z "$2" ]; then # New install - blanket permissions chown -R nova:nova /var/lib/nova/ - elif dpkg --compare-versions "$2" lt "2011.3-0ubuntu4"; then - # make sure that LXC rootfs mount points are excleuded - # during upgrades from previous versions - find /var/lib/nova/ -name 'rootfs' -prune -o \ - -group root -a -user nova -exec chown nova:nova {} \; - find /var/lib/nova/ -name 'rootfs' -prune -o \ - -group nogroup -a -user nova -exec chown nova:nova {} \; - elif dpkg --compare-versions "$2" lt "2012.2~f1~20120503.13935-0ubuntu1"; then - # convert the root_helper to rootwrap_config - sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf fi - chown -R nova:adm /var/log/nova + chown nova:adm /var/log/nova chmod 0750 /var/log/nova - if [ -z "$2" ]; then - # New install - blanket permissions - chown -R nova:nova /var/lib/nova/ - elif dpkg --compare-versions "$2" lt "2011.3-0ubuntu4"; then - # Make sure the LXC rootfs mount points are excluded - # during upgrades from previous versions - find /var/lib/nova/ -name 'rootfs' -prune -o \ - -group root -a -user nova -exec chown nova:nova {} \; - find /var/lib/nova/ -name 'rootfs' -prune -o \ - -group nogroup -a -user nova -exec chown nova:nova {} \; - fi + find /etc/nova -exec chown root:nova "{}" + + find /etc/nova -type f -exec chmod 0640 "{}" + -o -type d -exec chmod 0750 "{}" + - chown root:nova /etc/nova - chmod 0750 /etc/nova + # Optional rootwrap.d configuration files. 
+ find /etc/nova/rootwrap.d -exec chown root:root "{}" + + find /etc/nova/rootwrap.d -type f -exec chmod 0644 "{}" + -o -type d -exec chmod 0755 "{}" + - chown root:nova /etc/nova/*.conf /etc/nova/*.ini - chmod 0640 /etc/nova/*.conf /etc/nova/*.ini - - chown root:root /etc/nova/rootwrap.d /etc/nova/rootwrap.d/* - chmod 0755 /etc/nova/rootwrap.d - - if [ -e /var/lib/nova/nova.sqlite ] - then - chown nova:nova /var/lib/nova/nova.sqlite - chmod 0640 /var/lib/nova/nova.sqlite - fi - if [ -e /var/lib/nova/nova_api.sqlite ] - then - chown nova:nova /var/lib/nova/nova_api.sqlite - chmod 0640 /var/lib/nova/nova_api.sqlite - fi + find /var/lib/nova -exec chown nova:nova "{}" + + find /var/lib/nova -type f -exec chmod 0640 "{}" + -o -type d -exec chmod 0750 "{}" + fi #DEBHELPER# diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/nova-compute-libvirt.postinst nova-21.0.0~b3~git2020041013.57ff308d6d/debian/nova-compute-libvirt.postinst --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/nova-compute-libvirt.postinst 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/nova-compute-libvirt.postinst 2020-04-17 08:48:19.000000000 +0000 @@ -4,16 +4,23 @@ # libvirt switched from libvirtd group to libvirt group in 1.3.3 libvirt_group=$(getent group libvirt) || true libvirtd_group=$(getent group libvirtd) || true + libvirt_qemu_user=$(getent passwd libvirt-qemu) || true + nova_group=$(getent group nova) || true - if [ $libvirt_group ]; then + if [ "$libvirt_group" ]; then if ! echo $libvirt_group | grep -qE '\<nova\>'; then adduser nova libvirt fi - elif [ $libvirtd_group ]; then + elif [ "$libvirtd_group" ]; then if ! echo $libvirtd_group | grep -qE '\<nova\>'; then adduser nova libvirtd fi fi + if [ "$libvirt_qemu_user" ]; then + if ! echo $nova_group | grep -qE '\<libvirt-qemu\>'; then + adduser libvirt-qemu nova + fi + fi fi #DEBHELPER# diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/arm-console-patch.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/arm-console-patch.patch --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/arm-console-patch.patch 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/arm-console-patch.patch 2020-04-17 08:48:19.000000000 +0000 @@ -1,6 +1,6 @@ --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py -@@ -2624,7 +2624,7 @@ +@@ -2700,7 +2700,7 @@ self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(fields.VMMode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) @@ -9,7 +9,7 @@ cfg.os_cmdline) self.assertEqual("OpenStack Nova", cfg.os_init_env['product_name']) self.assertIsNone(cfg.os_root) -@@ -2651,7 +2651,7 @@ +@@ -2727,7 +2727,7 @@ self.assertEqual(instance_ref.vcpus, cfg.vcpus) self.assertEqual(fields.VMMode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/drop-sphinxcontrib-rsvgconverter.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/drop-sphinxcontrib-rsvgconverter.patch --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/drop-sphinxcontrib-rsvgconverter.patch 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/drop-sphinxcontrib-rsvgconverter.patch 2020-04-17 08:48:19.000000000 +0000 @@ -5,8 +5,8 @@ --- a/doc/source/conf.py +++ b/doc/source/conf.py -@@ -43,7 +43,6 @@ - 'ext.feature_matrix', +@@ -44,7 +44,6 @@ + 'ext.extra_specs', 'sphinxcontrib.actdiag', 
'sphinxcontrib.seqdiag', - 'sphinxcontrib.rsvgconverter', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/libvirt-provide-backing-file-format-creating-qcow2.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/libvirt-provide-backing-file-format-creating-qcow2.patch --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/libvirt-provide-backing-file-format-creating-qcow2.patch 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/libvirt-provide-backing-file-format-creating-qcow2.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,85 +0,0 @@ -From 0cfe9c81e3fe4d268c9949c6b8e873abab94a407 Mon Sep 17 00:00:00 2001 -From: Lee Yarwood -Date: Wed, 19 Feb 2020 20:39:54 +0000 -Subject: [PATCH] libvirt: Provide the backing file format when creating qcow2 - disks - -Libvirt v6.0.0 [1] will now fail to launch a domain when using qcow2 -disks where the backing file format is not recorded in the qcow2 -metadata. - -There are some discussions upstream around relaxing this slightly [2] -but for now any attempt to launch an instance using qcow2 disks will -fail as Nova does not populate this value when creating the disk. - -Nova needs to at a minimum start populating this field and depending on -the outcome of the thread upstream in Libvirt also potentially handle -the upgrade case where we may need to rebase existing disks in order to -update the metadata. - -For now this change simply adds the backing_fmt option to the qemu-img -command line used to create these disks. - -[1] https://github.com/libvirt/libvirt/commit/3615e8b39badf2a526996a69dc91a92b04cf262e -[2] https://www.redhat.com/archives/libvir-list/2020-February/msg00616.html - -Partial-Bug: #1864020 -Change-Id: I77ebada015f6522a300be4fa043fb8676458402b ---- - nova/tests/unit/virt/libvirt/test_utils.py | 23 +++++++++++++--------- - nova/virt/libvirt/utils.py | 3 ++- - 2 files changed, 16 insertions(+), 10 deletions(-) - -diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py -index 9c146b5104..2888078cde 100644 ---- a/nova/tests/unit/virt/libvirt/test_utils.py -+++ b/nova/tests/unit/virt/libvirt/test_utils.py -@@ -354,16 +354,21 @@ ID TAG VM SIZE DATE VM CLOCK - - @mock.patch('os.path.exists', return_value=True) - @mock.patch('oslo_concurrency.processutils.execute') -- def test_create_cow_image(self, mock_execute, mock_exists): -+ @mock.patch('nova.virt.images.qemu_img_info') -+ def test_create_cow_image(self, mock_info, mock_execute, mock_exists): - mock_execute.return_value = ('stdout', None) -- libvirt_utils.create_cow_image('/some/path', '/the/new/cow') -- expected_args = [(('env', 'LC_ALL=C', 'LANG=C', -- 'qemu-img', 'info', '/some/path'), -- {'prlimit': images.QEMU_IMG_LIMITS}), -- (('qemu-img', 'create', '-f', 'qcow2', -- '-o', 'backing_file=/some/path', -- '/the/new/cow'),)] -- self.assertEqual(expected_args, mock_execute.call_args_list) -+ mock_info.return_value = mock.Mock( -+ file_format=mock.sentinel.backing_fmt, -+ cluster_size=mock.sentinel.cluster_size) -+ libvirt_utils.create_cow_image(mock.sentinel.backing_path, -+ mock.sentinel.new_path) -+ mock_info.assert_called_once_with(mock.sentinel.backing_path) -+ mock_execute.assert_has_calls([mock.call( -+ 'qemu-img', 'create', '-f', 'qcow2', '-o', -+ 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % ( -+ mock.sentinel.backing_path, mock.sentinel.backing_fmt, -+ mock.sentinel.cluster_size), -+ mock.sentinel.new_path)]) - - @ddt.unpack - @ddt.data({'fs_type': 'some_fs_type', 
-diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py -index 69ca21b9bf..0c04833892 100644 ---- a/nova/virt/libvirt/utils.py -+++ b/nova/virt/libvirt/utils.py -@@ -114,8 +114,9 @@ def create_cow_image(backing_file, path, size=None): - base_cmd = ['qemu-img', 'create', '-f', 'qcow2'] - cow_opts = [] - if backing_file: -- cow_opts += ['backing_file=%s' % backing_file] - base_details = images.qemu_img_info(backing_file) -+ cow_opts += ['backing_file=%s' % backing_file] -+ cow_opts += ['backing_fmt=%s' % base_details.file_format] - else: - base_details = None - # Explicitly inherit the value of 'cluster_size' property of a qcow2 --- -2.25.0 - diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/monkey-patch-original-current-thread-active.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/monkey-patch-original-current-thread-active.patch --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/monkey-patch-original-current-thread-active.patch 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/monkey-patch-original-current-thread-active.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -From 2078ef850c93b47ec1bcd044450b589d7a8fc0b5 Mon Sep 17 00:00:00 2001 -From: Corey Bryant -Date: Wed, 12 Feb 2020 16:32:40 -0500 -Subject: [PATCH] Monkey patch original current_thread _active - -Monkey patch the original current_thread to use the up-to-date _active -global variable. This solution is based on that documented at: -https://github.com/eventlet/eventlet/issues/592 - -Change-Id: I4872169413f27aeaff8d8fdfa5cdaf6ee32f4680 -Closes-Bug: #1863021 ---- - nova/monkey_patch.py | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/nova/monkey_patch.py b/nova/monkey_patch.py -index a07ff91dac..1a610248ad 100644 ---- a/nova/monkey_patch.py -+++ b/nova/monkey_patch.py -@@ -59,6 +59,13 @@ def _monkey_patch(): - else: - eventlet.monkey_patch() - -+ # Monkey patch the original current_thread to use the up-to-date _active -+ # global variable. See https://bugs.launchpad.net/bugs/1863021 and -+ # https://github.com/eventlet/eventlet/issues/592 -+ import __original_module_threading as orig_threading -+ import threading -+ orig_threading.current_thread.__globals__['_active'] = threading._active -+ - # NOTE(rpodolyaka): import oslo_service first, so that it makes eventlet - # hub use a monotonic clock to avoid issues with drifts of system time (see - # LP 1510234 for details) --- -2.25.0 - diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/revert-generalize-db-conf-group-copying.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/revert-generalize-db-conf-group-copying.patch --- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/revert-generalize-db-conf-group-copying.patch 2020-03-09 10:09:54.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/revert-generalize-db-conf-group-copying.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -Description: Revert upstream commit due to 'nova-manage api_db sync' error. - This reverts commit 910008e2ef5dae1698ff7db791f4816c728c8bd0. 
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/revert-generalize-db-conf-group-copying.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/revert-generalize-db-conf-group-copying.patch
--- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/revert-generalize-db-conf-group-copying.patch	2020-03-09 10:09:54.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/revert-generalize-db-conf-group-copying.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
-Description: Revert upstream commit due to 'nova-manage api_db sync' error.
- This reverts commit 910008e2ef5dae1698ff7db791f4816c728c8bd0.
-Author: Corey Bryant
-Bug-Ubuntu: https://bugs.launchpad.net/bugs/1746530
-
----
- nova/db/sqlalchemy/api.py | 19 ++++++++++++++++---
- 1 file changed, 16 insertions(+), 3 deletions(-)
-
---- a/nova/db/sqlalchemy/api.py
-+++ b/nova/db/sqlalchemy/api.py
-@@ -82,9 +82,22 @@ api_context_manager = enginefacade.trans
-
-
- def _get_db_conf(conf_group, connection=None):
--    kw = dict(conf_group.items())
--    if connection is not None:
--        kw['connection'] = connection
-+    kw = dict(
-+        connection=connection or conf_group.connection,
-+        slave_connection=conf_group.slave_connection,
-+        sqlite_fk=False,
-+        __autocommit=True,
-+        expire_on_commit=False,
-+        mysql_sql_mode=conf_group.mysql_sql_mode,
-+        connection_recycle_time=conf_group.connection_recycle_time,
-+        connection_debug=conf_group.connection_debug,
-+        max_pool_size=conf_group.max_pool_size,
-+        max_overflow=conf_group.max_overflow,
-+        pool_timeout=conf_group.pool_timeout,
-+        sqlite_synchronous=conf_group.sqlite_synchronous,
-+        connection_trace=conf_group.connection_trace,
-+        max_retries=conf_group.max_retries,
-+        retry_interval=conf_group.retry_interval)
-     return kw
-
-
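For context on the hunk above: both variants of _get_db_conf() just build the kwargs dict that nova hands to an oslo.db enginefacade context manager (the api_context_manager visible in the hunk header). A rough usage sketch under that assumption, with an abridged option set and a types.SimpleNamespace standing in for the real oslo.config group:

    import types

    from oslo_db.sqlalchemy import enginefacade


    def _get_db_conf(conf_group, connection=None):
        # Post-revert shape: copy an explicit list of known options rather
        # than everything in the group (abridged to four keys here).
        return dict(
            connection=connection or conf_group.connection,
            slave_connection=conf_group.slave_connection,
            max_pool_size=conf_group.max_pool_size,
            max_retries=conf_group.max_retries)


    conf_group = types.SimpleNamespace(
        connection='sqlite://', slave_connection=None,
        max_pool_size=5, max_retries=10)

    api_context_manager = enginefacade.transaction_context()
    api_context_manager.configure(**_get_db_conf(conf_group))

The generic dict(conf_group.items()) form copies every option in the group; the explicit whitelist keeps options that enginefacade.configure() does not accept from leaking through, which appears to be the 'nova-manage api_db sync' failure the Description refers to.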
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/series nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/series
--- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/series	2020-03-09 10:09:54.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/series	2020-04-17 08:48:19.000000000 +0000
@@ -1,9 +1,5 @@
 # Ubuntu specific patches below here. Note these can be dropped eventually.
 drop-sphinxcontrib-rsvgconverter.patch
 drop-sphinx-feature-classification.patch
-skip-ssl-tests.patch
 arm-console-patch.patch
-revert-generalize-db-conf-group-copying.patch
 add-mysql8-compatibility.patch
-monkey-patch-original-current-thread-active.patch
-libvirt-provide-backing-file-format-creating-qcow2.patch
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/skip-ssl-tests.patch nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/skip-ssl-tests.patch
--- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/patches/skip-ssl-tests.patch	2020-03-09 10:09:54.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/patches/skip-ssl-tests.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,17 +0,0 @@
-Description: Skip SSL tests that are timing out.
-Author: Corey Bryant
-Bug-Ubuntu: https://bugs.launchpad.net/bugs/1482633
-Forwarded: no
-Last-Update: 2017-08-23
-
---- a/nova/tests/unit/test_wsgi.py
-+++ b/nova/tests/unit/test_wsgi.py
-@@ -226,7 +226,7 @@ class TestWSGIServer(test.NoDBTestCase):
-         server.stop()
-
-
---@testtools.skipIf(six.PY3, "bug/1482633: test hangs on Python 3")
--+@testtools.skip("bug/1482633: test hangs on Python 3 and Python 2.7 for Ubuntu package builds")
- class TestWSGIServerWithSSL(test.NoDBTestCase):
-     """WSGI server with SSL tests."""
-
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/tests/control nova-21.0.0~b3~git2020041013.57ff308d6d/debian/tests/control
--- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/tests/control	2020-03-09 10:09:54.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/tests/control	2020-04-17 08:48:19.000000000 +0000
@@ -4,7 +4,7 @@
  nova-api,
  nova-compute,
  nova-conductor,
- nova-novncproxy,
+ nova-spiceproxy,
  nova-scheduler,
  rabbitmq-server,
 Restrictions: needs-root, allow-stderr
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/debian/tests/nova-daemons nova-21.0.0~b3~git2020041013.57ff308d6d/debian/tests/nova-daemons
--- nova-21.0.0~b2~git2020021008.1fcd74730d/debian/tests/nova-daemons	2020-03-09 10:09:54.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/debian/tests/nova-daemons	2020-04-17 08:48:19.000000000 +0000
@@ -3,7 +3,7 @@
 # Testing nova-daemons
 #---------------------
 set -e
-DAEMONS=('nova-scheduler' 'nova-api')
+DAEMONS=('nova-api')
 ret=0
 mysql -u root << EOF
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json	2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json	2020-04-10 17:57:57.000000000 +0000
@@ -1,6 +1,6 @@
 {
     "extra_specs": {
-        "key1": "value1",
-        "key2": "value2"
+        "hw:cpu_policy": "shared",
+        "hw:numa_nodes": "1"
     }
-}
\ No newline at end of file
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json	2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json	2020-04-10 17:57:57.000000000 +0000
@@ -1,6 +1,6 @@
 {
     "extra_specs": {
-        "key1": "value1",
-        "key2": "value2"
+        "hw:cpu_policy": "shared",
+        "hw:numa_nodes": "1"
     }
-}
\ No newline at end of file
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json	2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json	2020-04-10 17:57:57.000000000 +0000
@@ -1,3 +1,3 @@
 {
-    "key1": "value1"
-}
\ No newline at end of file
+    "hw:numa_nodes": "1"
+}
diff -Nru
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } -} \ No newline at end of file +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -1,3 +1,3 @@ { - "key1": "new_value1" -} \ No newline at end of file + "hw:numa_nodes": "2" +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -1,3 +1,3 @@ { - "key1": "new_value1" -} \ No newline at end of file + "hw:numa_nodes": "2" +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.61/flavor-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.61/flavor-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.61/flavor-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.61/flavor-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -22,8 +22,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.61/flavors-detail-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.61/flavors-detail-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.61/flavors-detail-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.61/flavors-detail-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -169,8 +169,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.75/flavor-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.75/flavor-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.75/flavor-get-resp.json 2020-02-10 08:50:32.000000000 
+0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.75/flavor-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -22,8 +22,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.75/flavors-detail-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.75/flavors-detail-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/flavors/v2.75/flavors-detail-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/flavors/v2.75/flavors-detail-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -169,8 +169,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -{ - "keypair": { - "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", - "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", - "user_id": "fake", - "deleted": false, - "created_at": "2014-05-07T12:06:13.681238", - "updated_at": null, - "deleted_at": null, - "id": 1 - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-import-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-import-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -{ - "keypair": { - "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-import-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -{ - "keypair": { - "fingerprint": 
"1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", - "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", - "user_id": "fake" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ] -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -{ - "keypair": { - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/keypairs-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -{ - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", - "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", - "user_id": "fake" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -{ - "keypair": { - "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", - "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", - "user_id": "fake", - "deleted": false, - "created_at": "2014-05-07T12:06:13.681238", - "updated_at": null, - "deleted_at": null, - "id": 1 - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json 
nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -{ - "keypair": { - "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", - "user_id": "fake" - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -{ - "keypair": { - "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", - "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", - "user_id": "fake" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ] -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -{ - "keypair": { - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", - "type": "ssh", - "user_id": "fake" - } -} diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -{ - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", - "type": "ssh", - "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", - "user_id": "fake" - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -{ - "keypair": { - "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", - "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", - "type": "ssh", - "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", - "user_id": "fake", - "deleted": false, - "created_at": "2014-05-07T12:06:13.681238", - "updated_at": null, - "deleted_at": null, - "id": 1 - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -{ - "keypair": { - "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -{ - "keypair": { - "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", - "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", - "user_id": "fake" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ] -} \ No newline at end of file diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -{ - "keypair": { - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", - "type": "ssh" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -{ - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", - "type": "ssh", - "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", - "user_id": "fake" - } -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json 
nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ], - "keypairs_links": [ - { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", - "rel": "next" - } - ] -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ] -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -{ - "keypairs": [ - { - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", - "type": "ssh", - "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" - } - } - ], - "keypairs_links": [ - { - "href": 
"http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2", - "rel": "next" - } - ] -} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-post-req.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -{ - "keypair": { - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", - "type": "ssh", - "user_id": "fake" - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -{ - "keypair": { - "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", - "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", - "type": "ssh", - "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", - "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", - "user_id": "fake" - } -} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -42,7 +42,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=2", "rel": "next" } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -9,7 +9,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=2", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=2", "rel": "next" } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -42,7 +42,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -9,7 +9,7 @@ ], "hypervisors_links": [ { - "href": 
"http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,21 @@ +{ + "instanceAction": { + "action": "stop", + "events": [ + { + "event": "compute_stop_instance", + "finish_time": "2018-04-25T01:26:34.784165", + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "result": "Success", + "start_time": "2018-04-25T01:26:34.612020" + } + ], + "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e", + "start_time": "2018-04-25T01:26:34.388280", + "updated_at": "2018-04-25T01:26:34.784165", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "instanceAction": { + "action": "stop", + "events": [ + { + "event": "compute_stop_instance", + "finish_time": "2018-04-25T01:26:36.790544", + "host": "compute", + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "result": "Success", + "start_time": "2018-04-25T01:26:36.539271", + "traceback": null, + "details": null + } + ], + "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b", + "start_time": "2018-04-25T01:26:36.341290", + "updated_at": "2018-04-25T01:26:36.790544", + "user_id": "admin" + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d", + 
"start_time": "2018-04-25T01:26:36.036697", + "updated_at": "2018-04-25T01:26:36.525308", + "user_id": "admin" + }, + { + "action": "create", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", + "start_time": "2018-04-25T01:26:33.692125", + "updated_at": "2018-04-25T01:26:35.993821", + "user_id": "admin" + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,24 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", + "start_time": "2018-04-25T01:26:29.594237", + "updated_at": "2018-04-25T01:26:30.065061", + "user_id": "admin" + }, + { + "action": "create", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", + "start_time": "2018-04-25T01:26:33.692125", + "updated_at": "2018-04-25T01:26:35.993821", + "user_id": "admin" + } + ] +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", + "start_time": "2018-04-25T01:26:29.594237", + "updated_at": "2018-04-25T01:26:30.065061", + "user_id": "admin" + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0", + "message": null, + 
"project_id": "6f70656e737461636b20342065766572", + "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", + "start_time": "2018-04-25T01:26:28.909887", + "updated_at": "2018-04-25T01:26:29.400606", + "user_id": "admin" + } + ], + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", + "rel": "next" + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "instanceActions": [ + { + "action": "create", + "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c", + "start_time": "2018-04-25T01:26:33.710291", + "updated_at": "2018-04-25T01:26:35.374936", + "user_id": "fake" + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,13 @@ +{ + "keypair": { + "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", + "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", + "user_id": "fake", + "deleted": false, + "created_at": "2014-05-07T12:06:13.681238", + "updated_at": null, + "deleted_at": null, + "id": 1 + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-import-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-import-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "keypair": { + "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" + } +} \ No newline at end of file diff 
-Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-import-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "keypair": { + "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", + "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,11 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "keypair": { + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/keypairs-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", + "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "keypair": { + "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", + "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", + "user_id": "fake", + "deleted": false, + "created_at": "2014-05-07T12:06:13.681238", + "updated_at": null, + "deleted_at": null, + "id": 1 + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json 
nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,8 @@ +{ + "keypair": { + "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", + "user_id": "fake" + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "keypair": { + "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", + "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "keypair": { + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", + "type": "ssh", + "user_id": "fake" + } +} diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", + "type": "ssh", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", + "user_id": "fake" + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,14 @@ +{ + "keypair": { + "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", + "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", + "type": "ssh", + "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", + "user_id": "fake", + "deleted": false, + "created_at": "2014-05-07T12:06:13.681238", + "updated_at": null, + "deleted_at": null, + "id": 1 + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "keypair": { + "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,9 @@ +{ + "keypair": { + "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", + "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ] +} \ No newline at 
end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "keypair": { + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", + "type": "ssh" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", + "type": "ssh", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", + "user_id": "fake" + } +} \ No newline at end of file diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ], + "keypairs_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", + "rel": "next" + } + ] +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,12 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", + "type": "ssh", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", + "type": "ssh", + "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" + } + } + ], + "keypairs_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2", + "rel": "next" + } + ] +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "keypair": { + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", + "type": "ssh", + "user_id": "fake" + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "keypair": { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", + "type": "ssh", + "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", + "user_id": "fake" + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,7 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "tag": "foo", + "delete_on_termination": true + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88", + 
"tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "volumeAttachments": [ + { + "delete_on_termination": false, + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": null, + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + }, + { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } + ] +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "delete_on_termination": true + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/update-volume-req.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/update-volume-req.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/update-volume-req.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/update-volume-req.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,5 @@ +{ + "volumeAttachment": { + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/versions/v21-version-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/versions/v21-version-get-resp.json --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/versions/v21-version-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/versions/v21-version-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -19,7 +19,7 @@ } ], "status": "CURRENT", - "version": "2.81", + "version": "2.86", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/versions/versions-get-resp.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/versions/versions-get-resp.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_samples/versions/versions-get-resp.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_samples/versions/versions-get-resp.json 2020-04-10 17:57:57.000000000 +0000 @@ -22,7 +22,7 @@ } ], "status": "CURRENT", - "version": "2.81", + "version": "2.86", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_schemas/config_drive.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_schemas/config_drive.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_schemas/config_drive.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_schemas/config_drive.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,30 @@ +{ + "anyOf": [ + { + "type": "object", + "properties": { + "meta_data": { + "type": "object" + }, + "network_data": { + "type": "object" + }, + "user_data": { + "type": [ + "object", + "array", + "string", + "null" + ] + } + }, + "additionalProperties": false + }, + { + "type": [ + "string", + "null" + ] + } + ] +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_schemas/network_data.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_schemas/network_data.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/api_schemas/network_data.json 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/api_schemas/network_data.json 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,580 @@ +{ + "$schema": "http://openstack.org/nova/network_data.json#", + "id": "http://openstack.org/nova/network_data.json", + "type": "object", + "title": "OpenStack Nova network metadata schema", + "description": "Schema of Nova instance network configuration information", + "required": [ + "links", + "networks", + "services" + ], + "properties": { + "links": { + "$id": "#/properties/links", + "type": "array", + "title": "L2 interfaces settings", + "items": { + "$id": "#/properties/links/items", + "oneOf": [ + { + "$ref": "#/definitions/l2_link" + }, + { + "$ref": "#/definitions/l2_bond" + }, + { + "$ref": "#/definitions/l2_vlan" + } + ] + } + }, + "networks": { + "$id": "#/properties/networks", + "type": "array", + "title": "L3 networks", + "items": { + "$id": "#/properties/networks/items", + "oneOf": [ + { + "$ref": "#/definitions/l3_ipv4_network" + }, + { + "$ref": "#/definitions/l3_ipv6_network" + } + ] + } + }, + "services": { + "$ref": "#/definitions/services" + } + }, + "definitions": { + "l2_address": { + "$id": "#/definitions/l2_address", + "type": "string", + "pattern": "(?i)^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$", + "title": "L2 interface address", + "examples": [ + "fa:16:3e:9c:bf:3d" + ] + }, + "l2_id": { + "$id": "#/definitions/l2_id", + "type": "string", + "title": "L2 interface ID", + "examples": [ + "eth0" + ] + }, + "l2_mtu": { + "$id": "#/definitions/l2_mtu", + "title": "L2 interface MTU", + "anyOf": [ + { 
+ "type": "number", + "minimum": 1, + "maximum": 65535 + }, + { + "type": "null" + } + ], + "examples": [ + 1500 + ] + }, + "l2_vif_id": { + "$id": "#/definitions/l2_vif_id", + "type": "string", + "title": "Virtual interface ID", + "examples": [ + "cd9f6d46-4a3a-43ab-a466-994af9db96fc" + ] + }, + "l2_link": { + "$id": "#/definitions/l2_link", + "type": "object", + "title": "L2 interface configuration settings", + "required": [ + "ethernet_mac_address", + "id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "ethernet_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_link/properties/type", + "type": "string", + "enum": [ + "bridge", + "dvs", + "hw_veb", + "hyperv", + "ovs", + "tap", + "vhostuser", + "vif", + "phy" + ], + "title": "Interface type", + "examples": [ + "bridge" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + } + } + }, + "l2_bond": { + "$id": "#/definitions/l2_bond", + "type": "object", + "title": "L2 bonding interface configuration settings", + "required": [ + "ethernet_mac_address", + "id", + "type", + "bond_mode", + "bond_links" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "ethernet_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_bond/properties/type", + "type": "string", + "enum": [ + "bond" + ], + "title": "Interface type", + "examples": [ + "bond" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + }, + "bond_mode": { + "$id": "#/definitions/bond/properties/bond_mode", + "type": "string", + "title": "Port bonding type", + "enum": [ + "802.1ad", + "balance-rr", + "active-backup", + "balance-xor", + "broadcast", + "balance-tlb", + "balance-alb" + ], + "examples": [ + "802.1ad" + ] + }, + "bond_links": { + "$id": "#/definitions/bond/properties/bond_links", + "type": "array", + "title": "Port bonding links", + "items": { + "$id": "#/definitions/bond/properties/bond_links/items", + "type": "string" + } + } + } + }, + "l2_vlan": { + "$id": "#/definitions/l2_vlan", + "type": "object", + "title": "L2 VLAN interface configuration settings", + "required": [ + "vlan_mac_address", + "id", + "type", + "vlan_link", + "vlan_id" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "vlan_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_vlan/properties/type", + "type": "string", + "enum": [ + "vlan" + ], + "title": "VLAN interface type", + "examples": [ + "vlan" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + }, + "vlan_id": { + "$id": "#/definitions/l2_vlan/properties/vlan_id", + "type": "integer", + "title": "VLAN ID" + }, + "vlan_link": { + "$id": "#/definitions/l2_vlan/properties/vlan_link", + "type": "string", + "title": "VLAN link name" + } + } + }, + "l3_id": { + "$id": "#/definitions/l3_id", + "type": "string", + "title": "Network name", + "examples": [ + "network0" + ] + }, + "l3_link": { + "$id": "#/definitions/l3_link", + "type": "string", + "title": "L2 network link to use for L3 interface", + "examples": [ + "99e88329-f20d-4741-9593-25bf07847b16" + ] + }, + "l3_network_id": { + "$id": "#/definitions/l3_network_id", + "type": "string", + "title": "Network ID", + "examples": [ + "99e88329-f20d-4741-9593-25bf07847b16" + ] + }, + "l3_ipv4_type": { + "$id": 
"#/definitions/l3_ipv4_type", + "type": "string", + "enum": [ + "ipv4", + "ipv4_dhcp" + ], + "title": "L3 IPv4 network type", + "examples": [ + "ipv4_dhcp" + ] + }, + "l3_ipv6_type": { + "$id": "#/definitions/l3_ipv6_type", + "type": "string", + "enum": [ + "ipv6", + "ipv6_dhcp", + "ipv6_slaac" + ], + "title": "L3 IPv6 network type", + "examples": [ + "ipv6_dhcp" + ] + }, + "l3_ipv4_host": { + "$id": "#/definitions/l3_ipv4_host", + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", + "title": "L3 IPv4 host address", + "examples": [ + "192.168.81.99" + ] + }, + "l3_ipv6_host": { + "$id": "#/definitions/l3_ipv6_host", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$", + "title": "L3 IPv6 host address", + "examples": [ + "2001:db8:3:4::192.168.81.99" + ] + }, + "l3_ipv4_netmask": { + "$id": "#/definitions/l3_ipv4_netmask", + "type": "string", + "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$", + "title": "L3 IPv4 network mask", + "examples": [ + "255.255.252.0" + ] + }, + "l3_ipv6_netmask": { + "$id": "#/definitions/l3_ipv6_netmask", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", + "title": "L3 IPv6 network mask", + "examples": [ + "ffff:ffff:ffff:ffff::" + ] + }, + "l3_ipv4_nw": { + "$id": "#/definitions/l3_ipv4_nw", + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", + "title": "L3 IPv4 network address", + "examples": [ + "0.0.0.0" + ] + }, + "l3_ipv6_nw": { + "$id": "#/definitions/l3_ipv6_nw", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", + "title": "L3 IPv6 network address", + "examples": [ + "8000::" + ] + }, + "l3_ipv4_gateway": { + "$id": "#/definitions/l3_ipv4_gateway", + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", + "title": "L3 IPv4 gateway address", + "examples": [ + "192.168.200.1" + ] + }, + 
"l3_ipv6_gateway": { + "$id": "#/definitions/l3_ipv6_gateway", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$", + "title": "L3 IPv6 gateway address", + "examples": [ + "2001:db8:3:4::192.168.81.99" + ] + }, + "l3_ipv4_network_route": { + "$id": "#/definitions/l3_ipv4_network_route", + "type": "object", + "title": "L3 IPv4 routing configuration item", + "required": [ + "gateway", + "netmask", + "network" + ], + "properties": { + "network": { + "$ref": "#/definitions/l3_ipv4_nw" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv4_netmask" + }, + "gateway": { + "$ref": "#/definitions/l3_ipv4_gateway" + }, + "services": { + "$ref": "#/definitions/ipv4_services" + } + } + }, + "l3_ipv6_network_route": { + "$id": "#/definitions/l3_ipv6_network_route", + "type": "object", + "title": "L3 IPv6 routing configuration item", + "required": [ + "gateway", + "netmask", + "network" + ], + "properties": { + "network": { + "$ref": "#/definitions/l3_ipv6_nw" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv6_netmask" + }, + "gateway": { + "$ref": "#/definitions/l3_ipv6_gateway" + }, + "services": { + "$ref": "#/definitions/ipv6_services" + } + } + }, + "l3_ipv4_network": { + "$id": "#/definitions/l3_ipv4_network", + "type": "object", + "title": "L3 IPv4 network configuration", + "required": [ + "id", + "link", + "network_id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l3_id" + }, + "link": { + "$ref": "#/definitions/l3_link" + }, + "network_id": { + "$ref": "#/definitions/l3_network_id" + }, + "type": { + "$ref": "#/definitions/l3_ipv4_type" + }, + "ip_address": { + "$ref": "#/definitions/l3_ipv4_host" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv4_netmask" + }, + "routes": { + "$id": "#/definitions/l3_ipv4_network/routes", + "type": "array", + "title": "L3 IPv4 network routes", + "items": { + "$ref": "#/definitions/l3_ipv4_network_route" + } + } + } + }, + "l3_ipv6_network": { + "$id": "#/definitions/l3_ipv6_network", + "type": "object", + "title": "L3 IPv6 network configuration", + "required": [ + "id", + "link", + "network_id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l3_id" + }, + "link": { + "$ref": "#/definitions/l3_link" + }, + "network_id": { + "$ref": "#/definitions/l3_network_id" + }, + "type": { + "$ref": "#/definitions/l3_ipv6_type" + }, + "ip_address": { + "$ref": "#/definitions/l3_ipv6_host" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv6_netmask" + }, + "routes": { + "$id": "#/definitions/properties/l3_ipv6_network/routes", + "type": "array", + "title": "L3 IPv6 network routes", + "items": { + "$ref": "#/definitions/l3_ipv6_network_route" + } + } + } + }, + "ipv4_service": { + "$id": "#/definitions/ipv4_service", + "type": "object", + "title": "Service on a IPv4 network", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "$ref": "#/definitions/l3_ipv4_host" + }, + 
"type": { + "$id": "#/definitions/ipv4_service/properties/type", + "type": "string", + "enum": [ + "dns" + ], + "title": "Service type", + "examples": [ + "dns" + ] + } + } + }, + "ipv6_service": { + "$id": "#/definitions/ipv6_service", + "type": "object", + "title": "Service on a IPv6 network", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "$ref": "#/definitions/l3_ipv6_host" + }, + "type": { + "$id": "#/definitions/ipv4_service/properties/type", + "type": "string", + "enum": [ + "dns" + ], + "title": "Service type", + "examples": [ + "dns" + ] + } + } + }, + "ipv4_services": { + "$id": "#/definitions/ipv4_services", + "type": "array", + "title": "Network services on IPv4 network", + "items": { + "$id": "#/definitions/ipv4_services/items", + "$ref": "#/definitions/ipv4_service" + } + }, + "ipv6_services": { + "$id": "#/definitions/ipv6_services", + "type": "array", + "title": "Network services on IPv6 network", + "items": { + "$id": "#/definitions/ipv6_services/items", + "$ref": "#/definitions/ipv6_service" + } + }, + "services": { + "$id": "#/definitions/services", + "type": "array", + "title": "Network services", + "items": { + "$id": "#/definitions/services/items", + "anyOf": [ + { + "$ref": "#/definitions/ipv4_service" + }, + { + "$ref": "#/definitions/ipv6_service" + } + ] + } + } + } +} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/ext/extra_specs.py nova-21.0.0~b3~git2020041013.57ff308d6d/doc/ext/extra_specs.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/ext/extra_specs.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/ext/extra_specs.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,231 @@ +# Copyright 2020, Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Display extra specs in documentation. + +Provides a single directive that can be used to list all extra specs validators +and, thus, document all extra specs that nova recognizes and supports. +""" + +import typing as ty + +from docutils import nodes +from docutils.parsers import rst +from docutils.parsers.rst import directives +from docutils import statemachine +from sphinx import addnodes +from sphinx import directives as sphinx_directives +from sphinx import domains +from sphinx import roles +from sphinx.util import logging +from sphinx.util import nodes as sphinx_nodes + +from nova.api.validation.extra_specs import base +from nova.api.validation.extra_specs import validators + +LOG = logging.getLogger(__name__) + + +class ExtraSpecXRefRole(roles.XRefRole): + """Cross reference a extra spec. + + Example:: + + :nova:extra-spec:`hw:cpu_policy` + """ + + def __init__(self): + super(ExtraSpecXRefRole, self).__init__( + warn_dangling=True, + ) + + def process_link(self, env, refnode, has_explicit_title, title, target): + # The anchor for the extra spec link is the extra spec name + return target, target + + +class ExtraSpecDirective(sphinx_directives.ObjectDescription): + """Document an individual extra spec. 
+ + Accepts one required argument - the extra spec name, including the group. + + Example:: + + .. extra-spec:: hw:cpu_policy + """ + + def handle_signature(self, sig, signode): + """Transform an option description into RST nodes.""" + # Insert a node into the output showing the extra spec name + signode += addnodes.desc_name(sig, sig) + signode['allnames'] = [sig] + return sig + + def add_target_and_index(self, firstname, sig, signode): + cached_options = self.env.domaindata['nova']['extra_specs'] + signode['ids'].append(sig) + self.state.document.note_explicit_target(signode) + # Store the location of the option definition for later use in + # resolving cross-references + cached_options[sig] = self.env.docname + + +def _indent(text): + if not text: + return text + + padding = ' ' * 4 + return padding + text + + +def _format_validator_group_help( + validators: ty.Dict[str, base.ExtraSpecValidator], + summary: bool, +): + """Generate reStructuredText snippets for a group of validators.""" + for validator in validators.values(): + for line in _format_validator_help(validator, summary): + yield line + + +def _format_validator_help( + validator: base.ExtraSpecValidator, + summary: bool, +): + """Generate reStructuredText snippets for the provided validator. + + :param validator: A validator to document. + :type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator + """ + yield f'.. nova:extra-spec:: {validator.name}' + yield '' + + # NOTE(stephenfin): We don't print the pattern, if present, since it's too + # internal. Instead, the description should provide this information in a + # human-readable format + yield _indent(f':Type: {validator.value["type"].__name__}') + + if validator.value.get('min') is not None: + yield _indent(f':Min: {validator.value["min"]}') + + if validator.value.get('max') is not None: + yield _indent(f':Max: {validator.value["max"]}') + + yield '' + + if not summary: + for line in validator.description.splitlines(): + yield _indent(line) + + yield '' + + if validator.deprecated: + yield _indent('.. warning::') + yield _indent( + 'This extra spec has been deprecated and should not be used.' + ) + + yield '' + + +class ExtraSpecGroupDirective(rst.Directive): + """Document extra specs belonging to the specified group. + + Accepts one optional argument - the extra spec group - and one option - + whether to show a summary view only (omit descriptions). Example:: + + ..
extra-specs:: hw_rng + :summary: + """ + + required_arguments = 0 + optional_arguments = 1 + option_spec = { + 'summary': directives.flag, + } + has_content = False + + def run(self): + result = statemachine.ViewList() + source_name = self.state.document.current_source + + group = self.arguments[0] if self.arguments else None + summary = self.options.get('summary', False) + + if group: + group_validators = { + n.split(':', 1)[1]: v for n, v in validators.VALIDATORS.items() + if ':' in n and n.split(':', 1)[0].split('{')[0] == group + } + else: + group_validators = { + n: v for n, v in validators.VALIDATORS.items() + if ':' not in n + } + + if not group_validators: + LOG.warning("No validators found for group '%s'", group or '') + + for count, line in enumerate( + _format_validator_group_help(group_validators, summary) + ): + result.append(line, source_name, count) + LOG.debug('%5d%s%s', count, ' ' if line else '', line) + + node = nodes.section() + node.document = self.state.document + + sphinx_nodes.nested_parse_with_titles(self.state, result, node) + + return node.children + + +class NovaDomain(domains.Domain): + """nova domain.""" + name = 'nova' + label = 'nova' + object_types = { + 'configoption': domains.ObjType( + 'extra spec', 'spec', + ), + } + directives = { + 'extra-spec': ExtraSpecDirective, + } + roles = { + 'extra-spec': ExtraSpecXRefRole(), + } + initial_data = { + 'extra_specs': {}, + } + + def resolve_xref( + self, env, fromdocname, builder, typ, target, node, contnode, + ): + """Resolve cross-references""" + if typ == 'option': + return sphinx_nodes.make_refnode( + builder, + fromdocname, + env.domaindata['nova']['extra_specs'][target], + target, + contnode, + target, + ) + return None + + +def setup(app): + app.add_domain(NovaDomain) + app.add_directive('extra-specs', ExtraSpecGroupDirective) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/flavor-update.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/flavor-update.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/flavor-update.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/flavor-update.json 2020-04-10 17:57:57.000000000 +0000 @@ -11,8 +11,7 @@ "disabled": false, "vcpus": 2, "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:numa_nodes": "2" }, "projects": ["fake_tenant"], "swap": 0, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback_dest-end.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback_dest-end.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback_dest-end.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback_dest-end.json 2020-04-10 17:57:57.000000000 +0000 @@ -3,7 +3,8 @@ "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority": "INFO", diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback_dest-start.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback_dest-start.json --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback_dest-start.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback_dest-start.json 2020-04-10 17:57:57.000000000 +0000 @@ -3,7 +3,8 @@ "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority": "INFO", diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback-start.json nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback-start.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/notification_samples/instance-live_migration_rollback-start.json 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/notification_samples/instance-live_migration_rollback-start.json 2020-04-10 17:57:57.000000000 +0000 @@ -3,7 +3,8 @@ "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority":"INFO", diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/configuring-migrations.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/configuring-migrations.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/configuring-migrations.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/configuring-migrations.rst 2020-04-10 17:57:57.000000000 +0000 @@ -234,10 +234,12 @@ #. **Live migration completion timeout** - The Compute service aborts a migration when it has been running for too - long. The timeout is calculated based on the instance size, which is the - instance's memory size in GiB. In the case of block migration, the size of - ephemeral storage in GiB is added. + The Compute service will either abort or force complete a migration + when it has been running too long. This behavior is configurable + using the :oslo.config:option:`libvirt.live_migration_timeout_action` + config option. The timeout is calculated based on the instance size, which + is the instance's memory size in GiB. In the case of block migration, the + size of ephemeral storage in GiB is added. 
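Both options referenced above are ``[libvirt]`` settings. As an illustrative sketch only (the values shown are examples, not recommendations), a deployment that prefers forcing completion over aborting a long-running migration could set:

.. code-block:: ini

    [libvirt]
    # Action taken once the completion timeout is exceeded: "abort"
    # (the default) cancels the migration, while "force_complete"
    # pauses the instance (or switches to post-copy, where permitted)
    # so the migration can finish.
    live_migration_timeout_action = force_complete
    # Seconds allowed per GiB of instance size (memory, plus ephemeral
    # disk in the block migration case) before the action above fires.
    live_migration_completion_timeout = 800
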
The timeout in seconds is the instance size multiplied by the configurable parameter :oslo.config:option:`libvirt.live_migration_completion_timeout`, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/remote-console-access.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/remote-console-access.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/remote-console-access.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/remote-console-access.rst 2020-04-10 17:57:57.000000000 +0000 @@ -105,6 +105,8 @@ - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`vnc.novncproxy_host` - :oslo.config:option:`vnc.novncproxy_port` @@ -326,6 +328,8 @@ - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`spice.html5proxy_host` - :oslo.config:option:`spice.html5proxy_port` @@ -407,6 +411,8 @@ - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`serial_console.serialproxy_host` - :oslo.config:option:`serial_console.serialproxy_port` diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/support-compute.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/support-compute.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/support-compute.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/support-compute.rst 2020-04-10 17:57:57.000000000 +0000 @@ -16,6 +16,7 @@ troubleshooting/orphaned-allocations.rst troubleshooting/rebuild-placement-db.rst + troubleshooting/affinity-policy-violated.rst Compute service logging diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/troubleshooting/affinity-policy-violated.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/troubleshooting/affinity-policy-violated.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/troubleshooting/affinity-policy-violated.rst 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/troubleshooting/affinity-policy-violated.rst 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,78 @@ +Affinity policy violated with parallel requests +=============================================== + +Problem +------- + +Parallel server create requests for affinity or anti-affinity land on the same +host and servers go to the ``ACTIVE`` state even though the affinity or +anti-affinity policy was violated. + +Solution +-------- + +There are two ways to avoid anti-/affinity policy violations among multiple +server create requests. + +Create multiple servers as a single request +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use the `multi-create API`_ with the ``min_count`` parameter set or the +`multi-create CLI`_ with the ``--min`` option set to the desired number of +servers. + +This works because when the batch of requests is visible to ``nova-scheduler`` +at the same time as a group, it will be able to choose compute hosts that +satisfy the anti-/affinity constraint and will send them to the same hosts or +different hosts accordingly. + +.. 
_multi-create API: https://docs.openstack.org/api-ref/compute/#create-multiple-servers +.. _multi-create CLI: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create + +Adjust Nova configuration settings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When requests are made separately and the scheduler cannot consider the batch +of requests at the same time as a group, anti-/affinity races are handled by +what is called the "late affinity check" in ``nova-compute``. Once a server +lands on a compute host, if the request involves a server group, +``nova-compute`` contacts the API database (via ``nova-conductor``) to retrieve +the server group and then it checks whether the affinity policy has been +violated. If the policy has been violated, ``nova-compute`` initiates a +reschedule of the server create request. Note that this means the deployment +must have :oslo.config:option:`scheduler.max_attempts` set greater than ``1`` +(default is ``3``) to handle races. + +An ideal configuration for multiple cells will minimize `upcalls`_ from the +cells to the API database. This is how devstack, for example, is configured in +the CI gate. The cell conductors do not set +:oslo.config:option:`api_database.connection` and ``nova-compute`` sets +:oslo.config:option:`workarounds.disable_group_policy_check_upcall` to +``True``. + +However, if a deployment needs to handle racing affinity requests, it needs to +configure cell conductors to have access to the API database, for example: + +.. code-block:: ini + + [api_database] + connection = mysql+pymysql://root:a@127.0.0.1/nova_api?charset=utf8 + +The deployment also needs to configure ``nova-compute`` services not to disable +the group policy check upcall by either not setting (use the default) +:oslo.config:option:`workarounds.disable_group_policy_check_upcall` or setting +it to ``False``, for example: + +.. code-block:: ini + + [workarounds] + disable_group_policy_check_upcall = False + +With these settings, anti-/affinity policy should not be violated even when +parallel server create requests are racing. + +Future work is needed to add anti-/affinity support to the placement service in +order to eliminate the need for the late affinity check in ``nova-compute``. + +.. _upcalls: https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#operations-requiring-upcalls + diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/virtual-gpu.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/virtual-gpu.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/admin/virtual-gpu.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/admin/virtual-gpu.rst 2020-04-10 17:57:57.000000000 +0000 @@ -35,15 +35,33 @@ [devices] enabled_vgpu_types = nvidia-35 - .. note:: + If you want to support more than a single GPU type, you need to provide a + separate configuration section for each device. For example: - As of the Queens release, Nova only supports a single type. If more - than one vGPU type is specified (as a comma-separated list), only the - first one will be used. + .. code-block:: ini + + [devices] + enabled_vgpu_types = nvidia-35, nvidia-36 + + [vgpu_nvidia-35] + device_addresses = 0000:84:00.0,0000:85:00.0 + + [vgpu_nvidia-36] + device_addresses = 0000:86:00.0 + + where you have to define which physical GPUs are supported per GPU type. 
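+   As a sketch, one way to discover the PCI addresses of the mdev-capable
+   physical GPUs on a compute node (the exact listing depends on the host
+   and its GPU driver) is to list the mediated device bus:
+
+   .. code-block:: console
+
+      $ ls /sys/class/mdev_bus/
+      0000:84:00.0  0000:85:00.0  0000:86:00.0
+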
+
+   If the same PCI address is provided for two different types, nova-compute
+   will refuse to start and issue a specific error in the logs.

 To know which specific type(s) to mention, please refer to `How to discover a
 GPU type`_.

+   .. versionchanged:: 21.0.0
+
+      Multiple GPU types are only supported by the Ussuri release and
+      later versions.
+
 #. Restart the ``nova-compute`` service.

@@ -269,6 +287,59 @@
    physical GPU having the PCI ID ``0000:85:00.0``.

+(Optional) Provide custom traits for multiple GPU types
+-------------------------------------------------------
+
+Operators who support multiple GPU types per compute may want flavors that
+ask for a specific GPU type. This is possible using custom traits, by
+decorating the child Resource Providers that correspond to physical GPUs.
+
+.. note::
+
+   Possible improvements in a future release could consist of providing
+   automatic tagging of Resource Providers with standard traits corresponding
+   to versioned mapping of public GPU types. For the moment, this has to be
+   done manually.
+
+#. Get the list of resource providers
+
+   See `Checking allocations and inventories for virtual GPUs`_ first to get
+   the list of Resource Providers that support a ``VGPU`` resource class.
+
+#. Define a custom trait corresponding to each GPU type
+
+   .. code-block:: console
+
+      $ openstack --os-placement-api-version 1.6 trait create CUSTOM_NVIDIA_11
+
+   In this example, we create a custom trait named ``CUSTOM_NVIDIA_11``.
+
+#. Add the corresponding trait to the Resource Provider matching the GPU
+
+   .. code-block:: console
+
+      $ openstack --os-placement-api-version 1.6 resource provider trait set \
+          --trait CUSTOM_NVIDIA_11 e2f8607b-0683-4141-a8af-f5e20682e28c
+
+   In this case, the trait ``CUSTOM_NVIDIA_11`` will be added to the Resource
+   Provider with the UUID ``e2f8607b-0683-4141-a8af-f5e20682e28c`` that
+   corresponds to the PCI address ``0000:85:00.0`` as shown above.
+
+#. Amend the flavor to add a requested trait
+
+   .. code-block:: console
+
+      $ openstack flavor set --property trait:CUSTOM_NVIDIA_11=required vgpu_1
+
+   In this example, we add the ``CUSTOM_NVIDIA_11`` trait as required
+   information for the ``vgpu_1`` flavor we created earlier.
+
+   This will allow the Placement service to only return the Resource Providers
+   matching this trait, so only the GPUs that were decorated with it will be
+   checked for this flavor.
+
 Caveats
 -------

@@ -324,6 +395,8 @@
    resize. If you want to migrate an instance, make sure to rebuild it after
    the migration.

+* Multiple GPU types per compute are not supported by the XenServer driver.
+
 .. [#] https://bugs.launchpad.net/nova/+bug/1762688

 .. Links
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/cli/nova-manage.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/cli/nova-manage.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/cli/nova-manage.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/cli/nova-manage.rst 2020-04-10 17:57:57.000000000 +0000
@@ -546,7 +546,7 @@
.. _heal_allocations_cli:

-``nova-manage placement heal_allocations [--max-count <max_count>] [--verbose] [--skip-port-allocations] [--dry-run] [--instance <instance_uuid>]``
+``nova-manage placement heal_allocations [--max-count <max_count>] [--verbose] [--skip-port-allocations] [--dry-run] [--instance <instance_uuid>] [--cell <cell_uuid>]``

+``nova-manage placement audit [--verbose] [--delete] [--resource_provider <provider_uuid>]``
+    Iterates over all the Resource Providers (or just one if you provide the
+    UUID) and then verifies whether the compute allocations are related to
+    either an existing instance or a migration UUID.
+    If not, it will report which allocations are orphaned.
+
+    You can also ask to delete all the orphaned allocations by specifying
+    ``--delete``.
+
+    Specify ``--verbose`` to get detailed progress output during execution.
+
+    This command requires that the
+    :oslo.config:option:`api_database.connection` and
+    :oslo.config:group:`placement` configuration options are set. Placement API
+    >= 1.14 is required.
+
+    **Return Codes**
+
+    .. list-table::
+       :widths: 20 80
+       :header-rows: 1
+
+       * - Return code
+         - Description
+       * - 0
+         - No orphaned allocations were found
+       * - 1
+         - An unexpected error occurred
+       * - 3
+         - Orphaned allocations were found
+       * - 4
+         - All found orphaned allocations were deleted
+       * - 127
+         - Invalid input

 See Also
 ========
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/configuration/extra-specs.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/configuration/extra-specs.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/configuration/extra-specs.rst 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/configuration/extra-specs.rst 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,212 @@
+===========
+Extra Specs
+===========
+
+The following is an overview of all extra specs recognized by nova in its
+default configuration.
+
+.. note::
+
+   Other services and virt drivers may provide additional extra specs not
+   listed here. In addition, it is possible to register your own extra specs.
+   For more information on the latter, refer to :doc:`/user/filter-scheduler`.
+
+Placement
+---------
+
+The following extra specs are used during scheduling to modify the request sent
+to placement.
+
+``resources``
+~~~~~~~~~~~~~
+
+The following extra specs are used to request an amount of the specified
+resource from placement when scheduling. All extra specs expect an integer
+value.
+
+.. note::
+
+   Not all of the resource types listed below are supported by all virt
+   drivers.
+
+.. extra-specs:: resources
+   :summary:
+
+``trait``
+~~~~~~~~~
+
+The following extra specs are used to request a specified trait from placement
+when scheduling. All extra specs expect one of the following values:
+
+- ``required``
+- ``forbidden``
+
+.. note::
+
+   Not all of the traits listed below are supported by all virt drivers.
+
+.. extra-specs:: trait
+   :summary:
+
+Scheduler Filters
+-----------------
+
+The following extra specs are specific to various in-tree scheduler filters.
+
+``aggregate_instance_extra_specs``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following extra specs are used to specify metadata that must be present on
+the aggregate of a host. If this metadata is not present or does not match the
+expected value, the aggregate and all hosts within it will be rejected.
+
+Requires the ``AggregateInstanceExtraSpecsFilter`` scheduler filter.
+
+.. extra-specs:: aggregate_instance_extra_specs
+
+``capabilities``
+~~~~~~~~~~~~~~~~
+
+The following extra specs are used to specify a host capability that must be
+provided by the host compute service.
If this capability is not present or does
+not match the expected value, the host will be rejected.
+
+Requires the ``ComputeCapabilitiesFilter`` scheduler filter.
+
+All extra specs expect similar types of values:
+
+* ``=`` (equal to or greater than as a number; same as vcpus case)
+* ``==`` (equal to as a number)
+* ``!=`` (not equal to as a number)
+* ``>=`` (greater than or equal to as a number)
+* ``<=`` (less than or equal to as a number)
+* ``s==`` (equal to as a string)
+* ``s!=`` (not equal to as a string)
+* ``s>=`` (greater than or equal to as a string)
+* ``s>`` (greater than as a string)
+* ``s<=`` (less than or equal to as a string)
+* ``s<`` (less than as a string)
+* ``<in>`` (substring)
+* ``<all-in>`` (all elements contained in collection)
+* ``<or>`` (find one of these)
+* A specific value, e.g. ``true``, ``123``, ``testing``
+
+Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and
+``<or> fpu gpu``
+
+.. note::

+   Not all operators will apply to all types of values. For example, the ``==``
+   operator should not be used for a string value - use ``s==`` instead.
+
+.. extra-specs:: capabilities
+   :summary:
+
+Virt driver
+-----------
+
+The following extra specs are used as hints to configure internals of an
+instance, from the bus used for paravirtualized devices to the amount of a
+physical device to pass through to the instance. Most of these are virt
+driver-specific.
+
+``quota``
+~~~~~~~~~
+
+The following extra specs are used to configure quotas for various
+paravirtualized devices.
+
+They are only supported by the libvirt virt driver.
+
+.. extra-specs:: quota
+
+``accel``
+~~~~~~~~~
+
+The following extra specs are used to configure attachment of various
+accelerators to an instance. For more information, refer to :cyborg-doc:`the
+Cyborg documentation <>`.
+
+They are only supported by the libvirt virt driver.
+
+.. extra-specs:: accel
+
+``pci_passthrough``
+~~~~~~~~~~~~~~~~~~~
+
+The following extra specs are used to configure passthrough of a host PCI
+device to an instance. This requires prior host configuration. For more
+information, refer to :doc:`/admin/pci-passthrough`.
+
+They are only supported by the libvirt virt driver.
+
+.. extra-specs:: pci_passthrough
+
+``hw``
+~~~~~~
+
+The following extra specs are used to configure various attributes of
+instances. Some of the extra specs act as feature flags, while others tweak,
+for example, the guest-visible CPU topology of the instance.
+
+Except where otherwise stated, they are only supported by the libvirt virt
+driver.
+
+.. extra-specs:: hw
+
+``hw_rng``
+~~~~~~~~~~
+
+The following extra specs are used to configure a random number generator for
+an instance.
+
+They are only supported by the libvirt virt driver.
+
+.. extra-specs:: hw_rng
+
+``hw_video``
+~~~~~~~~~~~~
+
+The following extra specs are used to configure attributes of the default guest
+video device.
+
+They are only supported by the libvirt virt driver.
+
+.. extra-specs:: hw_video
+
+``os``
+~~~~~~
+
+The following extra specs are used to configure various attributes of
+instances when using the HyperV virt driver.
+
+They are only supported by the HyperV virt driver.
+
+.. extra-specs:: os
+
+``powervm``
+~~~~~~~~~~~
+
+The following extra specs are used to configure various attributes of
+instances when using the PowerVM virt driver.
+
+They are only supported by the PowerVM virt driver.
+
+.. extra-specs:: powervm
+
+``vmware``
+~~~~~~~~~~
+
+The following extra specs are used to configure various attributes of
+instances when using the VMWare virt driver.
+
+They are only supported by the VMWare virt driver.
+
+.. extra-specs:: vmware
+
+Others (uncategorized)
+----------------------
+
+The following extra specs are not part of a group.
+
+.. extra-specs::
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/configuration/index.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/configuration/index.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/configuration/index.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/configuration/index.rst 2020-04-10 17:57:57.000000000 +0000
@@ -75,3 +75,19 @@
    :hidden:

    sample-policy
+
+Extra Specs
+-----------
+
+Nova uses *flavor extra specs* as a way to provide additional information to
+instances beyond basic information like the amount of RAM or disk. This
+information can range from hints for the scheduler to hypervisor-specific
+configuration instructions for the instance.
+
+* :doc:`Extra Spec Reference `: A complete reference for all extra
+  specs currently recognized and supported by nova.
+
+.. toctree::
+   :hidden:
+
+   extra-specs
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/conf.py nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/conf.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/conf.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/conf.py 2020-04-10 17:57:57.000000000 +0000
@@ -41,6 +41,7 @@
     'oslo_policy.sphinxext',
     'ext.versioned_notifications',
     'ext.feature_matrix',
+    'ext.extra_specs',
     'sphinxcontrib.actdiag',
     'sphinxcontrib.seqdiag',
     'sphinxcontrib.rsvgconverter',
@@ -148,6 +149,7 @@
 openstack_projects = [
     'ceilometer',
     'cinder',
+    'cyborg',
     'glance',
     'horizon',
     'ironic',
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/contributing.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/contributing.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/contributing.rst 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/contributing.rst 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,62 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide `_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the project-specific information you need to get
+started with nova.
+
+Communication
+~~~~~~~~~~~~~
+
+:doc:`how-to-get-involved`
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The overall structure of the Nova team is documented on `the wiki
+`_.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+
+If you want to propose a new feature, please read the :doc:`blueprints` page.
+
+Task Tracking
+~~~~~~~~~~~~~
+
+We track our tasks in `Launchpad `__.
+
+If you're looking for a smaller, easier work item to pick up and get started
+on, search for the 'low-hanging-fruit' tag.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad `__.
+More info about Launchpad usage can be found on `OpenStack docs page
+`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All changes proposed to Nova require two ``Code-Review +2`` votes from
+Nova core reviewers before one of the core reviewers can approve a patch by
+giving a ``Workflow +1`` vote. More detailed guidelines for reviewers of Nova
+patches are available at :doc:`code-review`.
+
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All common PTL duties are enumerated in the `PTL guide
+`_.
+
+For the Nova-specific duties, you can read the Nova PTL guide:
+:doc:`ptl-guide`
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/index.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/index.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/index.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/index.rst 2020-04-10 17:57:57.000000000 +0000
@@ -7,6 +7,14 @@
 valuable, and part of what keeps the project going. Here are a list of
 resources to get your started.

+Basic Information
+=================
+
+.. toctree::
+   :maxdepth: 2
+
+   contributing
+
 Getting Started
 ===============
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/microversions.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/microversions.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/contributor/microversions.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/contributor/microversions.rst 2020-04-10 17:57:57.000000000 +0000
@@ -374,9 +374,17 @@
 * If the microversion changes the response schema, a new schema and test for
   the microversion must be added to Tempest.

+* If applicable, add functional API sample tests under
+  ``nova/tests/functional/api_sample_tests``. Also, add JSON examples to the
+  ``doc/api_samples`` directory; these can be generated automatically via the
+  ``api-samples`` tox environment, or by running the tests with the
+  ``GENERATE_SAMPLES`` environment variable set to ``True``.
+
 * Update the `API Reference`_ documentation as appropriate. The source is
   located under `api-ref/source/`.

+* If the microversion changes server-related APIs, update
+  ``api-guide/source/server_concepts.rst`` accordingly.
+
 .. _API Reference: https://docs.openstack.org/api-ref/compute/

 Allocating a microversion
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/index.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/index.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/index.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/index.rst 2020-04-10 17:57:57.000000000 +0000
@@ -222,6 +222,8 @@
 For Contributors
 ================

+* :doc:`contributor/contributing`: If you are a new contributor, this should
+  help you start contributing to Nova.
 * :doc:`contributor/index`: If you are new to Nova, this should help you start
   to understand what Nova actually does, and why.
 * :doc:`reference/index`: There are also a number of technical references on
@@ -234,6 +236,7 @@
    :hidden:

    contributor/index
+   contributor/contributing
    reference/index
.. only:: html
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/filter-scheduler.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/filter-scheduler.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/filter-scheduler.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/filter-scheduler.rst 2020-04-10 17:57:57.000000000 +0000
@@ -358,38 +358,124 @@
 Writing Your Own Filter
 -----------------------

-To create **your own filter** you must inherit from
-|BaseHostFilter| and implement one method: ``host_passes``.
-This method should return ``True`` if a host passes the filter and return
-``False`` elsewhere.
-It takes two parameters (named arbitrarily as ``host_state`` and ``spec_obj``):
+To create **your own filter**, you must inherit from |BaseHostFilter| and
+implement one method: ``host_passes``. This method should return ``True`` if a
+host passes the filter and return ``False`` otherwise. It takes two parameters:

-* the ``HostState`` object allows to get attributes of the host.
+* the ``HostState`` object allows you to get attributes of the host
 * the ``RequestSpec`` object describes the user request, including the flavor,
-  the image and the scheduler hints.
+  the image and the scheduler hints

 For further details about each of those objects and their corresponding
-attributes, please refer to the codebase (at least by looking at the other
-filters code) or ask for help in the #openstack-nova IRC channel.
+attributes, refer to the codebase (at least by looking at the other filters
+code) or ask for help in the #openstack-nova IRC channel.

-As an example, nova.conf could contain the following scheduler-related
-settings:
+In addition, if your custom filter uses non-standard extra specs, you must
+register validators for these extra specs. Examples of validators can be found
+in the ``nova.api.validation.extra_specs`` module. These should be registered
+via the ``nova.api.extra_spec_validator`` `entrypoint`__.

-::
+The module containing your custom filter(s) must be packaged and available in
+the same environment(s) as the nova controllers, specifically the
+:program:`nova-scheduler` and :program:`nova-api` services.
+As an example, consider the following sample package, which is the `minimal
+structure`__ for a standard, setuptools-based Python package:

-  --scheduler.driver=nova.scheduler.FilterScheduler
-  --filter_scheduler.available_filters=nova.scheduler.filters.all_filters
-  --filter_scheduler.available_filters=myfilter.MyFilter
-  --filter_scheduler.enabled_filters=ComputeFilter,MyFilter
-
-.. note:: When writing your own filter, be sure to add it to the list of available filters
-   and enable it in the default filters. The "all_filters" setting only includes the
-   filters shipped with nova.
+__ https://packaging.python.org/specifications/entry-points/
+__ https://python-packaging.readthedocs.io/en/latest/minimal.html
+
+.. code-block:: none
+
+    acmefilter/
+        acmefilter/
+            __init__.py
+            validators.py
+        setup.py
+
+Where ``__init__.py`` contains:
+
+.. code-block:: python
+
+    from oslo_log import log as logging
+    from nova.scheduler import filters
+
+    LOG = logging.getLogger(__name__)
+
+    class AcmeFilter(filters.BaseHostFilter):
+
+        def host_passes(self, host_state, spec_obj):
+            extra_spec = spec_obj.flavor.extra_specs.get('acme:foo')
+            LOG.info("Extra spec value was '%s'", extra_spec)
+
+            # do meaningful stuff here...
+
+            return True
+
+``validators.py`` contains:
+
+.. code-block:: python
+
+    from nova.api.validation.extra_specs import base
+
+    def register():
+        validators = [
+            base.ExtraSpecValidator(
+                name='acme:foo',
+                description='My custom extra spec.',
+                value={
+                    'type': str,
+                    'enum': [
+                        'bar',
+                        'baz',
+                    ],
+                },
+            ),
+        ]
+
+        return validators
+
+``setup.py`` contains:
+
+.. code-block:: python
+
+    from setuptools import setup
+
+    setup(
+        name='acmefilter',
+        version='0.1',
+        description='My custom filter',
+        packages=[
+            'acmefilter'
+        ],
+        entry_points={
+            'nova.api.extra_spec_validators': [
+                'acme = acmefilter.validators',
+            ],
+        },
+    )
+
+To enable this, you would set the following in :file:`nova.conf`:
+
+.. code-block:: ini
+
+    [filter_scheduler]
+    available_filters = nova.scheduler.filters.all_filters
+    available_filters = acmefilter.AcmeFilter
+    enabled_filters = ComputeFilter,AcmeFilter
+
+.. note::
+
+    You **must** add custom filters to the list of available filters using the
+    :oslo.config:option:`filter_scheduler.available_filters` config option in
+    addition to enabling them via the
+    :oslo.config:option:`filter_scheduler.enabled_filters` config option. The
+    default ``nova.scheduler.filters.all_filters`` value for the former only
+    includes the filters shipped with nova.

 With these settings, nova will use the ``FilterScheduler`` for the scheduler
-driver. All of the standard nova filters and MyFilter are available to the
-FilterScheduler, but just the ``ComputeFilter`` and ``MyFilter`` will be
-used on each request.
+driver. All of the standard nova filters and the custom ``AcmeFilter`` filter
+are available to the ``FilterScheduler``, but just the ``ComputeFilter`` and
+``AcmeFilter`` will be used on each request.

 Weights
 -------
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/metadata.rst nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/metadata.rst
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/metadata.rst 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/metadata.rst 2020-04-10 17:57:57.000000000 +0000
@@ -337,6 +337,8 @@
 }

+:download:`Download` network_data.json JSON schema.
+
 .. _metadata-ec2-format:

 EC2-compatible metadata
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/support-matrix.ini nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/support-matrix.ini
--- nova-21.0.0~b2~git2020021008.1fcd74730d/doc/source/user/support-matrix.ini 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/doc/source/user/support-matrix.ini 2020-04-10 17:57:57.000000000 +0000
@@ -266,7 +266,7 @@
 driver.libvirt-lxc=missing
 driver.libvirt-xen=complete
 driver.vmware=missing
-driver.hyperv=missing
+driver.hyperv=complete
 driver.ironic=missing
 driver.libvirt-vz-vm=complete
 driver.libvirt-vz-ct=missing
@@ -1339,7 +1339,9 @@
 driver.libvirt-lxc=missing
 driver.libvirt-xen=missing
 driver.vmware=complete
-driver.hyperv=missing
+driver.hyperv=complete
+driver-notes.hyperv=In order to use UEFI, a second generation Hyper-V VM must
+  be requested.
driver.ironic=partial driver-notes.ironic=depends on hardware support driver.libvirt-vz-vm=missing diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/etc/nova/api-paste.ini nova-21.0.0~b3~git2020041013.57ff308d6d/etc/nova/api-paste.ini --- nova-21.0.0~b2~git2020021008.1fcd74730d/etc/nova/api-paste.ini 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/etc/nova/api-paste.ini 2020-04-10 17:57:57.000000000 +0000 @@ -18,13 +18,15 @@ [composite:osapi_compute] use = call:nova.api.openstack.urlmap:urlmap_factory /: oscomputeversions +/v2: oscomputeversion_legacy_v2 +/v2.1: oscomputeversion_v2 # v21 is an exactly feature match for v2, except it has more stringent # input validation on the wsgi surface (prevents fuzzing early on the # API). It also provides new features via API microversions which are # opt into for clients. Unaware clients will receive the same frozen # v2 API feature set, but with some relaxed validation -/v2: openstack_compute_api_v21_legacy_v2_compatible -/v2.1: openstack_compute_api_v21 +/v2/+: openstack_compute_api_v21_legacy_v2_compatible +/v2.1/+: openstack_compute_api_v21 [composite:openstack_compute_api_v21] use = call:nova.api.auth:pipeline_factory_v21 @@ -72,9 +74,18 @@ [pipeline:oscomputeversions] pipeline = cors faultwrap request_log http_proxy_to_wsgi oscomputeversionapp +[pipeline:oscomputeversion_v2] +pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi oscomputeversionapp_v2 + +[pipeline:oscomputeversion_legacy_v2] +pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi legacy_v2_compatible oscomputeversionapp_v2 + [app:oscomputeversionapp] paste.app_factory = nova.api.openstack.compute.versions:Versions.factory +[app:oscomputeversionapp_v2] +paste.app_factory = nova.api.openstack.compute.versions:VersionsV2.factory + ########## # Shared # ########## diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/gate/live_migration/hooks/ceph.sh nova-21.0.0~b3~git2020041013.57ff308d6d/gate/live_migration/hooks/ceph.sh --- nova-21.0.0~b2~git2020021008.1fcd74730d/gate/live_migration/hooks/ceph.sh 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/gate/live_migration/hooks/ceph.sh 2020-04-10 17:57:57.000000000 +0000 @@ -99,19 +99,56 @@ fi } +function _wait_for_nova_compute_service_state { + source $BASE/new/devstack/openrc admin admin + local status=$1 + local attempt=1 + local max_attempts=24 + local attempt_sleep=5 + local computes_count=$(openstack compute service list | grep -c nova-compute) + local computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l) + + echo "Waiting for $computes_count computes to report as $status" + while [ "$computes_ready" -ne "$computes_count" ]; do + if [ "$attempt" -eq "$max_attempts" ]; then + echo "Failed waiting for computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${max_attempts} attempts" + exit 4 + fi + echo "Waiting ${attempt_sleep} seconds for ${computes_count} computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${attempt}/${max_attempts} attempts" + sleep $attempt_sleep + attempt=$((attempt+1)) + computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l) + done + echo "All computes are now reporting as ${status} after ${attempt} attempts" +} + function configure_and_start_nova { + + echo "Checking all n-cpu services" + $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a 
nova-compute" + + # stop nova-compute + echo "Stopping all n-cpu services" + $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl stop devstack@n-cpu" + + # Wait for the service to be marked as down + _wait_for_nova_compute_service_state "down" + _ceph_configure_nova + #import secret to libvirt _populate_libvirt_secret - echo 'check compute processes before restart' - $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" - # restart nova-compute - $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@n-cpu" + # start nova-compute + echo "Starting all n-cpu services" + $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl start devstack@n-cpu" + echo "Checking all n-cpu services" # test that they are all running again - $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute" + $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a nova-compute" + # Wait for the service to be marked as up + _wait_for_nova_compute_service_state "up" } function _ceph_configure_cinder { diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/gate/test_evacuate.sh nova-21.0.0~b3~git2020041013.57ff308d6d/gate/test_evacuate.sh --- nova-21.0.0~b2~git2020021008.1fcd74730d/gate/test_evacuate.sh 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/gate/test_evacuate.sh 2020-04-10 17:57:57.000000000 +0000 @@ -54,6 +54,11 @@ --block-device id=${image_id},source=image,dest=volume,size=1,bootindex=0,shutdown=remove \ --nic net-id=${network_id} --availability-zone nova:${subnode} evacuate-bfv-test +# Fence the subnode +echo "Stopping n-cpu, q-agt and guest domains on subnode" +$ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl stop devstack@n-cpu devstack@q-agt" +$ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "for domain in \$(virsh list --all --name); do virsh destroy \$domain; done" + echo "Forcing down the subnode so we can evacuate from it" openstack --os-compute-api-version 2.11 compute service set --down ${subnode} nova-compute diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/lower-constraints.txt nova-21.0.0~b3~git2020041013.57ff308d6d/lower-constraints.txt --- nova-21.0.0~b2~git2020021008.1fcd74730d/lower-constraints.txt 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/lower-constraints.txt 2020-04-10 17:57:57.000000000 +0000 @@ -15,6 +15,7 @@ coverage==4.0 cryptography==2.7 cursive==0.2.1 +dataclasses==0.7 ddt==1.0.1 debtcollector==1.19.0 decorator==3.4.0 @@ -66,13 +67,13 @@ os-client-config==1.29.0 os-resource-classes==0.4.0 os-service-types==1.7.0 -os-traits==2.1.0 +os-traits==2.2.0 os-vif==1.14.0 os-win==3.0.0 os-xenapi==0.3.3 osc-lib==1.10.0 oslo.cache==1.26.0 -oslo.concurrency==3.26.0 +oslo.concurrency==3.29.0 oslo.config==6.1.0 oslo.context==2.21.0 oslo.db==4.44.0 @@ -87,7 +88,7 @@ oslo.serialization==2.21.1 oslo.service==1.40.1 oslo.upgradecheck==0.1.1 -oslo.utils==3.40.2 +oslo.utils==4.1.0 oslo.versionedobjects==1.35.0 oslo.vmware==2.17.0 oslotest==3.8.0 @@ -129,7 +130,7 @@ python-keystoneclient==3.15.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 -python-subunit==1.2.0 +python-subunit==1.4.0 pytz==2018.3 PyYAML==3.12 repoze.lru==0.7 @@ -165,7 +166,7 @@ voluptuous==0.11.1 warlock==1.2.0 WebOb==1.8.2 -websockify==0.8.0 +websockify==0.9.0 wrapt==1.10.11 wsgi-intercept==1.7.0 zVMCloudConnector==1.3.0 diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/accelerator/cyborg.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/accelerator/cyborg.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/accelerator/cyborg.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/accelerator/cyborg.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,280 @@ +# Copyright 2019 Intel +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from oslo_log import log as logging + +from keystoneauth1 import exceptions as ks_exc + +from nova import exception +from nova.i18n import _ +from nova import objects +from nova.scheduler import utils as schedutils +from nova import service_auth +from nova import utils + +""" + Note on object relationships: + 1 device profile (DP) has D >= 1 request groups (just as a flavor + has many request groups). + Each DP request group corresponds to exactly 1 numbered request + group (RG) in the request spec. + Each numbered RG corresponds to exactly one resource provider (RP). + A DP request group may request A >= 1 accelerators, and so result + in the creation of A ARQs. + Each ARQ corresponds to exactly 1 DP request group. + + A device profile is a dictionary: + { "name": "mydpname", + "uuid": , + "groups": [ ] + } + + A device profile group is a dictionary too: + { "resources:CUSTOM_ACCELERATOR_FPGA": "2", + "resources:CUSTOM_LOCAL_MEMORY": "1", + "trait:CUSTOM_INTEL_PAC_ARRIA10": "required", + "trait:CUSTOM_FUNCTION_NAME_FALCON_GZIP_1_1": "required", + # 0 or more Cyborg properties + "accel:bitstream_id": "FB021995_BF21_4463_936A_02D49D4DB5E5" + } + + See cyborg/cyborg/objects/device_profile.py for more details. +""" + +LOG = logging.getLogger(__name__) + + +def get_client(context): + return _CyborgClient(context) + + +def get_device_profile_group_requester_id(dp_group_id): + """Return the value to use in objects.RequestGroup.requester_id. + + The requester_id is used to match device profile groups from + Cyborg to the request groups in request spec. + + :param dp_group_id: The index of the request group in the device profile. 
+ """ + req_id = "device_profile_" + str(dp_group_id) + return req_id + + +def get_device_profile_request_groups(context, dp_name): + cyclient = get_client(context) + return cyclient.get_device_profile_groups(dp_name) + + +class _CyborgClient(object): + DEVICE_PROFILE_URL = "/device_profiles" + ARQ_URL = "/accelerator_requests" + + def __init__(self, context): + auth = service_auth.get_auth_plugin(context) + self._client = utils.get_ksa_adapter('accelerator', ksa_auth=auth) + + def _call_cyborg(self, func, *args, **kwargs): + resp = err_msg = None + try: + resp = func(*args, **kwargs) + if not resp: + msg = _('Invalid response from Cyborg: ') + err_msg = msg + str(resp) + except ks_exc.ClientException as exc: + err_msg = _('Could not communicate with Cyborg.') + LOG.exception('%s: %s', err_msg, six.text_type(exc)) + + return resp, err_msg + + def _get_device_profile_list(self, dp_name): + query = {"name": dp_name} + err_msg = None + + resp, err_msg = self._call_cyborg(self._client.get, + self.DEVICE_PROFILE_URL, params=query) + + if err_msg: + raise exception.DeviceProfileError(name=dp_name, msg=err_msg) + + return resp.json().get('device_profiles') + + def get_device_profile_groups(self, dp_name): + """Get list of profile group objects from the device profile. + + Cyborg API returns: {"device_profiles": []} + See module notes above for further details. + + :param dp_name: string: device profile name + Expected to be valid, not None or ''. + :returns: [objects.RequestGroup] + :raises: DeviceProfileError + """ + dp_list = self._get_device_profile_list(dp_name) + if not dp_list: + msg = _('Expected 1 device profile but got nothing.') + raise exception.DeviceProfileError(name=dp_name, msg=msg) + if len(dp_list) != 1: + err = _('Expected 1 device profile but got %s.') % len(dp_list) + raise exception.DeviceProfileError(name=dp_name, msg=err) + + dp_groups = dp_list[0]['groups'] + request_groups = [] + for dp_group_id, dp_group in enumerate(dp_groups): + req_id = get_device_profile_group_requester_id(dp_group_id) + rg = objects.RequestGroup(requester_id=req_id) + for key, val in dp_group.items(): + match = schedutils.ResourceRequest.XS_KEYPAT.match(key) + if not match: + continue # could be 'accel:foo=bar', skip it + prefix, _ignore, name = match.groups() + if prefix == schedutils.ResourceRequest.XS_RES_PREFIX: + rg.add_resource(rclass=name, amount=val) + elif prefix == schedutils.ResourceRequest.XS_TRAIT_PREFIX: + rg.add_trait(trait_name=name, trait_type=val) + request_groups.append(rg) + return request_groups + + def _create_arqs(self, dp_name): + data = {"device_profile_name": dp_name} + resp, err_msg = self._call_cyborg(self._client.post, + self.ARQ_URL, json=data) + + if err_msg: + raise exception.AcceleratorRequestOpFailed( + op=_('create'), msg=err_msg) + + return resp.json().get('arqs') + + def create_arqs_and_match_resource_providers(self, dp_name, rg_rp_map): + """Create ARQs, match them with request groups and thereby + determine their corresponding RPs. 
+ + :param dp_name: Device profile name + :param rg_rp_map: Request group - Resource Provider map + {requester_id: [resource_provider_uuid]} + :returns: + [arq], with each ARQ associated with an RP + :raises: DeviceProfileError, AcceleratorRequestOpFailed + """ + LOG.info('Creating ARQs for device profile %s', dp_name) + arqs = self._create_arqs(dp_name) + if not arqs or len(arqs) == 0: + msg = _('device profile name %s') % dp_name + raise exception.AcceleratorRequestOpFailed(op=_('create'), msg=msg) + for arq in arqs: + dp_group_id = arq['device_profile_group_id'] + arq['device_rp_uuid'] = None + requester_id = ( + get_device_profile_group_requester_id(dp_group_id)) + arq['device_rp_uuid'] = rg_rp_map[requester_id][0] + return arqs + + def bind_arqs(self, bindings): + """Initiate Cyborg bindings. + + Handles RFC 6902-compliant JSON patching, sparing + calling Nova code from those details. + + :param bindings: + { "$arq_uuid": { + "hostname": STRING + "device_rp_uuid": UUID + "instance_uuid": UUID + }, + ... + } + :returns: nothing + :raises: AcceleratorRequestOpFailed + """ + LOG.info('Binding ARQs.') + # Create a JSON patch in RFC 6902 format + patch_list = {} + for arq_uuid, binding in bindings.items(): + patch = [{"path": "/" + field, + "op": "add", + "value": value + } for field, value in binding.items()] + patch_list[arq_uuid] = patch + + resp, err_msg = self._call_cyborg(self._client.patch, + self.ARQ_URL, json=patch_list) + if err_msg: + msg = _(' Binding failed for ARQ UUIDs: ') + err_msg = err_msg + msg + ','.join(bindings.keys()) + raise exception.AcceleratorRequestOpFailed( + op=_('bind'), msg=err_msg) + + def get_arqs_for_instance(self, instance_uuid, only_resolved=False): + """Get ARQs for the instance. + + :param instance_uuid: Instance UUID + :param only_resolved: flag to return only resolved ARQs + :returns: List of ARQs for the instance: + if only_resolved: only those ARQs which have completed binding + else: all ARQs + The format of the returned data structure is as below: + [ + {'uuid': $arq_uuid, + 'device_profile_name': $dp_name, + 'device_profile_group_id': $dp_request_group_index, + 'state': 'Bound', + 'device_rp_uuid': $resource_provider_uuid, + 'hostname': $host_nodename, + 'instance_uuid': $instance_uuid, + 'attach_handle_info': { # PCI bdf + 'bus': '0c', 'device': '0', + 'domain': '0000', 'function': '0'}, + 'attach_handle_type': 'PCI' + # or 'TEST_PCI' for Cyborg fake driver + } + ] + :raises: AcceleratorRequestOpFailed + """ + query = {"instance": instance_uuid} + resp, err_msg = self._call_cyborg(self._client.get, + self.ARQ_URL, params=query) + + if err_msg: + err_msg = err_msg + _(' Instance %s') % instance_uuid + raise exception.AcceleratorRequestOpFailed( + op=_('get'), msg=err_msg) + + arqs = resp.json().get('arqs') + if not arqs: + err_msg = _('Cyborg returned no accelerator requests for ' + 'instance %s') % instance_uuid + raise exception.AcceleratorRequestOpFailed( + op=_('get'), msg=err_msg) + + if only_resolved: + arqs = [arq for arq in arqs if + arq['state'] in ['Bound', 'BindFailed', 'Deleting']] + return arqs + + def delete_arqs_for_instance(self, instance_uuid): + """Delete ARQs for instance, after unbinding if needed. 
+
+        :param instance_uuid: Instance UUID
+        :raises: AcceleratorRequestOpFailed
+        """
+        # Unbind and delete the ARQs
+        params = {"instance": instance_uuid}
+        resp, err_msg = self._call_cyborg(self._client.delete,
+            self.ARQ_URL, params=params)
+        if err_msg:
+            msg = err_msg + _(' Instance %s') % instance_uuid
+            raise exception.AcceleratorRequestOpFailed(
+                op=_('delete'), msg=msg)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/metadata/handler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/metadata/handler.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/metadata/handler.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/metadata/handler.py 2020-04-10 17:57:57.000000000 +0000
@@ -39,6 +39,12 @@
 CONF = nova.conf.CONF
 LOG = logging.getLogger(__name__)

+# 160 networks is large enough to satisfy most cases.
+# At around 182 networks, the Neutron server will break because the URL
+# length exceeds the maximum. This is left at 160 to allow room for
+# additional parameters when they are needed.
+MAX_QUERY_NETWORKS = 160
+
 class MetadataRequestHandler(wsgi.Application):
     """Serve metadata."""

@@ -219,11 +225,14 @@
         try:
             # Retrieve the instance data from the instance's port
-            ports = neutron.list_ports(
-                context,
-                fixed_ips='ip_address=' + instance_address,
-                network_id=md_networks,
-                fields=['device_id', 'tenant_id'])['ports']
+            ports = []
+            while md_networks:
+                ports.extend(neutron.list_ports(
+                    context,
+                    fixed_ips='ip_address=' + instance_address,
+                    network_id=md_networks[:MAX_QUERY_NETWORKS],
+                    fields=['device_id', 'tenant_id'])['ports'])
+                md_networks = md_networks[MAX_QUERY_NETWORKS:]
         except Exception as e:
             LOG.error('Failed to get instance id for metadata '
                       'request, provider %(provider)s '
@@ -239,10 +248,10 @@
         if len(ports) != 1:
             msg = _('Expected a single port matching provider %(pr)s '
-                    'and IP %(ip)s. Found %(count)d.' % {
+                    'and IP %(ip)s. Found %(count)d.') % {
                 'pr': provider_id,
                 'ip': instance_address,
-                'count': len(ports)})
+                'count': len(ports)}
             LOG.error(msg)
             raise webob.exc.HTTPBadRequest(explanation=msg)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/api_version_request.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/api_version_request.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/api_version_request.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/api_version_request.py 2020-04-10 17:57:57.000000000 +0000
@@ -209,6 +209,7 @@
            request body to
            ``POST /servers/{server_id}/os-volume_attachments`` and exposes
            this via the response from
+           ``POST /servers/{server_id}/os-volume_attachments``,
            ``GET /servers/{server_id}/os-volume_attachments`` and
            ``GET /servers/{server_id}/os-volume_attachments/{volume_id}``.
     * 2.80 - Adds support for optional query parameters ``user_id`` and
@@ -219,6 +220,20 @@
            ``GET /servers/{server_id}/migrations/{migration_id}``.
     * 2.81 - Adds support for image cache management by aggregate by adding
            ``POST /os-aggregates/{aggregate_id}/images``.
+    * 2.82 - Adds ``accelerator-request-bound`` event to
+           ``os-server-external-events`` API. This event is sent by Cyborg
+           to indicate completion of ARQ binding. The ARQs can be obtained
+           from Cyborg with ``GET /v2/accelerator_requests?instance={uuid}``.
+    * 2.83 - Allow more filter parameters for ``GET /servers/detail`` and
+           ``GET /servers`` for non-admin.
+    * 2.84 - Adds ``details`` field to instance action events.
+ * 2.85 - Add support for + ``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` + which supports specifying the ``delete_on_termination`` field in + the request body to change the attached volume's flag. + * 2.86 - Add support for validation of known extra specs to the + ``POST /flavors/{flavor_id}/os-extra_specs`` and + ``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs. """ # The minimum and maximum versions of the API supported @@ -226,8 +241,8 @@ # minimum version of the API supported. # Note(cyeoh): This only applies for the v2.1 API once microversions # support is fully merged. It does not affect the V2 API. -_MIN_API_VERSION = "2.1" -_MAX_API_VERSION = "2.81" +_MIN_API_VERSION = '2.1' +_MAX_API_VERSION = '2.86' DEFAULT_API_VERSION = _MIN_API_VERSION # Almost all proxy APIs which are related to network, images and baremetal diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/common.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/common.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/common.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/common.py 2020-04-10 17:57:57.000000000 +0000 @@ -33,6 +33,7 @@ from nova.i18n import _ from nova.network import constants from nova import objects +from nova.objects import service from nova import quota from nova import utils @@ -558,20 +559,15 @@ return api_version_request.is_supported(req, '2.72') -def supports_port_resource_request_during_move(req): - """Check to see if the requested API version is high enough for support - port resource request during move operation. - - NOTE: At the moment there is no such microversion that supports port - resource request during move. This function is added as a preparation for - that microversion (assuming there will be a new microversion, which is - yet to be decided). +def supports_port_resource_request_during_move(): + """Check to see if the global compute service version is high enough to + support port resource request during move operation. - :param req: The incoming API request - :returns: True if the requested API microversion is high enough for + :returns: True if the compute service version is high enough for port resource request move support, False otherwise. 
""" - return False + return service.get_minimum_version_all_cells( + nova_context.get_admin_context(), ['nova-compute']) >= 49 def instance_has_port_with_resource_request(instance_uuid, network_api): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/admin_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/admin_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/admin_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/admin_actions.py 2020-04-10 17:57:57.000000000 +0000 @@ -40,7 +40,7 @@ def _reset_network(self, req, id, body): """Permit admins to reset networking on a server.""" context = req.environ['nova.context'] - context.can(aa_policies.POLICY_ROOT % 'reset_network') + context.can(aa_policies.POLICY_ROOT % 'reset_network', target={}) instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.reset_network(context, instance) @@ -53,7 +53,7 @@ def _inject_network_info(self, req, id, body): """Permit admins to inject network info into a server.""" context = req.environ['nova.context'] - context.can(aa_policies.POLICY_ROOT % 'inject_network_info') + context.can(aa_policies.POLICY_ROOT % 'inject_network_info', target={}) instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.inject_network_info(context, instance) @@ -67,7 +67,7 @@ def _reset_state(self, req, id, body): """Permit admins to reset the state of a server.""" context = req.environ["nova.context"] - context.can(aa_policies.POLICY_ROOT % 'reset_state') + context.can(aa_policies.POLICY_ROOT % 'reset_state', target={}) # Identify the desired state from the body state = state_map[body["os-resetState"]["state"]] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/agents.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/agents.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/agents.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/agents.py 2020-04-10 17:57:57.000000000 +0000 @@ -52,7 +52,7 @@ def index(self, req): """Return a list of all agent builds. Filter by hypervisor.""" context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) + context.can(agents_policies.BASE_POLICY_NAME % 'list', target={}) hypervisor = None agents = [] if 'hypervisor' in req.GET: @@ -75,7 +75,7 @@ def update(self, req, id, body): """Update an existing agent build.""" context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) + context.can(agents_policies.BASE_POLICY_NAME % 'update', target={}) # TODO(oomichi): This parameter name "para" is different from the ones # of the other APIs. 
Most other names are resource names like "server" @@ -118,7 +118,7 @@ def delete(self, req, id): """Deletes an existing agent build.""" context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) + context.can(agents_policies.BASE_POLICY_NAME % 'delete', target={}) try: utils.validate_integer(id, 'id') @@ -140,7 +140,7 @@ def create(self, req, body): """Creates a new agent build.""" context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) + context.can(agents_policies.BASE_POLICY_NAME % 'create', target={}) agent = body['agent'] hypervisor = agent['hypervisor'] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/aggregates.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/aggregates.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/aggregates.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/aggregates.py 2020-04-10 17:57:57.000000000 +0000 @@ -32,6 +32,7 @@ from nova import exception from nova.i18n import _ from nova.policies import aggregates as aggr_policies +from nova import utils LOG = logging.getLogger(__name__) @@ -92,11 +93,17 @@ return agg - @wsgi.expected_errors(404) + @wsgi.expected_errors((400, 404)) def show(self, req, id): """Shows the details of an aggregate, hosts and metadata included.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'show') + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.get_aggregate(context, id) except exception.AggregateNotFound as e: @@ -115,6 +122,11 @@ updates['name'] = common.normalize_name(updates['name']) try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + + try: aggregate = self.api.update_aggregate(context, id, updates) except exception.AggregateNameExists as e: raise exc.HTTPConflict(explanation=e.format_message()) @@ -133,6 +145,12 @@ """Removes an aggregate by id.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'delete') + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: self.api.delete_aggregate(context, id) except exception.AggregateNotFound as e: @@ -143,7 +161,7 @@ # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('add_host') @validation.schema(aggregates.add_host) def _add_host(self, req, id, body): @@ -152,6 +170,12 @@ context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'add_host') + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.add_host_to_aggregate(context, id, host) except (exception.AggregateNotFound, @@ -166,7 +190,7 @@ # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. 
- @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('remove_host') @validation.schema(aggregates.remove_host) def _remove_host(self, req, id, body): @@ -175,6 +199,12 @@ context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'remove_host') + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.remove_host_from_aggregate(context, id, host) except (exception.AggregateNotFound, @@ -202,6 +232,11 @@ context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'set_metadata') + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + metadata = body["set_metadata"]["metadata"] try: aggregate = self.api.update_aggregate_metadata(context, @@ -245,6 +280,11 @@ context = _get_context(req) context.can(aggr_policies.NEW_POLICY_ROOT % 'images') + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + image_ids = [] for image_req in body.get('cache'): image_ids.append(image_req['id']) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/assisted_volume_snapshots.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/assisted_volume_snapshots.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/assisted_volume_snapshots.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/assisted_volume_snapshots.py 2020-04-10 17:57:57.000000000 +0000 @@ -40,7 +40,7 @@ def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] - context.can(avs_policies.POLICY_ROOT % 'create') + context.can(avs_policies.POLICY_ROOT % 'create', target={}) snapshot = body['snapshot'] create_info = snapshot['create_info'] @@ -70,7 +70,7 @@ def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] - context.can(avs_policies.POLICY_ROOT % 'delete') + context.can(avs_policies.POLICY_ROOT % 'delete', target={}) delete_metadata = {} delete_metadata.update(req.GET) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/attach_interfaces.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/attach_interfaces.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/attach_interfaces.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/attach_interfaces.py 2020-04-10 17:57:57.000000000 +0000 @@ -67,9 +67,10 @@ def index(self, req, server_id): """Returns the list of interface attachments for a given instance.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - instance = common.get_instance(self.compute_api, context, server_id) + context.can(ai_policies.POLICY_ROOT % 'list', + target={'project_id': instance.project_id}) + search_opts = {'device_id': instance.uuid} try: @@ -108,13 +109,11 @@ def show(self, req, server_id, id): """Return data about the given interface attachment.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, server_id) + context.can(ai_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) port_id = id - # NOTE(mriedem): We need to verify the instance 
actually exists from - # the server_id even though we're not using the instance for anything, - # just the port id. - common.get_instance(self.compute_api, context, server_id) try: port_info = self.network_api.show_port(context, port_id) @@ -139,8 +138,10 @@ def create(self, req, server_id, body): """Attach an interface to an instance.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - context.can(ai_policies.POLICY_ROOT % 'create') + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(ai_policies.POLICY_ROOT % 'create', + target={'project_id': instance.project_id}) network_id = None port_id = None @@ -163,7 +164,6 @@ msg = _("Must input network_id when request IP address") raise exc.HTTPBadRequest(explanation=msg) - instance = common.get_instance(self.compute_api, context, server_id) try: vif = self.compute_api.attach_interface(context, instance, network_id, port_id, req_ip, tag=tag) @@ -199,12 +199,14 @@ def delete(self, req, server_id, id): """Detach an interface from an instance.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - context.can(ai_policies.POLICY_ROOT % 'delete') - port_id = id instance = common.get_instance(self.compute_api, context, server_id, expected_attrs=['device_metadata']) + + context.can(ai_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) + port_id = id + try: self.compute_api.detach_interface(context, instance, port_id=port_id) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/availability_zone.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/availability_zone.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/availability_zone.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/availability_zone.py 2020-04-10 17:57:57.000000000 +0000 @@ -44,9 +44,9 @@ def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = \ + available_zones, not_available_zones = ( availability_zones.get_availability_zones( - ctxt, self.host_api) + ctxt, self.host_api)) filtered_available_zones = \ self._get_filtered_availability_zones(available_zones, True) @@ -58,18 +58,16 @@ def _describe_availability_zones_verbose(self, context, **kwargs): ctxt = context.elevated() - # Available services - enabled_services = self.host_api.service_get_all( - context, {'disabled': False}, set_zones=True, all_cells=True) - + services = self.host_api.service_get_all( + context, set_zones=True, all_cells=True) available_zones, not_available_zones = ( availability_zones.get_availability_zones( - ctxt, self.host_api, enabled_services=enabled_services)) + ctxt, self.host_api, services=services)) zone_hosts = {} host_services = {} api_services = ('nova-osapi_compute', 'nova-metadata') - for service in enabled_services: + for service in filter(lambda x: not x.disabled, services): if service.binary in api_services: # Skip API services in the listing since they are not # maintained in the same way as other services @@ -108,7 +106,7 @@ def index(self, req): """Returns a summary list of availability zone.""" context = req.environ['nova.context'] - context.can(az_policies.POLICY_ROOT % 'list') + context.can(az_policies.POLICY_ROOT % 'list', target={}) return self._describe_availability_zones(context) @@ -116,6 +114,6 @@ def detail(self, req): """Returns a detailed list of availability 
zone.""" context = req.environ['nova.context'] - context.can(az_policies.POLICY_ROOT % 'detail') + context.can(az_policies.POLICY_ROOT % 'detail', target={}) return self._describe_availability_zones_verbose(context) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/console_auth_tokens.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/console_auth_tokens.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/console_auth_tokens.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/console_auth_tokens.py 2020-04-10 17:57:57.000000000 +0000 @@ -30,7 +30,7 @@ def _show(self, req, id, rdp_only): """Checks a console auth token and returns the related connect info.""" context = req.environ['nova.context'] - context.can(cat_policies.BASE_POLICY_NAME) + context.can(cat_policies.BASE_POLICY_NAME, target={}) token = id if not token: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/console_output.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/console_output.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/console_output.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/console_output.py 2020-04-10 17:57:57.000000000 +0000 @@ -38,9 +38,10 @@ def get_console_output(self, req, id, body): """Get text console output.""" context = req.environ['nova.context'] - context.can(co_policies.BASE_POLICY_NAME) - instance = common.get_instance(self.compute_api, context, id) + context.can(co_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) + length = body['os-getConsoleOutput'].get('length') # TODO(cyeoh): In a future API update accept a length of -1 # as meaning unlimited length (convert to None) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/create_backup.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/create_backup.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/create_backup.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/create_backup.py 2020-04-10 17:57:57.000000000 +0000 @@ -47,7 +47,9 @@ """ context = req.environ["nova.context"] - context.can(cb_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(cb_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) entity = body["createBackup"] image_name = common.normalize_name(entity["name"]) @@ -63,8 +65,6 @@ common.check_img_metadata_properties_quota(context, metadata) props.update(metadata) - instance = common.get_instance(self.compute_api, context, id) - try: image = self.compute_api.backup(context, instance, image_name, backup_type, rotation, extra_properties=props) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/deferred_delete.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/deferred_delete.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/deferred_delete.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/deferred_delete.py 2020-04-10 17:57:57.000000000 +0000 @@ -35,8 +35,9 @@ def _restore(self, req, id, body): """Restore a previously deleted instance.""" context = req.environ["nova.context"] - context.can(dd_policies.BASE_POLICY_NAME) instance = 
common.get_instance(self.compute_api, context, id) + context.can(dd_policies.BASE_POLICY_NAME % 'restore', + target={'project_id': instance.project_id}) try: self.compute_api.restore(context, instance) except exception.QuotaError as error: @@ -52,7 +53,7 @@ """Force delete of instance before deferred cleanup.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) - context.can(dd_policies.BASE_POLICY_NAME, + context.can(dd_policies.BASE_POLICY_NAME % 'force', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/evacuate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/evacuate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/evacuate.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/evacuate.py 2020-04-10 17:57:57.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. +from oslo_log import log as logging from oslo_utils import strutils from webob import exc @@ -31,6 +32,8 @@ CONF = nova.conf.CONF +LOG = logging.getLogger(__name__) + class EvacuateController(wsgi.Controller): def __init__(self): @@ -71,7 +74,7 @@ # TODO(eliqiao): Should be responding here with 202 Accept # because evacuate is an async call, but keep to 200 for # backwards compatibility reasons. - @wsgi.expected_errors((400, 404, 409)) + @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('evacuate') @validation.schema(evacuate.evacuate, "2.0", "2.13") @validation.schema(evacuate.evacuate_v214, "2.14", "2.28") @@ -117,6 +120,22 @@ msg = _("The target host can't be the same one.") raise exc.HTTPBadRequest(explanation=msg) + # We could potentially move this check to conductor and avoid the + # extra API call to neutron when we support move operations with ports + # having resource requests. + if (common.instance_has_port_with_resource_request( + instance.uuid, self.network_api) and not + common.supports_port_resource_request_during_move()): + LOG.warning("The evacuate action on a server with ports " + "having resource requests, like a port with a QoS " + "minimum bandwidth policy, is not supported until " + "every nova-compute is upgraded to Ussuri") + msg = _("The evacuate action on a server with ports having " + "resource requests, like a port with a QoS minimum " + "bandwidth policy, is not supported by this cluster right " + "now") + raise exc.HTTPBadRequest(explanation=msg) + try: self.compute_api.evacuate(context, instance, host, on_shared_storage, password, force) @@ -125,6 +144,8 @@ 'evacuate', id) except exception.ComputeServiceInUse as e: raise exc.HTTPBadRequest(explanation=e.format_message()) + except exception.ForbiddenWithAccelerators as e: + raise exc.HTTPForbidden(explanation=e.format_message()) if (not api_version_request.is_supported(req, min_version='2.14') and CONF.api.enable_instance_password): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/flavor_manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/flavor_manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/flavor_manage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/flavor_manage.py 2020-04-10 17:57:57.000000000 +0000 @@ -32,11 +32,11 @@ # 204 as this operation complete the deletion of aggregate resource and # return no response body. 
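A recurring shape in the policy hunks above (deferred delete, create backup, attach interfaces): the instance is now looked up *before* the policy check so the check can target the instance's own project, and a single policy constant fans out into per-action rules via ``%`` substitution. A sketch of just that mechanic; the rule string here is illustrative, the real constants live under ``nova/policies/``:

.. code:: python

   BASE_POLICY_NAME = 'os_compute_api:os-deferred-delete:%s'

   # One template yields independently grantable rules per action.
   assert BASE_POLICY_NAME % 'restore' == (
       'os_compute_api:os-deferred-delete:restore')
   assert BASE_POLICY_NAME % 'force' == (
       'os_compute_api:os-deferred-delete:force')

   # Passing the instance's project as the target is what lets oslo.policy
   # rules such as 'project_id:%(project_id)s' match, hence the reordering
   # of common.get_instance() ahead of context.can().
   target = {'project_id': 'project-uuid-of-the-fetched-instance'}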
@wsgi.response(202) - @wsgi.expected_errors((404)) + @wsgi.expected_errors(404) @wsgi.action("delete") def _delete(self, req, id): context = req.environ['nova.context'] - context.can(fm_policies.POLICY_ROOT % 'delete') + context.can(fm_policies.POLICY_ROOT % 'delete', target={}) flavor = objects.Flavor(context=context, flavorid=id) try: @@ -54,7 +54,7 @@ flavors_view.FLAVOR_DESCRIPTION_MICROVERSION) def _create(self, req, body): context = req.environ['nova.context'] - context.can(fm_policies.POLICY_ROOT % 'create') + context.can(fm_policies.POLICY_ROOT % 'create', target={}) vals = body['flavor'] @@ -108,7 +108,7 @@ def _update(self, req, id, body): # Validate the policy. context = req.environ['nova.context'] - context.can(fm_policies.POLICY_ROOT % 'update') + context.can(fm_policies.POLICY_ROOT % 'update', target={}) # Get the flavor and update the description. try: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/flavors_extraspecs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/flavors_extraspecs.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/flavors_extraspecs.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/flavors_extraspecs.py 2020-04-10 17:57:57.000000000 +0000 @@ -16,10 +16,12 @@ import six import webob +from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import flavors_extraspecs from nova.api.openstack import wsgi from nova.api import validation +from nova.api.validation.extra_specs import validators from nova import exception from nova.i18n import _ from nova.policies import flavor_extra_specs as fes_policies @@ -28,22 +30,31 @@ class FlavorExtraSpecsController(wsgi.Controller): """The flavor extra specs API controller for the OpenStack API.""" + def _get_extra_specs(self, context, flavor_id): flavor = common.get_flavor(context, flavor_id) return dict(extra_specs=flavor.extra_specs) - # NOTE(gmann): Max length for numeric value is being checked - # explicitly as json schema cannot have max length check for numeric value - def _check_extra_specs_value(self, specs): - for value in specs.values(): - try: - if isinstance(value, (six.integer_types, float)): - value = six.text_type(value) + def _check_extra_specs_value(self, req, specs): + validation_supported = api_version_request.is_supported( + req, min_version='2.86', + ) + + for name, value in specs.items(): + # NOTE(gmann): Max length for numeric value is being checked + # explicitly as json schema cannot have max length check for + # numeric value + if isinstance(value, (six.integer_types, float)): + value = six.text_type(value) + try: utils.check_string_length(value, 'extra_specs value', max_length=255) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest( - explanation=error.format_message()) + except exception.InvalidInput as error: + raise webob.exc.HTTPBadRequest( + explanation=error.format_message()) + + if validation_supported: + validators.validate(name, value) @wsgi.expected_errors(404) def index(self, req, flavor_id): @@ -62,7 +73,7 @@ context.can(fes_policies.POLICY_ROOT % 'create') specs = body['extra_specs'] - self._check_extra_specs_value(specs) + self._check_extra_specs_value(req, specs) flavor = common.get_flavor(context, flavor_id) try: flavor.extra_specs = dict(flavor.extra_specs, **specs) @@ -79,7 +90,7 @@ context = req.environ['nova.context'] 
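The ``flavors_extraspecs`` hunks above thread the request into ``_check_extra_specs_value`` so that strict validation of recognized namespaces only runs when the request microversion allows it. A standalone sketch of that flow, where ``validate_known`` is a hypothetical stand-in for ``nova.api.validation.extra_specs.validators.validate``:

.. code:: python

   def check_extra_specs(specs, api_version, validate_known=None):
       validation_supported = api_version >= (2, 86)
       for name, value in specs.items():
           # Numeric values are stringified first because JSON schema
           # cannot apply a max-length check to numbers.
           if isinstance(value, (int, float)):
               value = str(value)
           if len(value) > 255:
               raise ValueError('extra_specs value too long: %s' % name)
           if validation_supported and validate_known is not None:
               validate_known(name, value)


   # Known namespaces are strictly validated only from 2.86 onwards;
   # requests at older microversions keep the lax behaviour.
   check_extra_specs({'hw:cpu_policy': 'dedicated'}, (2, 86))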
context.can(fes_policies.POLICY_ROOT % 'update') - self._check_extra_specs_value(body) + self._check_extra_specs_value(req, body) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/floating_ips.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/floating_ips.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/floating_ips.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/floating_ips.py 2020-04-10 17:57:57.000000000 +0000 @@ -72,9 +72,6 @@ self.network_api.disassociate_floating_ip(context, instance, address) except exception.Forbidden: raise webob.exc.HTTPForbidden() - except exception.CannotDisassociateAutoAssignedFloatingIP: - msg = _('Cannot disassociate auto assigned floating IP') - raise webob.exc.HTTPForbidden(explanation=msg) class FloatingIPController(wsgi.Controller): @@ -170,9 +167,6 @@ context, instance, floating_ip) except exception.Forbidden: raise webob.exc.HTTPForbidden() - except exception.CannotDisassociateAutoAssignedFloatingIP: - msg = _('Cannot disassociate auto assigned floating IP') - raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpNotFoundForAddress as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) @@ -288,11 +282,7 @@ # disassociate if associated if instance and floating_ip['port_id'] and instance.uuid == id: - try: - disassociate_floating_ip(self, context, instance, address) - except exception.FloatingIpNotAssociated: - msg = _('Floating IP is not associated') - raise webob.exc.HTTPBadRequest(explanation=msg) + disassociate_floating_ip(self, context, instance, address) return webob.Response(status_int=202) else: msg = _("Floating IP %(address)s is not associated with instance " diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/hypervisors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/hypervisors.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/hypervisors.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/hypervisors.py 2020-04-10 17:57:57.000000000 +0000 @@ -122,7 +122,6 @@ :param links: If True, return links in the response for paging. 
""" context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) # The 2.53 microversion moves the search and servers routes into # GET /os-hypervisors and GET /os-hypervisors/detail with query @@ -210,7 +209,7 @@ @wsgi.Controller.api_version("2.33", "2.52") # noqa @validation.query_schema(hyper_schema.list_query_schema_v233) - @wsgi.expected_errors((400)) + @wsgi.expected_errors(400) def index(self, req): limit, marker = common.get_limit_and_marker(req) return self._index(req, limit=limit, marker=marker, links=True) @@ -221,6 +220,8 @@ return self._index(req) def _index(self, req, limit=None, marker=None, links=False): + context = req.environ['nova.context'] + context.can(hv_policies.BASE_POLICY_NAME % 'list', target={}) return self._get_hypervisors(req, detail=False, limit=limit, marker=marker, links=links) @@ -251,6 +252,8 @@ return self._detail(req) def _detail(self, req, limit=None, marker=None, links=False): + context = req.environ['nova.context'] + context.can(hv_policies.BASE_POLICY_NAME % 'list-detail', target={}) return self._get_hypervisors(req, detail=True, limit=limit, marker=marker, links=links) @@ -302,7 +305,7 @@ def _show(self, req, id, with_servers=False): context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'show', target={}) self._validate_id(req, id) @@ -324,7 +327,7 @@ @wsgi.expected_errors((400, 404, 501)) def uptime(self, req, id): context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'uptime', target={}) self._validate_id(req, id) @@ -362,7 +365,7 @@ index and detail methods. """ context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'search', target={}) hypervisors = self._get_compute_nodes_by_name_pattern(context, id) try: return dict(hypervisors=[ @@ -386,7 +389,7 @@ GET /os-hypervisors index and detail methods. 
""" context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'servers', target={}) compute_nodes = self._get_compute_nodes_by_name_pattern(context, id) hypervisors = [] for compute_node in compute_nodes: @@ -405,6 +408,6 @@ @wsgi.expected_errors(()) def statistics(self, req): context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'statistics', target={}) stats = self.host_api.compute_node_statistics(context) return dict(hypervisor_statistics=stats) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/image_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/image_metadata.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/image_metadata.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/image_metadata.py 2020-04-10 17:57:57.000000000 +0000 @@ -24,7 +24,7 @@ from nova.api import validation from nova import exception from nova.i18n import _ -import nova.image +from nova.image import glance class ImageMetadataController(wsgi.Controller): @@ -32,7 +32,7 @@ def __init__(self): super(ImageMetadataController, self).__init__() - self.image_api = nova.image.API() + self.image_api = glance.API() def _get_image(self, context, image_id): try: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/images.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/images.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/images.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/images.py 2020-04-10 17:57:57.000000000 +0000 @@ -22,8 +22,7 @@ from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ -import nova.image -import nova.utils +from nova.image import glance SUPPORTED_FILTERS = { @@ -44,7 +43,7 @@ def __init__(self): super(ImagesController, self).__init__() - self._image_api = nova.image.API() + self._image_api = glance.API() def _get_filters(self, req): """Return a dictionary of query param filters from the request. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/instance_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/instance_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/instance_actions.py 2020-04-10 17:57:57.000000000 +0000 @@ -53,8 +53,9 @@ action[key] = action_raw.get(key) return action - def _format_event(self, event_raw, project_id, show_traceback=False, - show_host=False, show_hostid=False): + @staticmethod + def _format_event(event_raw, project_id, show_traceback=False, + show_host=False, show_hostid=False, show_details=False): event = {} for key in EVENT_KEYS: # By default, non-admins are not allowed to see traceback details. 
@@ -67,6 +68,8 @@ if show_hostid: event['hostId'] = utils.generate_hostid(event_raw['host'], project_id) + if show_details: + event['details'] = event_raw['details'] return event @wsgi.Controller.api_version("2.1", "2.20") @@ -84,7 +87,8 @@ """Returns the list of actions recorded for a given instance.""" context = req.environ["nova.context"] instance = self._get_instance(req, context, server_id) - context.can(ia_policies.BASE_POLICY_NAME, instance) + context.can(ia_policies.BASE_POLICY_NAME % 'list', + target={'project_id': instance.project_id}) actions_raw = self.action_api.actions_get(context, instance) actions = [self._format_action(action, ACTION_KEYS) for action in actions_raw] @@ -100,7 +104,8 @@ """Returns the list of actions recorded for a given instance.""" context = req.environ["nova.context"] instance = self._get_instance(req, context, server_id) - context.can(ia_policies.BASE_POLICY_NAME, instance) + context.can(ia_policies.BASE_POLICY_NAME % 'list', + target={'project_id': instance.project_id}) search_opts = {} search_opts.update(req.GET) if 'changes-since' in search_opts: @@ -138,7 +143,8 @@ """Return data about the given instance action.""" context = req.environ['nova.context'] instance = self._get_instance(req, context, server_id) - context.can(ia_policies.BASE_POLICY_NAME, instance) + context.can(ia_policies.BASE_POLICY_NAME % 'show', + target={'project_id': instance.project_id}) action = self.action_api.action_get_by_request_id(context, instance, id) if action is None: @@ -158,7 +164,9 @@ show_events = False show_traceback = False show_host = False - if context.can(ia_policies.POLICY_ROOT % 'events', fatal=False): + if context.can(ia_policies.BASE_POLICY_NAME % 'events', + target={'project_id': instance.project_id}, + fatal=False): # For all microversions, the user can see all event details # including the traceback. show_events = show_traceback = True @@ -173,6 +181,15 @@ show_hostid = api_version_request.is_supported(req, '2.62') if show_events: + # NOTE(brinzhang): Event details are shown since microversion + # 2.84. 
+ show_details = False + support_v284 = api_version_request.is_supported(req, '2.84') + if support_v284: + show_details = context.can( + ia_policies.BASE_POLICY_NAME % 'events:details', + target={'project_id': instance.project_id}, fatal=False) + events_raw = self.action_api.action_events_get(context, instance, action_id) # NOTE(takashin): The project IDs of instance action events @@ -184,6 +201,7 @@ action['events'] = [self._format_event( evt, action['project_id'] or instance.project_id, show_traceback=show_traceback, - show_host=show_host, show_hostid=show_hostid + show_host=show_host, show_hostid=show_hostid, + show_details=show_details ) for evt in events_raw] return {'instanceAction': action} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/instance_usage_audit_log.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/instance_usage_audit_log.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/instance_usage_audit_log.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/instance_usage_audit_log.py 2020-04-10 17:57:57.000000000 +0000 @@ -35,14 +35,14 @@ @wsgi.expected_errors(()) def index(self, req): context = req.environ['nova.context'] - context.can(iual_policies.BASE_POLICY_NAME) + context.can(iual_policies.BASE_POLICY_NAME % 'list', target={}) task_log = self._get_audit_task_logs(context) return {'instance_usage_audit_logs': task_log} @wsgi.expected_errors(400) def show(self, req, id): context = req.environ['nova.context'] - context.can(iual_policies.BASE_POLICY_NAME) + context.can(iual_policies.BASE_POLICY_NAME % 'show', target={}) try: if '.' in id: before_date = datetime.datetime.strptime(str(id), diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/ips.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/ips.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/ips.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/ips.py 2020-04-10 17:57:57.000000000 +0000 @@ -38,16 +38,18 @@ @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(ips_policies.POLICY_ROOT % 'index') instance = common.get_instance(self._compute_api, context, server_id) + context.can(ips_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) return self._view_builder.index(networks) @wsgi.expected_errors(404) def show(self, req, server_id, id): context = req.environ["nova.context"] - context.can(ips_policies.POLICY_ROOT % 'show') instance = common.get_instance(self._compute_api, context, server_id) + context.can(ips_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) if id not in networks: msg = _("Instance is not a member of specified network") diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/limits.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/limits.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/limits.py 2020-04-10 17:57:57.000000000 +0000 @@ -74,8 +74,13 @@ def _index(self, req, filtered_limits=None, max_image_meta=True): """Return all global limit 
information.""" context = req.environ['nova.context'] - context.can(limits_policies.BASE_POLICY_NAME) - project_id = req.params.get('tenant_id', context.project_id) + context.can(limits_policies.BASE_POLICY_NAME, target={}) + project_id = context.project_id + if 'tenant_id' in req.GET: + project_id = req.GET.get('tenant_id') + context.can(limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME, + target={'project_id': project_id}) + quotas = QUOTAS.get_project_quotas(context, project_id, usages=True) builder = limits_views.ViewBuilder() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/lock_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/lock_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/lock_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/lock_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -50,10 +50,11 @@ def _unlock(self, req, id, body): """Unlock a server instance.""" context = req.environ['nova.context'] - context.can(ls_policies.POLICY_ROOT % 'unlock') instance = common.get_instance(self.compute_api, context, id) + context.can(ls_policies.POLICY_ROOT % 'unlock', + target={'project_id': instance.project_id}) if not self.compute_api.is_expected_locked_by(context, instance): context.can(ls_policies.POLICY_ROOT % 'unlock:unlock_override', - instance) + target={'project_id': instance.project_id}) self.compute_api.unlock(context, instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/migrate_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/migrate_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/migrate_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/migrate_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -48,16 +48,17 @@ def _migrate(self, req, id, body): """Permit admins to migrate a server to a new host.""" context = req.environ['nova.context'] - context.can(ms_policies.POLICY_ROOT % 'migrate') + + instance = common.get_instance(self.compute_api, context, id, + expected_attrs=['flavor', 'services']) + context.can(ms_policies.POLICY_ROOT % 'migrate', + target={'project_id': instance.project_id}) host_name = None if (api_version_request.is_supported(req, min_version='2.56') and body['migrate'] is not None): host_name = body['migrate'].get('host') - instance = common.get_instance(self.compute_api, context, id, - expected_attrs=['flavor', 'services']) - if common.instance_has_port_with_resource_request( instance.uuid, self.network_api): # TODO(gibi): Remove when nova only supports compute newer than @@ -74,7 +75,8 @@ try: self.compute_api.resize(req.environ['nova.context'], instance, host_name=host_name) - except (exception.TooManyInstances, exception.QuotaError) as e: + except (exception.TooManyInstances, exception.QuotaError, + exception.ForbiddenWithAccelerators) as e: raise exc.HTTPForbidden(explanation=e.format_message()) except (exception.InstanceIsLocked, exception.InstanceNotReady, @@ -90,7 +92,7 @@ raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(202) - @wsgi.expected_errors((400, 404, 409)) + @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('os-migrateLive') @validation.schema(migrate_server.migrate_live, "2.0", "2.24") @validation.schema(migrate_server.migrate_live_v2_25, "2.25", "2.29") @@ -99,7 +101,15 @@ def _migrate_live(self, req, id, 
body): """Permit admins to (live) migrate a server to a new host.""" context = req.environ["nova.context"] - context.can(ms_policies.POLICY_ROOT % 'migrate_live') + + # NOTE(stephenfin): we need 'numa_topology' because of the + # 'LiveMigrationTask._check_instance_has_no_numa' check in the + # conductor + instance = common.get_instance(self.compute_api, context, id, + expected_attrs=['numa_topology']) + + context.can(ms_policies.POLICY_ROOT % 'migrate_live', + target={'project_id': instance.project_id}) host = body["os-migrateLive"]["host"] block_migration = body["os-migrateLive"]["block_migration"] @@ -122,11 +132,21 @@ disk_over_commit = strutils.bool_from_string(disk_over_commit, strict=True) - # NOTE(stephenfin): we need 'numa_topology' because of the - # 'LiveMigrationTask._check_instance_has_no_numa' check in the - # conductor - instance = common.get_instance(self.compute_api, context, id, - expected_attrs=['numa_topology']) + # We could potentially move this check to conductor and avoid the + # extra API call to neutron when we support move operations with ports + # having resource requests. + if (common.instance_has_port_with_resource_request( + instance.uuid, self.network_api) and not + common.supports_port_resource_request_during_move()): + LOG.warning("The os-migrateLive action on a server with ports " + "having resource requests, like a port with a QoS " + "minimum bandwidth policy, is not supported until " + "every nova-compute is upgraded to Ussuri") + msg = _("The os-migrateLive action on a server with ports having " + "resource requests, like a port with a QoS minimum " + "bandwidth policy, is not supported by this cluster right " + "now") + raise exc.HTTPBadRequest(explanation=msg) try: self.compute_api.live_migrate(context, instance, block_migration, @@ -158,6 +178,8 @@ except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'os-migrateLive', id) + except exception.ForbiddenWithAccelerators as e: + raise exc.HTTPForbidden(explanation=e.format_message()) def _get_force_param_for_live_migration(self, body, host): force = body["os-migrateLive"].get("force", False) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/migrations.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/migrations.py 2020-04-10 17:57:57.000000000 +0000 @@ -86,7 +86,7 @@ sort_dirs=None, sort_keys=None, limit=None, marker=None, allow_changes_since=False, allow_changes_before=False): context = req.environ['nova.context'] - context.can(migrations_policies.POLICY_ROOT % 'index') + context.can(migrations_policies.POLICY_ROOT % 'index', target={}) search_opts = {} search_opts.update(req.GET) if 'changes-since' in search_opts: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/multinic.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/multinic.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/multinic.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/multinic.py 2020-04-10 17:57:57.000000000 +0000 @@ -65,5 +65,5 @@ try: self.compute_api.remove_fixed_ip(context, instance, address) - except exception.FixedIpNotFoundForSpecificInstance as e: + except 
exception.FixedIpNotFoundForInstance as e: raise exc.HTTPBadRequest(explanation=e.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/pause_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/pause_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/pause_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/pause_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -55,8 +55,9 @@ def _unpause(self, req, id, body): """Permit Admins to unpause the server.""" ctxt = req.environ['nova.context'] - ctxt.can(ps_policies.POLICY_ROOT % 'unpause') server = common.get_instance(self.compute_api, ctxt, id) + ctxt.can(ps_policies.POLICY_ROOT % 'unpause', + target={'project_id': server.project_id}) try: self.compute_api.unpause(ctxt, server) except exception.InstanceIsLocked as e: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/remote_consoles.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/remote_consoles.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/remote_consoles.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/remote_consoles.py 2020-04-10 17:57:57.000000000 +0000 @@ -155,8 +155,9 @@ @validation.schema(remote_consoles.create_v28, "2.8") def create(self, req, server_id, body): context = req.environ['nova.context'] - context.can(rc_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(rc_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) protocol = body['remote_console']['protocol'] console_type = body['remote_console']['type'] try: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/rescue.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/rescue.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/rescue.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/rescue.py 2020-04-10 17:57:57.000000000 +0000 @@ -83,8 +83,9 @@ def _unrescue(self, req, id, body): """Unrescue an instance.""" context = req.environ["nova.context"] - context.can(rescue_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, id) + context.can(rescue_policies.UNRESCUE_POLICY_NAME, + target={'project_id': instance.project_id}) try: self.compute_api.unrescue(context, instance) except exception.InstanceIsLocked as e: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/rest_api_version_history.rst nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/rest_api_version_history.rst --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/rest_api_version_history.rst 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/rest_api_version_history.rst 2020-04-10 17:57:57.000000000 +0000 @@ -1047,7 +1047,8 @@ field in the request body when attaching a volume to a server, to support configuring whether to delete the data volume when the server is destroyed. Also, ``delete_on_termination`` is added to the GET responses when showing -attached volumes. +attached volumes, and the ``delete_on_termination`` field is contained +in the POST API response body when attaching a volume. 
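For illustration, a response body of that shape at microversion 2.79 or later might look as follows; the IDs are placeholders, and the point is only the presence of ``delete_on_termination``:

.. code:: python

   attach_response = {
       'volumeAttachment': {
           'device': '/dev/sdb',
           'serverId': '7ebed2ce-85b3-40b5-84ae-8cc725c37ed2',
           'volumeId': 'be066ded-5c50-47de-8f38-93c2d9fd7967',
           'id': 'be066ded-5c50-47de-8f38-93c2d9fd7967',
           'delete_on_termination': True,
       }
   }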
The affected APIs are as follows: @@ -1079,3 +1080,57 @@ Adds support for image cache management by aggregate by adding ``POST /os-aggregates/{aggregate_id}/images``. + +2.82 +---- + +Adds ``accelerator-request-bound`` event to ``os-server-external-events`` +API. This event is sent by Cyborg to indicate completion of the binding +event for one accelerator request (ARQ) associated with an instance. + +2.83 +---- + +Allows the following filter parameters for ``GET /servers/detail`` +and ``GET /servers`` for non-admin users: + +* ``availability_zone`` +* ``config_drive`` +* ``key_name`` +* ``created_at`` +* ``launched_at`` +* ``terminated_at`` +* ``power_state`` +* ``task_state`` +* ``vm_state`` +* ``progress`` +* ``user_id`` + +2.84 +---- + +The ``GET /servers/{server_id}/os-instance-actions/{request_id}`` API returns +a ``details`` parameter for each failed event with a fault message, similar to +the server ``fault.message`` parameter in ``GET /servers/{server_id}`` for a +server with status ``ERROR``. + +2.85 +---- + +Adds the ability to specify ``delete_on_termination`` in the +``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` API, which +allows changing the behavior of volume deletion on instance deletion. + +2.86 +---- + +Adds support for validation of known extra specs. This is enabled by default +for the following APIs: + +* ``POST /flavors/{flavor_id}/os-extra_specs`` +* ``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` + +Validation is only used for recognized extra spec namespaces, currently: ``accel``, ``aggregate_instance_extra_specs``, ``capabilities``, ``hw``, ``hw_rng``, ``hw_video``, ``os``, ``pci_passthrough``, ``powervm``, ``quota``, ``resources``, ``trait``, and ``vmware``. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/schemas/server_external_events.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/schemas/server_external_events.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/schemas/server_external_events.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/schemas/server_external_events.py 2020-04-10 17:57:57.000000000 +0000 @@ -59,3 +59,7 @@ create_v276 = copy.deepcopy(create_v251) name = create_v276['properties']['events']['items']['properties']['name'] name['enum'].append('power-update') + +create_v282 = copy.deepcopy(create_v276) +name = create_v282['properties']['events']['items']['properties']['name'] +name['enum'].append('accelerator-request-bound') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/schemas/volumes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/schemas/volumes.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/schemas/volumes.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/schemas/volumes.py 2020-04-10 17:57:57.000000000 +0000 @@ -74,7 +74,7 @@ # NOTE: The validation pattern from match_device() in # nova/block_device.py.
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' - } + }, }, 'required': ['volumeId'], 'additionalProperties': False, @@ -95,6 +95,36 @@ del update_volume_attachment['properties']['volumeAttachment'][ 'properties']['device'] +# NOTE(brinzhang): Allow attachment_id, serverId, device, tag, and +# delete_on_termination (i.e., follow the content of the GET response) +# to be specified for RESTfulness, even though we will not allow updating +# all of them. +update_volume_attachment_v285 = { + 'type': 'object', + 'properties': { + 'volumeAttachment': { + 'type': 'object', + 'properties': { + 'volumeId': parameter_types.volume_id, + 'device': { + 'type': ['string', 'null'], + # NOTE: The validation pattern from match_device() in + # nova/block_device.py. + 'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' + }, + 'tag': parameter_types.tag, + 'delete_on_termination': parameter_types.boolean, + 'serverId': parameter_types.server_id, + 'id': parameter_types.attachment_id + }, + 'required': ['volumeId'], + 'additionalProperties': False, + }, + }, + 'required': ['volumeAttachment'], + 'additionalProperties': False, +} + index_query = { 'type': 'object', 'properties': { diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/security_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/security_groups.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/security_groups.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/security_groups.py 2020-04-10 17:57:57.000000000 +0000 @@ -159,8 +159,7 @@ try: id = security_group_api.validate_id(id) - security_group = security_group_api.get( - context, None, id, map_exception=True) + security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: @@ -178,8 +177,7 @@ try: id = security_group_api.validate_id(id) - security_group = security_group_api.get( - context, None, id, map_exception=True) + security_group = security_group_api.get(context, id) security_group_api.destroy(context, security_group) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) @@ -222,7 +220,7 @@ try: security_group_api.validate_property(group_name, 'name', None) security_group_api.validate_property(group_description, - 'description', None) + 'description', None) group_ref = security_group_api.create_security_group( context, group_name, group_description) except exception.Invalid as exp: @@ -241,8 +239,7 @@ try: id = security_group_api.validate_id(id) - security_group = security_group_api.get( - context, None, id, map_exception=True) + security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: @@ -283,7 +280,7 @@ parent_group_id = security_group_api.validate_id( sg_rule.get('parent_group_id')) security_group = security_group_api.get( - context, None, parent_group_id, map_exception=True) + context, parent_group_id) if group_id is not None: group_id = security_group_api.validate_id(group_id) @@ -354,8 +351,7 @@ id = security_group_api.validate_id(id) rule = security_group_api.get_rule(context, id) group_id = rule['parent_group_id'] - security_group = security_group_api.get( - context, None, group_id, map_exception=True) + security_group = 
security_group_api.get(context, group_id) security_group_api.remove_rules( context, security_group, [rule['id']]) except exception.SecurityGroupNotFound as exp: @@ -371,9 +367,10 @@ @wsgi.expected_errors(404) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" - context = _authorize_context(req) - + context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) + context.can(sg_policies.POLICY_NAME % 'list', + target={'project_id': instance.project_id}) try: groups = security_group_api.get_instance_security_groups( context, instance, True) @@ -419,28 +416,25 @@ return group_name - def _invoke(self, method, context, id, group_name): - instance = common.get_instance(self.compute_api, context, id) - method(context, instance, group_name) - @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] - context.can(sg_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(sg_policies.POLICY_NAME % 'add', + target={'project_id': instance.project_id}) group_name = self._parse(body, 'addSecurityGroup') try: - return self._invoke(security_group_api.add_to_instance, - context, id, group_name) + return security_group_api.add_to_instance(context, instance, + group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) - except (exception.SecurityGroupCannotBeApplied, - exception.SecurityGroupExistsForInstance) as exp: + except exception.SecurityGroupCannotBeApplied as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @wsgi.expected_errors((400, 404, 409)) @@ -448,17 +442,17 @@ @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] - context.can(sg_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(sg_policies.POLICY_NAME % 'remove', + target={'project_id': instance.project_id}) group_name = self._parse(body, 'removeSecurityGroup') try: - return self._invoke(security_group_api.remove_from_instance, - context, id, group_name) + return security_group_api.remove_from_instance(context, instance, + group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) - except exception.SecurityGroupNotExistsForInstance as exp: - raise exc.HTTPBadRequest(explanation=exp.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_diagnostics.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_diagnostics.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_diagnostics.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_diagnostics.py 2020-04-10 17:57:57.000000000 +0000 @@ -34,9 +34,9 @@ @wsgi.expected_errors((400, 404, 409, 501)) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(sd_policies.BASE_POLICY_NAME) - instance = common.get_instance(self.compute_api, context, server_id) + 
context.can(sd_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) try: if api_version_request.is_supported(req, min_version='2.48'): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_external_events.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_external_events.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_external_events.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_external_events.py 2020-04-10 17:57:57.000000000 +0000 @@ -26,7 +26,8 @@ LOG = logging.getLogger(__name__) -TAG_REQUIRED = ('volume-extended', 'power-update') +TAG_REQUIRED = ('volume-extended', 'power-update', + 'accelerator-request-bound') class ServerExternalEventsController(wsgi.Controller): @@ -67,7 +68,8 @@ @wsgi.response(200) @validation.schema(server_external_events.create, '2.0', '2.50') @validation.schema(server_external_events.create_v251, '2.51', '2.75') - @validation.schema(server_external_events.create_v276, '2.76') + @validation.schema(server_external_events.create_v276, '2.76', '2.81') + @validation.schema(server_external_events.create_v282, '2.82') def create(self, req, body): """Creates a new instance event.""" context = req.environ['nova.context'] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_metadata.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_metadata.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_metadata.py 2020-04-10 17:57:57.000000000 +0000 @@ -33,8 +33,7 @@ super(ServerMetadataController, self).__init__() self.compute_api = compute.API() - def _get_metadata(self, context, server_id): - server = common.get_instance(self.compute_api, context, server_id) + def _get_metadata(self, context, server): try: # NOTE(mikal): get_instance_metadata sometimes returns # InstanceNotFound in unit tests, even though the instance is @@ -52,8 +51,10 @@ def index(self, req, server_id): """Returns the list of metadata for a given instance.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'index') - return {'metadata': self._get_metadata(context, server_id)} + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'index', + target={'project_id': server.project_id}) + return {'metadata': self._get_metadata(context, server)} @wsgi.expected_errors((403, 404, 409)) # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 @@ -62,9 +63,11 @@ def create(self, req, server_id, body): metadata = body['metadata'] context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'create') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'create', + target={'project_id': server.project_id}) new_metadata = self._update_instance_metadata(context, - server_id, + server, metadata, delete=False) @@ -74,14 +77,16 @@ @validation.schema(server_metadata.update) def update(self, req, server_id, id, body): context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'update') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'update', + target={'project_id': server.project_id}) meta_item 
= body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) self._update_instance_metadata(context, - server_id, + server, meta_item, delete=False) @@ -91,18 +96,19 @@ @validation.schema(server_metadata.update_all) def update_all(self, req, server_id, body): context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'update_all') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'update_all', + target={'project_id': server.project_id}) metadata = body['metadata'] new_metadata = self._update_instance_metadata(context, - server_id, + server, metadata, delete=True) return {'metadata': new_metadata} - def _update_instance_metadata(self, context, server_id, metadata, + def _update_instance_metadata(self, context, server, metadata, delete=False): - server = common.get_instance(self.compute_api, context, server_id) try: return self.compute_api.update_instance_metadata(context, server, @@ -114,14 +120,16 @@ raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, - 'update metadata', server_id) + 'update metadata', server.uuid) @wsgi.expected_errors(404) def show(self, req, server_id, id): """Return a single metadata item.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'show') - data = self._get_metadata(context, server_id) + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'show', + target={'project_id': server.project_id}) + data = self._get_metadata(context, server) try: return {'meta': {id: data[id]}} @@ -134,14 +142,15 @@ def delete(self, req, server_id, id): """Deletes an existing metadata.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'delete') - metadata = self._get_metadata(context, server_id) + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'delete', + target={'project_id': server.project_id}) + metadata = self._get_metadata(context, server) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) - server = common.get_instance(self.compute_api, context, server_id) try: self.compute_api.delete_instance_metadata(context, server, id) except exception.InstanceIsLocked as e: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_migrations.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_migrations.py 2020-04-10 17:57:57.000000000 +0000 @@ -72,9 +72,10 @@ @validation.schema(server_migrations.force_complete) def _force_complete(self, req, id, server_id, body): context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'force_complete') - instance = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'force_complete', + target={'project_id': instance.project_id}) + try: self.compute_api.live_migrate_force_complete(context, instance, id) except exception.InstanceNotFound as e: @@ -94,11 +95,12 @@ def index(self, req, server_id): """Return all migrations of an instance in 
progress.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'index') - # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. - common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(sm_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) migrations = self.compute_api.get_migrations_in_progress_by_instance( context, server_id, 'live-migration') @@ -115,11 +117,12 @@ def show(self, req, server_id, id): """Return the migration of an instance in progress by id.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'show') - # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. - common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(sm_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) try: migration = self.compute_api.get_migration_by_id_and_instance( @@ -153,11 +156,12 @@ def delete(self, req, server_id, id): """Abort an in progress migration of an instance.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'delete') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) support_abort_in_queue = api_version_request.is_supported(req, '2.65') - instance = common.get_instance(self.compute_api, context, server_id) try: self.compute_api.live_migrate_abort( context, instance, id, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_password.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_password.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_password.py 2020-04-10 17:57:57.000000000 +0000 @@ -31,8 +31,9 @@ @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ['nova.context'] - context.can(sp_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(sp_policies.BASE_POLICY_NAME % 'show', + target={'project_id': instance.project_id}) passw = password.extract_password(instance) return {'password': passw or ''} @@ -47,8 +48,9 @@ """ context = req.environ['nova.context'] - context.can(sp_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(sp_policies.BASE_POLICY_NAME % 'clear', + target={'project_id': instance.project_id}) meta = password.convert_password(context, None) instance.system_metadata.update(meta) instance.save() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/servers.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/servers.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/servers.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/servers.py 2020-04-10 17:57:57.000000000 +0000 @@ -40,7 +40,7 @@ from nova import context as nova_context from nova import exception from nova.i18n import _ -from nova.image import api as image_api +from nova.image 
import glance from nova.network import neutron from nova import objects from nova.policies import servers as server_policies @@ -719,6 +719,7 @@ except (exception.ImageNotActive, exception.ImageBadRequest, exception.ImageNotAuthorized, + exception.ImageUnacceptable, exception.FixedIpNotFoundForAddress, exception.FlavorNotFound, exception.FlavorDiskTooSmall, @@ -753,6 +754,7 @@ exception.MultiattachNotSupportedOldMicroversion, exception.CertificateValidationFailed, exception.CreateWithPortResourceRequestOldVersion, + exception.DeviceProfileError, exception.ComputeHostNotFound) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: @@ -944,7 +946,8 @@ try: self.compute_api.resize(context, instance, flavor_id, auto_disk_config=auto_disk_config) - except exception.QuotaError as error: + except (exception.QuotaError, + exception.ForbiddenWithAccelerators) as error: raise exc.HTTPForbidden( explanation=error.format_message()) except (exception.InstanceIsLocked, @@ -1111,7 +1114,8 @@ except exception.KeypairNotFound: msg = _("Invalid key_name provided.") raise exc.HTTPBadRequest(explanation=msg) - except exception.QuotaError as error: + except (exception.QuotaError, + exception.ForbiddenWithAccelerators) as error: raise exc.HTTPForbidden(explanation=error.format_message()) except (exception.AutoDiskConfigDisabledByImage, exception.CertificateValidationFailed, @@ -1120,6 +1124,7 @@ exception.ImageNotActive, exception.ImageUnacceptable, exception.InvalidMetadata, + exception.InvalidArchitectureName, ) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: @@ -1236,7 +1241,7 @@ # build location of newly-created image entity image_id = str(image['id']) - image_ref = image_api.API().generate_image_url(image_id, context) + image_ref = glance.API().generate_image_url(image_id, context) resp = webob.Response(status_int=202) resp.headers['Location'] = image_ref @@ -1267,6 +1272,11 @@ opt_list += ('changes-before',) if api_version_request.is_supported(req, min_version='2.73'): opt_list += ('locked',) + if api_version_request.is_supported(req, min_version='2.83'): + opt_list += ('availability_zone', 'config_drive', 'key_name', + 'created_at', 'launched_at', 'terminated_at', + 'power_state', 'task_state', 'vm_state', 'progress', + 'user_id',) return opt_list def _get_instance(self, context, instance_uuid): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_tags.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_tags.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_tags.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_tags.py 2020-04-10 17:57:57.000000000 +0000 @@ -65,15 +65,14 @@ @wsgi.expected_errors(404) def show(self, req, server_id, id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'show') + im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'show', + target={'project_id': im.project_id}) try: - im = objects.InstanceMapping.get_by_instance_uuid(context, - server_id) with nova_context.target_cell(context, im.cell_mapping) as cctxt: exists = objects.Tag.exists(cctxt, server_id, id) - except (exception.InstanceNotFound, - exception.InstanceMappingNotFound) as e: + except (exception.InstanceNotFound) as e: raise 
webob.exc.HTTPNotFound(explanation=e.format_message()) if not exists: @@ -85,15 +84,14 @@ @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'index') + im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'index', + target={'project_id': im.project_id}) try: - im = objects.InstanceMapping.get_by_instance_uuid(context, - server_id) with nova_context.target_cell(context, im.cell_mapping) as cctxt: tags = objects.TagList.get_by_resource_id(cctxt, server_id) - except (exception.InstanceNotFound, - exception.InstanceMappingNotFound) as e: + except (exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return {'tags': _get_tags_names(tags)} @@ -103,8 +101,9 @@ @validation.schema(schema.update) def update(self, req, server_id, id, body): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'update') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'update', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -155,8 +154,9 @@ @validation.schema(schema.update_all) def update_all(self, req, server_id, body): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'update_all') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'update_all', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -179,8 +179,9 @@ @wsgi.expected_errors((404, 409)) def delete(self, req, server_id, id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'delete') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'delete', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -203,8 +204,9 @@ @wsgi.expected_errors((404, 409)) def delete_all(self, req, server_id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'delete_all') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'delete_all', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_topology.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_topology.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/server_topology.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/server_topology.py 2020-04-10 17:57:57.000000000 +0000 @@ -24,17 +24,19 @@ self.compute_api = compute.API() @wsgi.Controller.api_version("2.78") - @wsgi.expected_errors((404)) + @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(st_policies.BASE_POLICY_NAME % 'index') - host_policy = (st_policies.BASE_POLICY_NAME % 'host:index') - show_host_info = context.can(host_policy, fatal=False) - instance = common.get_instance(self.compute_api, context, server_id, expected_attrs=['numa_topology', 'vcpu_model']) + context.can(st_policies.BASE_POLICY_NAME % 
'index', + target={'project_id': instance.project_id}) + + host_policy = (st_policies.BASE_POLICY_NAME % 'host:index') + show_host_info = context.can(host_policy, fatal=False) + return self._get_numa_topology(context, instance, show_host_info) def _get_numa_topology(self, context, instance, show_host_info): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/shelve.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/shelve.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/shelve.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/shelve.py 2020-04-10 17:57:57.000000000 +0000 @@ -14,6 +14,7 @@ """The shelved mode extension.""" +from oslo_log import log as logging from webob import exc from nova.api.openstack import api_version_request @@ -28,6 +29,8 @@ from nova.network import neutron from nova.policies import shelve as shelve_policies +LOG = logging.getLogger(__name__) + class ShelveController(wsgi.Controller): def __init__(self): @@ -83,8 +86,9 @@ def _unshelve(self, req, id, body): """Restore an instance from shelved mode.""" context = req.environ["nova.context"] - context.can(shelve_policies.POLICY_ROOT % 'unshelve') instance = common.get_instance(self.compute_api, context, id) + context.can(shelve_policies.POLICY_ROOT % 'unshelve', + target={'project_id': instance.project_id}) new_az = None unshelve_dict = body['unshelve'] @@ -98,12 +102,15 @@ if (instance.vm_state == vm_states.SHELVED_OFFLOADED and common.instance_has_port_with_resource_request( instance.uuid, self.network_api) and - not common.supports_port_resource_request_during_move( - req)): + not common.supports_port_resource_request_during_move()): + LOG.warning("The unshelve action on a server with ports having " + "resource requests, like a port with a QoS minimum " + "bandwidth policy, is not supported until every " + "nova-compute is upgraded to Ussuri") msg = _("The unshelve action on a server with ports having " "resource requests, like a port with a QoS minimum " - "bandwidth policy, is not supported with this " - "microversion") + "bandwidth policy, is not supported by this cluster right " + "now") raise exc.HTTPBadRequest(explanation=msg) try: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/suspend_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/suspend_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/suspend_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/suspend_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -27,7 +27,7 @@ self.compute_api = compute.API() @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((403, 404, 409)) @wsgi.action('suspend') def _suspend(self, req, id, body): """Permit admins to suspend the server.""" @@ -44,6 +44,8 @@ except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'suspend', id) + except exception.ForbiddenWithAccelerators as e: + raise exc.HTTPForbidden(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @@ -51,8 +53,9 @@ def _resume(self, req, id, body): """Permit admins to resume the server from suspend.""" context = req.environ['nova.context'] - context.can(ss_policies.POLICY_ROOT % 'resume') server = common.get_instance(self.compute_api, context, id) + 
context.can(ss_policies.POLICY_ROOT % 'resume', + target={'project_id': server.project_id}) try: self.compute_api.resume(context, server) except exception.InstanceIsLocked as e: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/versions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/versions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/versions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/versions.py 2020-04-10 17:57:57.000000000 +0000 @@ -100,3 +100,18 @@ args['action'] = 'multi' return args + + +class VersionsV2(wsgi.Resource): + + def __init__(self): + super(VersionsV2, self).__init__(None) + + def index(self, req, body=None): + builder = views_versions.get_view_builder(req) + ver = 'v2.0' if req.is_legacy_v2() else 'v2.1' + return builder.build_version(VERSIONS[ver]) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + return {'action': 'index'} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/hypervisors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/hypervisors.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/hypervisors.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/hypervisors.py 2020-04-10 17:57:57.000000000 +0000 @@ -17,7 +17,7 @@ class ViewBuilder(common.ViewBuilder): - _collection_name = "hypervisors" + _collection_name = "os-hypervisors" def get_links(self, request, hypervisors, detail=False): coll_name = (self._collection_name + '/detail' if detail else diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/keypairs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/keypairs.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/keypairs.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/keypairs.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,7 +18,7 @@ class ViewBuilder(common.ViewBuilder): - _collection_name = "keypairs" + _collection_name = 'os-keypairs' # TODO(takashin): After v2 and v2.1 is no longer supported, # 'type' can always be included in the response. _index_params = ('name', 'public_key', 'fingerprint') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/limits.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/views/limits.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/views/limits.py 2020-04-10 17:57:57.000000000 +0000 @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
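The hunks above all repeat one refactor: look the instance up first, then enforce the policy with the instance's project as the target. A minimal, self-contained sketch of that ordering follows; the ``Context`` class and the policy rule name are illustrative stand-ins, not nova's actual implementation (real enforcement goes through oslo.policy):

.. code:: python

    class Context:
        """Stand-in for nova's RequestContext (illustrative only)."""

        def __init__(self, project_id):
            self.project_id = project_id

        def can(self, rule, target):
            # Real nova delegates to oslo.policy; here we just compare
            # the target project against the caller's project.
            if target.get('project_id') != self.project_id:
                raise PermissionError(f'{rule} denied for {target}')


    def show_password(context, instances, server_id):
        # Fetch first, authorize second: the policy target is derived
        # from the instance itself, so project members can act on their
        # own servers instead of needing a blanket admin-only rule.
        instance = instances[server_id]
        context.can('os_compute_api:os-server-password:show',
                    target={'project_id': instance['project_id']})
        return instance.get('password', '')

This is why each hunk moves ``common.get_instance()`` above ``context.can()``: the target dict cannot be built until the instance, and hence its ``project_id``, is known.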
-from nova.policies import used_limits as ul_policies - class ViewBuilder(object): """OpenStack API base limits view builder.""" @@ -79,7 +77,6 @@ return limits def _build_used_limits(self, request, quotas, filtered_limits): - self._check_requested_project_scope(request) quota_map = { 'totalRAMUsed': 'ram', 'totalCoresUsed': 'cores', @@ -94,13 +91,3 @@ used_limits[display_name] = quotas[key]['in_use'] return used_limits - - def _check_requested_project_scope(self, request): - if 'tenant_id' in request.GET: - context = request.environ['nova.context'] - tenant_id = request.GET.get('tenant_id') - target = { - 'project_id': tenant_id, - 'user_id': context.user_id - } - context.can(ul_policies.BASE_POLICY_NAME, target) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/volumes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/volumes.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/compute/volumes.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/compute/volumes.py 2020-04-10 17:57:57.000000000 +0000 @@ -279,9 +279,9 @@ def index(self, req, server_id): """Returns the list of volume attachments for a given instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'index') - instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) @@ -304,10 +304,11 @@ def show(self, req, server_id, id): """Return data about the given volume attachment.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'show') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) volume_id = id - instance = common.get_instance(self.compute_api, context, server_id) try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( @@ -334,7 +335,9 @@ def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'create') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'create', + target={'project_id': instance.project_id}) volume_id = body['volumeAttachment']['volumeId'] device = body['volumeAttachment'].get('device') @@ -342,8 +345,6 @@ delete_on_termination = body['volumeAttachment'].get( 'delete_on_termination', False) - instance = common.get_instance(self.compute_api, context, server_id) - if instance.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): _check_request_version(req, '2.20', 'attach_volume', @@ -390,13 +391,8 @@ attachment['delete_on_termination'] = delete_on_termination return {'volumeAttachment': attachment} - @wsgi.response(202) - @wsgi.expected_errors((400, 404, 409)) - @validation.schema(volumes_schema.update_volume_attachment) - def update(self, req, server_id, id, body): + def _update_volume_swap(self, req, instance, id, body): context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'update') - old_volume_id = id try: old_volume = self.volume_api.get(context, old_volume_id) @@ -416,8 +412,6 @@ # NotFound response if that is not existent. 
raise exc.HTTPBadRequest(explanation=e.format_message()) - instance = common.get_instance(self.compute_api, context, server_id) - try: self.compute_api.swap_volume(context, instance, old_volume, new_volume) @@ -430,19 +424,84 @@ raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, - 'swap_volume', server_id) + 'swap_volume', instance.uuid) + + def _update_volume_regular(self, req, instance, id, body): + context = req.environ['nova.context'] + att = body['volumeAttachment'] + # NOTE(danms): We may be doing an update of regular parameters in + # the midst of a swap operation, so to find the original BDM, we need + # to use the old volume ID, which is the one in the path. + volume_id = id + + try: + bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( + context, volume_id, instance.uuid) + + # NOTE(danms): The attachment id is just the (current) volume id + if 'id' in att and att['id'] != volume_id: + raise exc.HTTPBadRequest(explanation='The id property is ' + 'not mutable') + if 'serverId' in att and att['serverId'] != instance.uuid: + raise exc.HTTPBadRequest(explanation='The serverId property ' + 'is not mutable') + if 'device' in att and att['device'] != bdm.device_name: + raise exc.HTTPBadRequest(explanation='The device property is ' + 'not mutable') + if 'tag' in att and att['tag'] != bdm.tag: + raise exc.HTTPBadRequest(explanation='The tag property is ' + 'not mutable') + if 'delete_on_termination' in att: + bdm.delete_on_termination = strutils.bool_from_string( + att['delete_on_termination'], strict=True) + bdm.save() + except exception.VolumeBDMNotFound as e: + raise exc.HTTPNotFound(explanation=e.format_message()) + + @wsgi.response(202) + @wsgi.expected_errors((400, 404, 409)) + @validation.schema(volumes_schema.update_volume_attachment, '2.0', '2.84') + @validation.schema(volumes_schema.update_volume_attachment_v285, + min_version='2.85') + def update(self, req, server_id, id, body): + context = req.environ['nova.context'] + instance = common.get_instance(self.compute_api, context, server_id) + attachment = body['volumeAttachment'] + volume_id = attachment['volumeId'] + only_swap = not api_version_request.is_supported(req, '2.85') + + # NOTE(brinzhang): If the 'volumeId' requested by the user is + # different from the 'id' in the url path, or only swap is allowed by + # the microversion, we should check the swap volume policy. + # otherwise, check the volume update policy. 
+ if only_swap or id != volume_id: + context.can(va_policies.POLICY_ROOT % 'swap', target={}) + else: + context.can(va_policies.POLICY_ROOT % 'update', + target={'project_id': instance.project_id}) + + if only_swap: + # NOTE(danms): Original behavior is always call swap on PUT + self._update_volume_swap(req, instance, id, body) + else: + # NOTE(danms): New behavior is update any supported attachment + # properties first, and then call swap if volumeId differs + self._update_volume_regular(req, instance, id, body) + if id != volume_id: + self._update_volume_swap(req, instance, id, body) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, server_id, id): """Detach a volume from an instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'delete') + instance = common.get_instance(self.compute_api, context, server_id, + expected_attrs=['device_metadata']) + context.can(va_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) volume_id = id - instance = common.get_instance(self.compute_api, context, server_id, - expected_attrs=['device_metadata']) if instance.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): _check_request_version(req, '2.20', 'detach_volume', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/urlmap.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/urlmap.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/openstack/urlmap.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/openstack/urlmap.py 2020-04-10 17:57:57.000000000 +0000 @@ -168,6 +168,25 @@ for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue + # Rudimentary "wildcard" support: + # By declaring a urlmap path ending in '/+', you're saying the + # incoming path must start with everything up to and including the + # '/' *and* have something after that as well. For example, path + # /foo/bar/+ will match /foo/bar/baz, but not /foo/bar/ or /foo/bar + # NOTE(efried): This assumes we'll never need a path URI component + # that legitimately starts with '+'. (We could use a + # more obscure character/sequence here in that case.) + if app_url.endswith('/+'): + # Must be requesting at least the path element (including /) + if not path_info.startswith(app_url[:-1]): + continue + # ...but also must be requesting something after that / + if len(path_info) < len(app_url): + continue + # Trim the /+ off the app_url to make it look "normal" for e.g. + # proper splitting of SCRIPT_NAME and PATH_INFO. + return app, app_url[:-2] + # Normal (non-wildcarded) prefix match if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/accel.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/accel.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/accel.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/accel.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,36 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``accel`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='accel:device_profile', + description=( + 'The name of a device profile to configure for the instance. ' + 'A device profile may be viewed as a "flavor for devices".' + ), + value={ + 'type': str, + 'description': 'A name of a device profile.', + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,72 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for (preferably) ``aggregate_instance_extra_specs`` namespaced +extra specs. + +These are used by the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. +Note that we explicitly do not support the unnamespaced variant of extra specs +since these have been deprecated since Havana (commit fbedf60a432). Users that +insist on using these can disable extra spec validation. +""" + +from nova.api.validation.extra_specs import base + + +DESCRIPTION = """\ +Specify metadata that must be present on the aggregate of a host. If this +metadata is not present, the host will be rejected. Requires the +``AggregateInstanceExtraSpecsFilter`` scheduler filter. + +The value can be one of the following: + +* ``=`` (equal to or greater than as a number; same as vcpus case) +* ``==`` (equal to as a number) +* ``!=`` (not equal to as a number) +* ``>=`` (greater than or equal to as a number) +* ``<=`` (less than or equal to as a number) +* ``s==`` (equal to as a string) +* ``s!=`` (not equal to as a string) +* ``s>=`` (greater than or equal to as a string) +* ``s>`` (greater than as a string) +* ``s<=`` (less than or equal to as a string) +* ``s<`` (less than as a string) +* ``<in>`` (substring) +* ``<all-in>`` (all elements contained in collection) +* ``<or>`` (find one of these) +* A specific value, e.g.
``true``, ``123``, ``testing`` +""" + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='aggregate_instance_extra_specs:{key}', + description=DESCRIPTION, + parameters=[ + { + 'name': 'key', + 'description': 'The metadata key to match on', + 'pattern': r'.+', + }, + ], + value={ + # this is totally arbitrary, since we need to support specific + # values + 'type': str, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/base.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/base.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/base.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,120 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import dataclasses +import re +import typing as ty + +from oslo_utils import strutils + +from nova import exception + + +@dataclasses.dataclass +class ExtraSpecValidator: + name: str + description: str + value: ty.Dict[str, ty.Any] + deprecated: bool = False + parameters: ty.List[ty.Dict[str, ty.Any]] = dataclasses.field( + default_factory=list + ) + + name_regex: str = None + value_regex: str = None + + def __post_init__(self): + # generate a regex for the name + + name_regex = self.name + # replace the human-readable patterns with named regex groups; this + # will transform e.g. 'hw:numa_cpus.{id}' to 'hw:numa_cpus.(?P<id>\d+)' + for param in self.parameters: + pattern = f'(?P<{param["name"]}>{param["pattern"]})' + name_regex = name_regex.replace(f'{{{param["name"]}}}', pattern) + + self.name_regex = name_regex + + # ...and do the same for the value, but only if we're using strings + + if self.value['type'] not in (int, str, bool): + raise ValueError( + f"Unsupported parameter type '{self.value['type']}'" + ) + + value_regex = None + if self.value['type'] == str and self.value.get('pattern'): + value_regex = self.value['pattern'] + + self.value_regex = value_regex + + def _validate_str(self, value): + if 'pattern' in self.value: + value_match = re.fullmatch(self.value_regex, value) + if not value_match: + raise exception.ValidationError( + f"Validation failed; '{value}' is not of the format " + f"'{self.value_regex}'." + ) + elif 'enum' in self.value: + if value not in self.value['enum']: + values = ', '.join(str(x) for x in self.value['enum']) + raise exception.ValidationError( + f"Validation failed; '{value}' is not one of: {values}." + ) + + def _validate_int(self, value): + try: + value = int(value) + except ValueError: + raise exception.ValidationError( + f"Validation failed; '{value}' is not a valid integer value." + ) + + if 'max' in self.value and self.value['max'] < value: + raise exception.ValidationError( + f"Validation failed; '{value}' is greater than the max value " + f"of '{self.value['max']}'."
+ ) + + if 'min' in self.value and self.value['min'] > value: + raise exception.ValidationError( + f"Validation failed; '{value}' is less than the min value " + f"of '{self.value['min']}'." + ) + + def _validate_bool(self, value): + try: + strutils.bool_from_string(value, strict=True) + except ValueError: + raise exception.ValidationError( + f"Validation failed; '{value}' is not a valid boolean-like " + f"value." + ) + + def validate(self, name, value): + name_match = re.fullmatch(self.name_regex, name) + if not name_match: + # NOTE(stephenfin): This is mainly here for testing purposes + raise exception.ValidationError( + f"Validation failed; expected a name of format '{self.name}' " + f"but got '{name}'." + ) + + if self.value['type'] == int: + self._validate_int(value) + elif self.value['type'] == bool: + self._validate_bool(value) + else: # str + self._validate_str(value) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/capabilities.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/capabilities.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/capabilities.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,112 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for (preferably) ``capabilities`` namespaced extra specs. + +These are used by the ``ComputeCapabilitiesFilter`` scheduler filter. Note that +we explicitly do not allow the unnamespaced variant of extra specs since this +has been deprecated since Grizzly (commit 8ce8e4b6c0d). Users that insist on +using these can disable extra spec validation. + +For all extra specs, the value can be one of the following: + +* ``=`` (equal to or greater than as a number; same as vcpus case) +* ``==`` (equal to as a number) +* ``!=`` (not equal to as a number) +* ``>=`` (greater than or equal to as a number) +* ``<=`` (less than or equal to as a number) +* ``s==`` (equal to as a string) +* ``s!=`` (not equal to as a string) +* ``s>=`` (greater than or equal to as a string) +* ``s>`` (greater than as a string) +* ``s<=`` (less than or equal to as a string) +* ``s<`` (less than as a string) +* ``<in>`` (substring) +* ``<all-in>`` (all elements contained in collection) +* ``<or>`` (find one of these) +* A specific value, e.g. ``true``, ``123``, ``testing`` + +Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and +``<or> fpu <or> gpu`` +""" + +from nova.api.validation.extra_specs import base + + +DESCRIPTION = """\ +Specify that the '{capability}' capability provided by the host compute service +satisfies the provided filter value. Requires the ``ComputeCapabilitiesFilter`` +scheduler filter.
+""" + +EXTRA_SPEC_VALIDATORS = [] + +# non-nested capabilities (from 'nova.objects.compute_node.ComputeNode' and +# nova.scheduler.host_manager.HostState') + +for capability in ( + 'id', 'uuid', 'service_id', 'host', 'vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used', + 'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname', + 'free_ram_mb', 'free_disk_gb', 'current_workload', 'running_vms', + 'disk_available_least', 'host_ip', 'mapped', + 'cpu_allocation_ratio', 'ram_allocation_ratio', 'disk_allocation_ratio', +) + ( + 'total_usable_ram_mb', 'total_usable_disk_gb', 'disk_mb_used', + 'free_disk_mb', 'vcpus_total', 'vcpus_used', 'num_instances', + 'num_io_ops', 'failed_builds', 'aggregates', 'cell_uuid', 'updated', +): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'capabilities:{capability}', + description=DESCRIPTION.format(capability=capability), + value={ + # this is totally arbitary, since we need to support specific + # values + 'type': str, + }, + ), + ) + + +# nested capabilities (from 'nova.objects.compute_node.ComputeNode' and +# nova.scheduler.host_manager.HostState') + +for capability in ( + 'cpu_info', 'metrics', 'stats', 'numa_topology', 'supported_hv_specs', + 'pci_device_pools', +) + ( + 'nodename', 'pci_stats', 'supported_instances', 'limits', 'instances', +): + EXTRA_SPEC_VALIDATORS.extend([ + base.ExtraSpecValidator( + name=f'capabilities:{capability}{{filter}}', + description=DESCRIPTION.format(capability=capability), + parameters=[ + { + 'name': 'filter', + # this is optional, but if it's present it must be preceded + # by ':' + 'pattern': r'(:\w+)*', + } + ], + value={ + 'type': str, + }, + ), + ]) + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,370 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``hw`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +realtime_validators = [ + base.ExtraSpecValidator( + name='hw:cpu_realtime', + description=( + 'Determine whether realtime mode should be enabled for the ' + 'instance or not. Only supported by the libvirt driver.' + ), + value={ + 'type': bool, + 'description': 'Whether to enable realtime priority.', + }, + ), + base.ExtraSpecValidator( + name='hw:cpu_realtime_mask', + description=( + 'A exclusion mask of CPUs that should not be enabled for realtime.' 
+ ), + value={ + 'type': str, + # NOTE(stephenfin): Yes, these things *have* to start with '^' + 'pattern': r'\^\d+((-\d+)?(,\^?\d+(-\d+)?)?)*', + }, + ), +] + +cpu_policy_validators = [ + base.ExtraSpecValidator( + name='hw:cpu_policy', + description=( + 'The policy to apply when determining what host CPUs the guest ' + 'CPUs can run on. If ``shared`` (default), guest CPUs can be ' + 'overallocated but cannot float across host cores. If ' + '``dedicated``, guest CPUs cannot be overallocated but are ' + 'individually pinned to their own host core.' + ), + value={ + 'type': str, + 'description': 'The CPU policy.', + 'enum': [ + 'dedicated', + 'shared' + ], + }, + ), + base.ExtraSpecValidator( + name='hw:cpu_thread_policy', + description=( + 'The policy to apply when determining whether the destination ' + 'host can have hardware threads enabled or not. If ``prefer`` ' + '(default), hosts with hardware threads will be preferred. If ' + '``require``, hosts with hardware threads will be required. If ' + '``isolate``, hosts with hardware threads will be forbidden.' + ), + value={ + 'type': str, + 'description': 'The CPU thread policy.', + 'enum': [ + 'prefer', + 'isolate', + 'require', + ], + }, + ), + base.ExtraSpecValidator( + name='hw:emulator_threads_policy', + description=( + 'The policy to apply when determining whether emulator threads ' + 'should be offloaded to a separate isolated core or to a pool ' + 'of shared cores. If ``share``, emulator overhead threads will ' + 'be offloaded to a pool of shared cores. If ``isolate``, ' + 'emulator overhead threads will be offloaded to their own core.' + ), + value={ + 'type': str, + 'description': 'The emulator thread policy.', + 'enum': [ + 'isolate', + 'share', + ], + }, + ), +] + +hugepage_validators = [ + base.ExtraSpecValidator( + name='hw:mem_page_size', + description=( + 'The size of memory pages to allocate to the guest with. Can be ' + 'one of the three aliases - ``large``, ``small`` or ``any`` - or ' + 'an actual size. Only supported by the libvirt virt driver.' + ), + value={ + 'type': str, + 'description': 'The size of memory page to allocate', + 'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)', + }, + ), +] + +numa_validators = [ + base.ExtraSpecValidator( + name='hw:numa_nodes', + description=( + 'The number of virtual NUMA nodes to allocate to configure the ' + 'guest with. Each virtual NUMA node will be mapped to a unique ' + 'host NUMA node. Only supported by the libvirt virt driver.' + ), + value={ + 'type': int, + 'description': 'The number of virtual NUMA nodes to allocate', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:numa_cpus.{id}', + description=( + 'A mapping of **guest** CPUs to the **guest** NUMA node ' + 'identified by ``{id}``. This can be used to provide asymmetric ' + 'CPU-NUMA allocation and is necessary where the number of guest ' + 'NUMA nodes is not a factor of the number of guest CPUs.' + ), + parameters=[ + { + 'name': 'id', + 'pattern': r'\d+', # positive integers + 'description': 'The ID of the **guest** NUMA node.', + }, + ], + value={ + 'type': str, + 'description': ( + 'The guest CPUs, in the form of a CPU map, to allocate to the ' + 'guest NUMA node identified by ``{id}``.' + ), + 'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*', + }, + ), + base.ExtraSpecValidator( + name='hw:numa_mem.{id}', + description=( + 'A mapping of **guest** memory to the **guest** NUMA node ' + 'identified by ``{id}``.
This can be used to provide asymmetric ' + 'memory-NUMA allocation and is necessary where the number of ' + 'guest NUMA nodes is not a factor of the total guest memory.' + ), + parameters=[ + { + 'name': 'id', + 'pattern': r'\d+', # positive integers + 'description': 'The ID of the **guest** NUMA node.', + }, + ], + value={ + 'type': int, + 'description': ( + 'The guest memory, in MB, to allocate to the guest NUMA node ' + 'identified by ``{id}``.' + ), + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:pci_numa_affinity_policy', + description=( + 'The NUMA affinity policy of any PCI passthrough devices or ' + 'SR-IOV network interfaces attached to the instance.' + ), + value={ + 'type': str, + 'description': 'The PCI NUMA affinity policy', + 'enum': [ + 'required', + 'preferred', + 'legacy', + ], + }, + ), +] + +cpu_topology_validators = [ + base.ExtraSpecValidator( + name='hw:cpu_sockets', + description=( + 'The number of virtual CPU sockets to emulate in the guest ' + 'CPU topology.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU sockets', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:cpu_cores', + description=( + 'The number of virtual CPU cores to emulate per socket in the ' + 'guest CPU topology.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU cores', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:cpu_threads', + description=( + 'The number of virtual CPU threads to emulate per core in the ' + 'guest CPU topology.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU threads', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:max_cpu_sockets', + description=( + 'The max number of virtual CPU sockets to emulate in the ' + 'guest CPU topology. This is used to limit the topologies that ' + 'can be requested by an image and will be used to validate the ' + '``hw_cpu_sockets`` image metadata property.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU sockets', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:max_cpu_cores', + description=( + 'The max number of virtual CPU cores to emulate per socket in the ' + 'guest CPU topology. This is used to limit the topologies that ' + 'can be requested by an image and will be used to validate the ' + '``hw_cpu_cores`` image metadata property.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU cores', + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='hw:max_cpu_threads', + description=( + 'The max number of virtual CPU threads to emulate per core in the ' + 'guest CPU topology. This is used to limit the topologies that ' + 'can be requested by an image and will be used to validate the ' + '``hw_cpu_threads`` image metadata property.' + ), + value={ + 'type': int, + 'description': 'A number of virtual CPU threads', + 'min': 1, + }, + ), +] + +feature_flag_validators = [ + # TODO(stephenfin): Consider deprecating and moving this to the 'os:' + # namespace + base.ExtraSpecValidator( + name='hw:boot_menu', + description=( + 'Whether to show a boot menu when booting the guest.' + ), + value={ + 'type': bool, + 'description': 'Whether to enable the boot menu', + }, + ), + base.ExtraSpecValidator( + name='hw:mem_encryption', + description=( + 'Whether to enable memory encryption for the guest. Only ' + 'supported by the libvirt driver on hosts with AMD SEV support.'
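The NUMA validators above combine into flavor extra specs like the following. A small self-contained check that the example CPU maps satisfy the documented pattern; the flavor values are hypothetical (an asymmetric layout for a 4-vCPU, 4096 MB guest):

.. code:: python

    import re

    # Hypothetical asymmetric layout: three vCPUs and 3072 MB on guest
    # node 0, the remaining vCPU and 1024 MB on node 1.
    extra_specs = {
        'hw:numa_nodes': '2',
        'hw:numa_cpus.0': '0-2',
        'hw:numa_cpus.1': '3',
        'hw:numa_mem.0': '3072',
        'hw:numa_mem.1': '1024',
    }

    # The CPU-map pattern from the hw:numa_cpus.{id} validator above.
    cpu_map = re.compile(r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*')
    for key, value in extra_specs.items():
        if key.startswith('hw:numa_cpus.'):
            assert cpu_map.fullmatch(value), (key, value)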
+ ), + value={ + 'type': bool, + 'description': 'Whether to enable memory encryption', + }, + ), + base.ExtraSpecValidator( + name='hw:pmem', + description=( + 'A comma-separated list of ``$LABEL``\\ s defined in config for ' + 'vPMEM devices.' + ), + value={ + 'type': str, + 'description': ( + 'A comma-separated list of valid resource class names.' + ), + 'pattern': '([a-zA-Z0-9_]+(,)?)+', + }, + ), + base.ExtraSpecValidator( + name='hw:pmu', + description=( + 'Whether to enable the Performance Monitoring Unit (PMU) for the ' + 'guest. Only supported by the libvirt driver.' + ), + value={ + 'type': bool, + 'description': 'Whether to enable the PMU', + }, + ), + base.ExtraSpecValidator( + name='hw:serial_port_count', + description=( + 'The number of serial ports to allocate to the guest. Only ' + 'supported by the libvirt virt driver.' + ), + value={ + 'type': int, + 'min': 0, + 'description': 'The number of serial ports to allocate', + }, + ), + base.ExtraSpecValidator( + name='hw:watchdog_action', + description=( + 'The action to take when the watchdog timer is kicked. Only ' + 'supported by the libvirt virt driver.' + ), + value={ + 'type': str, + 'description': 'The action to take', + 'enum': [ + 'none', + 'pause', + 'poweroff', + 'reset', + 'disabled', + ], + }, + ), +] + + +def register(): + return ( + realtime_validators + + cpu_policy_validators + + hugepage_validators + + numa_validators + + cpu_topology_validators + + feature_flag_validators + ) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw_rng.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw_rng.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw_rng.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw_rng.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,57 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``hw_rng`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Move these to the 'hw:' namespace +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hw_rng:allowed', + description=( + 'Whether to disable configuration of a random number generator ' + 'in their image. Before 21.0.0 (Ussuri), random number generators ' + 'were not enabled by default so this was used to determine ' + 'whether to **enable** configuration.' + ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='hw_rng:rate_bytes', + description=( + 'The allowed amount of bytes for the guest to read from the ' + 'host\'s entropy per period.'
+ ), + value={ + 'type': int, + 'min': 0, + }, + ), + base.ExtraSpecValidator( + name='hw_rng:rate_period', + description='The duration of a read period in seconds.', + value={ + 'type': int, + 'min': 0, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw_video.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw_video.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/hw_video.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/hw_video.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``hw_video`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Move these to the 'hw:' namespace +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hw_video:ram_max_mb', + description=( + 'The maximum amount of memory the user can request using the ' + '``hw_video_ram`` image metadata property, which represents the ' + 'video memory that the guest OS will see. This has no effect for ' + 'vGPUs.' + ), + value={ + 'type': int, + 'min': 0, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/null.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/null.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/null.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/null.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,51 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for non-namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): These should be moved to a namespace +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hide_hypervisor_id', + description=( + 'Determine whether the hypervisor ID should be hidden from the ' + 'guest. Only supported by the libvirt driver.' 
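Together the two ``hw_rng`` rate specs define a simple entropy budget; a quick illustration with hypothetical flavor values, taking the period to be in seconds as the description above states:

.. code:: python

    # Hypothetical values: at most 2048 bytes of host entropy may be
    # read per 2-second period, i.e. a sustained 1 KiB/s.
    extra_specs = {
        'hw_rng:allowed': 'True',
        'hw_rng:rate_bytes': '2048',
        'hw_rng:rate_period': '2',
    }

    rate = (int(extra_specs['hw_rng:rate_bytes'])
            / int(extra_specs['hw_rng:rate_period']))
    print(f'entropy budget: {rate:.0f} B/s')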
+ ), + value={ + 'type': bool, + 'description': 'Whether to hide the hypervisor ID.', + }, + ), + base.ExtraSpecValidator( + name='group_policy', + description=( + 'The group policy to apply when using the granular resource ' + 'request syntax.' + ), + value={ + 'type': str, + 'enum': [ + 'isolate', + 'none', + ], + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/os.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/os.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/os.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/os.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,95 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``os`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Most of these belong in the 'hw:' or 'hyperv:' namespace +# and should be moved. +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='os:secure_boot', + description=( + 'Determine whether secure boot is enabled or not. Currently only ' + 'supported by the HyperV driver.' + ), + value={ + 'type': str, + 'description': 'Whether secure boot is required or not', + 'enum': [ + 'disabled', + 'required', + ], + }, + ), + base.ExtraSpecValidator( + name='os:resolution', + description=( + 'Guest VM screen resolution size. Only supported by the HyperV ' + 'driver.' + ), + value={ + 'type': str, + 'description': 'The chosen resolution', + 'enum': [ + '1024x768', + '1280x1024', + '1600x1200', + '1920x1200', + '2560x1600', + '3840x2160', + ], + }, + ), + base.ExtraSpecValidator( + name='os:monitors', + description=( + 'Guest VM number of monitors. Only supported by the HyperV driver.' + ), + value={ + 'type': int, + 'description': 'The number of monitors enabled', + 'min': 1, + 'max': 8, + }, + ), + # TODO(stephenfin): Consider merging this with the 'hw_video_ram' image + # metadata property or adding a 'hw:video_ram' extra spec that works for + # both Hyper-V and libvirt. + base.ExtraSpecValidator( + name='os:vram', + description=( + 'Guest VM VRAM amount. Only supported by the HyperV driver.' 
+ ), + # NOTE(stephenfin): This is really an int, but because there's a + # limited range of options we treat it as a string + value={ + 'type': str, + 'description': 'Amount of VRAM to allocate to instance', + 'enum': [ + '64', + '128', + '256', + '512', + '1024', + ], + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/pci_passthrough.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/pci_passthrough.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/pci_passthrough.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/pci_passthrough.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,38 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``pci_passthrough`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='pci_passthrough:alias', + description=( + 'Specify the number of ``$alias`` PCI device(s) to attach to the ' + 'instance. Must be of format ``$alias:$number``. Use commas to ' + 'specify multiple values.' + ), + value={ + 'type': str, + # one or more comma-separated '$alias:$num' values + 'pattern': r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*', + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/powervm.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/powervm.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/powervm.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/powervm.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,271 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``powervm`` namespaced extra specs. + +These were all taken from the IBM documentation. + +https://www.ibm.com/support/knowledgecenter/SSXK2N_1.4.4/com.ibm.powervc.standard.help.doc/powervc_pg_flavorsextraspecs_hmc.html +""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): A lot of these seem to overlap with existing 'hw:' extra +# specs and could be deprecated in favour of those. 
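The ``pci_passthrough:alias`` pattern above is easy to sanity-check in isolation. The alias names in this sketch are hypothetical ones an operator would have defined in nova's PCI alias configuration:

.. code:: python

    import re

    # The comma-separated '$alias:$number' pattern from the validator above.
    ALIAS = re.compile(r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*')

    assert ALIAS.fullmatch('a1:2')
    assert ALIAS.fullmatch('a1:2, QuickAssist:1')
    assert ALIAS.fullmatch('a1') is None  # the device count is required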
+EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='powervm:min_mem', + description=( + 'Minimum memory (MB). If you do not specify the value, the value ' + 'is defaulted to the value for ``memory_mb``.' + ), + value={ + 'type': int, + 'min': 256, + 'description': 'Integer >=256 divisible by LMB size of the target', + }, + ), + base.ExtraSpecValidator( + name='powervm:max_mem', + description=( + 'Maximum memory (MB). If you do not specify the value, the value ' + 'is defaulted to the value for ``memory_mb``.' + ), + value={ + 'type': int, + 'min': 256, + 'description': 'Integer >=256 divisible by LMB size of the target', + }, + ), + base.ExtraSpecValidator( + name='powervm:min_vcpu', + description=( + 'Minimum virtual processors. Minimum resource that is required ' + 'for LPAR to boot is 1. The maximum value can be equal to the ' + 'value, which is set to vCPUs. If you specify the value of the ' + 'attribute, you must also specify value of powervm:max_vcpu. ' + 'Defaults to value set for vCPUs.' + ), + value={ + 'type': int, + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='powervm:max_vcpu', + description=( + 'Maximum virtual processors. Minimum resource that is required ' + 'for LPAR to boot is 1. The maximum value can be equal to the ' + 'value, which is set to vCPUs. If you specify the value of the ' + 'attribute, you must also specify value of powervm:min_vcpu. ' + 'Defaults to value set for vCPUs.' + ), + value={ + 'type': int, + 'min': 1, + }, + ), + base.ExtraSpecValidator( + name='powervm:proc_units', + description=( + 'The wanted ``proc_units``. The value for the attribute cannot be ' + 'less than 1/10 of the value that is specified for Virtual ' + 'CPUs (vCPUs) for hosts with firmware level 7.5 or earlier and ' + '1/20 of the value that is specified for vCPUs for hosts with ' + 'firmware level 7.6 or later. If the value is not specified ' + 'during deployment, it is defaulted to vCPUs * 0.5.' + ), + value={ + 'type': str, + 'pattern': r'\d+\.\d+', + 'description': ( + 'Float (divisible by 0.1 for hosts with firmware level 7.5 or ' + 'earlier and 0.05 for hosts with firmware level 7.6 or later)' + ), + }, + ), + base.ExtraSpecValidator( + name='powervm:min_proc_units', + description=( + 'Minimum ``proc_units``. The minimum value for the attribute is ' + '0.1 for hosts with firmware level 7.5 or earlier and 0.05 for ' + 'hosts with firmware level 7.6 or later. The maximum value must ' + 'be equal to the maximum value of ``powervm:proc_units``. If you ' + 'specify the attribute, you must also specify ' + '``powervm:proc_units``, ``powervm:max_proc_units``, ' + '``powervm:min_vcpu``, ``powervm:max_vcpu``, and ' + '``powervm:dedicated_proc``. Set the ``powervm:dedicated_proc`` ' + 'to false.' + '\n' + 'The value for the attribute cannot be less than 1/10 of the ' + 'value that is specified for powervm:min_vcpu for hosts with ' + 'firmware level 7.5 or earlier and 1/20 of the value that is ' + 'specified for ``powervm:min_vcpu`` for hosts with firmware ' + 'level 7.6 or later. If you do not specify the value of the ' + 'attribute during deployment, it is defaulted to equal the value ' + 'of ``powervm:proc_units``.' + ), + value={ + 'type': str, + 'pattern': r'\d+\.\d+', + 'description': ( + 'Float (divisible by 0.1 for hosts with firmware level 7.5 or ' + 'earlier and 0.05 for hosts with firmware level 7.6 or later)' + ), + }, + ), + base.ExtraSpecValidator( + name='powervm:max_proc_units', + description=( + 'Maximum ``proc_units``.
The minimum value can be equal to `` ' + '``powervm:proc_units``. The maximum value for the attribute ' + 'cannot be more than the value of the host for maximum allowed ' + 'processors per partition. If you specify this attribute, you ' + 'must also specify ``powervm:proc_units``, ' + '``powervm:min_proc_units``, ``powervm:min_vcpu``, ' + '``powervm:max_vcpu``, and ``powervm:dedicated_proc``. Set the ' + '``powervm:dedicated_proc`` to false.' + '\n' + 'The value for the attribute cannot be less than 1/10 of the ' + 'value that is specified for powervm:max_vcpu for hosts with ' + 'firmware level 7.5 or earlier and 1/20 of the value that is ' + 'specified for ``powervm:max_vcpu`` for hosts with firmware ' + 'level 7.6 or later. If you do not specify the value of the ' + 'attribute during deployment, the value is defaulted to equal the ' + 'value of ``powervm:proc_units``.' + ), + value={ + 'type': str, + 'pattern': r'\d+\.\d+', + 'description': ( + 'Float (divisible by 0.1 for hosts with firmware level 7.5 or ' + 'earlier and 0.05 for hosts with firmware level 7.6 or later)' + ), + }, + ), + base.ExtraSpecValidator( + name='powervm:dedicated_proc', + description=( + 'Use dedicated processors. The attribute defaults to false.' + ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='powervm:shared_weight', + description=( + 'Shared processor weight. When ``powervm:dedicated_proc`` is set ' + 'to true and ``powervm:uncapped`` is also set to true, the value ' + 'of the attribute defaults to 128.' + ), + value={ + 'type': int, + 'min': 0, + 'max': 255, + }, + ), + base.ExtraSpecValidator( + name='powervm:availability_priority', + description=( + 'Availability priority. The attribute priority of the server if ' + 'there is a processor failure and there are not enough resources ' + 'for all servers. VIOS and i5 need to remain high priority ' + 'default of 191. The value of the attribute defaults to 128.' + ), + value={ + 'type': int, + 'min': 0, + 'max': 255, + }, + ), + base.ExtraSpecValidator( + name='powervm:uncapped', + description=( + 'LPAR can use unused processor cycles that are beyond or exceed ' + 'the wanted setting of the attribute. This attribute is ' + 'supported only when ``powervm:dedicated_proc`` is set to false. ' + 'When ``powervm:dedicated_proc`` is set to false, ' + '``powervm:uncapped`` defaults to true.' + ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='powervm:dedicated_sharing_mode', + description=( + 'Sharing mode for dedicated processors. The attribute is ' + 'supported only when ``powervm:dedicated_proc`` is set to true.' + ), + value={ + 'type': str, + 'enum': ( + 'share_idle_procs', + 'keep_idle_procs', + 'share_idle_procs_active', + 'share_idle_procs_always', + ) + }, + ), + base.ExtraSpecValidator( + name='powervm:processor_compatibility', + description=( + 'A processor compatibility mode is a value that is assigned to a ' + 'logical partition by the hypervisor that specifies the processor ' + 'environment in which the logical partition can successfully ' + 'operate.' + ), + value={ + 'type': str, + 'enum': ( + 'default', + 'POWER6', + 'POWER6+', + 'POWER6_Enhanced', + 'POWER6+_Enhanced', + 'POWER7', + 'POWER8' + ), + }, + ), + base.ExtraSpecValidator( + name='powervm:shared_proc_pool_name', + description=( + 'Specifies the shared processor pool to be targeted during ' + 'deployment of a virtual machine.' 
+ ), + value={ + 'type': str, + 'description': 'String with upper limit of 14 characters', + }, + ), + base.ExtraSpecValidator( + name='powervm:srr_capability', + description=( + 'If the value of simplified remote restart capability is set to ' + 'true for the LPAR, you can remote restart the LPAR to supported ' + 'CEC or host when the source CEC or host is down. The attribute ' + 'defaults to false.' + ), + value={ + 'type': bool, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/quota.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/quota.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/quota.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/quota.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,103 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``quota`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [] + + +# CPU, memory, disk IO and VIF quotas (VMWare) +for resource in ('cpu', 'memory', 'disk_io', 'vif'): + for key, fmt in ( + ('limit', int), + ('reservation', int), + ('shares_level', str), + ('shares_share', int) + ): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:{resource}_{key}', + description=( + 'The {} for {}. Only supported by the VMWare virt ' + 'driver.'.format(' '.join(key.split('_')), resource) + ), + value={ + 'type': fmt, + }, + ) + ) + + +# CPU quotas (libvirt) +for key in ('shares', 'period', 'quota'): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:cpu_{key}', + description=( + f'The quota {key} for CPU. Only supported by the libvirt ' + f'virt driver.' + ), + value={ + 'type': int, + 'min': 0, + }, + ) + ) + + +# Disk quotas (libvirt, HyperV) +for stat in ('read', 'write', 'total'): + for metric in ('bytes', 'iops'): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:disk_{stat}_{metric}_sec', + # NOTE(stephenfin): HyperV supports disk_total_{metric}_sec + # too; update + description=( + f'The quota {stat} {metric} for disk. Only supported ' + f'by the libvirt virt driver.' + ), + value={ + 'type': int, + 'min': 0, + }, + ) + ) + + +# VIF quotas (libvirt) +# TODO(stephenfin): Determine whether this should be deprecated now that +# nova-network is dead +for stat in ('inbound', 'outbound'): + for metric in ('average', 'peak', 'burst'): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:vif_{stat}_{metric}', + description=( + f'The quota {stat} {metric} for VIF. Only supported ' + f'by the libvirt virt driver.' 
+            ),
+            value={
+                'type': int,
+                'min': 0,
+            },
+        )
+    )
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/resources.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/resources.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/resources.py	1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/resources.py	2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,65 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for ``resources`` namespaced extra specs."""
+
+import os_resource_classes
+
+from nova.api.validation.extra_specs import base
+
+
+EXTRA_SPEC_VALIDATORS = []
+
+for resource_class in os_resource_classes.STANDARDS:
+    EXTRA_SPEC_VALIDATORS.append(
+        base.ExtraSpecValidator(
+            name=f'resources{{group}}:{resource_class}',
+            description=f'The amount of resource {resource_class} requested.',
+            value={
+                'type': int,
+            },
+            parameters=[
+                {
+                    'name': 'group',
+                    'pattern': r'(_[a-zA-Z0-9_]*|\d+)?',
+                },
+            ],
+        )
+    )
+
+EXTRA_SPEC_VALIDATORS.append(
+    base.ExtraSpecValidator(
+        name='resources{group}:CUSTOM_{resource}',
+        description=(
+            'The amount of resource CUSTOM_{resource} requested.'
+        ),
+        value={
+            'type': int,
+        },
+        parameters=[
+            {
+                'name': 'group',
+                'pattern': r'(_[a-zA-Z0-9_]*|\d+)?',
+            },
+            {
+                'name': 'resource',
+                'pattern': r'.+',
+            },
+        ],
+    )
+)
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/traits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/traits.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/traits.py	1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/traits.py	2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,47 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
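+
+# Illustrative (hedged, not from nova) flavor extra spec names accepted by
+# the parameterized validators in resources.py above and in this module,
+# given the shared group pattern r'(_[a-zA-Z0-9_]*|\d+)?':
+#
+#   resources:VCPU=4                  # unnamed request group
+#   resources1:DISK_GB=20             # numbered group
+#   resources_accel:CUSTOM_FPGA=1     # named group, custom resource class
+#   trait:HW_CPU_X86_AVX=required     # trait in the unnamed group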
+ +"""Validators for ``traits`` namespaced extra specs.""" + +import os_traits + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [] + +for trait in os_traits.get_traits(): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'trait{{group}}:{trait}', + description=f'Require or forbid trait {trait}.', + value={ + 'type': str, + 'enum': [ + 'required', + 'forbidden', + ], + }, + parameters=[ + { + 'name': 'group', + 'pattern': r'(_[a-zA-z0-9_]*|\d+)?', + }, + ], + ) + ) + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/validators.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/validators.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/validators.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/validators.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,86 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for all extra specs known by nova.""" + +import re +import typing as ty + +from oslo_log import log as logging +from stevedore import extension + +from nova.api.validation.extra_specs import base +from nova import exception + +LOG = logging.getLogger(__name__) + +VALIDATORS: ty.Dict[str, base.ExtraSpecValidator] = {} +NAMESPACES: ty.Set[str] = set() + + +def validate(name: str, value: str): + """Validate a given extra spec. + + :param name: Extra spec name. + :param value: Extra spec value. + :raises: exception.ValidationError if validation fails. + """ + # attempt a basic lookup for extra specs without embedded parameters + if name in VALIDATORS: + VALIDATORS[name].validate(name, value) + return + + # if that failed, fallback to a linear search through the registry + for validator in VALIDATORS.values(): + if re.fullmatch(validator.name_regex, name): + validator.validate(name, value) + return + + # check if there's a namespace; if not, we've done all we can do + if ':' not in name: # no namespace + return + + # if there is, check if it's one we recognize + for namespace in NAMESPACES: + if re.fullmatch(namespace, name.split(':', 1)[0]): + break + else: + return + + raise exception.ValidationError( + f"Validation failed; extra spec '{name}' does not appear to be a " + f"valid extra spec." + ) + + +def load_validators(): + global VALIDATORS + + def _report_load_failure(mgr, ep, err): + LOG.warning(u'Failed to load %s: %s', ep.module_name, err) + + mgr = extension.ExtensionManager( + 'nova.api.extra_spec_validators', + on_load_failure_callback=_report_load_failure, + invoke_on_load=False, + ) + for ext in mgr: + # TODO(stephenfin): Make 'register' return a dict rather than a list? 
+ for validator in ext.plugin.register(): + VALIDATORS[validator.name] = validator + if ':' in validator.name_regex: + NAMESPACES.add(validator.name_regex.split(':', 1)[0]) + + +load_validators() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/vmware.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/vmware.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/extra_specs/vmware.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/extra_specs/vmware.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,48 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``vmware`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='vmware:hw_version', + description=( + 'Specify the hardware version used to create images. In an ' + 'environment with different host versions, you can use this ' + 'parameter to place instances on the correct hosts.' + ), + value={ + 'type': str, + }, + ), + base.ExtraSpecValidator( + name='vmware:storage_policy', + description=( + 'Specify the storage policy used for new instances.' + '\n' + 'If Storage Policy-Based Management (SPBM) is not enabled, this ' + 'parameter is ignored.' + ), + value={ + 'type': str, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/parameter_types.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/parameter_types.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/api/validation/parameter_types.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/api/validation/parameter_types.py 2020-04-10 17:57:57.000000000 +0000 @@ -336,6 +336,11 @@ } +attachment_id = { + 'type': 'string', 'format': 'uuid' +} + + volume_type = { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255 } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/availability_zones.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/availability_zones.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/availability_zones.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/availability_zones.py 2020-04-10 17:57:57.000000000 +0000 @@ -110,7 +110,7 @@ def get_availability_zones(context, hostapi, get_only_available=False, - with_hosts=False, enabled_services=None): + with_hosts=False, services=None): """Return available and unavailable zones on demand. 
:param context: nova auth RequestContext @@ -121,50 +121,65 @@ available zones only :param with_hosts: whether to return hosts part of the AZs :type with_hosts: bool - :param enabled_services: list of enabled services to use; if None - enabled services will be retrieved from all cells with zones set + :param services: list of services to use; if None, enabled services will be + retrieved from all cells with zones set """ - if enabled_services is None: - enabled_services = hostapi.service_get_all( - context, {'disabled': False}, set_zones=True, all_cells=True) - - available_zones = [] - for (zone, host) in [(service['availability_zone'], service['host']) - for service in enabled_services]: - if not with_hosts and zone not in available_zones: - available_zones.append(zone) - elif with_hosts: - _available_zones = dict(available_zones) - zone_hosts = _available_zones.setdefault(zone, set()) - zone_hosts.add(host) - # .items() returns a view in Py3, casting it to list for Py2 compat - available_zones = list(_available_zones.items()) - - if not get_only_available: - # TODO(mriedem): We could probably optimize if we know that we're going - # to get both enabled and disabled services and just pull them all from - # the cell DBs at the same time and then filter into enabled/disabled - # lists in python. - disabled_services = hostapi.service_get_all( - context, {'disabled': True}, set_zones=True, all_cells=True) - not_available_zones = [] - azs = available_zones if not with_hosts else dict(available_zones) - zones = [(service['availability_zone'], service['host']) - for service in disabled_services - if service['availability_zone'] not in azs] - for (zone, host) in zones: - if not with_hosts and zone not in not_available_zones: - not_available_zones.append(zone) - elif with_hosts: - _not_available_zones = dict(not_available_zones) - zone_hosts = _not_available_zones.setdefault(zone, set()) - zone_hosts.add(host) - # .items() returns a view in Py3, casting it to list for Py2 - # compat - not_available_zones = list(_not_available_zones.items()) - return (available_zones, not_available_zones) + if services is None: + services = hostapi.service_get_all( + context, set_zones=True, all_cells=True) + + enabled_services = [] + disabled_services = [] + for service in services: + if not service.disabled: + enabled_services.append(service) + else: + disabled_services.append(service) + + if with_hosts: + return _get_availability_zones_with_hosts( + enabled_services, disabled_services, get_only_available) else: - return available_zones + return _get_availability_zones( + enabled_services, disabled_services, get_only_available) + + +def _get_availability_zones( + enabled_services, disabled_services, get_only_available=False): + + available_zones = { + service['availability_zone'] for service in enabled_services + } + + if get_only_available: + return sorted(available_zones) + + not_available_zones = { + service['availability_zone'] for service in disabled_services + if service['availability_zone'] not in available_zones + } + + return sorted(available_zones), sorted(not_available_zones) + + +def _get_availability_zones_with_hosts( + enabled_services, disabled_services, get_only_available=False): + + available_zones = collections.defaultdict(set) + for service in enabled_services: + available_zones[service['availability_zone']].add(service['host']) + + if get_only_available: + return sorted(available_zones.items()) + + not_available_zones = collections.defaultdict(set) + for service in disabled_services: + if 
service['availability_zone'] in available_zones: + continue + + not_available_zones[service['availability_zone']].add(service['host']) + + return sorted(available_zones.items()), sorted(not_available_zones.items()) def get_instance_availability_zone(context, instance): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cache_utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cache_utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cache_utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cache_utils.py 2020-04-10 17:57:57.000000000 +0000 @@ -109,22 +109,8 @@ return None return value - def get_or_create(self, key, creator): - return self.region.get_or_create(key, creator) - def set(self, key, value): return self.region.set(key, value) - def add(self, key, value): - return self.region.get_or_create(key, lambda: value) - def delete(self, key): return self.region.delete(key) - - def get_multi(self, keys): - values = self.region.get_multi(keys) - return [None if value is cache.NO_VALUE else value for value in - values] - - def delete_multi(self, keys): - return self.region.delete_multi(keys) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cmd/baseproxy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cmd/baseproxy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cmd/baseproxy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cmd/baseproxy.py 2020-04-10 17:57:57.000000000 +0000 @@ -72,6 +72,8 @@ cert=CONF.cert, key=CONF.key, ssl_only=CONF.ssl_only, + ssl_ciphers=CONF.console.ssl_ciphers, + ssl_minimum_version=CONF.console.ssl_minimum_version, daemon=CONF.daemon, record=CONF.record, traffic=not CONF.daemon, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cmd/manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cmd/manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/cmd/manage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/cmd/manage.py 2020-04-10 17:57:57.000000000 +0000 @@ -32,6 +32,7 @@ from dateutil import parser as dateutil_parser from keystoneauth1 import exceptions as ks_exc from neutronclient.common import exceptions as neutron_client_exc +import os_resource_classes as orc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging @@ -2089,7 +2090,8 @@ 'changes. The return code should be 4.') @args('--instance', metavar='', dest='instance_uuid', help='UUID of a specific instance to process. If specified ' - '--max-count has no effect.') + '--max-count has no effect. ' + 'The --cell and --instance options are mutually exclusive.') @args('--skip-port-allocations', action='store_true', dest='skip_port_allocations', default=False, help='Skip the healing of the resource allocations of bound ports. ' @@ -2098,8 +2100,12 @@ 'not use such a feature then the performance impact of ' 'querying neutron ports for each instance can be avoided with ' 'this flag.') + @args('--cell', metavar='', dest='cell_uuid', + help='Heal allocations within a specific cell. ' + 'The --cell and --instance options are mutually exclusive.') def heal_allocations(self, max_count=None, verbose=False, dry_run=False, - instance_uuid=None, skip_port_allocations=False): + instance_uuid=None, skip_port_allocations=False, + cell_uuid=None): """Heals instance allocations in the Placement service Return codes: @@ -2116,7 +2122,6 @@ * 127: Invalid input. 
""" # NOTE(mriedem): Thoughts on ways to expand this: - # - allow passing a specific cell to heal # - allow filtering on enabled/disabled cells # - add a force option to force allocations for instances which have # task_state is not None (would get complicated during a migration); @@ -2134,6 +2139,13 @@ if verbose: output = lambda msg: print(msg) + # If user has provided both cell and instance + # Throw an error + if instance_uuid and cell_uuid: + print(_('The --cell and --instance options ' + 'are mutually exclusive.')) + return 127 + # TODO(mriedem): Rather than --max-count being both a total and batch # count, should we have separate options to be specific, i.e. --total # and --batch-size? Then --batch-size defaults to 50 and --total @@ -2169,6 +2181,15 @@ '"nova-manage cell_v2 map_instances".' % instance_uuid) return 127 + elif cell_uuid: + try: + # validate cell_uuid + cell = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) + # create CellMappingList + cells = objects.CellMappingList(objects=[cell]) + except exception.CellMappingNotFound: + print(_('Cell with uuid %s was not found.') % cell_uuid) + return 127 else: cells = objects.CellMappingList.get_all(ctxt) if not cells: @@ -2416,6 +2437,300 @@ return return_code + def _get_instances_and_current_migrations(self, ctxt, cn_uuid): + if self.cn_uuid_mapping.get(cn_uuid): + cell_uuid, cn_host, cn_node = self.cn_uuid_mapping[cn_uuid] + else: + # We need to find the compute node record from all cells. + results = context.scatter_gather_skip_cell0( + ctxt, objects.ComputeNode.get_by_uuid, cn_uuid) + for result_cell_uuid, result in results.items(): + if not context.is_cell_failure_sentinel(result): + cn = result + cell_uuid = result_cell_uuid + break + else: + return False + cn_host, cn_node = (cn.host, cn.hypervisor_hostname) + self.cn_uuid_mapping[cn_uuid] = (cell_uuid, cn_host, cn_node) + cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) + + # Get all the active instances from this compute node + if self.instances_mapping.get(cn_uuid): + inst_uuids = self.instances_mapping[cn_uuid] + else: + # Get the instance list record from the cell. + with context.target_cell(ctxt, cell_mapping) as cctxt: + instances = objects.InstanceList.get_by_host_and_node( + cctxt, cn_host, cn_node, expected_attrs=[]) + inst_uuids = [instance.uuid for instance in instances] + self.instances_mapping[cn_uuid] = inst_uuids + + # Get all *active* migrations for this compute node + # NOTE(sbauza): Since migrations are transient, it's better to not + # cache the results as they could be stale + with context.target_cell(ctxt, cell_mapping) as cctxt: + migs = objects.MigrationList.get_in_progress_by_host_and_node( + cctxt, cn_host, cn_node) + mig_uuids = [migration.uuid for migration in migs] + + return (inst_uuids, mig_uuids) + + def _delete_allocations_from_consumer(self, ctxt, placement, provider, + consumer_uuid, consumer_type): + """Deletes allocations from a resource provider with consumer UUID. + + :param ctxt: nova.context.RequestContext + :param placement: nova.scheduler.client.report.SchedulerReportClient + to communicate with the Placement service API. + :param provider: Resource Provider to look at. + :param consumer_uuid: the consumer UUID having allocations. + :param consumer_type: the type of consumer, + either 'instance' or 'migration' + :returns: bool whether the allocations were deleted. 
+ """ + # We need to be careful and only remove the allocations + # against this specific RP or we would delete the + # whole instance usage and then it would require some + # healing. + # TODO(sbauza): Remove this extra check once placement + # supports querying allocation delete on both + # consumer and resource provider parameters. + allocations = placement.get_allocs_for_consumer( + ctxt, consumer_uuid) + if len(allocations['allocations']) > 1: + # This consumer has resources spread among multiple RPs (think + # nested or shared for example) + # We then need to just update the usage to remove + # the orphaned resources on the specific RP + del allocations['allocations'][provider['uuid']] + try: + placement.put_allocations( + ctxt, consumer_uuid, allocations) + except exception.AllocationUpdateFailed: + return False + + else: + try: + placement.delete_allocation_for_instance( + ctxt, consumer_uuid, consumer_type) + except exception.AllocationDeleteFailed: + return False + return True + + def _check_orphaned_allocations_for_provider(self, ctxt, placement, + output, provider, + delete): + """Finds orphaned allocations for a specific resource provider. + + :param ctxt: nova.context.RequestContext + :param placement: nova.scheduler.client.report.SchedulerReportClient + to communicate with the Placement service API. + :param output: function that takes a single message for verbose output + :param provider: Resource Provider to look at. + :param delete: deletes the found orphaned allocations. + :return: a tuple (, ) + """ + num_processed = 0 + faults = 0 + + # TODO(sbauza): Are we sure we have all Nova RCs ? + # FIXME(sbauza): Possibly use consumer types once Placement API + # supports them. + # NOTE(sbauza): We check allocations having *any* below RC, not having + # *all* of them. + NOVA_RCS = [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB, orc.VGPU, + orc.NET_BW_EGR_KILOBIT_PER_SEC, + orc.NET_BW_IGR_KILOBIT_PER_SEC, + orc.PCPU, orc.MEM_ENCRYPTION_CONTEXT] + + # Since the RP can be a child RP, we need to get the root RP as it's + # the compute node UUID + # NOTE(sbauza): In case Placement doesn't support 1.14 microversion, + # that means we don't have nested RPs. + # Since we ask for microversion 1.14, all RPs have a root RP UUID. + cn_uuid = provider.get("root_provider_uuid") + # Now get all the existing instances and active migrations for this + # compute node + result = self._get_instances_and_current_migrations(ctxt, cn_uuid) + if result is False: + # We don't want to hard stop here because the compute service could + # have disappear while we could still have orphaned allocations. 
+            output(_('The compute node for UUID %s cannot be '
+                     'found') % cn_uuid)
+        inst_uuids, mig_uuids = result or ([], [])
+        try:
+            pallocs = placement.get_allocations_for_resource_provider(
+                ctxt, provider['uuid'])
+        except exception.ResourceProviderAllocationRetrievalFailed:
+            print(_('Not able to find allocations for resource '
+                    'provider %s.') % provider['uuid'])
+            raise
+
+        # Verify all allocations for each consumer UUID
+        for consumer_uuid, consumer_resources in six.iteritems(
+                pallocs.allocations):
+            consumer_allocs = consumer_resources['resources']
+            if any(rc in NOVA_RCS
+                   for rc in consumer_allocs):
+                # We reset the consumer type for each allocation
+                consumer_type = None
+                # This is an allocation for Nova resources
+                # We need to guess whether the instance was deleted
+                # or if the instance is currently migrating
+                if not (consumer_uuid in inst_uuids or
+                        consumer_uuid in mig_uuids):
+                    # By default we suspect the orphaned allocation was for a
+                    # migration...
+                    consumer_type = 'migration'
+                    if not (consumer_uuid in inst_uuids):
+                        # ... but if we can't find it for an instance either,
+                        # treat it as an instance allocation.
+                        consumer_type = 'instance'
+                if consumer_type is not None:
+                    output(_('Allocations were set against consumer UUID '
+                             '%(consumer_uuid)s but no existing instances or '
+                             'active migrations are related. ')
+                           % {'consumer_uuid': consumer_uuid})
+                    if delete:
+                        deleted = self._delete_allocations_from_consumer(
+                            ctxt, placement, provider, consumer_uuid,
+                            consumer_type)
+                        if not deleted:
+                            print(_('Not able to delete allocations '
+                                    'for consumer UUID %s')
+                                  % consumer_uuid)
+                            faults += 1
+                            continue
+                        output(_('Deleted allocations for consumer UUID '
+                                 '%(consumer_uuid)s on Resource Provider '
+                                 '%(rp)s: %(allocations)s')
+                               % {'consumer_uuid': consumer_uuid,
+                                  'rp': provider['uuid'],
+                                  'allocations': consumer_allocs})
+                    else:
+                        output(_('Allocations for consumer UUID '
+                                 '%(consumer_uuid)s on Resource Provider '
+                                 '%(rp)s can be deleted: '
+                                 '%(allocations)s')
+                               % {'consumer_uuid': consumer_uuid,
+                                  'rp': provider['uuid'],
+                                  'allocations': consumer_allocs})
+                    num_processed += 1
+        return (num_processed, faults)
+
+    # TODO(sbauza): Move this to the scheduler report client?
+    def _get_resource_provider(self, context, placement, uuid):
+        """Returns a single Resource Provider by its UUID.
+
+        :param context: The nova.context.RequestContext auth context
+        :param placement: nova.scheduler.client.report.SchedulerReportClient
+            to communicate with the Placement service API.
+        :param uuid: A specific Resource Provider UUID
+        :return: the existing resource provider.
+        :raises: keystoneauth1.exceptions.base.ClientException on failure to
+            communicate with the placement API
+        """
+
+        resource_providers = self._get_resource_providers(context, placement,
+                                                          uuid=uuid)
+        if not resource_providers:
+            # The endpoint never returns a 404; it returns an empty list
+            # instead
+            raise exception.ResourceProviderNotFound(name_or_uuid=uuid)
+        return resource_providers[0]
+
+    def _get_resource_providers(self, context, placement, **kwargs):
+        """Returns all resource providers regardless of their relationships.
+
+        :param context: The nova.context.RequestContext auth context
+        :param placement: nova.scheduler.client.report.SchedulerReportClient
+            to communicate with the Placement service API.
+        :param kwargs: extra attributes for the query string
+        :return: list of resource providers.
+        :raises: keystoneauth1.exceptions.base.ClientException on failure to
+            communicate with the placement API
+        """
+        url = '/resource_providers'
+        if 'uuid' in kwargs:
+            url += '?uuid=%s' % kwargs['uuid']
+
+        resp = placement.get(url, global_request_id=context.global_id,
+                             version='1.14')
+        if resp is None:
+            raise exception.PlacementAPIConnectFailure()
+
+        data = resp.json()
+        resource_providers = data.get('resource_providers')
+
+        return resource_providers
+
+    @action_description(
+        _("Audits orphaned allocations that no longer correspond to "
+          "existing instance resources. This command requires that "
+          "the [api_database]/connection and [placement] configuration "
+          "options are set."))
+    @args('--verbose', action='store_true', dest='verbose', default=False,
+          help='Provide verbose output during execution.')
+    @args('--resource_provider', metavar='',
+          dest='provider_uuid',
+          help='UUID of a specific resource provider to verify.')
+    @args('--delete', action='store_true', dest='delete', default=False,
+          help='Deletes orphaned allocations that were found.')
+    def audit(self, verbose=False, provider_uuid=None, delete=False):
+        """Provides information about orphaned allocations that can be removed
+
+        Return codes:
+
+        * 0: Command completed successfully and no orphaned allocations exist.
+        * 1: An unexpected error happened during run.
+        * 3: Orphaned allocations were detected.
+        * 4: Orphaned allocations were detected and deleted.
+        * 127: Invalid input.
+        """
+
+        ctxt = context.get_admin_context()
+        output = lambda msg: None
+        if verbose:
+            output = lambda msg: print(msg)
+
+        placement = report.SchedulerReportClient()
+        # Resets two in-memory dicts for tracking instances per compute node
+        self.cn_uuid_mapping = collections.defaultdict(tuple)
+        self.instances_mapping = collections.defaultdict(list)
+
+        num_processed = 0
+        faults = 0
+
+        if provider_uuid:
+            try:
+                resource_provider = self._get_resource_provider(
+                    ctxt, placement, provider_uuid)
+            except exception.ResourceProviderNotFound:
+                print(_('Resource provider with UUID %s does not exist.') %
+                      provider_uuid)
+                return 127
+            resource_providers = [resource_provider]
+        else:
+            resource_providers = self._get_resource_providers(ctxt, placement)
+
+        for provider in resource_providers:
+            nb_p, faults = self._check_orphaned_allocations_for_provider(
+                ctxt, placement, output, provider, delete)
+            num_processed += nb_p
+            if faults > 0:
+                print(_('The Resource Provider %s had problems when '
+                        'deleting allocations. Stopping now. Please fix the '
+                        'problem by hand and run again.') %
+                      provider['uuid'])
+                return 1
+        if num_processed > 0:
+            suffix = 's.' if num_processed > 1 else '.'
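+            # e.g. "Processed 3 allocations." or "Processed 1 allocation."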
+ output(_('Processed %(num)s allocation%(suffix)s') + % {'num': num_processed, + 'suffix': suffix}) + return 4 if delete else 3 + return 0 + CATEGORIES = { 'api_db': ApiDbCommands, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/api.py 2020-04-10 17:57:57.000000000 +0000 @@ -37,6 +37,7 @@ import six from six.moves import range +from nova.accelerator import cyborg from nova import availability_zones from nova import block_device from nova.compute import flavors @@ -59,7 +60,7 @@ from nova import exception_wrapper from nova import hooks from nova.i18n import _ -from nova import image +from nova.image import glance from nova.network import constants from nova.network import model as network_model from nova.network import neutron @@ -283,13 +284,23 @@ return image_meta +def block_accelerators(func): + @functools.wraps(func) + def wrapper(self, context, instance, *args, **kwargs): + dp_name = instance.flavor.extra_specs.get('accel:device_profile') + if dp_name: + raise exception.ForbiddenWithAccelerators() + return func(self, context, instance, *args, **kwargs) + return wrapper + + @profiler.trace_cls("compute_api") class API(base.Base): """API for interacting with the compute manager.""" def __init__(self, image_api=None, network_api=None, volume_api=None, **kwargs): - self.image_api = image_api or image.API() + self.image_api = image_api or glance.API() self.network_api = network_api or neutron.API() self.volume_api = volume_api or cinder.API() self._placementclient = None # Lazy-load on first access. @@ -384,13 +395,10 @@ :param context: The nova request context. :type context: nova.context.RequestContext - :param secgroups: list of requested security group names, or uuids in - the case of Neutron. + :param secgroups: list of requested security group names :type secgroups: list - :returns: list of requested security group names unmodified if using - nova-network. If using Neutron, the list returned is all uuids. - Note that 'default' is a special case and will be unmodified if - it's requested. + :returns: list of requested security group UUIDs; note that 'default' + is a special case and will be unmodified if it's requested. """ security_groups = [] for secgroup in secgroups: @@ -398,12 +406,8 @@ if secgroup == "default": security_groups.append(secgroup) continue - secgroup_dict = security_group_api.get(context, secgroup) - if not secgroup_dict: - raise exception.SecurityGroupNotFoundForProject( - project_id=context.project_id, security_group_id=secgroup) - - security_groups.append(secgroup_dict['id']) + secgroup_uuid = security_group_api.validate_name(context, secgroup) + security_groups.append(secgroup_uuid) return security_groups @@ -462,7 +466,7 @@ # image (below) and not any image URIs that might have been # supplied. # TODO(jaypipes): Get rid of this silliness once we move to a real - # Image object and hide all of that stuff within nova.image.api. + # Image object and hide all of that stuff within nova.image.glance kernel_id = kernel_image['id'] if ramdisk_id is not None: @@ -567,6 +571,31 @@ root_bdm, validate_numa) @staticmethod + def _detect_nonbootable_image_from_properties(image_id, image): + """Check image for a property indicating it's nonbootable. 
+ + This is called from the API service to ensure that there are + no known image properties indicating that this image is of a + type that we do not support booting from. + + Currently the only such property is 'cinder_encryption_key_id'. + + :param image_id: UUID of the image + :param image: a dict representation of the image including properties + :raises: ImageUnacceptable if the image properties indicate + that booting this image is not supported + """ + if not image: + return + + image_properties = image.get('properties', {}) + if image_properties.get('cinder_encryption_key_id'): + reason = _('Direct booting of an image uploaded from an ' + 'encrypted volume is unsupported.') + raise exception.ImageUnacceptable(image_id=image_id, + reason=reason) + + @staticmethod def _validate_flavor_image_nostatus(context, image, instance_type, root_bdm, validate_numa=True, validate_pci=False): @@ -866,6 +895,7 @@ validate_numa=True): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, files_to_inject) + self._detect_nonbootable_image_from_properties(image_id, image) self._validate_flavor_image(context, image_id, image, instance_type, root_bdm, validate_numa=validate_numa) @@ -895,17 +925,17 @@ # When using Neutron, _check_requested_secgroups will translate and # return any requested security group names to uuids. - security_groups = ( - self._check_requested_secgroups(context, security_groups)) + security_groups = self._check_requested_secgroups( + context, security_groups) # Note: max_count is the number of instances requested by the user, # max_network_count is the maximum number of instances taking into # account any network quotas - max_network_count = self._check_requested_networks(context, - requested_networks, max_count) + max_network_count = self._check_requested_networks( + context, requested_networks, max_count) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( - context, kernel_id, ramdisk_id, boot_meta) + context, kernel_id, ramdisk_id, boot_meta) config_drive = self._check_config_drive(config_drive) @@ -1186,6 +1216,12 @@ # base_options to match the volume zone. base_options['availability_zone'] = volume_az LOG.debug("Going to run %s instances...", num_instances) + extra_specs = instance_type.extra_specs + dp_name = extra_specs.get('accel:device_profile') + dp_request_groups = [] + if dp_name: + dp_request_groups = cyborg.get_device_profile_request_groups( + context, dp_name) try: for i in range(num_instances): # Create a uuid for the instance so we can store the @@ -1221,6 +1257,9 @@ if destination: req_spec.requested_destination = destination + if dp_request_groups: + req_spec.requested_resources.extend(dp_request_groups) + # Create an instance object, but do not store in db yet. 
             instance = objects.Instance(context=context)
             instance.uuid = instance_uuid
@@ -2110,6 +2149,23 @@
                 return True
         return False
 
+    def _local_delete_cleanup(self, context, instance):
+        # NOTE(aarents): Ensure instance allocation is cleared and instance
+        # mapping queued as deleted before _delete() returns
+        try:
+            self.placementclient.delete_allocation_for_instance(
+                context, instance.uuid)
+        except exception.AllocationDeleteFailed:
+            LOG.info("Allocation delete failed during local delete cleanup.",
+                     instance=instance)
+
+        try:
+            self._update_queued_for_deletion(context, instance, True)
+        except exception.InstanceMappingNotFound:
+            LOG.info("Instance Mapping does not exist while attempting "
+                     "local delete cleanup.",
+                     instance=instance)
+
     def _attempt_delete_of_buildrequest(self, context, instance):
         # If there is a BuildRequest then the instance may not have been
         # written to a cell db yet. Delete the BuildRequest here, which
@@ -2145,6 +2201,7 @@
         if not instance.host and not may_have_ports_or_volumes:
             try:
                 if self._delete_while_booting(context, instance):
+                    self._local_delete_cleanup(context, instance)
                     return
                 # If instance.host was not set it's possible that the Instance
                 # object here was pulled from a BuildRequest object and is not
@@ -2163,9 +2220,11 @@
                 except exception.InstanceNotFound:
                     pass
                 # The instance was deleted or is already gone.
+                self._local_delete_cleanup(context, instance)
                 return
             if not instance:
                 # Instance is already deleted.
+                self._local_delete_cleanup(context, instance)
                 return
         except exception.ObjectActionError:
             # NOTE(melwitt): This means the instance.host changed
@@ -2178,6 +2237,7 @@
             cell, instance = self._lookup_instance(context, instance.uuid)
             if not instance:
                 # Instance is already deleted
+                self._local_delete_cleanup(context, instance)
                 return
 
         bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -2221,6 +2281,7 @@
                               'field, its vm_state is %(state)s.',
                               {'state': instance.vm_state},
                               instance=instance)
+                    self._local_delete_cleanup(context, instance)
                     return
                 except exception.ObjectActionError as ex:
                     # The instance's host likely changed under us as
@@ -2393,6 +2454,10 @@
 
             # cleanup volumes
             self._local_cleanup_bdm_volumes(bdms, instance, context)
+
+            # cleanup accelerator requests (ARQs)
+            compute_utils.delete_arqs_if_needed(context, instance)
+
             # Cleanup allocations in Placement since we can't do it from the
             # compute service.
             self.placementclient.delete_allocation_for_instance(
@@ -3311,6 +3376,13 @@
                                block_device_info=None,
                                reboot_type='HARD')
 
+    def _check_image_arch(self, image=None):
+        if image:
+            img_arch = image.get("properties", {}).get('hw_architecture')
+            if img_arch:
+                fields_obj.Architecture.canonicalize(img_arch)
+
+    @block_accelerators
     # TODO(stephenfin): We should expand kwargs out to named args
     @check_instance_lock
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
@@ -3357,6 +3429,7 @@
         image_id, image = self._get_image(context, image_href)
         self._check_auto_disk_config(image=image,
                                      auto_disk_config=auto_disk_config)
+        self._check_image_arch(image=image)
 
         flavor = instance.get_flavor()
         bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -3614,15 +3687,22 @@
             availability_zones.get_host_availability_zone(
                 context, migration.source_compute))
 
-        # Conductor updated the RequestSpec.flavor during the initial resize
-        # operation to point at the new flavor, so we need to update the
-        # RequestSpec to point back at the original flavor, otherwise
-        # subsequent move operations through the scheduler will be using the
-        # wrong flavor.
+ # If this was a resize, the conductor may have updated the + # RequestSpec.flavor field (to point at the new flavor) and the + # RequestSpec.numa_topology field (to reflect the new flavor's extra + # specs) during the initial resize operation, so we need to update the + # RequestSpec to point back at the original flavor and reflect the NUMA + # settings of this flavor, otherwise subsequent move operations through + # the scheduler will be using the wrong values. There's no need to do + # this if the flavor hasn't changed though and we're migrating rather + # than resizing. reqspec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) - reqspec.flavor = instance.old_flavor - reqspec.save() + if reqspec.flavor['id'] != instance.old_flavor['id']: + reqspec.flavor = instance.old_flavor + reqspec.numa_topology = hardware.numa_get_constraints( + instance.old_flavor, instance.image_meta) + reqspec.save() # NOTE(gibi): This is a performance optimization. If the network info # cache does not have ports with allocations in the binding profile @@ -3781,6 +3861,7 @@ return node + @block_accelerators @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) @check_instance_host(check_is_up=True) @@ -3879,6 +3960,11 @@ context, instance.uuid) request_spec.ignore_hosts = filter_properties['ignore_hosts'] + # don't recalculate the NUMA topology unless the flavor has changed + if not same_instance_type: + request_spec.numa_topology = hardware.numa_get_constraints( + new_instance_type, instance.image_meta) + instance.task_state = task_states.RESIZE_PREP instance.progress = 0 instance.auto_disk_config = auto_disk_config or False @@ -3972,6 +4058,7 @@ allow_same_host = CONF.allow_resize_to_same_host return allow_same_host + @block_accelerators @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]) @@ -4136,6 +4223,7 @@ return self.compute_rpcapi.get_instance_diagnostics(context, instance=instance) + @block_accelerators @reject_sev_instances(instance_actions.SUSPEND) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) @@ -4816,6 +4904,7 @@ diff=diff) return _metadata + @block_accelerators @reject_sev_instances(instance_actions.LIVE_MIGRATION) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED]) @@ -4945,6 +5034,7 @@ self.compute_rpcapi.live_migration_abort(context, instance, migration.id) + @block_accelerators @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.ERROR]) def evacuate(self, context, instance, host, on_shared_storage, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/manager.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/manager.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/manager.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/manager.py 2020-04-10 17:57:57.000000000 +0000 @@ -56,6 +56,7 @@ import six from six.moves import range +from nova.accelerator import cyborg from nova import block_device from nova.compute import api as compute from nova.compute import build_results @@ -74,7 +75,7 @@ from nova import exception_wrapper from nova import hooks from nova.i18n import _ -from nova import image +from nova.image import glance from nova import manager from nova.network import model as network_model from nova.network import neutron @@ -558,7 +559,7 @@ class ComputeManager(manager.Manager): """Manages the running 
instances from creation to destruction.""" - target = messaging.Target(version='5.10') + target = messaging.Target(version='5.11') def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" @@ -569,7 +570,7 @@ self.virtapi = ComputeVirtAPI(self) self.network_api = neutron.API() self.volume_api = cinder.API() - self.image_api = image.API() + self.image_api = glance.API() self._last_bw_usage_poll = 0 self._bw_usage_supported = True self.compute_api = compute.API() @@ -1383,6 +1384,8 @@ whitelist.Whitelist(CONF.pci.passthrough_whitelist) nova.conf.neutron.register_dynamic_opts(CONF) + # Even if only libvirt uses them, make it available for all drivers + nova.conf.devices.register_dynamic_opts(CONF) # Override the number of concurrent disk operations allowed if the # user has specified a limit. @@ -2074,7 +2077,7 @@ filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, - node=None, limits=None, host_list=None): + node=None, limits=None, host_list=None, accel_uuids=None): @utils.synchronized(instance.uuid) def _locked_do_build_and_run_instance(*args, **kwargs): @@ -2126,7 +2129,8 @@ context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, - block_device_mapping, node, limits, host_list) + block_device_mapping, node, limits, host_list, + accel_uuids) def _check_device_tagging(self, requested_networks, block_device_mapping): tagging_requested = False @@ -2163,7 +2167,7 @@ def _do_build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, - node=None, limits=None, host_list=None): + node=None, limits=None, host_list=None, accel_uuids=None): try: LOG.debug('Starting instance...', instance=instance) @@ -2193,7 +2197,7 @@ self._build_and_run_instance(context, instance, image, decoded_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, - filter_properties, request_spec) + filter_properties, request_spec, accel_uuids) LOG.info('Took %0.2f seconds to build instance.', timer.elapsed(), instance=instance) return build_results.ACTIVE @@ -2303,7 +2307,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties, - request_spec=None): + request_spec=None, accel_uuids=None): image_name = image.get('name') self._notify_about_instance_usage(context, instance, 'create.start', @@ -2321,15 +2325,13 @@ self._check_device_tagging(requested_networks, block_device_mapping) self._check_trusted_certs(instance) - request_group_resource_providers_mapping = \ - self._get_request_group_mapping(request_spec) + provider_mapping = self._get_request_group_mapping(request_spec) - if request_group_resource_providers_mapping: + if provider_mapping: try: compute_utils\ .update_pci_request_spec_with_allocated_interface_name( - context, self.reportclient, instance, - request_group_resource_providers_mapping) + context, self.reportclient, instance, provider_mapping) except (exception.AmbiguousResourceProviderForPCIRequest, exception.UnexpectedResourceProviderNameForPCIRequest ) as e: @@ -2352,13 +2354,10 @@ scheduler_hints) image_meta = objects.ImageMeta.from_dict(image) - request_group_resource_providers_mapping = \ - 
self._get_request_group_mapping(request_spec) - with self._build_resources(context, instance, requested_networks, security_groups, image_meta, - block_device_mapping, - request_group_resource_providers_mapping) as resources: + block_device_mapping, provider_mapping, + accel_uuids) as resources: instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING # NOTE(JoshNang) This also saves the changes to the @@ -2368,13 +2367,15 @@ task_states.BLOCK_DEVICE_MAPPING) block_device_info = resources['block_device_info'] network_info = resources['network_info'] + accel_info = resources['accel_info'] LOG.debug('Start spawning the instance on the hypervisor.', instance=instance) with timeutils.StopWatch() as timer: self.driver.spawn(context, instance, image_meta, injected_files, admin_password, allocs, network_info=network_info, - block_device_info=block_device_info) + block_device_info=block_device_info, + accel_info=accel_info) LOG.info('Took %0.2f seconds to spawn the instance on ' 'the hypervisor.', timer.elapsed(), instance=instance) @@ -2409,9 +2410,8 @@ context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping, tb=tb) - except (exception.FixedIpLimitExceeded, - exception.NoMoreNetworks, exception.NoMoreFixedIps) as e: - LOG.warning('No more network or fixed IP to be allocated', + except exception.NoMoreFixedIps as e: + LOG.warning('No more fixed IP to be allocated', instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) @@ -2519,10 +2519,18 @@ self.host, phase=fields.NotificationPhase.END, bdms=block_device_mapping) + def _build_resources_cleanup(self, instance, network_info): + # Make sure the async call finishes + if network_info is not None: + network_info.wait(do_raise=False) + self.driver.clean_networks_preparation(instance, + network_info) + self.driver.failed_spawn_cleanup(instance) + @contextlib.contextmanager def _build_resources(self, context, instance, requested_networks, security_groups, image_meta, block_device_mapping, - resource_provider_mapping): + resource_provider_mapping, accel_uuids): resources = {} network_info = None try: @@ -2573,33 +2581,35 @@ except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): with excutils.save_and_reraise_exception(): - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - self.driver.clean_networks_preparation(instance, - network_info) - self.driver.failed_spawn_cleanup(instance) + self._build_resources_cleanup(instance, network_info) except (exception.UnexpectedTaskStateError, exception.OverQuota, exception.InvalidBDM) as e: - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - self.driver.clean_networks_preparation(instance, network_info) - self.driver.failed_spawn_cleanup(instance) + self._build_resources_cleanup(instance, network_info) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: LOG.exception('Failure prepping block device', instance=instance) - # Make sure the async call finishes - if network_info is not None: - network_info.wait(do_raise=False) - self.driver.clean_networks_preparation(instance, network_info) - self.driver.failed_spawn_cleanup(instance) + self._build_resources_cleanup(instance, network_info) msg = _('Failure prepping block device.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) + arqs = [] + 
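+        # The ARQ binding itself was kicked off by the conductor;
+        # _get_bound_arq_resources() below only waits (up to
+        # CONF.arq_binding_timeout) for Cyborg's binding notifications
+        # before fetching the bound ARQs.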
dp_name = instance.flavor.extra_specs.get('accel:device_profile') + try: + if dp_name: + arqs = self._get_bound_arq_resources( + context, dp_name, instance, accel_uuids) + except (Exception, eventlet.timeout.Timeout) as exc: + LOG.exception(exc.format_message()) + self._build_resources_cleanup(instance, network_info) + compute_utils.delete_arqs_if_needed(context, instance) + msg = _('Failure getting accelerator requests.') + raise exception.BuildAbortException(instance_uuid=instance.uuid, + reason=msg) + resources['accel_info'] = arqs + try: yield resources except Exception as exc: @@ -2629,9 +2639,104 @@ raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=six.text_type(exc)) + finally: + # Call Cyborg to delete accelerator requests + compute_utils.delete_arqs_if_needed(context, instance) + + def _get_bound_arq_resources(self, context, dp_name, instance, arq_uuids): + """Get bound accelerator requests. + + The ARQ binding was kicked off in the conductor as an async + operation. Here we wait for the notification from Cyborg. + + If the notification arrived before this point, which can happen + in many/most cases (see [1]), it will be lost. To handle that, + we use exit_wait_early. + [1] https://review.opendev.org/#/c/631244/46/nova/compute/ + manager.py@2627 + + :param dp_name: Device profile name. Caller ensures this is valid. + :param instance: instance object + :param arq_uuids: List of accelerator request (ARQ) UUIDs. + :returns: List of ARQs for which bindings have completed, + successfully or otherwise + """ + + cyclient = cyborg.get_client(context) + if arq_uuids is None: + arqs = cyclient.get_arqs_for_instance(instance.uuid) + arq_uuids = [arq['uuid'] for arq in arqs] + events = [('accelerator-request-bound', arq_uuid) + for arq_uuid in arq_uuids] + + timeout = CONF.arq_binding_timeout + with self.virtapi.wait_for_instance_event( + instance, events, deadline=timeout): + resolved_arqs = cyclient.get_arqs_for_instance( + instance.uuid, only_resolved=True) + # Events for these resolved ARQs may have already arrived. + # Such 'early' events need to be ignored. + early_events = [('accelerator-request-bound', arq['uuid']) + for arq in resolved_arqs] + if early_events: + self.virtapi.exit_wait_early(early_events) + + # Since a timeout in wait_for_instance_event will raise, we get + # here only if all binding events have been received. + resolved_uuids = [arq['uuid'] for arq in resolved_arqs] + if sorted(resolved_uuids) != sorted(arq_uuids): + # Query Cyborg to get all. + arqs = cyclient.get_arqs_for_instance(instance.uuid) + else: + arqs = resolved_arqs + return arqs def _cleanup_allocated_networks(self, context, instance, requested_networks): + """Cleanup networks allocated for instance. + + :param context: nova request context + :param instance: nova.objects.instance.Instance object + :param requested_networks: nova.objects.NetworkRequestList + """ + LOG.debug('Unplugging VIFs for instance', instance=instance) + + network_info = instance.get_network_info() + + # NOTE(stephenfin) to avoid nova destroying the instance without + # unplugging the interface, refresh network_info if it is empty. + if not network_info: + try: + network_info = self.network_api.get_instance_nw_info( + context, instance, + ) + except Exception as exc: + LOG.warning( + 'Failed to update network info cache when cleaning up ' + 'allocated networks. Stale VIFs may be left on this host.' 
+                ' Error: %s', six.text_type(exc)
+            )
+            return
+
+        try:
+            self.driver.unplug_vifs(instance, network_info)
+        except NotImplementedError:
+            # This is an optional method so ignore things if it doesn't exist
+            LOG.debug(
+                'Virt driver does not provide unplug_vifs method, so it '
+                'is not possible to determine if VIFs should be unplugged.'
+            )
+        except exception.NovaException as exc:
+            # It's possible that the instance never got as far as plugging
+            # VIFs, in which case we would see an exception which can be
+            # mostly ignored
+            LOG.warning(
+                'Cleaning up VIFs failed for instance. Error: %s',
+                six.text_type(exc), instance=instance,
+            )
+        else:
+            LOG.debug('Unplugged VIFs for instance', instance=instance)
+
         try:
             self._deallocate_network(context, instance, requested_networks)
         except Exception:
@@ -2882,6 +2987,8 @@
             self._cleanup_volumes(context, instance, bdms,
                                   raise_exc=False, detach=False)
+            # Delete Cyborg ARQs if the instance has a device profile.
+            compute_utils.delete_arqs_if_needed(context, instance)
             # if a delete task succeeded, always update vm state and task
             # state without expecting task state to be DELETING
             instance.vm_state = vm_states.DELETED
@@ -3002,9 +3109,10 @@
         network_info = self.network_api.get_instance_nw_info(context,
                                                              instance)
         block_device_info = self._get_instance_block_device_info(context,
                                                                  instance)
+        accel_info = self._get_accel_info(context, instance)
         self.driver.power_on(context, instance,
                              network_info,
-                             block_device_info)
+                             block_device_info, accel_info)

     def _delete_snapshot_of_shelved_instance(self, context, instance,
                                              snapshot_id):
@@ -3354,16 +3462,14 @@
                        allocations, rebuild_claim, scheduled_node, limits):
         """Helper to avoid deep nesting in the top-level method."""

-        request_group_resource_providers_mapping = None
+        provider_mapping = None
         if evacuate:
-            request_group_resource_providers_mapping = \
-                self._get_request_group_mapping(request_spec)
+            provider_mapping = self._get_request_group_mapping(request_spec)

-            if request_group_resource_providers_mapping:
+            if provider_mapping:
                 compute_utils.\
                     update_pci_request_spec_with_allocated_interface_name(
-                        context, self.reportclient, instance,
-                        request_group_resource_providers_mapping)
+                        context, self.reportclient, instance, provider_mapping)

         claim_context = rebuild_claim(
             context, instance, scheduled_node, allocations,
@@ -3374,7 +3480,7 @@
             context, instance, orig_image_ref, image_meta, injected_files,
             new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
             preserve_ephemeral, migration, request_spec, allocations,
-            request_group_resource_providers_mapping)
+            provider_mapping)

     @staticmethod
     def _get_image_name(image_meta):
@@ -3585,6 +3691,15 @@
             # Manager-detach
             self.detach_volume(context, volume_id, instance)

+    def _get_accel_info(self, context, instance):
+        dp_name = instance.flavor.extra_specs.get('accel:device_profile')
+        if dp_name:
+            cyclient = cyborg.get_client(context)
+            accel_info = cyclient.get_arqs_for_instance(instance.uuid)
+        else:
+            accel_info = []
+        return accel_info
+
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_event(prefix='compute')
@@ -3619,6 +3734,8 @@
         network_info = self.network_api.get_instance_nw_info(context,
                                                              instance)

+        accel_info = self._get_accel_info(context, instance)
+
         self._notify_about_instance_usage(context, instance, "reboot.start")
         compute_utils.notify_about_instance_action(
             context, instance, self.host,
@@ -3660,6 +3777,7 @@
                 network_info,
                 reboot_type,
                 block_device_info=block_device_info,
+                accel_info=accel_info,
                 bad_volumes_callback=bad_volumes_callback)
         except
Exception as error: @@ -4032,6 +4150,11 @@ rescue_image_meta = self._get_rescue_image(context, instance, rescue_image_ref) + bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( + context, instance.uuid) + block_device_info = self._get_instance_block_device_info( + context, instance, bdms=bdms) + extra_usage_info = {'rescue_image_name': self._get_image_name(rescue_image_meta)} self._notify_about_instance_usage(context, instance, @@ -4044,9 +4167,9 @@ try: self._power_off_instance(context, instance, clean_shutdown) - self.driver.rescue(context, instance, - network_info, - rescue_image_meta, admin_password) + self.driver.rescue(context, instance, network_info, + rescue_image_meta, admin_password, + block_device_info) except Exception as e: LOG.exception("Error trying to Rescue Instance", instance=instance) @@ -5035,15 +5158,13 @@ # the request spec would already have the new flavor in it from the # else block below. - request_group_resource_providers_mapping = \ - self._get_request_group_mapping(request_spec) + provider_mapping = self._get_request_group_mapping(request_spec) - if request_group_resource_providers_mapping: + if provider_mapping: try: compute_utils.\ update_pci_request_spec_with_allocated_interface_name( - context, self.reportclient, instance, - request_group_resource_providers_mapping) + context, self.reportclient, instance, provider_mapping) except (exception.AmbiguousResourceProviderForPCIRequest, exception.UnexpectedResourceProviderNameForPCIRequest ) as e: @@ -6419,8 +6540,9 @@ @utils.synchronized(instance.uuid) def do_unshelve_instance(): - self._unshelve_instance(context, instance, image, - filter_properties, node) + self._unshelve_instance( + context, instance, image, filter_properties, node, + request_spec) do_unshelve_instance() def _unshelve_instance_key_scrub(self, instance): @@ -6437,7 +6559,7 @@ instance.update(keys) def _unshelve_instance(self, context, instance, image, filter_properties, - node): + node, request_spec): LOG.info('Unshelving', instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) @@ -6470,10 +6592,20 @@ utils.get_image_from_system_metadata( instance.system_metadata)) - self.network_api.setup_instance_network_on_host(context, instance, - self.host) - network_info = self.network_api.get_instance_nw_info(context, instance) + provider_mappings = self._get_request_group_mapping(request_spec) + try: + if provider_mappings: + update = ( + compute_utils. 
+ update_pci_request_spec_with_allocated_interface_name) + update(context, self.reportclient, instance, provider_mappings) + + self.network_api.setup_instance_network_on_host( + context, instance, self.host, + provider_mappings=provider_mappings) + network_info = self.network_api.get_instance_nw_info( + context, instance) with self.rt.instance_claim(context, instance, node, allocations, limits): self.driver.spawn(context, instance, image_meta, @@ -7538,12 +7670,14 @@ migration) LOG.debug('destination check data is %s', dest_check_data) try: + allocs = self.reportclient.get_allocations_for_consumer( + ctxt, instance.uuid) migrate_data = self.compute_rpcapi.check_can_live_migrate_source( ctxt, instance, dest_check_data) if ('src_supports_numa_live_migration' in migrate_data and migrate_data.src_supports_numa_live_migration): migrate_data = self._live_migration_claim( - ctxt, instance, migrate_data, migration, limits) + ctxt, instance, migrate_data, migration, limits, allocs) elif 'dst_supports_numa_live_migration' in dest_check_data: LOG.info('Destination was ready for NUMA live migration, ' 'but source is either too old, or is set to an ' @@ -7563,7 +7697,7 @@ return migrate_data def _live_migration_claim(self, ctxt, instance, migrate_data, - migration, limits): + migration, limits, allocs): """Runs on the destination and does a resources claim, if necessary. Currently, only NUMA live migrations require it. @@ -7582,7 +7716,7 @@ # migration.dest_node here and must use self._get_nodename(). claim = self.rt.live_migration_claim( ctxt, instance, self._get_nodename(instance), migration, - limits) + limits, allocs) LOG.debug('Created live migration claim.', instance=instance) except exception.ComputeResourcesUnavailable as e: raise exception.MigrationPreCheckError( @@ -8122,9 +8256,10 @@ self.driver.live_migration_abort(instance) self._notify_live_migrate_abort_end(context, instance) - def _live_migration_cleanup_flags(self, migrate_data): - """Determine whether disks or instance path need to be cleaned up after - live migration (at source on success, at destination on rollback) + def _live_migration_cleanup_flags(self, migrate_data, migr_ctxt=None): + """Determine whether disks, instance path or other resources + need to be cleaned up after live migration (at source on success, + at destination on rollback) Block migration needs empty image at destination host before migration starts, so if any failure occurs, any empty images has to be deleted. @@ -8133,7 +8268,11 @@ newly created instance-xxx dir on the destination as a part of its rollback process + There may be other resources which need cleanup; currently this is + limited to vPMEM devices with the libvirt driver. 
+        :param migrate_data: implementation specific data
+        :param migr_ctxt: specific resources stored in migration_context
         :returns: (bool, bool) -- do_cleanup, destroy_disks
         """
         # NOTE(pkoniszewski): block migration specific params are set inside
@@ -8143,11 +8282,20 @@
         do_cleanup = False
         destroy_disks = False
         if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
+            has_vpmem = False
+            if migr_ctxt and migr_ctxt.old_resources:
+                for resource in migr_ctxt.old_resources:
+                    if ('metadata' in resource and
+                            isinstance(resource.metadata,
+                                       objects.LibvirtVPMEMDevice)):
+                        has_vpmem = True
+                        break
             # No instance booting at source host, but instance dir
             # must be deleted for preparing next block migration
             # must be deleted for preparing next live migration w/o shared
             # storage
-            do_cleanup = not migrate_data.is_shared_instance_path
+            # vpmem must be cleaned up
+            do_cleanup = not migrate_data.is_shared_instance_path or has_vpmem
             destroy_disks = not migrate_data.is_shared_block_storage
         elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
             do_cleanup = migrate_data.block_migration
@@ -8299,6 +8447,17 @@
         # destination, which will update it
         source_node = instance.node

+        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
+            migrate_data, migr_ctxt=instance.migration_context)
+
+        if do_cleanup:
+            LOG.debug('Calling driver.cleanup from _post_live_migration',
+                      instance=instance)
+            self.driver.cleanup(ctxt, instance, unplug_nw_info,
+                                destroy_disks=destroy_disks,
+                                migrate_data=migrate_data,
+                                destroy_vifs=destroy_vifs)
+
         # Define domain at destination host, without doing it,
         # pause/suspend/terminate do not work.
         post_at_dest_success = True
@@ -8313,26 +8472,6 @@
             LOG.exception("Post live migration at destination %s failed",
                           dest, instance=instance, error=error)

-        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
-            migrate_data)
-
-        if do_cleanup:
-            # NOTE(artom) By this time post_live_migration_at_destination()
-            # will have applied the migration context and saved the instance,
-            # writing a new instance NUMA topology in the process (if the
-            # intance has one). Here on the source, some drivers will call
-            # instance.save() in their cleanup() method, which would clobber
-            # the new instance NUMA topology saved by the destination with the
-            # old fields in our instance object. To prevent this, refresh our
-            # instance.
-            instance.refresh()
-            LOG.debug('Calling driver.cleanup from _post_live_migration',
-                      instance=instance)
-            self.driver.cleanup(ctxt, instance, unplug_nw_info,
-                                destroy_disks=destroy_disks,
-                                migrate_data=migrate_data,
-                                destroy_vifs=destroy_vifs)
-
         self.instance_events.clear_events_for_instance(instance)

         # NOTE(timello): make sure we update available resources on source
@@ -8580,28 +8719,6 @@
                     'rollback; compute driver did not provide migrate_data',
                     instance=instance)

-        # TODO(artom) drop_move_claim_at_destination() is new in RPC 5.3, only
-        # call it if we performed a NUMA-aware live migration (which implies us
-        # being able to send RPC 5.3). To check this, we can use the
-        # src_supports_numa_live_migration flag, as it will be set if and only
-        # if:
-        # - dst_supports_numa_live_migration made its way to the source
-        #   (meaning both dest and source are new and conductor can speak
-        #   RPC 5.3)
-        # - src_supports_numa_live_migration was set by the source driver and
-        #   passed the send-RPC-5.3 check.
-        # This check can be removed in RPC 6.0.
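The vPMEM detection added to ``_live_migration_cleanup_flags()`` above boils down to a small predicate over the migration context's ``old_resources``. A minimal standalone sketch, with plain-Python stand-ins for the nova versioned objects (the real code uses the ``'metadata' in resource`` membership test on an OVO object):

.. code:: python

    class LibvirtVPMEMDevice(object):
        """Stand-in for nova.objects.LibvirtVPMEMDevice."""


    class Resource(object):
        """Stand-in for a migration-context resource entry."""

        def __init__(self, metadata=None):
            if metadata is not None:
                self.metadata = metadata


    def has_vpmem_resources(old_resources):
        # A resource counts as vPMEM when its (optional) metadata field
        # carries the vPMEM device type; getattr() stands in for the
        # versioned-object "'metadata' in resource" check used above.
        return any(
            isinstance(getattr(res, 'metadata', None), LibvirtVPMEMDevice)
            for res in old_resources or [])


    # do_cleanup is then forced on whenever vPMEM devices are present:
    #   do_cleanup = not is_shared_instance_path or has_vpmem_resources(...)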
- if ('src_supports_numa_live_migration' in migrate_data and - migrate_data.src_supports_numa_live_migration): - LOG.debug('Calling destination to drop move claim.', - instance=instance) - self.compute_rpcapi.drop_move_claim_at_destination(context, - instance, dest) - instance.task_state = None - instance.progress = 0 - instance.drop_migration_context() - instance.save(expected_task_state=[task_states.MIGRATING]) - # NOTE(tr3buchet): setup networks on source host (really it's re-setup # for nova-network) # NOTE(mriedem): This is a no-op for neutron. @@ -8631,7 +8748,7 @@ bdms=bdms) do_cleanup, destroy_disks = self._live_migration_cleanup_flags( - migrate_data) + migrate_data, migr_ctxt=instance.migration_context) if do_cleanup: self.compute_rpcapi.rollback_live_migration_at_destination( @@ -8660,6 +8777,34 @@ 'during live migration rollback.', instance=instance) + # NOTE(luyao): We drop move_claim and migration_context after cleanup + # is complete, to ensure the specific resources claimed on destination + # are released safely. + # TODO(artom) drop_move_claim_at_destination() is new in RPC 5.3, only + # call it if we performed a NUMA-aware live migration (which implies us + # being able to send RPC 5.3). To check this, we can use the + # src_supports_numa_live_migration flag, as it will be set if and only + # if: + # - dst_supports_numa_live_migration made its way to the source + # (meaning both dest and source are new and conductor can speak + # RPC 5.3) + # - src_supports_numa_live_migration was set by the source driver and + # passed the send-RPC-5.3 check. + # This check can be removed in RPC 6.0. + if ('src_supports_numa_live_migration' in migrate_data and + migrate_data.src_supports_numa_live_migration): + LOG.debug('Calling destination to drop move claim.', + instance=instance) + self.compute_rpcapi.drop_move_claim_at_destination(context, + instance, dest) + + # NOTE(luyao): We only update instance info after rollback operations + # are complete + instance.task_state = None + instance.progress = 0 + instance.drop_migration_context() + instance.save(expected_task_state=[task_states.MIGRATING]) + self._notify_about_instance_usage(context, instance, "live_migration._rollback.end") compute_utils.notify_about_instance_action(context, instance, @@ -8668,6 +8813,9 @@ phase=fields.NotificationPhase.END, bdms=bdms) + # TODO(luyao): set migration status to 'failed' but not 'error' + # which means rollback_live_migration is done, we have successfully + # cleaned up and returned instance back to normal status. 
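The reordering above is easiest to read as a sequencing constraint: destination cleanup must finish before the move claim is dropped, and the instance record is only reset once both are done. A condensed, illustrative sketch of the resulting rollback order (the callables are stand-ins for the RPC calls in the hunk above, not the actual manager code):

.. code:: python

    def rollback_live_migration(instance, cleanup_destination,
                                drop_move_claim_at_destination):
        # 1. Remove whatever the migration created on the destination
        #    (disks, instance dir, vPMEM namespaces, ...).
        cleanup_destination()
        # 2. Only then drop the destination's move claim (NUMA-aware
        #    migrations only); the claim pins resources that the cleanup
        #    step may still need to release safely.
        drop_move_claim_at_destination()
        # 3. Reset instance bookkeeping last, so a failure in the steps
        #    above leaves the migration visibly mid-rollback.
        instance.task_state = None
        instance.progress = 0
        instance.drop_migration_context()
        instance.save()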
self._set_migration_status(migration, migration_status) @wrap_exception() @@ -8740,9 +8888,13 @@ # check_can_live_migrate_destination() self.rt.free_pci_device_claims_for_instance(context, instance) - self.driver.rollback_live_migration_at_destination( - context, instance, network_info, block_device_info, - destroy_disks=destroy_disks, migrate_data=migrate_data) + # NOTE(luyao): Apply migration_context temporarily since it's + # on destination host, we rely on instance object to cleanup + # specific resources like vpmem + with instance.mutated_migration_context(): + self.driver.rollback_live_migration_at_destination( + context, instance, network_info, block_device_info, + destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.end", @@ -9937,8 +10089,7 @@ raise exception.ExtendVolumeNotSupported() try: - self.driver.extend_volume(connection_info, - instance, + self.driver.extend_volume(context, connection_info, instance, bdm.volume_size * units.Gi) except Exception as ex: LOG.warning('Extend volume failed, ' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/resource_tracker.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/resource_tracker.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/resource_tracker.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/resource_tracker.py 2020-04-10 17:57:57.000000000 +0000 @@ -112,7 +112,7 @@ self.assigned_resources = collections.defaultdict( lambda: collections.defaultdict(set)) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def instance_claim(self, context, instance, nodename, allocations, limits=None): """Indicate that some resources are needed for an upcoming compute @@ -186,7 +186,7 @@ return claim - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def rebuild_claim(self, context, instance, nodename, allocations, limits=None, image_meta=None, migration=None): """Create a claim for a rebuild operation.""" @@ -195,7 +195,7 @@ migration, allocations, move_type='evacuation', limits=limits, image_meta=image_meta) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def resize_claim(self, context, instance, instance_type, nodename, migration, allocations, image_meta=None, limits=None): """Create a claim for a resize or cold-migration move. @@ -207,9 +207,9 @@ migration, allocations, image_meta=image_meta, limits=limits) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def live_migration_claim(self, context, instance, nodename, migration, - limits): + limits, allocs): """Builds a MoveClaim for a live migration. :param context: The request context. @@ -219,15 +219,14 @@ migration. :param limits: A SchedulerLimits object from when the scheduler selected the destination host. + :param allocs: The placement allocation records for the instance. :returns: A MoveClaim for this live migration. """ # Flavor and image cannot change during a live migration. 
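All of the ``COMPUTE_RESOURCE_SEMAPHORE`` sections in the resource tracker above switch to fair locking, so threads waiting on the tracker are served in FIFO order rather than racing on every release. A minimal sketch of the underlying oslo.concurrency usage (assuming oslo.concurrency >= 3.29, where the ``fair`` argument was introduced; the function name is illustrative):

.. code:: python

    from oslo_concurrency import lockutils


    @lockutils.synchronized('compute_resources', fair=True)
    def instance_claim(instance):
        # With fair=True the lock is granted in arrival order, so a burst
        # of _update_available_resource() periodic runs cannot starve a
        # pending claim indefinitely.
        pass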
instance_type = instance.flavor image_meta = instance.image_meta - # TODO(Luyao) will pass allocations to live_migration_claim after the - # live migration change is done, now just set it None to _move_claim return self._move_claim(context, instance, instance_type, nodename, - migration, None, move_type='live-migration', + migration, allocs, move_type='live-migration', image_meta=image_meta, limits=limits) def _move_claim(self, context, instance, new_instance_type, nodename, @@ -515,7 +514,7 @@ instance.node = None instance.save() - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def abort_instance_claim(self, context, instance, nodename): """Remove usage from the given instance.""" self._update_usage_from_instance(context, instance, nodename, @@ -538,7 +537,7 @@ dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() self.compute_nodes[nodename].pci_device_pools = dev_pools_obj - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def drop_move_claim(self, context, instance, nodename, instance_type=None, prefix='new_'): """Remove usage for an incoming/outgoing migration. @@ -587,7 +586,7 @@ ctxt = context.elevated() self._update(ctxt, self.compute_nodes[nodename]) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def update_usage(self, context, instance, nodename): """Update the resource usage and stats after a change in an instance @@ -858,7 +857,7 @@ 'another host\'s instance!', {'uuid': migration.instance_uuid}) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def _update_available_resource(self, context, resources, startup=False): # initialize the compute node object, creating it @@ -1705,7 +1704,7 @@ """Resets the failed_builds stats for the given node.""" self.stats[nodename].build_succeeded() - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def claim_pci_devices(self, context, pci_requests): """Claim instance PCI resources @@ -1718,7 +1717,7 @@ self.pci_tracker.save(context) return result - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def allocate_pci_devices_for_instance(self, context, instance): """Allocate instance claimed PCI resources @@ -1728,7 +1727,7 @@ self.pci_tracker.allocate_instance(instance) self.pci_tracker.save(context) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def free_pci_device_allocations_for_instance(self, context, instance): """Free instance allocated PCI resources @@ -1738,7 +1737,7 @@ self.pci_tracker.free_instance_allocations(context, instance) self.pci_tracker.save(context) - @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE) + @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def free_pci_device_claims_for_instance(self, context, instance): """Free instance claimed PCI resources diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/rpcapi.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/rpcapi.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/rpcapi.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/rpcapi.py 2020-04-10 17:57:57.000000000 +0000 @@ -376,6 +376,8 @@ * 5.8 - Add confirm_snapshot_based_resize_at_source() * 5.9 - Add 
revert_snapshot_based_resize_at_dest() * 5.10 - Add finish_revert_snapshot_based_resize_at_source() + * 5.11 - Add accel_uuids (accelerator requests) parameter to + build_and_run_instance() ''' VERSION_ALIASES = { @@ -1418,7 +1420,7 @@ filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None, - host_list=None): + host_list=None, accel_uuids=None): # NOTE(edleafe): compute nodes can only use the dict form of limits. if isinstance(limits, objects.SchedulerLimits): limits = limits.to_dict() @@ -1434,9 +1436,13 @@ "node": node, "limits": limits, "host_list": host_list, + "accel_uuids": accel_uuids, } client = self.router.client(ctxt) - version = '5.0' + version = '5.11' + if not client.can_send_version(version): + kwargs.pop('accel_uuids') + version = '5.0' cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'build_and_run_instance', **kwargs) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/compute/utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/compute/utils.py 2020-04-10 17:57:57.000000000 +0000 @@ -27,6 +27,7 @@ from oslo_utils import excutils import six +from nova.accelerator import cyborg from nova import block_device from nova.compute import power_state from nova.compute import task_states @@ -57,6 +58,18 @@ CONF = nova.conf.CONF LOG = log.getLogger(__name__) +# These properties are specific to a particular image by design. It +# does not make sense for them to be inherited by server snapshots. +# This list is distinct from the configuration option of the same +# (lowercase) name. +NON_INHERITABLE_IMAGE_PROPERTIES = frozenset([ + 'cinder_encryption_key_id', + 'cinder_encryption_key_deletion_policy', + 'img_signature', + 'img_signature_hash_method', + 'img_signature_key_type', + 'img_signature_certificate_uuid']) + def exception_to_dict(fault, message=None): """Converts exceptions to a dict for use in notifications. @@ -1214,7 +1227,7 @@ :param instance: nova.objects.instance.Instance object :param name: string for name of the snapshot :param image_type: snapshot | backup - :param image_api: instance of nova.image.API + :param image_api: instance of nova.image.glance.API :param extra_properties: dict of extra image properties to include """ @@ -1276,7 +1289,9 @@ # Delete properties that are non-inheritable properties = image_meta['properties'] - for key in CONF.non_inheritable_image_properties: + keys_to_pop = set(CONF.non_inheritable_image_properties).union( + NON_INHERITABLE_IMAGE_PROPERTIES) + for key in keys_to_pop: properties.pop(key, None) # The properties in extra_properties have precedence @@ -1534,3 +1549,18 @@ for spec in pci_request.spec: spec['parent_ifname'] = rp_name_pieces[2] + + +def delete_arqs_if_needed(context, instance): + """Delete Cyborg ARQs for the instance.""" + dp_name = instance.flavor.extra_specs.get('accel:device_profile') + if dp_name is None: + return + cyclient = cyborg.get_client(context) + LOG.debug('Calling Cyborg to delete ARQs for instance %(instance)s', + {'instance': instance.uuid}) + try: + cyclient.delete_arqs_for_instance(instance.uuid) + except exception.AcceleratorRequestOpFailed as e: + LOG.exception('Failed to delete accelerator requests for ' + 'instance %s. 
Exception: %s', instance.uuid, e) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/api.py 2020-04-10 17:57:57.000000000 +0000 @@ -20,7 +20,7 @@ from nova import baserpc from nova.conductor import rpcapi import nova.conf -from nova import image +from nova.image import glance CONF = nova.conf.CONF @@ -84,7 +84,7 @@ def __init__(self): self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI() - self.image_api = image.API() + self.image_api = glance.API() # TODO(stephenfin): Remove the 'reservations' parameter since we don't use # reservations anymore diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/manager.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/manager.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/manager.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/manager.py 2020-04-10 17:57:57.000000000 +0000 @@ -31,6 +31,7 @@ from oslo_utils import versionutils import six +from nova.accelerator import cyborg from nova import availability_zones from nova.compute import instance_actions from nova.compute import rpcapi as compute_rpcapi @@ -45,7 +46,7 @@ from nova.db import base from nova import exception from nova.i18n import _ -from nova import image +from nova.image import glance from nova import manager from nova.network import neutron from nova import notifications @@ -239,7 +240,7 @@ super(ComputeTaskManager, self).__init__() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.volume_api = cinder.API() - self.image_api = image.API() + self.image_api = glance.API() self.network_api = neutron.API() self.servicegroup_api = servicegroup.API() self.query_client = query.SchedulerQueryClient() @@ -594,6 +595,7 @@ legacy_request_spec) self._cleanup_allocated_networks( context, instance, requested_networks) + compute_utils.delete_arqs_if_needed(context, instance) # NOTE(danms): This is never cell-targeted because it is only used for # n-cpu reschedules which go to the cell conductor and thus are always @@ -835,6 +837,18 @@ LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s", host.service_host, host.nodename, alts, instance=instance) + try: + resource_provider_mapping = ( + local_reqspec.get_request_group_mapping()) + accel_uuids = self._create_and_bind_arqs( + context, instance.uuid, instance.flavor.extra_specs, + host.nodename, resource_provider_mapping) + except Exception as exc: + LOG.exception('Failed to reschedule. Reason: %s', exc) + self._cleanup_when_reschedule_fails(context, instance, exc, + legacy_request_spec, requested_networks) + continue + self.compute_rpcapi.build_and_run_instance(context, instance=instance, host=host.service_host, image=image, request_spec=local_reqspec, @@ -844,7 +858,8 @@ requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host.nodename, - limits=host.limits, host_list=host_list) + limits=host.limits, host_list=host_list, + accel_uuids=accel_uuids) def _schedule_instances(self, context, request_spec, instance_uuids=None, return_alternates=False): @@ -935,9 +950,14 @@ filter_properties = request_spec.\ to_legacy_filter_properties_dict() - # TODO(gibi): We need to make sure that the - # requested_resources field is re calculated based on - # neutron ports. 
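The hunk below replaces that TODO: during unshelve the conductor now queries neutron for the ports' resource requests and re-populates the RequestSpec before scheduling. Schematically (names follow the patch; this is an illustrative extract, not the exact conductor code):

.. code:: python

    # network_api and request_spec are the conductor's attributes from
    # the surrounding hunk.
    port_res_req = network_api.get_requested_resource_for_instance(
        context, instance.uuid)
    # Non-nova resource requests (e.g. neutron bandwidth guarantees)
    # ride along in the RequestSpec so placement can satisfy them in the
    # same scheduling pass as nova's own request groups.
    request_spec.requested_resources = port_res_req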
+ port_res_req = ( + self.network_api.get_requested_resource_for_instance( + context, instance.uuid)) + # NOTE(gibi): When cyborg or other module wants to handle + # similar non-nova resources then here we have to collect + # all the external resource requests in a single list and + # add them to the RequestSpec. + request_spec.requested_resources = port_res_req # NOTE(cfriesen): Ensure that we restrict the scheduler to # the cell specified by the instance mapping. @@ -959,6 +979,10 @@ instance.availability_zone = ( availability_zones.get_host_availability_zone( context, host)) + + scheduler_utils.fill_provider_mapping( + request_spec, selection) + self.compute_rpcapi.unshelve_instance( context, instance, host, request_spec, image=image, filter_properties=filter_properties, node=node) @@ -1595,6 +1619,22 @@ # this one. continue + accel_uuids = [] + try: + resource_provider_mapping = ( + request_spec.get_request_group_mapping()) + # Using nodename instead of hostname. See: + # http://lists.openstack.org/pipermail/openstack-discuss/2019-November/011044.html # noqa + accel_uuids = self._create_and_bind_arqs( + context, instance.uuid, instance.flavor.extra_specs, + host.nodename, resource_provider_mapping) + except Exception as exc: + # If anything failed here we need to cleanup and bail out. + with excutils.save_and_reraise_exception(): + self._cleanup_build_artifacts( + context, exc, instances, build_requests, request_specs, + block_device_mapping, tags, cell_mapping_cache) + # NOTE(danms): Compute RPC expects security group names or ids # not objects, so convert this to a list of names until we can # pass the objects. @@ -1611,7 +1651,36 @@ security_groups=legacy_secgroups, block_device_mapping=instance_bdms, host=host.service_host, node=host.nodename, - limits=host.limits, host_list=host_list) + limits=host.limits, host_list=host_list, + accel_uuids=accel_uuids) + + def _create_and_bind_arqs(self, context, instance_uuid, extra_specs, + hostname, resource_provider_mapping): + """Create ARQs, determine their RPs and initiate ARQ binding. + + The binding is asynchronous; Cyborg will notify on completion. + The notification will be handled in the compute manager. + """ + dp_name = extra_specs.get('accel:device_profile') + if not dp_name: + return [] + + LOG.debug('Calling Cyborg to get ARQs. 
dp_name=%s instance=%s',
+                  dp_name, instance_uuid)
+        cyclient = cyborg.get_client(context)
+        arqs = cyclient.create_arqs_and_match_resource_providers(
+            dp_name, resource_provider_mapping)
+        LOG.debug('Got ARQs with resource provider mapping %s', arqs)
+
+        bindings = {arq['uuid']:
+                    {"hostname": hostname,
+                     "device_rp_uuid": arq['device_rp_uuid'],
+                     "instance_uuid": instance_uuid
+                     }
+                    for arq in arqs}
+        # Initiate Cyborg binding asynchronously
+        cyclient.bind_arqs(bindings=bindings)
+        return [arq['uuid'] for arq in arqs]

     @staticmethod
     def _map_instance_to_cell(context, instance, cell):
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/tasks/cross_cell_migrate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/tasks/cross_cell_migrate.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/tasks/cross_cell_migrate.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/tasks/cross_cell_migrate.py 2020-04-10 17:57:57.000000000 +0000
@@ -28,7 +28,7 @@
 from nova import context as nova_context
 from nova import exception
 from nova.i18n import _
-from nova import image as nova_image
+from nova.image import glance
 from nova.network import constants as neutron_constants
 from nova.network import neutron
 from nova import objects
@@ -401,7 +401,7 @@
         :param migration: Migration object from the source cell
         :param request_spec: RequestSpec object for the resize operation
         :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI
-        :param image_api: instance of nova.image.api.API
+        :param image_api: instance of nova.image.glance.API
         """
         super(PrepResizeAtSourceTask, self).__init__(context, instance)
         self.migration = migration
@@ -636,7 +636,7 @@
         self.network_api = neutron.API()
         self.volume_api = cinder.API()
-        self.image_api = nova_image.API()
+        self.image_api = glance.API()
         # Keep an ordered dict of the sub-tasks completed so we can call their
         # rollback routines if something fails.
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/tasks/live_migrate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/tasks/live_migrate.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conductor/tasks/live_migrate.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conductor/tasks/live_migrate.py 2020-04-10 17:57:57.000000000 +0000
@@ -12,6 +12,7 @@
 from oslo_log import log as logging
 import oslo_messaging as messaging
+from oslo_utils import excutils
 import six

 from nova import availability_zones
@@ -45,6 +46,17 @@
     return svc.version >= 36

+def supports_vpmem_live_migration(context):
+    """Checks if the compute host service is new enough to support
+    instance live migration with virtual persistent memory.
+
+    :param context: The user request context.
+    :returns: True if the compute hosts are new enough to support live
+              migration with vpmem
+    """
+    return objects.Service.get_minimum_version(context, 'nova-compute') >= 51
+
+
 class LiveMigrationTask(base.TaskBase):
     def __init__(self, context, instance, destination, block_migration,
                  disk_over_commit, migration, compute_rpcapi,
@@ -93,12 +105,12 @@
             # live migrating with a specific destination host so the scheduler
             # is bypassed. There are still some minimal checks performed here
             # though.
-            source_node, dest_node = self._check_requested_destination()
-            # Now that we're semi-confident in the force specified host, we
-            # need to copy the source compute node allocations in Placement
-            # to the destination compute node.
Normally select_destinations() - # in the scheduler would do this for us, but when forcing the - # target host we don't call the scheduler. + self._check_destination_is_not_source() + self._check_host_is_up(self.destination) + self._check_destination_has_enough_memory() + source_node, dest_node = ( + self._check_compatible_with_source_hypervisor( + self.destination)) # TODO(mriedem): Call select_destinations() with a # skip_filters=True flag so the scheduler does the work of claiming # resources on the destination in Placement but still bypass the @@ -111,11 +123,20 @@ # this assumption fails then placement will return consumer # generation conflict and this call raise a AllocationUpdateFailed # exception. We let that propagate here to abort the migration. + # NOTE(luyao): When forcing the target host we don't call the + # scheduler, that means we need to get allocations from placement + # first, then claim resources in resource tracker on the + # destination host based on these allocations. scheduler_utils.claim_resources_on_destination( self.context, self.report_client, self.instance, source_node, dest_node, source_allocations=self._held_allocations, consumer_generation=None) + try: + self._check_requested_destination() + except Exception: + with excutils.save_and_reraise_exception(): + self._remove_host_allocations(dest_node.uuid) # dest_node is a ComputeNode object, so we need to get the actual # node name off it to set in the Migration object below. @@ -241,6 +262,27 @@ "source and destination nodes do not support " "the operation.") + def _check_can_migrate_specific_resources(self): + """Checks that an instance can migrate with specific resources. + + For virtual persistent memory resource: + 1. check if Instance contains vpmem resources + 2. check if live migration with vpmem is supported + """ + if not self.instance.resources: + return + + has_vpmem = False + for resource in self.instance.resources: + if resource.resource_class.startswith("CUSTOM_PMEM_NAMESPACE_"): + has_vpmem = True + break + + if has_vpmem and not supports_vpmem_live_migration(self.context): + raise exception.MigrationPreCheckError( + reason="Cannot live migrate with virtual persistent memory, " + "the operation is not supported.") + def _check_host_is_up(self, host): service = objects.Service.get_by_compute_host(self.context, host) @@ -248,15 +290,7 @@ raise exception.ComputeServiceUnavailable(host=host) def _check_requested_destination(self): - """Performs basic pre-live migration checks for the forced host. - - :returns: tuple of (source ComputeNode, destination ComputeNode) - """ - self._check_destination_is_not_source() - self._check_host_is_up(self.destination) - self._check_destination_has_enough_memory() - source_node, dest_node = self._check_compatible_with_source_hypervisor( - self.destination) + """Performs basic pre-live migration checks for the forced host.""" # NOTE(gibi): This code path is used when the live migration is forced # to a target host and skipping the scheduler. 
Such operation is # rejected for servers with nested resource allocations since @@ -273,7 +307,6 @@ raise exception.MigrationPreCheckError( reason=(_('Unable to force live migrate instance %s ' 'across cells.') % self.instance.uuid)) - return source_node, dest_node def _check_destination_is_not_source(self): if self.destination == self.source: @@ -322,6 +355,7 @@ return source_info, destination_info def _call_livem_checks_on_host(self, destination, provider_mapping): + self._check_can_migrate_specific_resources() self._check_can_migrate_pci(self.source, destination) try: self.migrate_data = self.compute_rpcapi.\ diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/compute.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/compute.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/compute.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/compute.py 2020-04-10 17:57:57.000000000 +0000 @@ -55,9 +55,7 @@ if you allow the ServerGroupAffinityFilter and need to resize. """), cfg.ListOpt('non_inheritable_image_properties', - default=['cache_in_nova', 'bittorrent', - 'img_signature_hash_method', 'img_signature', - 'img_signature_key_type', 'img_signature_certificate_uuid'], + default=['cache_in_nova', 'bittorrent'], help=""" Image properties that should not be inherited from the instance when taking a snapshot. @@ -65,15 +63,25 @@ This option gives an opportunity to select which image-properties should not be inherited by newly created snapshots. +.. note:: + + The following image properties are *never* inherited regardless of + whether they are listed in this configuration option or not: + + * cinder_encryption_key_id + * cinder_encryption_key_deletion_policy + * img_signature + * img_signature_hash_method + * img_signature_key_type + * img_signature_certificate_uuid + Possible values: * A comma-separated list whose item is an image property. Usually only the image properties that are only needed by base images can be included here, since the snapshots that are created from the base images don't need them. -* Default list: cache_in_nova, bittorrent, img_signature_hash_method, - img_signature, img_signature_key_type, - img_signature_certificate_uuid +* Default list: cache_in_nova, bittorrent """), cfg.IntOpt('max_local_block_devices', @@ -169,6 +177,17 @@ ``vif_plugging_is_fatal`` is False, events should not be expected to arrive at all. """), + cfg.IntOpt('arq_binding_timeout', + default=300, + min=1, + help=""" +Timeout for Accelerator Request (ARQ) bind event message arrival. + +Number of seconds to wait for ARQ bind resolution event to arrive. +The event indicates that every ARQ for an instance has either bound +successfully or failed to bind. If it does not arrive, instance bringup +is aborted with an exception. +"""), cfg.StrOpt('injected_network_template', default=paths.basedir_def('nova/virt/interfaces.template'), help="""Path to '/etc/network/interfaces' template. @@ -1015,11 +1034,23 @@ state in the API and never actually reclaimed (deleted) on the compute node. +.. note:: When using this option, you should also configure the ``[cinder]`` + auth options, e.g. ``auth_type``, ``auth_url``, ``username``, etc. + Since the reclaim happens in a periodic task, there is no user token + to cleanup volumes attached to any SOFT_DELETED servers so nova must + be configured with administrator role access to cleanup those + resources in cinder. + Possible values: * Any positive integer(in seconds) greater than 0 will enable this option. 
* Any value <=0 will disable the option. + +Related options: + +* [cinder] auth options for cleaning up volumes attached to servers during + the reclaim process """), cfg.IntOpt('volume_usage_poll_interval', default=0, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/console.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/console.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/console.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/console.py 2020-04-10 17:57:57.000000000 +0000 @@ -41,6 +41,44 @@ * A list where each element is an allowed origin hostnames, else an empty list """), + cfg.StrOpt('ssl_ciphers', + help=""" +OpenSSL cipher preference string that specifies what ciphers to allow for TLS +connections from clients. For example:: + + ssl_ciphers = "kEECDH+aECDSA+AES:kEECDH+AES+aRSA:kEDH+aRSA+AES" + +See the man page for the OpenSSL `ciphers` command for details of the cipher +preference string format and allowed values:: + + https://www.openssl.org/docs/man1.1.0/man1/ciphers.html + +Related options: + +* [DEFAULT] cert +* [DEFAULT] key +"""), + cfg.StrOpt('ssl_minimum_version', + default='default', + choices=[ + # These values must align with SSL_OPTIONS in + # websockify/websocketproxy.py + ('default', 'Use the underlying system OpenSSL defaults'), + ('tlsv1_1', + 'Require TLS v1.1 or greater for TLS connections'), + ('tlsv1_2', + 'Require TLS v1.2 or greater for TLS connections'), + ('tlsv1_3', + 'Require TLS v1.3 or greater for TLS connections'), + ], + help=""" +Minimum allowed SSL/TLS protocol version. + +Related options: + +* [DEFAULT] cert +* [DEFAULT] key +"""), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/cyborg.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/cyborg.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/cyborg.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/cyborg.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,43 @@ +# Copyright 2019 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneauth1 import loading as ks_loading +from oslo_config import cfg + +from nova.conf import utils as confutils + + +DEFAULT_SERVICE_TYPE = 'accelerator' +CYBORG_GROUP = 'cyborg' + +cyborg_group = cfg.OptGroup( + CYBORG_GROUP, + title='Cyborg Options', + help=""" +Configuration options for Cyborg (accelerator as a service). 
+""") + + +def register_opts(conf): + conf.register_group(cyborg_group) + confutils.register_ksa_opts(conf, cyborg_group, DEFAULT_SERVICE_TYPE, + include_auth=False) + + +def list_opts(): + return { + cyborg_group: ( + ks_loading.get_session_conf_options() + + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)) + } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/devices.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/devices.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/devices.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/devices.py 2020-04-10 17:57:57.000000000 +0000 @@ -24,12 +24,33 @@ Some pGPUs (e.g. NVIDIA GRID K1) support different vGPU types. User can use this option to specify a list of enabled vGPU types that may be assigned to a -guest instance. But please note that Nova only supports a single type in the -Queens release. If more than one vGPU type is specified (as a comma-separated -list), only the first one will be used. An example is as the following:: +guest instance. + +If more than one single vGPU type is provided, then for each *vGPU type* an +additional section, ``[vgpu_$(VGPU_TYPE)]``, must be added to the configuration +file. Each section then **must** be configured with a single configuration +option, ``device_addresses``, which should be a list of PCI addresses +corresponding to the physical GPU(s) to assign to this type. + +If one or more sections are missing (meaning that a specific type is not wanted +to use for at least one physical GPU) or if no device addresses are provided, +then Nova will only use the first type that was provided by +``[devices]/enabled_vgpu_types``. + +If the same PCI address is provided for two different types, nova-compute will +return an InvalidLibvirtGPUConfig exception at restart. + +An example is as the following:: [devices] - enabled_vgpu_types = GRID K100,Intel GVT-g,MxGPU.2,nvidia-11 + enabled_vgpu_types = nvidia-35, nvidia-36 + + [vgpu_nvidia-35] + device_addresses = 0000:84:00.0,0000:85:00.0 + + [vgpu_nvidia-36] + device_addresses = 0000:86:00.0 + """) ] @@ -39,5 +60,20 @@ conf.register_opts(vgpu_opts, group=devices_group) +def register_dynamic_opts(conf): + """Register dynamically-generated options and groups. + + This must be called by the service that wishes to use the options **after** + the initial configuration has been loaded. 
+ """ + opt = cfg.ListOpt('device_addresses', default=[], + item_type=cfg.types.String()) + + # Register the '[vgpu_$(VGPU_TYPE)]/device_addresses' opts, implicitly + # registering the '[vgpu_$(VGPU_TYPE)]' groups in the process + for vgpu_type in conf.devices.enabled_vgpu_types: + conf.register_opt(opt, group='vgpu_%s' % vgpu_type) + + def list_opts(): return {devices_group: vgpu_opts} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/__init__.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/__init__.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/__init__.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/__init__.py 2020-04-10 17:57:57.000000000 +0000 @@ -29,6 +29,7 @@ from nova.conf import configdrive from nova.conf import console from nova.conf import consoleauth +from nova.conf import cyborg from nova.conf import database from nova.conf import devices from nova.conf import ephemeral_storage @@ -80,6 +81,7 @@ configdrive.register_opts(CONF) console.register_opts(CONF) consoleauth.register_opts(CONF) +cyborg.register_opts(CONF) database.register_opts(CONF) devices.register_opts(CONF) ephemeral_storage.register_opts(CONF) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/neutron.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/neutron.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/neutron.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/neutron.py 2020-04-10 17:57:57.000000000 +0000 @@ -94,6 +94,20 @@ * ``[neutron_physnet_$PHYSNET] numa_nodes`` must be configured for each value of ``$PHYSNET`` specified by this option """), + cfg.IntOpt('http_retries', + default=3, + min=0, + help=""" +Number of times neutronclient should retry on any failed http call. + +0 means connection is attempted only once. Setting it to any positive integer +means that on failure connection is retried that many times e.g. setting it +to 3 means total attempts to connect will be 4. + +Possible values: + +* Any integer value. 0 means connection is attempted only once +"""), ] metadata_proxy_opts = [ diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/novnc.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/novnc.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/novnc.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/novnc.py 2020-04-10 17:57:57.000000000 +0000 @@ -27,15 +27,37 @@ help="Run as a background process."), cfg.BoolOpt('ssl_only', default=False, - help="Disallow non-encrypted connections."), + help=""" +Disallow non-encrypted connections. + +Related options: + +* cert +* key +"""), cfg.BoolOpt('source_is_ipv6', default=False, help="Set to True if source host is addressed with IPv6."), cfg.StrOpt('cert', default='self.pem', - help="Path to SSL certificate file."), + help=""" +Path to SSL certificate file. + +Related options: + +* key +* ssl_only +* [console] ssl_ciphers +* [console] ssl_minimum_version +"""), cfg.StrOpt('key', - help="SSL key file (if separate from cert)."), + help=""" +SSL key file (if separate from cert). 
+ +Related options: + +* cert +"""), cfg.StrOpt('web', default='/usr/share/spice-html5', help=""" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/scheduler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/scheduler.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/conf/scheduler.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/conf/scheduler.py 2020-04-10 17:57:57.000000000 +0000 @@ -26,26 +26,30 @@ default="filter_scheduler", deprecated_name="scheduler_driver", deprecated_group="DEFAULT", + deprecated_for_removal=True, + deprecated_since='21.0.0', + deprecated_reason=""" +nova no longer provides any in-tree filters except for the 'filter_scheduler' +scheduler. This filter is considered flexible and pluggable enough for all use +cases and can be extended through the use of custom, out-of-tree filters and +weighers along with powerful, in-tree filters like the +'AggregateInstanceExtraSpecsFilter' and 'ComputeCapabilitiesFilter' filters. +""", help=""" The class of the driver used by the scheduler. This should be chosen from one of the entrypoints under the namespace 'nova.scheduler.driver' of file 'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is used. -Other options are: - -* 'fake_scheduler' which is used for testing. - Possible values: * Any of the drivers included in Nova: * filter_scheduler - * fake_scheduler * You may also set this to the entry point name of a custom scheduler driver, - but you will be responsible for creating and maintaining it in your setup.cfg - file. + but you will be responsible for creating and maintaining it in your + ``setup.cfg`` file. Related options: @@ -194,7 +198,17 @@ and/or image metadata must also contain ``trait:$TRAIT_NAME=required`` to be eligible to be scheduled to hosts in that aggregate. More technical details at https://docs.openstack.org/nova/latest/reference/isolate-aggregates.html -""") +"""), + cfg.BoolOpt("image_metadata_prefilter", + default=False, + help=""" +This setting causes the scheduler to transform well known image metadata +properties into placement required traits to filter host based on image +metadata. This feature requires host support and is currently supported by the +following compute drivers: + +- ``libvirt.LibvirtDriver`` (since Ussuri (21.0.0)) +"""), ] filter_scheduler_group = cfg.OptGroup(name="filter_scheduler", diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/console/websocketproxy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/console/websocketproxy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/console/websocketproxy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/console/websocketproxy.py 2020-04-10 17:57:57.000000000 +0000 @@ -93,12 +93,20 @@ self.reqhandler.send_close() -class NovaProxyRequestHandlerBase(object): - def address_string(self): - # NOTE(rpodolyaka): override the superclass implementation here and - # explicitly disable the reverse DNS lookup, which might fail on some - # deployments due to DNS configuration and break VNC access completely - return str(self.client_address[0]) +class NovaProxyRequestHandler(websockify.ProxyRequestHandler): + + def __init__(self, *args, **kwargs): + self._compute_rpcapi = None + websockify.ProxyRequestHandler.__init__(self, *args, **kwargs) + + @property + def compute_rpcapi(self): + # Lazy load the rpcapi/ComputeAPI upon first use for this connection. 
+        # This way, if we receive a TCP RST, we will not create a ComputeAPI
+        # object we won't use.
+        if not self._compute_rpcapi:
+            self._compute_rpcapi = compute_rpcapi.ComputeAPI()
+        return self._compute_rpcapi

     def verify_origin_proto(self, connect_info, origin_proto):
         if 'access_url_base' not in connect_info:
@@ -277,33 +285,8 @@
                       {'host': host, 'port': port})
             raise

-
-class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
-                              websockify.ProxyRequestHandler):
-    def __init__(self, *args, **kwargs):
-        self._compute_rpcapi = None
-        websockify.ProxyRequestHandler.__init__(self, *args, **kwargs)
-
-    @property
-    def compute_rpcapi(self):
-        # Lazy load the rpcapi/ComputeAPI upon first use for this connection.
-        # This way, if we receive a TCP RST, we will not create a ComputeAPI
-        # object we won't use.
-        if not self._compute_rpcapi:
-            self._compute_rpcapi = compute_rpcapi.ComputeAPI()
-        return self._compute_rpcapi
-
     def socket(self, *args, **kwargs):
-        # TODO(melwitt): The try_import and if-else condition can be removed
-        # when we get to the point where we're requiring at least websockify
-        # v.0.9.0 in our lower-constraints.
-        if websockifyserver is not None:
-            # In websockify v0.9.0, the 'socket' method moved to the
-            # websockify.websockifyserver.WebSockifyServer class.
-            return websockifyserver.WebSockifyServer.socket(*args, **kwargs)
-        else:
-            # Fall back to the websockify <= v0.8.0 'socket' method location.
-            return websockify.WebSocketServer.socket(*args, **kwargs)
+        return websockifyserver.WebSockifyServer.socket(*args, **kwargs)


 class NovaWebSocketProxy(websockify.WebSocketProxy):
@@ -316,6 +299,17 @@
         with the compute node.
         """
         self.security_proxy = kwargs.pop('security_proxy', None)
+
+        # If 'default' was specified as the ssl_minimum_version, we leave
+        # ssl_options unset to default to the underlying system defaults.
+        # We do this to avoid using websockify's behaviour for 'default'
+        # in select_ssl_version(), which hardcodes the versions to be
+        # quite relaxed and prevents us from using system crypto policies.
+        ssl_min_version = kwargs.pop('ssl_minimum_version', None)
+        if ssl_min_version and ssl_min_version != 'default':
+            kwargs['ssl_options'] = websockify.websocketproxy. \
+                select_ssl_version(ssl_min_version)
+
         super(NovaWebSocketProxy, self).__init__(*args, **kwargs)

     @staticmethod
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/context.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/context.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/context.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/context.py 2020-04-10 17:57:57.000000000 +0000
@@ -37,9 +37,6 @@
 from nova import utils

 LOG = logging.getLogger(__name__)
-# TODO(melwitt): This cache should be cleared whenever WSGIService receives a
-# SIGHUP and periodically based on an expiration time. Currently, none of the
-# cell caches are purged, so neither is this one, for now.
 CELL_CACHE = {}
 # NOTE(melwitt): Used for the scatter-gather utility to indicate we timed out
 # waiting for a result from a cell.
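For the ``[console] ssl_minimum_version`` handling above, websockify's ``select_ssl_version()`` ultimately maps the option value to an ``ssl`` protocol floor. A rough standard-library equivalent of that mapping (a sketch only, assuming Python >= 3.7 for ``ssl.TLSVersion``; ``make_server_context`` is a hypothetical helper):

.. code:: python

    import ssl

    # Maps the [console] ssl_minimum_version choices to ssl module floors.
    _MIN_VERSIONS = {
        'tlsv1_1': ssl.TLSVersion.TLSv1_1,
        'tlsv1_2': ssl.TLSVersion.TLSv1_2,
        'tlsv1_3': ssl.TLSVersion.TLSv1_3,
    }


    def make_server_context(min_version='default'):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        if min_version != 'default':
            # 'default' deliberately leaves the system OpenSSL policy
            # untouched, which is why the patch skips select_ssl_version()
            # in that case.
            ctx.minimum_version = _MIN_VERSIONS[min_version]
        return ctx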
@@ -118,7 +115,8 @@ # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('image', 'block-storage', 'volumev3', - 'key-manager', 'placement', 'network')] + 'key-manager', 'placement', 'network', + 'accelerator')] else: # if list is empty or none self.service_catalog = [] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/api.py 2020-04-10 17:57:57.000000000 +0000 @@ -391,114 +391,6 @@ project_id) -################### - -def floating_ip_get(context, id): - return IMPL.floating_ip_get(context, id) - - -def floating_ip_get_pools(context): - """Returns a list of floating IP pools.""" - return IMPL.floating_ip_get_pools(context) - - -def floating_ip_allocate_address(context, project_id, pool, - auto_assigned=False): - """Allocate free floating IP from specified pool and return the address. - - Raises if one is not available. - - """ - return IMPL.floating_ip_allocate_address(context, project_id, pool, - auto_assigned) - - -def floating_ip_bulk_create(context, ips, want_result=True): - """Create a lot of floating IPs from the values dictionary. - :param want_result: If set to True, return floating IPs inserted - """ - return IMPL.floating_ip_bulk_create(context, ips, want_result=want_result) - - -def floating_ip_bulk_destroy(context, ips): - """Destroy a lot of floating IPs from the values dictionary.""" - return IMPL.floating_ip_bulk_destroy(context, ips) - - -def floating_ip_create(context, values): - """Create a floating IP from the values dictionary.""" - return IMPL.floating_ip_create(context, values) - - -def floating_ip_deallocate(context, address): - """Deallocate a floating IP by address.""" - return IMPL.floating_ip_deallocate(context, address) - - -def floating_ip_destroy(context, address): - """Destroy the floating_ip or raise if it does not exist.""" - return IMPL.floating_ip_destroy(context, address) - - -def floating_ip_disassociate(context, address): - """Disassociate a floating IP from a fixed IP by address. - - :returns: the fixed IP record joined to network record or None - if the IP was not associated to an IP. - - """ - return IMPL.floating_ip_disassociate(context, address) - - -def floating_ip_fixed_ip_associate(context, floating_address, - fixed_address, host): - """Associate a floating IP to a fixed_ip by address. - - :returns: the fixed IP record joined to network record or None - if the IP was already associated to the fixed IP. 
- """ - - return IMPL.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address, - host) - - -def floating_ip_get_all(context): - """Get all floating IPs.""" - return IMPL.floating_ip_get_all(context) - - -def floating_ip_get_all_by_host(context, host): - """Get all floating IPs by host.""" - return IMPL.floating_ip_get_all_by_host(context, host) - - -def floating_ip_get_all_by_project(context, project_id): - """Get all floating IPs by project.""" - return IMPL.floating_ip_get_all_by_project(context, project_id) - - -def floating_ip_get_by_address(context, address): - """Get a floating IP by address or raise if it doesn't exist.""" - return IMPL.floating_ip_get_by_address(context, address) - - -def floating_ip_get_by_fixed_address(context, fixed_address): - """Get a floating IPs by fixed address.""" - return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) - - -def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): - """Get a floating IPs by fixed address.""" - return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) - - -def floating_ip_update(context, address, values): - """Update a floating IP by address or raise if it doesn't exist.""" - return IMPL.floating_ip_update(context, address, values) - - #################### @@ -581,101 +473,6 @@ #################### -def fixed_ip_associate(context, address, instance_uuid, network_id=None, - reserved=False, virtual_interface_id=None): - """Associate fixed IP to instance. - - Raises if fixed IP is not available. - - """ - return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id, - reserved, virtual_interface_id) - - -def fixed_ip_associate_pool(context, network_id, instance_uuid=None, - host=None, virtual_interface_id=None): - """Find free IP in network and associate it to instance or host. - - Raises if one is not available. - - """ - return IMPL.fixed_ip_associate_pool(context, network_id, - instance_uuid, host, - virtual_interface_id) - - -def fixed_ip_create(context, values): - """Create a fixed IP from the values dictionary.""" - return IMPL.fixed_ip_create(context, values) - - -def fixed_ip_bulk_create(context, ips): - """Create a lot of fixed IPs from the values dictionary.""" - return IMPL.fixed_ip_bulk_create(context, ips) - - -def fixed_ip_disassociate(context, address): - """Disassociate a fixed IP from an instance by address.""" - return IMPL.fixed_ip_disassociate(context, address) - - -def fixed_ip_disassociate_all_by_timeout(context, host, time): - """Disassociate old fixed IPs from host.""" - return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) - - -def fixed_ip_get(context, id, get_network=False): - """Get fixed IP by id or raise if it does not exist. - - If get_network is true, also return the associated network. 
- """ - return IMPL.fixed_ip_get(context, id, get_network) - - -def fixed_ip_get_all(context): - """Get all defined fixed IPs.""" - return IMPL.fixed_ip_get_all(context) - - -def fixed_ip_get_by_address(context, address, columns_to_join=None): - """Get a fixed IP by address or raise if it does not exist.""" - return IMPL.fixed_ip_get_by_address(context, address, - columns_to_join=columns_to_join) - - -def fixed_ip_get_by_floating_address(context, floating_address): - """Get a fixed IP by a floating address.""" - return IMPL.fixed_ip_get_by_floating_address(context, floating_address) - - -def fixed_ip_get_by_instance(context, instance_uuid): - """Get fixed IPs by instance or raise if none exist.""" - return IMPL.fixed_ip_get_by_instance(context, instance_uuid) - - -def fixed_ip_get_by_host(context, host): - """Get fixed IPs by compute host.""" - return IMPL.fixed_ip_get_by_host(context, host) - - -def fixed_ip_get_by_network_host(context, network_uuid, host): - """Get fixed IP for a host in a network.""" - return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host) - - -def fixed_ips_by_virtual_interface(context, vif_id): - """Get fixed IPs by virtual interface or raise if none exist.""" - return IMPL.fixed_ips_by_virtual_interface(context, vif_id) - - -def fixed_ip_update(context, address, values): - """Create a fixed IP from the values dictionary.""" - return IMPL.fixed_ip_update(context, address, values) - - -#################### - - def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) @@ -845,11 +642,6 @@ return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id) -def instance_floating_address_get_all(context, instance_uuid): - """Get all floating IP addresses of an instance.""" - return IMPL.instance_floating_address_get_all(context, instance_uuid) - - # NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0. def instance_get_all_hung_in_rebooting(context, reboot_window): """Get all instances stuck in a rebooting state.""" @@ -978,104 +770,6 @@ return IMPL.key_pair_count_by_user(context, user_id) -#################### - - -def network_associate(context, project_id, network_id=None, force=False): - """Associate a free network to a project.""" - return IMPL.network_associate(context, project_id, network_id, force) - - -def network_count_reserved_ips(context, network_id): - """Return the number of reserved IPs in the network.""" - return IMPL.network_count_reserved_ips(context, network_id) - - -def network_create_safe(context, values): - """Create a network from the values dict. - - The network is only returned if the create succeeds. If the create violates - constraints because the network already exists, no exception is raised. - - """ - return IMPL.network_create_safe(context, values) - - -def network_delete_safe(context, network_id): - """Delete network with key network_id. - - This method assumes that the network is not associated with any project - - """ - return IMPL.network_delete_safe(context, network_id) - - -def network_disassociate(context, network_id, disassociate_host=True, - disassociate_project=True): - """Disassociate the network from project or host - - Raises if it does not exist. 
- """ - return IMPL.network_disassociate(context, network_id, disassociate_host, - disassociate_project) - - -def network_get(context, network_id, project_only="allow_none"): - """Get a network or raise if it does not exist.""" - return IMPL.network_get(context, network_id, project_only=project_only) - - -def network_get_all(context, project_only="allow_none"): - """Return all defined networks.""" - return IMPL.network_get_all(context, project_only) - - -def network_get_all_by_uuids(context, network_uuids, - project_only="allow_none"): - """Return networks by ids.""" - return IMPL.network_get_all_by_uuids(context, network_uuids, - project_only=project_only) - - -def network_in_use_on_host(context, network_id, host=None): - """Indicates if a network is currently in use on host.""" - return IMPL.network_in_use_on_host(context, network_id, host) - - -def network_get_associated_fixed_ips(context, network_id, host=None): - """Get all network's IPs that have been associated.""" - return IMPL.network_get_associated_fixed_ips(context, network_id, host) - - -def network_get_by_uuid(context, uuid): - """Get a network by uuid or raise if it does not exist.""" - return IMPL.network_get_by_uuid(context, uuid) - - -def network_get_by_cidr(context, cidr): - """Get a network by cidr or raise if it does not exist.""" - return IMPL.network_get_by_cidr(context, cidr) - - -def network_get_all_by_host(context, host): - """All networks for which the given host is the network host.""" - return IMPL.network_get_all_by_host(context, host) - - -def network_set_host(context, network_id, host_id): - """Safely set the host for network.""" - return IMPL.network_set_host(context, network_id, host_id) - - -def network_update(context, network_id, values): - """Set the given properties on a network and update it. - - Raises NotFound if network does not exist. - - """ - return IMPL.network_update(context, network_id, values) - - ############### @@ -1163,33 +857,6 @@ ################### -def ec2_volume_create(context, volume_id, forced_id=None): - return IMPL.ec2_volume_create(context, volume_id, forced_id) - - -def ec2_volume_get_by_id(context, volume_id): - return IMPL.ec2_volume_get_by_id(context, volume_id) - - -def ec2_volume_get_by_uuid(context, volume_uuid): - return IMPL.ec2_volume_get_by_uuid(context, volume_uuid) - - -def ec2_snapshot_create(context, snapshot_id, forced_id=None): - return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id) - - -def ec2_snapshot_get_by_ec2_id(context, ec2_id): - return IMPL.ec2_snapshot_get_by_ec2_id(context, ec2_id) - - -def ec2_snapshot_get_by_uuid(context, snapshot_uuid): - return IMPL.ec2_snapshot_get_by_uuid(context, snapshot_uuid) - - -#################### - - def block_device_mapping_create(context, values, legacy=True): """Create an entry of block device mapping.""" return IMPL.block_device_mapping_create(context, values, legacy) @@ -1355,19 +1022,6 @@ ################### -def project_get_networks(context, project_id, associate=True): - """Return the network associated with the project. - - If associate is true, it will attempt to associate a new - network if one is not found, otherwise it returns None. 
- - """ - return IMPL.project_get_networks(context, project_id, associate) - - -################## - - def pci_device_get_by_addr(context, node_id, dev_addr): """Get PCI device by address.""" return IMPL.pci_device_get_by_addr(context, node_id, dev_addr) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/api.py 2020-04-10 17:57:57.000000000 +0000 @@ -45,7 +45,6 @@ from sqlalchemy import MetaData from sqlalchemy import or_ from sqlalchemy.orm import aliased -from sqlalchemy.orm import contains_eager from sqlalchemy.orm import joinedload from sqlalchemy.orm import noload from sqlalchemy.orm import undefer @@ -485,7 +484,7 @@ query = query.join(models.ComputeNode, models.Service.host == models.ComputeNode.host).\ filter(models.ComputeNode.hypervisor_type == hv_type).\ - distinct('host') + distinct() return query.all() @@ -888,617 +887,6 @@ @require_context -@pick_context_manager_reader -def floating_ip_get(context, id): - try: - result = model_query(context, models.FloatingIp, project_only=True).\ - filter_by(id=id).\ - options(_joinedload_all('fixed_ip.instance')).\ - first() - - if not result: - raise exception.FloatingIpNotFound(id=id) - except db_exc.DBError: - LOG.warning("Invalid floating IP ID %s in request", id) - raise exception.InvalidID(id=id) - return result - - -@require_context -@pick_context_manager_reader -def floating_ip_get_pools(context): - pools = [] - for result in model_query(context, models.FloatingIp, - (models.FloatingIp.pool,)).distinct(): - pools.append({'name': result[0]}) - return pools - - -@require_context -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def floating_ip_allocate_address(context, project_id, pool, - auto_assigned=False): - nova.context.authorize_project_context(context, project_id) - floating_ip_ref = model_query(context, models.FloatingIp, - read_deleted="no").\ - filter_by(fixed_ip_id=None).\ - filter_by(project_id=None).\ - filter_by(pool=pool).\ - first() - - if not floating_ip_ref: - raise exception.NoMoreFloatingIps() - - params = {'project_id': project_id, 'auto_assigned': auto_assigned} - - rows_update = model_query(context, models.FloatingIp, read_deleted="no").\ - filter_by(id=floating_ip_ref['id']).\ - filter_by(fixed_ip_id=None).\ - filter_by(project_id=None).\ - filter_by(pool=pool).\ - update(params, synchronize_session='evaluate') - - if not rows_update: - LOG.debug('The row was updated in a concurrent transaction, ' - 'we will fetch another one') - raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed()) - - return floating_ip_ref['address'] - - -@require_context -@pick_context_manager_writer -def floating_ip_bulk_create(context, ips, want_result=True): - try: - tab = models.FloatingIp().__table__ - context.session.execute(tab.insert(), ips) - except db_exc.DBDuplicateEntry as e: - raise exception.FloatingIpExists(address=e.value) - - if want_result: - return model_query(context, models.FloatingIp).filter( - models.FloatingIp.address.in_( - [ip['address'] for ip in ips])).all() - - -def _ip_range_splitter(ips, block_size=256): - """Yields blocks of IPs no more than block_size elements long.""" - out = [] - count = 0 - for ip in ips: - out.append(ip['address']) - count += 1 - - if count > block_size - 1: - yield out - out 
= [] - count = 0 - - if out: - yield out - - -@require_context -@pick_context_manager_writer -def floating_ip_bulk_destroy(context, ips): - project_id_to_quota_count = collections.defaultdict(int) - for ip_block in _ip_range_splitter(ips): - # Find any floating IPs that were not auto_assigned and - # thus need quota released. - query = model_query(context, models.FloatingIp).\ - filter(models.FloatingIp.address.in_(ip_block)).\ - filter_by(auto_assigned=False) - for row in query.all(): - # The count is negative since we release quota by - # reserving negative quota. - project_id_to_quota_count[row['project_id']] -= 1 - # Delete the floating IPs. - model_query(context, models.FloatingIp).\ - filter(models.FloatingIp.address.in_(ip_block)).\ - soft_delete(synchronize_session='fetch') - - -@require_context -@pick_context_manager_writer -def floating_ip_create(context, values): - floating_ip_ref = models.FloatingIp() - floating_ip_ref.update(values) - try: - floating_ip_ref.save(context.session) - except db_exc.DBDuplicateEntry: - raise exception.FloatingIpExists(address=values['address']) - return floating_ip_ref - - -@require_context -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def floating_ip_fixed_ip_associate(context, floating_address, - fixed_address, host): - fixed_ip_ref = model_query(context, models.FixedIp).\ - filter_by(address=fixed_address).\ - options(joinedload('network')).\ - first() - if not fixed_ip_ref: - raise exception.FixedIpNotFoundForAddress(address=fixed_address) - rows = model_query(context, models.FloatingIp).\ - filter_by(address=floating_address).\ - filter(models.FloatingIp.project_id == - context.project_id).\ - filter(or_(models.FloatingIp.fixed_ip_id == - fixed_ip_ref['id'], - models.FloatingIp.fixed_ip_id.is_(None))).\ - update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host}) - - if not rows: - raise exception.FloatingIpAssociateFailed(address=floating_address) - - return fixed_ip_ref - - -@require_context -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def floating_ip_deallocate(context, address): - return model_query(context, models.FloatingIp).\ - filter_by(address=address).\ - filter(and_(models.FloatingIp.project_id != null()), - models.FloatingIp.fixed_ip_id == null()).\ - update({'project_id': None, - 'host': None, - 'auto_assigned': False}, - synchronize_session=False) - - -@require_context -@pick_context_manager_writer -def floating_ip_destroy(context, address): - model_query(context, models.FloatingIp).\ - filter_by(address=address).\ - delete() - - -@require_context -@pick_context_manager_writer -def floating_ip_disassociate(context, address): - floating_ip_ref = model_query(context, - models.FloatingIp).\ - filter_by(address=address).\ - first() - if not floating_ip_ref: - raise exception.FloatingIpNotFoundForAddress(address=address) - - fixed_ip_ref = model_query(context, models.FixedIp).\ - filter_by(id=floating_ip_ref['fixed_ip_id']).\ - options(joinedload('network')).\ - first() - floating_ip_ref.fixed_ip_id = None - floating_ip_ref.host = None - - return fixed_ip_ref - - -def _floating_ip_get_all(context): - return model_query(context, models.FloatingIp, read_deleted="no") - - -@pick_context_manager_reader -def floating_ip_get_all(context): - floating_ip_refs = _floating_ip_get_all(context).\ - options(joinedload('fixed_ip')).\ - all() - if not floating_ip_refs: - raise exception.NoFloatingIpsDefined() - return floating_ip_refs - - 
-@pick_context_manager_reader -def floating_ip_get_all_by_host(context, host): - floating_ip_refs = _floating_ip_get_all(context).\ - filter_by(host=host).\ - options(joinedload('fixed_ip')).\ - all() - if not floating_ip_refs: - raise exception.FloatingIpNotFoundForHost(host=host) - return floating_ip_refs - - -@require_context -@pick_context_manager_reader -def floating_ip_get_all_by_project(context, project_id): - nova.context.authorize_project_context(context, project_id) - # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? - return _floating_ip_get_all(context).\ - filter_by(project_id=project_id).\ - filter_by(auto_assigned=False).\ - options(_joinedload_all('fixed_ip.instance')).\ - all() - - -@require_context -@pick_context_manager_reader -def floating_ip_get_by_address(context, address): - return _floating_ip_get_by_address(context, address) - - -def _floating_ip_get_by_address(context, address): - - # if address string is empty explicitly set it to None - if not address: - address = None - try: - result = model_query(context, models.FloatingIp).\ - filter_by(address=address).\ - options(_joinedload_all('fixed_ip.instance')).\ - first() - - if not result: - raise exception.FloatingIpNotFoundForAddress(address=address) - except db_exc.DBError: - msg = _("Invalid floating IP %s in request") % address - LOG.warning(msg) - raise exception.InvalidIpAddressError(msg) - - # If the floating IP has a project ID set, check to make sure - # the non-admin user has access. - if result.project_id and nova.context.is_user_context(context): - nova.context.authorize_project_context(context, result.project_id) - - return result - - -@require_context -@pick_context_manager_reader -def floating_ip_get_by_fixed_address(context, fixed_address): - return model_query(context, models.FloatingIp).\ - outerjoin(models.FixedIp, - models.FixedIp.id == - models.FloatingIp.fixed_ip_id).\ - filter(models.FixedIp.address == fixed_address).\ - all() - - -@require_context -@pick_context_manager_reader -def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): - return model_query(context, models.FloatingIp).\ - filter_by(fixed_ip_id=fixed_ip_id).\ - all() - - -@require_context -@pick_context_manager_writer -def floating_ip_update(context, address, values): - float_ip_ref = _floating_ip_get_by_address(context, address) - float_ip_ref.update(values) - try: - float_ip_ref.save(context.session) - except db_exc.DBDuplicateEntry: - raise exception.FloatingIpExists(address=values['address']) - return float_ip_ref - - -################### - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def fixed_ip_associate(context, address, instance_uuid, network_id=None, - reserved=False, virtual_interface_id=None): - """Keyword arguments: - reserved -- should be a boolean value(True or False), exact value will be - used to filter on the fixed IP address - """ - if not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - network_or_none = or_(models.FixedIp.network_id == network_id, - models.FixedIp.network_id == null()) - fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\ - filter(network_or_none).\ - filter_by(reserved=reserved).\ - filter_by(address=address).\ - first() - - if fixed_ip_ref is None: - raise exception.FixedIpNotFoundForNetwork(address=address, - network_uuid=network_id) - if fixed_ip_ref.instance_uuid: - raise exception.FixedIpAlreadyInUse(address=address, - 
instance_uuid=instance_uuid) - - params = {'instance_uuid': instance_uuid, - 'allocated': virtual_interface_id is not None} - if not fixed_ip_ref.network_id: - params['network_id'] = network_id - if virtual_interface_id: - params['virtual_interface_id'] = virtual_interface_id - - rows_updated = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(id=fixed_ip_ref.id).\ - filter(network_or_none).\ - filter_by(reserved=reserved).\ - filter_by(address=address).\ - update(params, synchronize_session='evaluate') - - if not rows_updated: - LOG.debug('The row was updated in a concurrent transaction, ' - 'we will fetch another row') - raise db_exc.RetryRequest( - exception.FixedIpAssociateFailed(net=network_id)) - - return fixed_ip_ref - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def fixed_ip_associate_pool(context, network_id, instance_uuid=None, - host=None, virtual_interface_id=None): - """allocate a fixed ip out of a fixed ip network pool. - - This allocates an unallocated fixed ip out of a specified - network. We sort by updated_at to hand out the oldest address in - the list. - - """ - if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - network_or_none = or_(models.FixedIp.network_id == network_id, - models.FixedIp.network_id == null()) - fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\ - filter(network_or_none).\ - filter_by(reserved=False).\ - filter_by(instance_uuid=None).\ - filter_by(host=None).\ - filter_by(leased=False).\ - order_by(asc(models.FixedIp.updated_at)).\ - first() - - if not fixed_ip_ref: - raise exception.NoMoreFixedIps(net=network_id) - - params = {'allocated': virtual_interface_id is not None} - if fixed_ip_ref['network_id'] is None: - params['network_id'] = network_id - if instance_uuid: - params['instance_uuid'] = instance_uuid - if host: - params['host'] = host - if virtual_interface_id: - params['virtual_interface_id'] = virtual_interface_id - - rows_updated = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(id=fixed_ip_ref['id']).\ - filter_by(network_id=fixed_ip_ref['network_id']).\ - filter_by(reserved=False).\ - filter_by(instance_uuid=None).\ - filter_by(host=None).\ - filter_by(leased=False).\ - filter_by(address=fixed_ip_ref['address']).\ - update(params, synchronize_session='evaluate') - - if not rows_updated: - LOG.debug('The row was updated in a concurrent transaction, ' - 'we will fetch another row') - raise db_exc.RetryRequest( - exception.FixedIpAssociateFailed(net=network_id)) - - return fixed_ip_ref - - -@require_context -@pick_context_manager_writer -def fixed_ip_create(context, values): - fixed_ip_ref = models.FixedIp() - fixed_ip_ref.update(values) - try: - fixed_ip_ref.save(context.session) - except db_exc.DBDuplicateEntry: - raise exception.FixedIpExists(address=values['address']) - return fixed_ip_ref - - -@require_context -@pick_context_manager_writer -def fixed_ip_bulk_create(context, ips): - try: - tab = models.FixedIp.__table__ - context.session.execute(tab.insert(), ips) - except db_exc.DBDuplicateEntry as e: - raise exception.FixedIpExists(address=e.value) - - -@require_context -@pick_context_manager_writer -def fixed_ip_disassociate(context, address): - _fixed_ip_get_by_address(context, address).update( - {'instance_uuid': None, - 'virtual_interface_id': None}) - - -@pick_context_manager_writer -def fixed_ip_disassociate_all_by_timeout(context, host, time): 
- # NOTE(vish): only update fixed ips that "belong" to this - # host; i.e. the network host or the instance - # host matches. Two queries necessary because - # join with update doesn't work. - host_filter = or_(and_(models.Instance.host == host, - models.Network.multi_host == true()), - models.Network.host == host) - result = model_query(context, models.FixedIp, (models.FixedIp.id,), - read_deleted="no").\ - filter(models.FixedIp.allocated == false()).\ - filter(models.FixedIp.updated_at < time).\ - join((models.Network, - models.Network.id == models.FixedIp.network_id)).\ - join((models.Instance, - models.Instance.uuid == models.FixedIp.instance_uuid)).\ - filter(host_filter).\ - all() - fixed_ip_ids = [fip[0] for fip in result] - if not fixed_ip_ids: - return 0 - result = model_query(context, models.FixedIp).\ - filter(models.FixedIp.id.in_(fixed_ip_ids)).\ - update({'instance_uuid': None, - 'leased': False, - 'updated_at': timeutils.utcnow()}, - synchronize_session='fetch') - return result - - -@require_context -@pick_context_manager_reader -def fixed_ip_get(context, id, get_network=False): - query = model_query(context, models.FixedIp).filter_by(id=id) - if get_network: - query = query.options(joinedload('network')) - result = query.first() - if not result: - raise exception.FixedIpNotFound(id=id) - - # FIXME(sirp): shouldn't we just use project_only here to restrict the - # results? - if (nova.context.is_user_context(context) and - result['instance_uuid'] is not None): - instance = instance_get_by_uuid(context.elevated(read_deleted='yes'), - result['instance_uuid']) - nova.context.authorize_project_context(context, instance.project_id) - - return result - - -@pick_context_manager_reader -def fixed_ip_get_all(context): - result = model_query(context, models.FixedIp, read_deleted="yes").all() - if not result: - raise exception.NoFixedIpsDefined() - - return result - - -@require_context -@pick_context_manager_reader -def fixed_ip_get_by_address(context, address, columns_to_join=None): - return _fixed_ip_get_by_address(context, address, - columns_to_join=columns_to_join) - - -def _fixed_ip_get_by_address(context, address, columns_to_join=None): - if columns_to_join is None: - columns_to_join = [] - - try: - result = model_query(context, models.FixedIp) - for column in columns_to_join: - result = result.options(_joinedload_all(column)) - result = result.filter_by(address=address).first() - if not result: - raise exception.FixedIpNotFoundForAddress(address=address) - except db_exc.DBError: - msg = _("Invalid fixed IP Address %s in request") % address - LOG.warning(msg) - raise exception.FixedIpInvalid(msg) - - # NOTE(sirp): shouldn't we just use project_only here to restrict the - # results? 
- if (nova.context.is_user_context(context) and - result['instance_uuid'] is not None): - instance = _instance_get_by_uuid( - context.elevated(read_deleted='yes'), - result['instance_uuid']) - nova.context.authorize_project_context(context, - instance.project_id) - return result - - -@require_context -@pick_context_manager_reader -def fixed_ip_get_by_floating_address(context, floating_address): - return model_query(context, models.FixedIp).\ - join(models.FloatingIp, - models.FloatingIp.fixed_ip_id == - models.FixedIp.id).\ - filter(models.FloatingIp.address == floating_address).\ - first() - # NOTE(tr3buchet) please don't invent an exception here, None is fine - - -@require_context -@pick_context_manager_reader -def fixed_ip_get_by_instance(context, instance_uuid): - if not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - vif_and = and_(models.VirtualInterface.id == - models.FixedIp.virtual_interface_id, - models.VirtualInterface.deleted == 0) - result = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(instance_uuid=instance_uuid).\ - outerjoin(models.VirtualInterface, vif_and).\ - options(contains_eager("virtual_interface")).\ - options(joinedload('network')).\ - options(joinedload('floating_ips')).\ - order_by(asc(models.VirtualInterface.created_at), - asc(models.VirtualInterface.id)).\ - all() - - if not result: - raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) - - return result - - -@pick_context_manager_reader -def fixed_ip_get_by_host(context, host): - instance_uuids = _instance_get_all_uuids_by_hosts( - context, [host]).get(host, []) - if not instance_uuids: - return [] - - return model_query(context, models.FixedIp).\ - filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\ - all() - - -@require_context -@pick_context_manager_reader -def fixed_ip_get_by_network_host(context, network_id, host): - result = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(network_id=network_id).\ - filter_by(host=host).\ - first() - - if not result: - raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, - host=host) - return result - - -@require_context -@pick_context_manager_reader -def fixed_ips_by_virtual_interface(context, vif_id): - result = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(virtual_interface_id=vif_id).\ - options(joinedload('network')).\ - options(joinedload('floating_ips')).\ - all() - - return result - - -@require_context -@pick_context_manager_writer -def fixed_ip_update(context, address, values): - _fixed_ip_get_by_address(context, address).update(values) - - -################### - - -@require_context @pick_context_manager_writer def virtual_interface_create(context, values): """Create a new virtual interface record in the database. @@ -2660,21 +2048,6 @@ filter(models.Instance.instance_type_id != type_id).all()) -@require_context -@pick_context_manager_reader -def instance_floating_address_get_all(context, instance_uuid): - if not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - floating_ips = model_query(context, - models.FloatingIp, - (models.FloatingIp.address,)).\ - join(models.FloatingIp.fixed_ip).\ - filter_by(instance_uuid=instance_uuid) - - return [floating_ip.address for floating_ip in floating_ips] - - # NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0. 
@pick_context_manager_reader def instance_get_all_hung_in_rebooting(context, reboot_window): @@ -3083,319 +2456,6 @@ ################### -@pick_context_manager_writer -def network_associate(context, project_id, network_id=None, force=False): - """Associate a project with a network. - - called by project_get_networks under certain conditions - and network manager add_network_to_project() - - only associate if the project doesn't already have a network - or if force is True - - force solves race condition where a fresh project has multiple instance - builds simultaneously picked up by multiple network hosts which attempt - to associate the project with multiple networks - force should only be used as a direct consequence of user request - all automated requests should not use force - """ - def network_query(project_filter, id=None): - filter_kwargs = {'project_id': project_filter} - if id is not None: - filter_kwargs['id'] = id - return model_query(context, models.Network, read_deleted="no").\ - filter_by(**filter_kwargs).\ - with_for_update().\ - first() - - if not force: - # find out if project has a network - network_ref = network_query(project_id) - - if force or not network_ref: - # in force mode or project doesn't have a network so associate - # with a new network - - # get new network - network_ref = network_query(None, network_id) - if not network_ref: - raise exception.NoMoreNetworks() - - # associate with network - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - network_ref['project_id'] = project_id - context.session.add(network_ref) - return network_ref - - -def _network_ips_query(context, network_id): - return model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(network_id=network_id) - - -@pick_context_manager_reader -def network_count_reserved_ips(context, network_id): - return _network_ips_query(context, network_id).\ - filter_by(reserved=True).\ - count() - - -@pick_context_manager_writer -def network_create_safe(context, values): - network_ref = models.Network() - network_ref['uuid'] = uuidutils.generate_uuid() - network_ref.update(values) - - try: - network_ref.save(context.session) - return network_ref - except db_exc.DBDuplicateEntry: - raise exception.DuplicateVlan(vlan=values['vlan']) - - -@pick_context_manager_writer -def network_delete_safe(context, network_id): - result = model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(network_id=network_id).\ - filter_by(allocated=True).\ - count() - if result != 0: - raise exception.NetworkInUse(network_id=network_id) - network_ref = _network_get(context, network_id=network_id) - - model_query(context, models.FixedIp, read_deleted="no").\ - filter_by(network_id=network_id).\ - soft_delete() - - context.session.delete(network_ref) - - -@pick_context_manager_writer -def network_disassociate(context, network_id, disassociate_host, - disassociate_project): - net_update = {} - if disassociate_project: - net_update['project_id'] = None - if disassociate_host: - net_update['host'] = None - network_update(context, network_id, net_update) - - -def _network_get(context, network_id, project_only='allow_none'): - result = model_query(context, models.Network, project_only=project_only).\ - filter_by(id=network_id).\ - first() - - if not result: - raise exception.NetworkNotFound(network_id=network_id) - - return result - - -@require_context -@pick_context_manager_reader -def network_get(context, network_id, project_only='allow_none'): - return 
_network_get(context, network_id, project_only=project_only) - - -@require_context -@pick_context_manager_reader -def network_get_all(context, project_only): - result = model_query(context, models.Network, read_deleted="no", - project_only=project_only).all() - - if not result: - raise exception.NoNetworksFound() - - return result - - -@require_context -@pick_context_manager_reader -def network_get_all_by_uuids(context, network_uuids, project_only): - result = model_query(context, models.Network, read_deleted="no", - project_only=project_only).\ - filter(models.Network.uuid.in_(network_uuids)).\ - all() - - if not result: - raise exception.NoNetworksFound() - - # check if the result contains all the networks - # we are looking for - for network_uuid in network_uuids: - for network in result: - if network['uuid'] == network_uuid: - break - else: - if project_only: - raise exception.NetworkNotFoundForProject( - network_uuid=network_uuid, project_id=context.project_id) - raise exception.NetworkNotFound(network_id=network_uuid) - - return result - - -def _get_associated_fixed_ips_query(context, network_id, host=None): - # NOTE(vish): The ugly joins here are to solve a performance issue and - # should be removed once we can add and remove leases - # without regenerating the whole list - vif_and = and_(models.VirtualInterface.id == - models.FixedIp.virtual_interface_id, - models.VirtualInterface.deleted == 0) - inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid, - models.Instance.deleted == 0) - # NOTE(vish): This subquery left joins the minimum interface id for each - # instance. If the join succeeds (i.e. the 11th column is not - # null), then the fixed ip is on the first interface. - subq = context.session.query( - func.min(models.VirtualInterface.id).label("id"), - models.VirtualInterface.instance_uuid).\ - group_by(models.VirtualInterface.instance_uuid).subquery() - subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id, - subq.c.instance_uuid == models.VirtualInterface.instance_uuid) - query = context.session.query( - models.FixedIp.address, - models.FixedIp.instance_uuid, - models.FixedIp.network_id, - models.FixedIp.virtual_interface_id, - models.VirtualInterface.address, - models.Instance.hostname, - models.Instance.updated_at, - models.Instance.created_at, - models.FixedIp.allocated, - models.FixedIp.leased, - subq.c.id).\ - filter(models.FixedIp.deleted == 0).\ - filter(models.FixedIp.network_id == network_id).\ - join((models.VirtualInterface, vif_and)).\ - join((models.Instance, inst_and)).\ - outerjoin((subq, subq_and)).\ - filter(models.FixedIp.instance_uuid != null()).\ - filter(models.FixedIp.virtual_interface_id != null()) - if host: - query = query.filter(models.Instance.host == host) - return query - - -@pick_context_manager_reader -def network_get_associated_fixed_ips(context, network_id, host=None): - # FIXME(sirp): since this returns fixed_ips, this would be better named - # fixed_ip_get_all_by_network. 
- query = _get_associated_fixed_ips_query(context, network_id, host) - result = query.all() - data = [] - for datum in result: - cleaned = {} - cleaned['address'] = datum[0] - cleaned['instance_uuid'] = datum[1] - cleaned['network_id'] = datum[2] - cleaned['vif_id'] = datum[3] - cleaned['vif_address'] = datum[4] - cleaned['instance_hostname'] = datum[5] - cleaned['instance_updated'] = datum[6] - cleaned['instance_created'] = datum[7] - cleaned['allocated'] = datum[8] - cleaned['leased'] = datum[9] - # NOTE(vish): default_route is True if this fixed ip is on the first - # interface its instance. - cleaned['default_route'] = datum[10] is not None - data.append(cleaned) - return data - - -@pick_context_manager_reader -def network_in_use_on_host(context, network_id, host): - query = _get_associated_fixed_ips_query(context, network_id, host) - return query.count() > 0 - - -def _network_get_query(context): - return model_query(context, models.Network, read_deleted="no") - - -@pick_context_manager_reader -def network_get_by_uuid(context, uuid): - result = _network_get_query(context).filter_by(uuid=uuid).first() - - if not result: - raise exception.NetworkNotFoundForUUID(uuid=uuid) - - return result - - -@pick_context_manager_reader -def network_get_by_cidr(context, cidr): - result = _network_get_query(context).\ - filter(or_(models.Network.cidr == cidr, - models.Network.cidr_v6 == cidr)).\ - first() - - if not result: - raise exception.NetworkNotFoundForCidr(cidr=cidr) - - return result - - -@pick_context_manager_reader -def network_get_all_by_host(context, host): - fixed_host_filter = or_(models.FixedIp.host == host, - and_(models.FixedIp.instance_uuid != null(), - models.Instance.host == host)) - fixed_ip_query = model_query(context, models.FixedIp, - (models.FixedIp.network_id,)).\ - outerjoin((models.Instance, - models.Instance.uuid == - models.FixedIp.instance_uuid)).\ - filter(fixed_host_filter) - # NOTE(vish): return networks that have host set - # or that have a fixed ip with host set - # or that have an instance with host set - host_filter = or_(models.Network.host == host, - models.Network.id.in_(fixed_ip_query.subquery())) - return _network_get_query(context).filter(host_filter).all() - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -@pick_context_manager_writer -def network_set_host(context, network_id, host_id): - network_ref = _network_get_query(context).\ - filter_by(id=network_id).\ - first() - - if not network_ref: - raise exception.NetworkNotFound(network_id=network_id) - - if network_ref.host: - return None - - rows_updated = _network_get_query(context).\ - filter_by(id=network_id).\ - filter_by(host=None).\ - update({'host': host_id}) - - if not rows_updated: - LOG.debug('The row was updated in a concurrent transaction, ' - 'we will fetch another row') - raise db_exc.RetryRequest( - exception.NetworkSetHostFailed(network_id=network_id)) - - -@require_context -@pick_context_manager_writer -def network_update(context, network_id, values): - network_ref = _network_get(context, network_id) - network_ref.update(values) - try: - network_ref.save(context.session) - except db_exc.DBDuplicateEntry: - raise exception.DuplicateVlan(vlan=values['vlan']) - return network_ref - - -################### - @require_context @pick_context_manager_reader @@ -3588,97 +2648,6 @@ ################### -def _ec2_volume_get_query(context): - return model_query(context, models.VolumeIdMapping, read_deleted='yes') - - -def _ec2_snapshot_get_query(context): - return 
model_query(context, models.SnapshotIdMapping, read_deleted='yes') - - -@require_context -@pick_context_manager_writer -def ec2_volume_create(context, volume_uuid, id=None): - """Create ec2 compatible volume by provided uuid.""" - ec2_volume_ref = models.VolumeIdMapping() - ec2_volume_ref.update({'uuid': volume_uuid}) - if id is not None: - ec2_volume_ref.update({'id': id}) - - ec2_volume_ref.save(context.session) - - return ec2_volume_ref - - -@require_context -@pick_context_manager_reader -def ec2_volume_get_by_uuid(context, volume_uuid): - result = _ec2_volume_get_query(context).\ - filter_by(uuid=volume_uuid).\ - first() - - if not result: - raise exception.VolumeNotFound(volume_id=volume_uuid) - - return result - - -@require_context -@pick_context_manager_reader -def ec2_volume_get_by_id(context, volume_id): - result = _ec2_volume_get_query(context).\ - filter_by(id=volume_id).\ - first() - - if not result: - raise exception.VolumeNotFound(volume_id=volume_id) - - return result - - -@require_context -@pick_context_manager_writer -def ec2_snapshot_create(context, snapshot_uuid, id=None): - """Create ec2 compatible snapshot by provided uuid.""" - ec2_snapshot_ref = models.SnapshotIdMapping() - ec2_snapshot_ref.update({'uuid': snapshot_uuid}) - if id is not None: - ec2_snapshot_ref.update({'id': id}) - - ec2_snapshot_ref.save(context.session) - - return ec2_snapshot_ref - - -@require_context -@pick_context_manager_reader -def ec2_snapshot_get_by_ec2_id(context, ec2_id): - result = _ec2_snapshot_get_query(context).\ - filter_by(id=ec2_id).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=ec2_id) - - return result - - -@require_context -@pick_context_manager_reader -def ec2_snapshot_get_by_uuid(context, snapshot_uuid): - result = _ec2_snapshot_get_query(context).\ - filter_by(uuid=snapshot_uuid).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid) - - return result - - -################### - - def _block_device_mapping_get_query(context, columns_to_join=None): if columns_to_join is None: columns_to_join = [] @@ -4079,106 +3048,6 @@ ################### - - -def _security_group_rule_create(context, values): - security_group_rule_ref = models.SecurityGroupIngressRule() - security_group_rule_ref.update(values) - security_group_rule_ref.save(context.session) - return security_group_rule_ref - - -def _security_group_rule_get_query(context): - return model_query(context, models.SecurityGroupIngressRule) - - -@require_context -@pick_context_manager_reader -def security_group_rule_get(context, security_group_rule_id): - result = (_security_group_rule_get_query(context). - filter_by(id=security_group_rule_id). - first()) - - if not result: - raise exception.SecurityGroupNotFoundForRule( - rule_id=security_group_rule_id) - - return result - - -@require_context -@pick_context_manager_reader -def security_group_rule_get_by_security_group(context, security_group_id, - columns_to_join=None): - if columns_to_join is None: - columns_to_join = ['grantee_group.instances.system_metadata', - 'grantee_group.instances.info_cache'] - query = (_security_group_rule_get_query(context). - filter_by(parent_group_id=security_group_id)) - for column in columns_to_join: - query = query.options(_joinedload_all(column)) - return query.all() - - -@require_context -@pick_context_manager_reader -def security_group_rule_get_by_instance(context, instance_uuid): - return (_security_group_rule_get_query(context). - join('parent_group', 'instances'). 
- filter_by(uuid=instance_uuid). - options(joinedload('grantee_group')). - all()) - - -@require_context -@pick_context_manager_writer -def security_group_rule_create(context, values): - return _security_group_rule_create(context, values) - - -@require_context -@pick_context_manager_writer -def security_group_rule_destroy(context, security_group_rule_id): - count = (_security_group_rule_get_query(context). - filter_by(id=security_group_rule_id). - soft_delete()) - if count == 0: - raise exception.SecurityGroupNotFoundForRule( - rule_id=security_group_rule_id) - - -@require_context -@pick_context_manager_reader -def security_group_rule_count_by_group(context, security_group_id): - return (model_query(context, models.SecurityGroupIngressRule, - read_deleted="no"). - filter_by(parent_group_id=security_group_id). - count()) - - -################### - - -@require_context -@pick_context_manager_writer -def project_get_networks(context, project_id, associate=True): - # NOTE(tr3buchet): as before this function will associate - # a project with a network if it doesn't have one and - # associate is true - result = model_query(context, models.Network, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - if not result: - if not associate: - return [] - - return [network_associate(context, project_id)] - - return result - - -################### @pick_context_manager_writer diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/migration.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/migration.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/migration.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/migration.py 2020-04-10 17:57:57.000000000 +0000 @@ -62,6 +62,40 @@ def db_version(database='main', context=None): repository = _find_migrate_repo(database) + + # NOTE(mdbooth): This is a crude workaround for races in _db_version. The 2 + # races we have seen in practice are: + # * versioning_api.db_version() fails because the migrate_version table + # doesn't exist, but meta.tables subsequently contains tables because + # another thread has already started creating the schema. This results in + # the 'Essex' error. + # * db_version_control() fails with pymysql.error.InternalError(1050) + # (Create table failed) because of a race in sqlalchemy-migrate's + # ControlledSchema._create_table_version, which does: + # if not table.exists(): table.create() + # This means that it doesn't raise the advertised + # DatabaseAlreadyControlledError, which we could have handled explicitly. + # + # I believe the correct fix should be: + # * Delete the Essex-handling code as unnecessary complexity which nobody + # should still need. + # * Fix the races in sqlalchemy-migrate such that version_control() always + # raises a well-defined error, and then handle that error here. + # + # Until we do that, though, we should be able to just try again if we + # failed for any reason. In both of the above races, trying again should + # succeed the second time round.
+ # + # For additional context, see: + # * https://bugzilla.redhat.com/show_bug.cgi?id=1652287 + # * https://bugs.launchpad.net/nova/+bug/1804652 + try: + return _db_version(repository, database, context) + except Exception: + return _db_version(repository, database, context) + + +def _db_version(repository, database, context): try: return versioning_api.db_version(get_engine(database, context=context), repository) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/models.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/models.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/db/sqlalchemy/models.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/db/sqlalchemy/models.py 2020-04-10 17:57:57.000000000 +0000 @@ -308,7 +308,7 @@ # locked is superseded by locked_by and locked is not really # necessary but still used in API code so it remains. locked = Column(Boolean) - locked_by = Column(Enum('owner', 'admin')) + locked_by = Column(Enum('owner', 'admin', name='instances0locked_by')) os_type = Column(String(255)) architecture = Column(String(255)) @@ -689,6 +689,8 @@ backref='security_groups') +# TODO(stephenfin): Remove this in the V release or later, once we're sure we +# won't want it back (it's for nova-network, so we won't) class SecurityGroupIngressRule(BASE, NovaBase, models.SoftDeleteMixin): """Represents a rule in a security group.""" __tablename__ = 'security_group_rules' @@ -792,7 +794,7 @@ # TODO(_cerberus_): enum status = Column(String(255)) migration_type = Column(Enum('migration', 'resize', 'live-migration', - 'evacuation'), + 'evacuation', name='migration_type'), nullable=True) hidden = Column(Boolean, default=False) memory_total = Column(BigInteger, nullable=True) @@ -812,6 +814,8 @@ '0)') +# TODO(stephenfin): Remove this in the V release or later, once we're sure we +# won't want it back (it's for nova-network, so we won't) class Network(BASE, NovaBase, models.SoftDeleteMixin): """Represents a network.""" __tablename__ = 'networks' @@ -882,7 +886,8 @@ tag = Column(String(255)) -# TODO(vish): can these both come from the same baseclass? 
+# TODO(stephenfin): Remove this in the V release or later, once we're sure we +# won't want it back (it's for nova-network, so we won't) class FixedIp(BASE, NovaBase, models.SoftDeleteMixin): """Represents a fixed IP for an instance.""" __tablename__ = 'fixed_ips' @@ -941,6 +946,8 @@ 'VirtualInterface.deleted == 0)') +# TODO(stephenfin): Remove this in the V release or later, once we're sure we +# won't want it back (it's for nova-network, so we won't) class FloatingIp(BASE, NovaBase, models.SoftDeleteMixin): """Represents a floating IP that dynamically forwards to a fixed IP.""" __tablename__ = 'floating_ips' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/exception.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/exception.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/exception.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/exception.py 2020-04-10 17:57:57.000000000 +0000 @@ -156,6 +156,11 @@ code = 403 +class ForbiddenWithAccelerators(NovaException): + msg_fmt = _("Forbidden with instances that have accelerators.") + code = 403 + + class AdminRequired(Forbidden): msg_fmt = _("User does not have admin privileges") @@ -710,23 +715,10 @@ msg_fmt = _("Instance %(uuid)s has no mapping to a cell.") -class NetworkInUse(NovaException): - msg_fmt = _("Network %(network_id)s is still in use.") - - -class NetworkSetHostFailed(NovaException): - msg_fmt = _("Network set host failed for network %(network_id)s.") - - class InvalidCidr(Invalid): msg_fmt = _("%(cidr)s is not a valid IP network.") -class DuplicateVlan(NovaException): - msg_fmt = _("Detected existing vlan with id %(vlan)d") - code = 409 - - class NetworkNotFound(NotFound): msg_fmt = _("Network %(network_id)s could not be found.") @@ -739,31 +731,10 @@ msg_fmt = _("Network could not be found for bridge %(bridge)s") -class NetworkNotFoundForUUID(NetworkNotFound): - msg_fmt = _("Network could not be found for uuid %(uuid)s") - - -class NetworkNotFoundForCidr(NetworkNotFound): - msg_fmt = _("Network could not be found with cidr %(cidr)s.") - - class NetworkNotFoundForInstance(NetworkNotFound): msg_fmt = _("Network could not be found for instance %(instance_id)s.") -class NoNetworksFound(NotFound): - msg_fmt = _("No networks defined.") - - -class NoMoreNetworks(NovaException): - msg_fmt = _("No more available networks.") - - -class NetworkNotFoundForProject(NetworkNotFound): - msg_fmt = _("Either network uuid %(network_uuid)s is not present or " - "is not assigned to the project %(project_id)s.") - - class NetworkAmbiguous(Invalid): msg_fmt = _("More than one possible network found. 
Specify " "network ID(s) to select which one(s) to connect to.") @@ -847,38 +818,12 @@ 'specified during server creation.') -class FixedIpExists(NovaException): - msg_fmt = _("Fixed IP %(address)s already exists.") - - -class FixedIpNotFound(NotFound): - msg_fmt = _("No fixed IP associated with id %(id)s.") - - -class FixedIpNotFoundForAddress(FixedIpNotFound): +class FixedIpNotFoundForAddress(NotFound): msg_fmt = _("Fixed IP not found for address %(address)s.") -class FixedIpNotFoundForInstance(FixedIpNotFound): - msg_fmt = _("Instance %(instance_uuid)s has zero fixed IPs.") - - -class FixedIpNotFoundForNetworkHost(FixedIpNotFound): - msg_fmt = _("Network host %(host)s has zero fixed IPs " - "in network %(network_id)s.") - - -class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): - msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'.") - - -class FixedIpNotFoundForNetwork(FixedIpNotFound): - msg_fmt = _("Fixed IP address (%(address)s) does not exist in " - "network (%(network_uuid)s).") - - -class FixedIpAssociateFailed(NovaException): - msg_fmt = _("Fixed IP associate failed for network: %(net)s.") +class FixedIpNotFoundForInstance(NotFound): + msg_fmt = _("Instance %(instance_uuid)s does not have fixed IP '%(ip)s'.") class FixedIpAlreadyInUse(NovaException): @@ -891,10 +836,6 @@ "'%(address)s'.") -class FixedIpInvalid(Invalid): - msg_fmt = _("Fixed IP address %(address)s is invalid.") - - class FixedIpInvalidOnHost(Invalid): msg_fmt = _("The fixed IP associated with port %(port_id)s is not " "compatible with the host.") @@ -904,14 +845,6 @@ msg_fmt = _("No fixed IP addresses available for network: %(net)s") -class NoFixedIpsDefined(NotFound): - msg_fmt = _("Zero fixed IPs could be found.") - - -class FloatingIpExists(NovaException): - msg_fmt = _("Floating IP %(address)s already exists.") - - class FloatingIpNotFound(NotFound): msg_fmt = _("Floating IP not found for ID %(id)s.") @@ -920,10 +853,6 @@ msg_fmt = _("Floating IP not found for address %(address)s.") -class FloatingIpNotFoundForHost(FloatingIpNotFound): - msg_fmt = _("Floating IP not found for host %(host)s.") - - class FloatingIpMultipleFoundForAddress(NovaException): msg_fmt = _("Multiple floating IPs are found for address %(address)s.") @@ -942,22 +871,10 @@ msg_fmt = _("Floating IP %(address)s is associated.") -class FloatingIpNotAssociated(NovaException): - msg_fmt = _("Floating IP %(address)s is not associated.") - - -class NoFloatingIpsDefined(NotFound): - msg_fmt = _("Zero floating IPs exist.") - - class NoFloatingIpInterface(NotFound): msg_fmt = _("Interface %(interface)s not found.") -class FloatingIpAllocateFailed(NovaException): - msg_fmt = _("Floating IP allocate failed.") - - class FloatingIpAssociateFailed(NovaException): msg_fmt = _("Floating IP %(address)s association has failed.") @@ -966,10 +883,6 @@ msg_fmt = _("The floating IP request failed with a BadRequest") -class CannotDisassociateAutoAssignedFloatingIP(NovaException): - msg_fmt = _("Cannot disassociate auto assigned floating IP") - - class KeypairNotFound(NotFound): msg_fmt = _("Keypair %(name)s not found for user %(user_id)s") @@ -1055,25 +968,11 @@ "for project %(project_id)s.") -class SecurityGroupNotFoundForRule(SecurityGroupNotFound): - msg_fmt = _("Security group with rule %(rule_id)s not found.") - - class SecurityGroupExists(Invalid): msg_fmt = _("Security group %(security_group_name)s already exists " "for project %(project_id)s.") -class SecurityGroupExistsForInstance(Invalid): - msg_fmt = _("Security group 
%(security_group_id)s is already associated" - " with the instance %(instance_id)s") - - -class SecurityGroupNotExistsForInstance(Invalid): - msg_fmt = _("Security group %(security_group_id)s is not associated with" - " the instance %(instance_id)s") - - class SecurityGroupCannotBeApplied(Invalid): msg_fmt = _("Network requires port_security_enabled and subnet associated" " in order to apply security groups.") @@ -1313,10 +1212,6 @@ msg_fmt = _("Maximum number of floating IPs exceeded") -class FixedIpLimitExceeded(QuotaError): - msg_fmt = _("Maximum number of fixed IPs exceeded") - - class MetadataLimitExceeded(QuotaError): msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") @@ -1498,6 +1393,15 @@ "the '%(virt)s' virt driver") +class UnsupportedRescueBus(Invalid): + msg_fmt = _("Requested rescue bus '%(bus)s' is not supported by " + "the '%(virt)s' virt driver") + + +class UnsupportedRescueDevice(Invalid): + msg_fmt = _("Requested rescue device '%(device)s' is not supported") + + class Base64Exception(NovaException): msg_fmt = _("Invalid Base 64 data for file %(path)s") @@ -2384,7 +2288,7 @@ class RequestGroupSuffixConflict(NovaException): - msg_fmt = _("Duplicate request group suffix %(suffix)s!") + msg_fmt = _("Duplicate request group suffix %(suffix)s.") class AmbiguousResourceProviderForPCIRequest(NovaException): @@ -2399,3 +2303,15 @@ "formatted name. Expected name format is " "::, but got " "%(provider_name)s") + + +class DeviceProfileError(NovaException): + msg_fmt = _("Device profile name %(name)s: %(msg)s") + + +class AcceleratorRequestOpFailed(NovaException): + msg_fmt = _("Failed to %(op)s accelerator requests: %(msg)s") + + +class InvalidLibvirtGPUConfig(NovaException): + msg_fmt = _('Invalid configuration for GPU devices: %(reason)s') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/api.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,199 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Main abstraction layer for retrieving and storing information about disk -images used by the compute layer. -""" - -from nova.image import glance -from nova import profiler - - -@profiler.trace_cls("nova_image") -class API(object): - - """Responsible for exposing a relatively stable internal API for other - modules in Nova to retrieve information about disk images. This API - attempts to match the nova.volume.api and nova.network.api calling - interface. - """ - - def _get_session_and_image_id(self, context, id_or_uri): - """Returns a tuple of (session, image_id). If the supplied `id_or_uri` - is an image ID, then the default client session will be returned - for the context's user, along with the image ID. 
If the supplied - `id_or_uri` parameter is a URI, then a client session connecting to - the URI's image service endpoint will be returned along with a - parsed image ID from that URI. - - :param context: The `nova.context.Context` object for the request - :param id_or_uri: A UUID identifier or an image URI to look up image - information for. - """ - return glance.get_remote_image_service(context, id_or_uri) - - def _get_session(self, _context): - """Returns a client session that can be used to query for image - information. - - :param _context: The `nova.context.Context` object for the request - """ - # TODO(jaypipes): Refactor glance.get_remote_image_service and - # glance.get_default_image_service into a single - # method that takes a context and actually respects - # it, returning a real session object that keeps - # the context alive... - return glance.get_default_image_service() - - @staticmethod - def generate_image_url(image_ref, context): - """Generate an image URL from an image_ref. - - :param image_ref: The image ref to generate URL - :param context: The `nova.context.Context` object for the request - """ - return "%s/images/%s" % (next(glance.get_api_servers(context)), - image_ref) - - def get_all(self, context, **kwargs): - """Retrieves all information records about all disk images available - to show to the requesting user. If the requesting user is an admin, - all images in an ACTIVE status are returned. If the requesting user - is not an admin, the all public images and all private images that - are owned by the requesting user in the ACTIVE status are returned. - - :param context: The `nova.context.Context` object for the request - :param kwargs: A dictionary of filter and pagination values that - may be passed to the underlying image info driver. - """ - session = self._get_session(context) - return session.detail(context, **kwargs) - - def get(self, context, id_or_uri, include_locations=False, - show_deleted=True): - """Retrieves the information record for a single disk image. If the - supplied identifier parameter is a UUID, the default driver will - be used to return information about the image. If the supplied - identifier is a URI, then the driver that matches that URI endpoint - will be used to query for image information. - - :param context: The `nova.context.Context` object for the request - :param id_or_uri: A UUID identifier or an image URI to look up image - information for. - :param include_locations: (Optional) include locations in the returned - dict of information if the image service API - supports it. If the image service API does - not support the locations attribute, it will - still be included in the returned dict, as an - empty list. - :param show_deleted: (Optional) show the image even the status of - image is deleted. - """ - session, image_id = self._get_session_and_image_id(context, id_or_uri) - return session.show(context, image_id, - include_locations=include_locations, - show_deleted=show_deleted) - - def create(self, context, image_info, data=None): - """Creates a new image record, optionally passing the image bits to - backend storage. - - :param context: The `nova.context.Context` object for the request - :param image_info: A dict of information about the image that is - passed to the image registry. - :param data: Optional file handle or bytestream iterator that is - passed to backend storage. 
- """ - session = self._get_session(context) - return session.create(context, image_info, data=data) - - def update(self, context, id_or_uri, image_info, - data=None, purge_props=False): - """Update the information about an image, optionally along with a file - handle or bytestream iterator for image bits. If the optional file - handle for updated image bits is supplied, the image may not have - already uploaded bits for the image. - - :param context: The `nova.context.Context` object for the request - :param id_or_uri: A UUID identifier or an image URI to look up image - information for. - :param image_info: A dict of information about the image that is - passed to the image registry. - :param data: Optional file handle or bytestream iterator that is - passed to backend storage. - :param purge_props: Optional, defaults to False. If set, the backend - image registry will clear all image properties - and replace them the image properties supplied - in the image_info dictionary's 'properties' - collection. - """ - session, image_id = self._get_session_and_image_id(context, id_or_uri) - return session.update(context, image_id, image_info, data=data, - purge_props=purge_props) - - def delete(self, context, id_or_uri): - """Delete the information about an image and mark the image bits for - deletion. - - :param context: The `nova.context.Context` object for the request - :param id_or_uri: A UUID identifier or an image URI to look up image - information for. - """ - session, image_id = self._get_session_and_image_id(context, id_or_uri) - return session.delete(context, image_id) - - def download(self, context, id_or_uri, data=None, dest_path=None, - trusted_certs=None): - """Transfer image bits from Glance or a known source location to the - supplied destination filepath. - - :param context: The `nova.context.RequestContext` object for the - request - :param id_or_uri: A UUID identifier or an image URI to look up image - information for. - :param data: A file object to use in downloading image data. - :param dest_path: Filepath to transfer image bits to. - :param trusted_certs: A 'nova.objects.trusted_certs.TrustedCerts' - object with a list of trusted image certificate - IDs. - - Note that because of the poor design of the - `glance.ImageService.download` method, the function returns different - things depending on what arguments are passed to it. If a data argument - is supplied but no dest_path is specified (only done in the XenAPI virt - driver's image.utils module) then None is returned from the method. If - the data argument is not specified but a destination path *is* - specified, then a writeable file handle to the destination path is - constructed in the method and the image bits written to that file, and - again, None is returned from the method. If no data argument is - supplied and no dest_path argument is supplied (VMWare and XenAPI virt - drivers), then the method returns an iterator to the image bits that - the caller uses to write to wherever location it wants. Finally, if the - allow_direct_url_schemes CONF option is set to something, then the - nova.image.download modules are used to attempt to do an SCP copy of - the image bits from a file location to the dest_path and None is - returned after retrying one or more download locations (libvirt and - Hyper-V virt drivers through nova.virt.images.fetch). - - I think the above points to just how hacky/wacky all of this code is, - and the reason it needs to be cleaned up and standardized across the - virt driver callers. 
- """ - # TODO(jaypipes): Deprecate and remove this method entirely when we - # move to a system that simply returns a file handle - # to a bytestream iterator and allows the caller to - # handle streaming/copying/zero-copy as they see fit. - session, image_id = self._get_session_and_image_id(context, id_or_uri) - return session.download(context, image_id, data=data, - dst_path=dest_path, - trusted_certs=trusted_certs) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/glance.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/glance.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/glance.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/glance.py 2020-04-10 17:57:57.000000000 +0000 @@ -48,6 +48,7 @@ import nova.image.download as image_xfers from nova import objects from nova.objects import fields +from nova import profiler from nova import service_auth from nova import utils @@ -1012,7 +1013,181 @@ self.image_stream = stream def start(self): - image_service, image_id = ( - get_remote_image_service(self.context, self.image_id)) + image_service, image_id = get_remote_image_service( + self.context, self.image_id) image_service.update(self.context, image_id, self.metadata, self.image_stream, purge_props=False) + + +@profiler.trace_cls("nova_image") +class API(object): + """API for interacting with the image service.""" + + def _get_session_and_image_id(self, context, id_or_uri): + """Returns a tuple of (session, image_id). If the supplied `id_or_uri` + is an image ID, then the default client session will be returned + for the context's user, along with the image ID. If the supplied + `id_or_uri` parameter is a URI, then a client session connecting to + the URI's image service endpoint will be returned along with a + parsed image ID from that URI. + + :param context: The `nova.context.Context` object for the request + :param id_or_uri: A UUID identifier or an image URI to look up image + information for. + """ + return get_remote_image_service(context, id_or_uri) + + def _get_session(self, _context): + """Returns a client session that can be used to query for image + information. + + :param _context: The `nova.context.Context` object for the request + """ + # TODO(jaypipes): Refactor get_remote_image_service and + # get_default_image_service into a single + # method that takes a context and actually respects + # it, returning a real session object that keeps + # the context alive... + return get_default_image_service() + + @staticmethod + def generate_image_url(image_ref, context): + """Generate an image URL from an image_ref. + + :param image_ref: The image ref to generate URL + :param context: The `nova.context.Context` object for the request + """ + return "%s/images/%s" % (next(get_api_servers(context)), image_ref) + + def get_all(self, context, **kwargs): + """Retrieves all information records about all disk images available + to show to the requesting user. If the requesting user is an admin, + all images in an ACTIVE status are returned. If the requesting user + is not an admin, the all public images and all private images that + are owned by the requesting user in the ACTIVE status are returned. + + :param context: The `nova.context.Context` object for the request + :param kwargs: A dictionary of filter and pagination values that + may be passed to the underlying image info driver. 
+ """ + session = self._get_session(context) + return session.detail(context, **kwargs) + + def get(self, context, id_or_uri, include_locations=False, + show_deleted=True): + """Retrieves the information record for a single disk image. If the + supplied identifier parameter is a UUID, the default driver will + be used to return information about the image. If the supplied + identifier is a URI, then the driver that matches that URI endpoint + will be used to query for image information. + + :param context: The `nova.context.Context` object for the request + :param id_or_uri: A UUID identifier or an image URI to look up image + information for. + :param include_locations: (Optional) include locations in the returned + dict of information if the image service API + supports it. If the image service API does + not support the locations attribute, it will + still be included in the returned dict, as an + empty list. + :param show_deleted: (Optional) show the image even the status of + image is deleted. + """ + session, image_id = self._get_session_and_image_id(context, id_or_uri) + return session.show(context, image_id, + include_locations=include_locations, + show_deleted=show_deleted) + + def create(self, context, image_info, data=None): + """Creates a new image record, optionally passing the image bits to + backend storage. + + :param context: The `nova.context.Context` object for the request + :param image_info: A dict of information about the image that is + passed to the image registry. + :param data: Optional file handle or bytestream iterator that is + passed to backend storage. + """ + session = self._get_session(context) + return session.create(context, image_info, data=data) + + def update(self, context, id_or_uri, image_info, + data=None, purge_props=False): + """Update the information about an image, optionally along with a file + handle or bytestream iterator for image bits. If the optional file + handle for updated image bits is supplied, the image may not have + already uploaded bits for the image. + + :param context: The `nova.context.Context` object for the request + :param id_or_uri: A UUID identifier or an image URI to look up image + information for. + :param image_info: A dict of information about the image that is + passed to the image registry. + :param data: Optional file handle or bytestream iterator that is + passed to backend storage. + :param purge_props: Optional, defaults to False. If set, the backend + image registry will clear all image properties + and replace them the image properties supplied + in the image_info dictionary's 'properties' + collection. + """ + session, image_id = self._get_session_and_image_id(context, id_or_uri) + return session.update(context, image_id, image_info, data=data, + purge_props=purge_props) + + def delete(self, context, id_or_uri): + """Delete the information about an image and mark the image bits for + deletion. + + :param context: The `nova.context.Context` object for the request + :param id_or_uri: A UUID identifier or an image URI to look up image + information for. + """ + session, image_id = self._get_session_and_image_id(context, id_or_uri) + return session.delete(context, image_id) + + def download(self, context, id_or_uri, data=None, dest_path=None, + trusted_certs=None): + """Transfer image bits from Glance or a known source location to the + supplied destination filepath. 
+
+        :param context: The `nova.context.RequestContext` object for the
+                        request
+        :param id_or_uri: A UUID identifier or an image URI to look up image
+                          information for.
+        :param data: A file object to use in downloading image data.
+        :param dest_path: Filepath to transfer image bits to.
+        :param trusted_certs: A 'nova.objects.trusted_certs.TrustedCerts'
+                              object with a list of trusted image certificate
+                              IDs.
+
+        Note that because of the poor design of the
+        `glance.ImageService.download` method, the function returns different
+        things depending on what arguments are passed to it. If a data argument
+        is supplied but no dest_path is specified (only done in the XenAPI virt
+        driver's image.utils module) then None is returned from the method. If
+        the data argument is not specified but a destination path *is*
+        specified, then a writeable file handle to the destination path is
+        constructed in the method and the image bits written to that file, and
+        again, None is returned from the method. If no data argument is
+        supplied and no dest_path argument is supplied (VMWare and XenAPI virt
+        drivers), then the method returns an iterator to the image bits that
+        the caller uses to write to whatever location it wants. Finally, if the
+        allow_direct_url_schemes CONF option is set to something, then the
+        nova.image.download modules are used to attempt to do an SCP copy of
+        the image bits from a file location to the dest_path and None is
+        returned after retrying one or more download locations (libvirt and
+        Hyper-V virt drivers through nova.virt.images.fetch).
+
+        I think the above points to just how hacky/wacky all of this code is,
+        and the reason it needs to be cleaned up and standardized across the
+        virt driver callers.
+        """
+        # TODO(jaypipes): Deprecate and remove this method entirely when we
+        #                 move to a system that simply returns a file handle
+        #                 to a bytestream iterator and allows the caller to
+        #                 handle streaming/copying/zero-copy as they see fit.
+        session, image_id = self._get_session_and_image_id(context, id_or_uri)
+        return session.download(context, image_id, data=data,
+                                dst_path=dest_path,
+                                trusted_certs=trusted_certs)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/__init__.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/__init__.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/image/__init__.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/image/__init__.py 2020-04-10 17:57:57.000000000 +0000
@@ -1,17 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def API():
-    # Needed to prevent circular import...
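
To tie the relocated `glance.API` class above together, here is a hedged usage sketch; it assumes a reachable image service and suitable credentials, and the context values, UUID and path below are hypothetical. The `download()` calls mirror the behaviours enumerated in its docstring:

    from nova import context as nova_context
    from nova.image import glance

    ctxt = nova_context.RequestContext(user_id='demo', project_id='demo')
    image_api = glance.API()
    image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'  # hypothetical UUID

    # Look up a single image record, including its locations if the
    # image service exposes them.
    image = image_api.get(ctxt, image_id, include_locations=True)

    # With dest_path (and no data): the bits are written to the file
    # and None is returned.
    image_api.download(ctxt, image_id, dest_path='/tmp/image.bin')

    # With neither data nor dest_path: an iterator over the image bits
    # is returned for the caller to consume.
    for chunk in image_api.download(ctxt, image_id):
        pass  # write the chunk wherever it is needed
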
- import nova.image.api - return nova.image.api.API() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/monkey_patch.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/monkey_patch.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/monkey_patch.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/monkey_patch.py 2020-04-10 17:57:57.000000000 +0000 @@ -59,6 +59,13 @@ else: eventlet.monkey_patch() + # Monkey patch the original current_thread to use the up-to-date _active + # global variable. See https://bugs.launchpad.net/bugs/1863021 and + # https://github.com/eventlet/eventlet/issues/592 + import __original_module_threading as orig_threading + import threading + orig_threading.current_thread.__globals__['_active'] = threading._active + # NOTE(rpodolyaka): import oslo_service first, so that it makes eventlet # hub use a monotonic clock to avoid issues with drifts of system time (see # LP 1510234 for details) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/network/neutron.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/network/neutron.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/network/neutron.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/network/neutron.py 2020-04-10 17:57:57.000000000 +0000 @@ -236,7 +236,8 @@ session = _get_session() client_args = dict(session=session, auth=auth_plugin, - global_request_id=context.global_id) + global_request_id=context.global_id, + connect_retries=CONF.neutron.http_retries) # NOTE(efried): We build an adapter # to pull conf options @@ -1992,7 +1993,7 @@ instance=instance) return self._get_instance_nw_info(context, instance) - raise exception.FixedIpNotFoundForSpecificInstance( + raise exception.FixedIpNotFoundForInstance( instance_uuid=instance.uuid, ip=address) def _get_physnet_tunneled_info(self, context, neutron, net_id): @@ -3433,9 +3434,12 @@ pci_slot) # NOTE(gibi): during live migration the conductor already sets the - # allocation key in the port binding + # allocation key in the port binding. However during resize, cold + # migrate, evacuate and unshelve we have to set the binding here. + # Also note that during unshelve no migration object is created. if (p.get('resource_request') and - migration['migration_type'] != constants.LIVE_MIGRATION): + (migration is None or + migration['migration_type'] != constants.LIVE_MIGRATION)): if not provider_mappings: # TODO(gibi): Remove this check when compute RPC API is # bumped to 6.0 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/network/security_group_api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/network/security_group_api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/network/security_group_api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/network/security_group_api.py 2020-04-10 17:57:57.000000000 +0000 @@ -31,6 +31,7 @@ from six.moves import urllib from webob import exc +from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import neutron as neutronapi @@ -53,6 +54,33 @@ return id +def validate_name( + context: nova_context.RequestContext, + name: str): + """Validate a security group name and return the corresponding UUID. + + :param context: The nova request context. + :param name: The name of the security group. + :raises NoUniqueMatch: If there is no unique match for the provided name. + :raises SecurityGroupNotFound: If there's no match for the provided name. 
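
Relating to the nova/network/neutron.py hunk above: the binding-update condition now also fires when no Migration object exists (the unshelve case). A standalone sketch of the predicate; the string value of the `LIVE_MIGRATION` constant is an assumption made for illustration:

    LIVE_MIGRATION = 'live-migration'  # assumed constant value

    def compute_must_set_binding(port, migration):
        # The conductor already set the allocation key for live migration;
        # resize, cold migrate, evacuate and unshelve (which creates no
        # Migration object) are handled on the compute side.
        return bool(port.get('resource_request')) and (
            migration is None or
            migration['migration_type'] != LIVE_MIGRATION)

    assert compute_must_set_binding({'resource_request': {'resources': {}}}, None)
    assert not compute_must_set_binding({}, None)
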
+ :raises NeutronClientException: For all other exceptions. + """ + neutron = neutronapi.get_client(context) + try: + return neutronv20.find_resourceid_by_name_or_id( + neutron, 'security_group', name, context.project_id) + except n_exc.NeutronClientNoUniqueMatch as e: + raise exception.NoUniqueMatch(six.text_type(e)) + except n_exc.NeutronClientException as e: + exc_info = sys.exc_info() + if e.status_code == 404: + LOG.debug('Neutron security group %s not found', name) + raise exception.SecurityGroupNotFound(six.text_type(e)) + else: + LOG.error('Neutron Error: %s', e) + six.reraise(*exc_info) + + def parse_cidr(cidr): if not cidr: return '0.0.0.0/0' @@ -280,44 +308,26 @@ return nova_rule -def get(context, name=None, id=None, map_exception=False): +def get(context, id): neutron = neutronapi.get_client(context) try: - if not id and name: - # NOTE(flwang): The project id should be honoured so as to get - # the correct security group id when user(with admin role but - # non-admin project) try to query by name, so as to avoid - # getting more than duplicated records with the same name. - id = neutronv20.find_resourceid_by_name_or_id( - neutron, 'security_group', name, context.project_id) group = neutron.show_security_group(id).get('security_group') return _convert_to_nova_security_group_format(group) - except n_exc.NeutronClientNoUniqueMatch as e: - raise exception.NoUniqueMatch(six.text_type(e)) except n_exc.NeutronClientException as e: exc_info = sys.exc_info() if e.status_code == 404: - LOG.debug("Neutron security group %s not found", name) + LOG.debug('Neutron security group %s not found', id) raise exception.SecurityGroupNotFound(six.text_type(e)) else: LOG.error("Neutron Error: %s", e) six.reraise(*exc_info) - except TypeError as e: - LOG.error("Neutron Error: %s", e) - msg = _("Invalid security group name: %(name)s.") % {"name": name} - raise exception.SecurityGroupNotFound(six.text_type(msg)) -def list(context, names=None, ids=None, project=None, - search_opts=None): +def list(context, project, search_opts=None): """Returns list of security group rules owned by tenant.""" neutron = neutronapi.get_client(context) params = {} search_opts = search_opts if search_opts else {} - if names: - params['name'] = names - if ids: - params['id'] = ids # NOTE(jeffrey4l): list all the security groups when following # conditions are met @@ -325,23 +335,25 @@ # * it is admin context and all_tenants exist in search_opts. # * project is not specified. list_all_tenants = (context.is_admin and - 'all_tenants' in search_opts and - not any([names, ids])) - # NOTE(jeffrey4l): The neutron doesn't have `all-tenants` concept. + 'all_tenants' in search_opts) + # NOTE(jeffrey4l): neutron doesn't have `all-tenants` concept. # All the security group will be returned if the project/tenant # id is not passed. 
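
A short usage sketch for the validate_name() helper introduced above; the context values and group name are illustrative only:

    from nova import context as nova_context
    from nova import exception
    from nova.network import security_group_api

    ctxt = nova_context.RequestContext(user_id='demo', project_id='demo')

    try:
        # Resolve a security group name to its neutron UUID, scoped to
        # the context's project.
        sg_uuid = security_group_api.validate_name(ctxt, 'default')
    except exception.NoUniqueMatch:
        pass  # more than one group with this name in the project
    except exception.SecurityGroupNotFound:
        pass  # no group with this name
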
- if project and not list_all_tenants: + if not list_all_tenants: params['tenant_id'] = project + try: security_groups = neutron.list_security_groups(**params).get( 'security_groups') except n_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error getting security groups") + converted_rules = [] for security_group in security_groups: converted_rules.append( _convert_to_nova_security_group_format(security_group)) + return converted_rules diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/notifications/base.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/notifications/base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/notifications/base.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/notifications/base.py 2020-04-10 17:57:57.000000000 +0000 @@ -28,7 +28,7 @@ import nova.conf import nova.context from nova import exception -from nova import image as image_api +from nova.image import glance from nova.network import model as network_model from nova.network import neutron from nova.notifications.objects import base as notification_base @@ -370,7 +370,7 @@ # NOTE(mriedem): We can eventually drop this when we no longer # support legacy notifications since versioned notifications don't # use this. - image_ref_url = image_api.API().generate_image_url( + image_ref_url = glance.API().generate_image_url( instance.image_ref, context) except ks_exc.EndpointNotFound: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/base.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/base.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/base.py 2020-04-10 17:57:57.000000000 +0000 @@ -332,7 +332,20 @@ else arg for arg in args] for k, v in kwargs.items(): if k == 'exc_val' and v: - kwargs[k] = six.text_type(v) + try: + # NOTE(danms): When we run this for a remotable method, + # we need to attempt to format_message() the exception to + # get the sanitized message, and if it's not a + # NovaException, fall back to just the exception class + # name. However, a remotable will end up calling this again + # on the other side of the RPC call, so we must not try + # to do that again, otherwise we will always end up with + # just str. So, only do that if exc_val is an Exception + # class. + kwargs[k] = (v.format_message() if isinstance(v, Exception) + else v) + except Exception: + kwargs[k] = v.__class__.__name__ elif k == 'exc_tb' and v and not isinstance(v, six.string_types): kwargs[k] = ''.join(traceback.format_tb(v)) elif isinstance(v, datetime.datetime): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/external_event.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/external_event.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/external_event.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/external_event.py 2020-04-10 17:57:57.000000000 +0000 @@ -29,6 +29,10 @@ # Power state has changed for this instance 'power-update', + + # Accelerator Request got bound, tag is ARQ uuid. + # Sent when an ARQ for an instance has been bound or failed to bind. 
+ 'accelerator-request-bound', ] EVENT_STATUSES = ['failed', 'completed', 'in-progress'] @@ -45,7 +49,8 @@ # Version 1.1: adds network-vif-deleted event # Version 1.2: adds volume-extended event # Version 1.3: adds power-update event - VERSION = '1.3' + # Version 1.4: adds accelerator-request-bound event + VERSION = '1.4' fields = { 'instance_uuid': fields.UUIDField(), diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/fixed_ip.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/fixed_ip.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/fixed_ip.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/fixed_ip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,315 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_utils import timeutils -from oslo_utils import versionutils - -from nova.db import api as db -from nova.db.sqlalchemy import api as db_api -from nova.db.sqlalchemy import models -from nova import exception -from nova import objects -from nova.objects import base as obj_base -from nova.objects import fields -from nova import utils - - -FIXED_IP_OPTIONAL_ATTRS = ['instance', 'network', 'virtual_interface', - 'floating_ips'] - - -# TODO(berrange): Remove NovaObjectDictCompat -@obj_base.NovaObjectRegistry.register -class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject, - obj_base.NovaObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Added virtual_interface field - # Version 1.2: Instance version 1.14 - # Version 1.3: Instance 1.15 - # Version 1.4: Added default_route field - # Version 1.5: Added floating_ips field - # Version 1.6: Instance 1.16 - # Version 1.7: Instance 1.17 - # Version 1.8: Instance 1.18 - # Version 1.9: Instance 1.19 - # Version 1.10: Instance 1.20 - # Version 1.11: Instance 1.21 - # Version 1.12: Instance 1.22, FloatingIPList 1.9 - # Version 1.13: Instance 1.23, FloatingIPList 1.10 - # Version 1.14: Added vif_id kwarg to associate(_pool), FloatingIPList 1.11 - VERSION = '1.14' - - fields = { - 'id': fields.IntegerField(), - 'address': fields.IPV4AndV6AddressField(), - 'network_id': fields.IntegerField(nullable=True), - 'virtual_interface_id': fields.IntegerField(nullable=True), - 'instance_uuid': fields.UUIDField(nullable=True), - 'allocated': fields.BooleanField(), - 'leased': fields.BooleanField(), - 'reserved': fields.BooleanField(), - 'host': fields.StringField(nullable=True), - 'default_route': fields.BooleanField(), - 'instance': fields.ObjectField('Instance', nullable=True), - 'network': fields.ObjectField('Network', nullable=True), - 'virtual_interface': fields.ObjectField('VirtualInterface', - nullable=True), - # NOTE(danms): This should not ever be made lazy-loadable - # because it would create a bit of a loop between FixedIP - # and FloatingIP - 'floating_ips': fields.ObjectField('FloatingIPList'), - } - - def obj_make_compatible(self, primitive, target_version): - super(FixedIP, self).obj_make_compatible(primitive, 
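
The new accelerator-request-bound event above reaches nova through the os-server-external-events API, like other external events. A sketch of the request body a sender such as Cyborg might use; the UUIDs are hypothetical and the required API microversion and authentication are not shown:

    # POST /os-server-external-events
    body = {
        "events": [{
            "name": "accelerator-request-bound",
            # The instance whose ARQ was bound:
            "server_uuid": "6f9c60cb-ccc6-47f8-86e9-3f77e5b8e4e3",
            # Per the comment above, the tag carries the ARQ UUID:
            "tag": "52a1bb4a-6cb3-4e23-bc13-29ab3ca11ecd",
            "status": "completed",  # or "failed" if binding failed
        }]
    }
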
target_version) - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 4) and 'default_route' in primitive: - del primitive['default_route'] - - @staticmethod - def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for field in fixedip.fields: - if field == 'default_route': - # NOTE(danms): This field is only set when doing a - # FixedIPList.get_by_network() because it's a relatively - # special-case thing, so skip it here - continue - if field not in FIXED_IP_OPTIONAL_ATTRS: - fixedip[field] = db_fixedip[field] - # NOTE(danms): Instance could be deleted, and thus None - if 'instance' in expected_attrs: - fixedip.instance = objects.Instance._from_db_object( - context, - objects.Instance(context), - db_fixedip['instance']) if db_fixedip['instance'] else None - if 'network' in expected_attrs: - fixedip.network = objects.Network._from_db_object( - context, - objects.Network(context), - db_fixedip['network']) if db_fixedip['network'] else None - if 'virtual_interface' in expected_attrs: - db_vif = db_fixedip['virtual_interface'] - vif = objects.VirtualInterface._from_db_object( - context, - objects.VirtualInterface(context), - db_fixedip['virtual_interface']) if db_vif else None - fixedip.virtual_interface = vif - if 'floating_ips' in expected_attrs: - fixedip.floating_ips = obj_base.obj_make_list( - context, objects.FloatingIPList(context), - objects.FloatingIP, db_fixedip['floating_ips']) - fixedip._context = context - fixedip.obj_reset_changes() - return fixedip - - @obj_base.remotable_classmethod - def get_by_id(cls, context, id, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - get_network = 'network' in expected_attrs - db_fixedip = db.fixed_ip_get(context, id, get_network=get_network) - return cls._from_db_object(context, cls(context), db_fixedip, - expected_attrs) - - @obj_base.remotable_classmethod - def get_by_address(cls, context, address, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - db_fixedip = db.fixed_ip_get_by_address(context, str(address), - columns_to_join=expected_attrs) - return cls._from_db_object(context, cls(context), db_fixedip, - expected_attrs) - - @obj_base.remotable_classmethod - def get_by_floating_address(cls, context, address): - db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address)) - if db_fixedip is not None: - return cls._from_db_object(context, cls(context), db_fixedip) - - @obj_base.remotable_classmethod - def get_by_network_and_host(cls, context, network_id, host): - db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host) - return cls._from_db_object(context, cls(context), db_fixedip) - - @obj_base.remotable_classmethod - def associate(cls, context, address, instance_uuid, network_id=None, - reserved=False, vif_id=None): - db_fixedip = db.fixed_ip_associate(context, address, instance_uuid, - network_id=network_id, - reserved=reserved, - virtual_interface_id=vif_id) - return cls._from_db_object(context, cls(context), db_fixedip) - - @obj_base.remotable_classmethod - def associate_pool(cls, context, network_id, instance_uuid=None, - host=None, vif_id=None): - db_fixedip = db.fixed_ip_associate_pool(context, network_id, - instance_uuid=instance_uuid, - host=host, - virtual_interface_id=vif_id) - return cls._from_db_object(context, cls(context), db_fixedip) - - @obj_base.remotable_classmethod - def disassociate_by_address(cls, context, address): - 
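
The obj_make_compatible() implementations in these versioned objects all follow the recipe visible above: convert the target version to a tuple, then strip any field newer than that version from the serialized primitive. A standalone sketch modeled on the default_route handling (not nova code):

    from oslo_utils import versionutils

    def make_compatible(primitive, target_version):
        # Fields introduced in 1.4 must be dropped for older consumers.
        target = versionutils.convert_version_to_tuple(target_version)
        if target < (1, 4):
            primitive.pop('default_route', None)
        return primitive

    backlevel = make_compatible(
        {'address': '10.0.0.2', 'default_route': True}, '1.3')
    assert 'default_route' not in backlevel
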
db.fixed_ip_disassociate(context, address) - - @obj_base.remotable_classmethod - def _disassociate_all_by_timeout(cls, context, host, time_str): - time = timeutils.parse_isotime(time_str) - return db.fixed_ip_disassociate_all_by_timeout(context, host, time) - - @classmethod - def disassociate_all_by_timeout(cls, context, host, time): - return cls._disassociate_all_by_timeout(context, host, - utils.isotime(time)) - - @obj_base.remotable - def create(self): - updates = self.obj_get_changes() - if 'id' in updates: - raise exception.ObjectActionError(action='create', - reason='already created') - if 'address' in updates: - updates['address'] = str(updates['address']) - db_fixedip = db.fixed_ip_create(self._context, updates) - self._from_db_object(self._context, self, db_fixedip) - - @obj_base.remotable - def save(self): - updates = self.obj_get_changes() - if 'address' in updates: - raise exception.ObjectActionError(action='save', - reason='address is not mutable') - db.fixed_ip_update(self._context, str(self.address), updates) - self.obj_reset_changes() - - @obj_base.remotable - def disassociate(self): - db.fixed_ip_disassociate(self._context, str(self.address)) - self.instance_uuid = None - self.instance = None - self.obj_reset_changes(['instance_uuid', 'instance']) - - -@obj_base.NovaObjectRegistry.register -class FixedIPList(obj_base.ObjectListBase, obj_base.NovaObject): - # Version 1.0: Initial version - # Version 1.1: Added get_by_network() - # Version 1.2: FixedIP <= version 1.2 - # Version 1.3: FixedIP <= version 1.3 - # Version 1.4: FixedIP <= version 1.4 - # Version 1.5: FixedIP <= version 1.5, added expected attrs to gets - # Version 1.6: FixedIP <= version 1.6 - # Version 1.7: FixedIP <= version 1.7 - # Version 1.8: FixedIP <= version 1.8 - # Version 1.9: FixedIP <= version 1.9 - # Version 1.10: FixedIP <= version 1.10 - # Version 1.11: FixedIP <= version 1.11 - # Version 1.12: FixedIP <= version 1.12 - # Version 1.13: FixedIP <= version 1.13 - # Version 1.14: FixedIP <= version 1.14 - # Version 1.15: Added get_count_by_project() for quotas - VERSION = '1.15' - - fields = { - 'objects': fields.ListOfObjectsField('FixedIP'), - } - - @staticmethod - @db_api.pick_context_manager_reader - def _get_count_by_project_from_db(context, project_id): - return context.session.query(models.FixedIp.id).\ - filter_by(deleted=0).\ - join(models.Instance, - models.Instance.uuid == models.FixedIp.instance_uuid).\ - filter(models.Instance.project_id == project_id).\ - count() - - @obj_base.remotable_classmethod - def get_all(cls, context): - db_fixedips = db.fixed_ip_get_all(context) - return obj_base.obj_make_list(context, cls(context), - objects.FixedIP, db_fixedips) - - @obj_base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid): - expected_attrs = ['network', 'virtual_interface', 'floating_ips'] - db_fixedips = db.fixed_ip_get_by_instance(context, instance_uuid) - return obj_base.obj_make_list(context, cls(context), - objects.FixedIP, db_fixedips, - expected_attrs=expected_attrs) - - @obj_base.remotable_classmethod - def get_by_host(cls, context, host): - db_fixedips = db.fixed_ip_get_by_host(context, host) - return obj_base.obj_make_list(context, cls(context), - objects.FixedIP, db_fixedips) - - @obj_base.remotable_classmethod - def get_by_virtual_interface_id(cls, context, vif_id): - expected_attrs = ['network', 'floating_ips'] - db_fixedips = db.fixed_ips_by_virtual_interface(context, vif_id) - return obj_base.obj_make_list(context, cls(context), - objects.FixedIP, 
db_fixedips, - expected_attrs=expected_attrs) - - @obj_base.remotable_classmethod - def get_by_network(cls, context, network, host=None): - ipinfo = db.network_get_associated_fixed_ips(context, - network['id'], - host=host) - if not ipinfo: - return [] - - fips = cls(context=context, objects=[]) - - for info in ipinfo: - inst = objects.Instance(context=context, - uuid=info['instance_uuid'], - hostname=info['instance_hostname'], - created_at=info['instance_created'], - updated_at=info['instance_updated']) - vif = objects.VirtualInterface(context=context, - id=info['vif_id'], - address=info['vif_address']) - fip = objects.FixedIP(context=context, - address=info['address'], - instance_uuid=info['instance_uuid'], - network_id=info['network_id'], - virtual_interface_id=info['vif_id'], - allocated=info['allocated'], - leased=info['leased'], - default_route=info['default_route'], - instance=inst, - virtual_interface=vif) - fips.objects.append(fip) - fips.obj_reset_changes() - return fips - - @obj_base.remotable_classmethod - def bulk_create(self, context, fixed_ips): - ips = [] - for fixedip in fixed_ips: - ip = obj_base.obj_to_primitive(fixedip) - if 'id' in ip: - raise exception.ObjectActionError(action='create', - reason='already created') - ips.append(ip) - db.fixed_ip_bulk_create(context, ips) - - @obj_base.remotable_classmethod - def get_count_by_project(cls, context, project_id): - return cls._get_count_by_project_from_db(context, project_id) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/floating_ip.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/floating_ip.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/floating_ip.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/floating_ip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,258 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova.db import api as db -from nova.db.sqlalchemy import api as db_api -from nova.db.sqlalchemy import models -from nova import exception -from nova import objects -from nova.objects import base as obj_base -from nova.objects import fields - -FLOATING_IP_OPTIONAL_ATTRS = ['fixed_ip'] - - -# TODO(berrange): Remove NovaObjectDictCompat -@obj_base.NovaObjectRegistry.register -class FloatingIP(obj_base.NovaPersistentObject, obj_base.NovaObject, - obj_base.NovaObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Added _get_addresses_by_instance_uuid() - # Version 1.2: FixedIP <= version 1.2 - # Version 1.3: FixedIP <= version 1.3 - # Version 1.4: FixedIP <= version 1.4 - # Version 1.5: FixedIP <= version 1.5 - # Version 1.6: FixedIP <= version 1.6 - # Version 1.7: FixedIP <= version 1.11 - # Version 1.8: FixedIP <= version 1.12 - # Version 1.9: FixedIP <= version 1.13 - # Version 1.10: FixedIP <= version 1.14 - VERSION = '1.10' - fields = { - 'id': fields.IntegerField(), - 'address': fields.IPAddressField(), - 'fixed_ip_id': fields.IntegerField(nullable=True), - 'project_id': fields.UUIDField(nullable=True), - 'host': fields.StringField(nullable=True), - 'auto_assigned': fields.BooleanField(), - 'pool': fields.StringField(nullable=True), - 'interface': fields.StringField(nullable=True), - 'fixed_ip': fields.ObjectField('FixedIP', nullable=True), - } - - @staticmethod - def _from_db_object(context, floatingip, db_floatingip, - expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for field in floatingip.fields: - if field not in FLOATING_IP_OPTIONAL_ATTRS: - floatingip[field] = db_floatingip[field] - if ('fixed_ip' in expected_attrs and - db_floatingip['fixed_ip'] is not None): - floatingip.fixed_ip = objects.FixedIP._from_db_object( - context, objects.FixedIP(context), db_floatingip['fixed_ip']) - floatingip._context = context - floatingip.obj_reset_changes() - return floatingip - - def obj_load_attr(self, attrname): - if attrname not in FLOATING_IP_OPTIONAL_ATTRS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason='attribute %s is not lazy-loadable' % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - if self.fixed_ip_id is not None: - self.fixed_ip = objects.FixedIP.get_by_id( - self._context, self.fixed_ip_id, expected_attrs=['network']) - else: - self.fixed_ip = None - - @obj_base.remotable_classmethod - def get_by_id(cls, context, id): - db_floatingip = db.floating_ip_get(context, id) - # XXX joins fixed.instance - return cls._from_db_object(context, cls(context), db_floatingip, - expected_attrs=['fixed_ip']) - - @obj_base.remotable_classmethod - def get_by_address(cls, context, address): - db_floatingip = db.floating_ip_get_by_address(context, str(address)) - return cls._from_db_object(context, cls(context), db_floatingip) - - @obj_base.remotable_classmethod - def get_pool_names(cls, context): - return [x['name'] for x in db.floating_ip_get_pools(context)] - - @obj_base.remotable_classmethod - def allocate_address(cls, context, project_id, pool, auto_assigned=False): - return db.floating_ip_allocate_address(context, project_id, pool, - auto_assigned=auto_assigned) - - @obj_base.remotable_classmethod - def associate(cls, context, floating_address, fixed_address, host): - db_fixed = db.floating_ip_fixed_ip_associate(context, - str(floating_address), - str(fixed_address), - host) - if db_fixed is None: - return None - - floating = FloatingIP( - 
context=context, address=floating_address, host=host, - fixed_ip_id=db_fixed['id'], - fixed_ip=objects.FixedIP._from_db_object( - context, objects.FixedIP(context), db_fixed, - expected_attrs=['network'])) - return floating - - @obj_base.remotable_classmethod - def deallocate(cls, context, address): - return db.floating_ip_deallocate(context, str(address)) - - @obj_base.remotable_classmethod - def destroy(cls, context, address): - db.floating_ip_destroy(context, str(address)) - - @obj_base.remotable_classmethod - def disassociate(cls, context, address): - db_fixed = db.floating_ip_disassociate(context, str(address)) - - return cls(context=context, address=address, - fixed_ip_id=db_fixed['id'], - fixed_ip=objects.FixedIP._from_db_object( - context, objects.FixedIP(context), db_fixed, - expected_attrs=['network'])) - - @obj_base.remotable_classmethod - def _get_addresses_by_instance_uuid(cls, context, instance_uuid): - return db.instance_floating_address_get_all(context, instance_uuid) - - @classmethod - def get_addresses_by_instance(cls, context, instance): - return cls._get_addresses_by_instance_uuid(context, instance['uuid']) - - @obj_base.remotable - def save(self): - updates = self.obj_get_changes() - if 'address' in updates: - raise exception.ObjectActionError(action='save', - reason='address is not mutable') - if 'fixed_ip_id' in updates: - reason = 'fixed_ip_id is not mutable' - raise exception.ObjectActionError(action='save', reason=reason) - - # NOTE(danms): Make sure we don't pass the calculated fixed_ip - # relationship to the DB update method - updates.pop('fixed_ip', None) - - db_floatingip = db.floating_ip_update(self._context, str(self.address), - updates) - self._from_db_object(self._context, self, db_floatingip) - - -@obj_base.NovaObjectRegistry.register -class FloatingIPList(obj_base.ObjectListBase, obj_base.NovaObject): - # Version 1.3: FloatingIP 1.2 - # Version 1.4: FloatingIP 1.3 - # Version 1.5: FloatingIP 1.4 - # Version 1.6: FloatingIP 1.5 - # Version 1.7: FloatingIP 1.6 - # Version 1.8: FloatingIP 1.7 - # Version 1.9: FloatingIP 1.8 - # Version 1.10: FloatingIP 1.9 - # Version 1.11: FloatingIP 1.10 - # Version 1.12: Added get_count_by_project() for quotas - fields = { - 'objects': fields.ListOfObjectsField('FloatingIP'), - } - VERSION = '1.12' - - @staticmethod - @db_api.pick_context_manager_reader - def _get_count_by_project_from_db(context, project_id): - return context.session.query(models.FloatingIp.id).\ - filter_by(deleted=0).\ - filter_by(project_id=project_id).\ - filter_by(auto_assigned=False).\ - count() - - @obj_base.remotable_classmethod - def get_all(cls, context): - db_floatingips = db.floating_ip_get_all(context) - return obj_base.obj_make_list(context, cls(context), - objects.FloatingIP, db_floatingips) - - @obj_base.remotable_classmethod - def get_by_host(cls, context, host): - db_floatingips = db.floating_ip_get_all_by_host(context, host) - return obj_base.obj_make_list(context, cls(context), - objects.FloatingIP, db_floatingips) - - @obj_base.remotable_classmethod - def get_by_project(cls, context, project_id): - db_floatingips = db.floating_ip_get_all_by_project(context, project_id) - return obj_base.obj_make_list(context, cls(context), - objects.FloatingIP, db_floatingips) - - @obj_base.remotable_classmethod - def get_by_fixed_address(cls, context, fixed_address): - db_floatingips = db.floating_ip_get_by_fixed_address( - context, str(fixed_address)) - return obj_base.obj_make_list(context, cls(context), - objects.FloatingIP, db_floatingips) - - 
@obj_base.remotable_classmethod - def get_by_fixed_ip_id(cls, context, fixed_ip_id): - db_floatingips = db.floating_ip_get_by_fixed_ip_id(context, - fixed_ip_id) - return obj_base.obj_make_list(context, cls(), FloatingIP, - db_floatingips) - - @staticmethod - def make_ip_info(address, pool, interface): - return {'address': str(address), - 'pool': pool, - 'interface': interface} - - @obj_base.remotable_classmethod - def create(cls, context, ip_info, want_result=False): - db_floatingips = db.floating_ip_bulk_create(context, ip_info, - want_result=want_result) - if want_result: - return obj_base.obj_make_list(context, cls(), FloatingIP, - db_floatingips) - - @obj_base.remotable_classmethod - def destroy(cls, context, ips): - db.floating_ip_bulk_destroy(context, ips) - - @obj_base.remotable_classmethod - def get_count_by_project(cls, context, project_id): - return cls._get_count_by_project_from_db(context, project_id) - - -# We don't want to register this object because it will not be passed -# around on RPC, it just makes our lives a lot easier in the API when -# dealing with floating IP operations -@obj_base.NovaObjectRegistry.register_if(False) -class NeutronFloatingIP(FloatingIP): - # Version 1.0: Initial Version - VERSION = '1.0' - fields = { - 'id': fields.UUIDField(), - 'fixed_ip_id': fields.UUIDField(nullable=True) - } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/__init__.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/__init__.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/__init__.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/__init__.py 2020-04-10 17:57:57.000000000 +0000 @@ -35,9 +35,7 @@ __import__('nova.objects.console_auth_token') __import__('nova.objects.ec2') __import__('nova.objects.external_event') - __import__('nova.objects.fixed_ip') __import__('nova.objects.flavor') - __import__('nova.objects.floating_ip') __import__('nova.objects.host_mapping') __import__('nova.objects.hv_spec') __import__('nova.objects.image_meta') @@ -55,7 +53,6 @@ __import__('nova.objects.migration') __import__('nova.objects.migration_context') __import__('nova.objects.monitor_metric') - __import__('nova.objects.network') __import__('nova.objects.network_metadata') __import__('nova.objects.network_request') __import__('nova.objects.numa') @@ -66,7 +63,6 @@ __import__('nova.objects.quotas') __import__('nova.objects.resource') __import__('nova.objects.security_group') - __import__('nova.objects.security_group_rule') __import__('nova.objects.selection') __import__('nova.objects.service') __import__('nova.objects.task_log') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/instance_action.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/instance_action.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/instance_action.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/instance_action.py 2020-04-10 17:57:57.000000000 +0000 @@ -14,7 +14,9 @@ from oslo_utils import timeutils from oslo_utils import versionutils +import six +from nova.compute import utils as compute_utils from nova.db import api as db from nova import exception from nova import objects @@ -138,7 +140,8 @@ # Version 1.1: event_finish_with_failure decorated with serialize_args # Version 1.2: Add 'host' field # Version 1.3: Add create() method. - VERSION = '1.3' + # Version 1.4: Added 'details' field. 
+    VERSION = '1.4'
     fields = {
         'id': fields.IntegerField(),
         'event': fields.StringField(nullable=True),
@@ -148,10 +151,13 @@
         'result': fields.StringField(nullable=True),
         'traceback': fields.StringField(nullable=True),
         'host': fields.StringField(nullable=True),
+        'details': fields.StringField(nullable=True)
     }

     def obj_make_compatible(self, primitive, target_version):
         target_version = versionutils.convert_version_to_tuple(target_version)
+        if target_version < (1, 4) and 'details' in primitive:
+            del primitive['details']
         if target_version < (1, 2) and 'host' in primitive:
             del primitive['host']
@@ -184,18 +190,18 @@
             values['result'] = 'Success'
         else:
             values['result'] = 'Error'
-            # FIXME(mriedem): message is not used. The instance_actions_events
-            # table has a "details" column but not a "message" column which
-            # means the exc_val is never stored in the record. So far it does
-            # not matter because the exc_val is not exposed out of the API,
-            # but we should consider storing at least the exception type so
-            # we could expose that to non-admin owners of a server in the API
-            # since then they could see something like NoValidHost to know why
-            # the operation failed. Note by default policy non-admins are not
-            # allowed to see the traceback field. If we expose exc_val somehow
-            # we might consider re-using logic from exception_to_dict which
-            # is used to store an instance fault message.
-            values['message'] = exc_val
+            # Store the details using the same logic as storing an instance
+            # fault message.
+            if exc_val:
+                # If we got a string for exc_val it's probably because of
+                # the serialize_args decorator on event_finish_with_failure
+                # so pass that as the message to exception_to_dict otherwise
+                # the details will just be the exception class name since it
+                # cannot format the message as a NovaException.
+                message = (
+                    exc_val if isinstance(exc_val, six.string_types) else None)
+                values['details'] = compute_utils.exception_to_dict(
+                    exc_val, message=message)['message']
             values['traceback'] = exc_tb
         return values
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/network.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/network.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/network.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/network.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,229 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
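
The new details handling above derives a user-safe message from exc_val, reusing the instance-fault logic. A condensed sketch of just that step; it assumes a nova tree on the import path and that exception_to_dict() returns a dict with a 'message' key, as the hunk itself relies on:

    import six

    from nova.compute import utils as compute_utils

    def details_from(exc_val):
        # serialize_args may already have turned exc_val into a plain
        # string; pass it through as the message so exception_to_dict()
        # does not fall back to just the exception class name.
        message = exc_val if isinstance(exc_val, six.string_types) else None
        return compute_utils.exception_to_dict(
            exc_val, message=message)['message']
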
- -import netaddr -from oslo_utils import versionutils - -import nova.conf -from nova.db import api as db -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import base as obj_base -from nova.objects import fields - -CONF = nova.conf.CONF - - -# TODO(berrange): Remove NovaObjectDictCompat -@obj_base.NovaObjectRegistry.register -class Network(obj_base.NovaPersistentObject, obj_base.NovaObject, - obj_base.NovaObjectDictCompat): - # Version 1.0: Initial version - # Version 1.1: Added in_use_on_host() - # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address - VERSION = '1.2' - - fields = { - 'id': fields.IntegerField(), - 'label': fields.StringField(), - 'injected': fields.BooleanField(), - 'cidr': fields.IPV4NetworkField(nullable=True), - 'cidr_v6': fields.IPV6NetworkField(nullable=True), - 'multi_host': fields.BooleanField(), - 'netmask': fields.IPV4AddressField(nullable=True), - 'gateway': fields.IPV4AddressField(nullable=True), - 'broadcast': fields.IPV4AddressField(nullable=True), - 'netmask_v6': fields.IPV6AddressField(nullable=True), - 'gateway_v6': fields.IPV6AddressField(nullable=True), - 'bridge': fields.StringField(nullable=True), - 'bridge_interface': fields.StringField(nullable=True), - 'dns1': fields.IPAddressField(nullable=True), - 'dns2': fields.IPAddressField(nullable=True), - 'vlan': fields.IntegerField(nullable=True), - 'vpn_public_address': fields.IPAddressField(nullable=True), - 'vpn_public_port': fields.IntegerField(nullable=True), - 'vpn_private_address': fields.IPAddressField(nullable=True), - 'dhcp_start': fields.IPV4AddressField(nullable=True), - 'rxtx_base': fields.IntegerField(nullable=True), - 'project_id': fields.UUIDField(nullable=True), - 'priority': fields.IntegerField(nullable=True), - 'host': fields.StringField(nullable=True), - 'uuid': fields.UUIDField(), - 'mtu': fields.IntegerField(nullable=True), - 'dhcp_server': fields.IPAddressField(nullable=True), - 'enable_dhcp': fields.BooleanField(), - 'share_address': fields.BooleanField(), - } - - @staticmethod - def _convert_legacy_ipv6_netmask(netmask): - """Handle netmask_v6 possibilities from the database. - - Historically, this was stored as just an integral CIDR prefix, - but in the future it should be stored as an actual netmask. - Be tolerant of either here. 
- """ - try: - prefix = int(netmask) - return netaddr.IPNetwork('1::/%i' % prefix).netmask - except ValueError: - pass - - try: - return netaddr.IPNetwork(netmask).netmask - except netaddr.AddrFormatError: - raise ValueError(_('IPv6 netmask "%s" must be a netmask ' - 'or integral prefix') % netmask) - - def obj_make_compatible(self, primitive, target_version): - target_version = versionutils.convert_version_to_tuple(target_version) - if target_version < (1, 2): - if 'mtu' in primitive: - del primitive['mtu'] - if 'enable_dhcp' in primitive: - del primitive['enable_dhcp'] - if 'dhcp_server' in primitive: - del primitive['dhcp_server'] - if 'share_address' in primitive: - del primitive['share_address'] - - @staticmethod - def _from_db_object(context, network, db_network): - for field in network.fields: - db_value = db_network[field] - if field == 'netmask_v6' and db_value is not None: - db_value = network._convert_legacy_ipv6_netmask(db_value) - elif field == 'dhcp_server' and db_value is None: - db_value = db_network['gateway'] - - network[field] = db_value - network._context = context - network.obj_reset_changes() - return network - - @obj_base.remotable_classmethod - def get_by_id(cls, context, network_id, project_only='allow_none'): - db_network = db.network_get(context, network_id, - project_only=project_only) - return cls._from_db_object(context, cls(), db_network) - - @obj_base.remotable_classmethod - def get_by_uuid(cls, context, network_uuid): - db_network = db.network_get_by_uuid(context, network_uuid) - return cls._from_db_object(context, cls(), db_network) - - @obj_base.remotable_classmethod - def get_by_cidr(cls, context, cidr): - db_network = db.network_get_by_cidr(context, cidr) - return cls._from_db_object(context, cls(), db_network) - - # TODO(stephenfin): This is no longer used and can be removed - @obj_base.remotable_classmethod - def associate(cls, context, project_id, network_id=None, force=False): - db.network_associate(context, project_id, network_id=network_id, - force=force) - - # TODO(stephenfin): This is no longer used and can be removed - @obj_base.remotable_classmethod - def disassociate(cls, context, network_id, host=False, project=False): - db.network_disassociate(context, network_id, host, project) - - @obj_base.remotable_classmethod - def in_use_on_host(cls, context, network_id, host): - return db.network_in_use_on_host(context, network_id, host) - - def _get_primitive_changes(self): - changes = {} - for key, value in self.obj_get_changes().items(): - if isinstance(value, netaddr.IPAddress): - changes[key] = str(value) - else: - changes[key] = value - return changes - - @obj_base.remotable - def create(self): - updates = self._get_primitive_changes() - if 'id' in updates: - raise exception.ObjectActionError(action='create', - reason='already created') - db_network = db.network_create_safe(self._context, updates) - self._from_db_object(self._context, self, db_network) - - @obj_base.remotable - def destroy(self): - db.network_delete_safe(self._context, self.id) - self.deleted = True - self.obj_reset_changes(['deleted']) - - @obj_base.remotable - def save(self): - context = self._context - updates = self._get_primitive_changes() - if 'netmask_v6' in updates: - # NOTE(danms): For some reason, historical code stores the - # IPv6 netmask as just the CIDR mask length, so convert that - # back here before saving for now. 
- updates['netmask_v6'] = netaddr.IPNetwork( - updates['netmask_v6']).netmask - set_host = 'host' in updates - if set_host: - db.network_set_host(context, self.id, updates.pop('host')) - if updates: - db_network = db.network_update(context, self.id, updates) - elif set_host: - db_network = db.network_get(context, self.id) - else: - db_network = None - if db_network is not None: - self._from_db_object(context, self, db_network) - - -@obj_base.NovaObjectRegistry.register -class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject): - # Version 1.0: Initial version - # Version 1.1: Added get_by_project() - # Version 1.2: Network <= version 1.2 - VERSION = '1.2' - - fields = { - 'objects': fields.ListOfObjectsField('Network'), - } - - @obj_base.remotable_classmethod - def get_all(cls, context, project_only='allow_none'): - db_networks = db.network_get_all(context, project_only) - return obj_base.obj_make_list(context, cls(context), objects.Network, - db_networks) - - @obj_base.remotable_classmethod - def get_by_uuids(cls, context, network_uuids, project_only='allow_none'): - db_networks = db.network_get_all_by_uuids(context, network_uuids, - project_only) - return obj_base.obj_make_list(context, cls(context), objects.Network, - db_networks) - - @obj_base.remotable_classmethod - def get_by_host(cls, context, host): - db_networks = db.network_get_all_by_host(context, host) - return obj_base.obj_make_list(context, cls(context), objects.Network, - db_networks) - - @obj_base.remotable_classmethod - def get_by_project(cls, context, project_id, associate=True): - db_networks = db.project_get_networks(context, project_id, - associate=associate) - return obj_base.obj_make_list(context, cls(context), objects.Network, - db_networks) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/security_group_rule.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/security_group_rule.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/security_group_rule.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/security_group_rule.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# TODO(stephenfin): This is all nova-network related and can be deleted as soon -# as we remove the 'security_group' field from the 'Instance' object - -from nova.db import api as db -from nova import exception -from nova import objects -from nova.objects import base -from nova.objects import fields - -OPTIONAL_ATTRS = ['parent_group', 'grantee_group'] - - -@base.NovaObjectRegistry.register -class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject): - # Version 1.0: Initial version - # Version 1.1: Added create() and set id as read_only - VERSION = '1.1' - - fields = { - 'id': fields.IntegerField(read_only=True), - 'protocol': fields.StringField(nullable=True), - 'from_port': fields.IntegerField(nullable=True), - 'to_port': fields.IntegerField(nullable=True), - 'cidr': fields.IPNetworkField(nullable=True), - 'parent_group': fields.ObjectField('SecurityGroup', nullable=True), - 'grantee_group': fields.ObjectField('SecurityGroup', nullable=True), - } - - @staticmethod - def _from_db_subgroup(context, db_group): - if db_group is None: - return None - return objects.SecurityGroup._from_db_object( - context, objects.SecurityGroup(context), db_group) - - @staticmethod - def _from_db_object(context, rule, db_rule, expected_attrs=None): - if expected_attrs is None: - expected_attrs = [] - for field in rule.fields: - if field in expected_attrs: - setattr(rule, field, - rule._from_db_subgroup(context, db_rule[field])) - elif field not in OPTIONAL_ATTRS: - setattr(rule, field, db_rule[field]) - rule._context = context - rule.obj_reset_changes() - return rule - - @base.remotable - def create(self): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason='already created') - updates = self.obj_get_changes() - parent_group = updates.pop('parent_group', None) - if parent_group: - updates['parent_group_id'] = parent_group.id - grantee_group = updates.pop('grantee_group', None) - if grantee_group: - updates['group_id'] = grantee_group.id - db_rule = db.security_group_rule_create(self._context, updates) - self._from_db_object(self._context, self, db_rule) - - @base.remotable_classmethod - def get_by_id(cls, context, rule_id): - db_rule = db.security_group_rule_get(context, rule_id) - return cls._from_db_object(context, cls(), db_rule) - - -@base.NovaObjectRegistry.register -class SecurityGroupRuleList(base.ObjectListBase, base.NovaObject): - fields = { - 'objects': fields.ListOfObjectsField('SecurityGroupRule'), - } - VERSION = '1.2' - - @base.remotable_classmethod - def get_by_security_group_id(cls, context, secgroup_id): - db_rules = db.security_group_rule_get_by_security_group( - context, secgroup_id, columns_to_join=['grantee_group']) - return base.obj_make_list(context, cls(context), - objects.SecurityGroupRule, db_rules, - expected_attrs=['grantee_group']) - - @classmethod - def get_by_security_group(cls, context, security_group): - return cls.get_by_security_group_id(context, security_group.id) - - @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid): - db_rules = db.security_group_rule_get_by_instance(context, - instance_uuid) - return base.obj_make_list(context, cls(context), - objects.SecurityGroupRule, db_rules, - expected_attrs=['grantee_group']) - - @classmethod - def get_by_instance(cls, context, instance): - return cls.get_by_instance_uuid(context, instance.uuid) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/service.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/service.py --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/objects/service.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/objects/service.py 2020-04-10 17:57:57.000000000 +0000 @@ -31,7 +31,7 @@ # NOTE(danms): This is the global service version counter -SERVICE_VERSION = 48 +SERVICE_VERSION = 51 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any @@ -178,6 +178,13 @@ {'compute_rpc': '5.10'}, # Version 48: Drivers report COMPUTE_SAME_HOST_COLD_MIGRATE trait. {'compute_rpc': '5.10'}, + # Version 49: Compute now supports server move operations with qos ports + {'compute_rpc': '5.10'}, + # Version 50: Compute RPC v5.11: + # Add accel_uuids (accelerator requests) param to build_and_run_instance + {'compute_rpc': '5.11'}, + # Version 51: Add support for live migration with vpmem + {'compute_rpc': '5.11'}, ) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/admin_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/admin_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/admin_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/admin_actions.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,10 +23,10 @@ admin_actions_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'reset_state', - base.RULE_ADMIN_API, - "Reset the state of a given server", - [ + name=POLICY_ROOT % 'reset_state', + check_str=base.SYSTEM_ADMIN, + description="Reset the state of a given server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-resetState)' @@ -34,10 +34,10 @@ ], scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'inject_network_info', - base.RULE_ADMIN_API, - "Inject network information into the server", - [ + name=POLICY_ROOT % 'inject_network_info', + check_str=base.SYSTEM_ADMIN, + description="Inject network information into the server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (injectNetworkInfo)' @@ -45,10 +45,10 @@ ], scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'reset_network', - base.RULE_ADMIN_API, - "Reset networking on a server", - [ + name=POLICY_ROOT % 'reset_network', + check_str=base.SYSTEM_ADMIN, + description="Reset networking on a server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (resetNetwork)' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/admin_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/admin_password.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/admin_password.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/admin_password.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ admin_password_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Change the administrative password for a server", - [ + name=BASE_POLICY_NAME, + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Change the administrative password for a server", + operations=[ { 'path': '/servers/{server_id}/action (changePassword)', 'method': 'POST' } - ]) + ], + scope_types=['system', 'project']) ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/agents.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/agents.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/agents.py 2020-02-10 08:50:32.000000000 +0000 +++
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/agents.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,36 +18,89 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-agents' +BASE_POLICY_NAME = 'os_compute_api:os-agents:%s' +DEPRECATED_AGENTS_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-agents', + base.RULE_ADMIN_API, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. +""" agents_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - """Create, list, update, and delete guest agent builds - + name=BASE_POLICY_NAME % 'list', + check_str=base.SYSTEM_READER, + description="""List guest agent builds This is XenAPI driver specific. It is used to force the upgrade of the XenAPI guest agent on instance boot. """, - [ + operations=[ { 'path': '/os-agents', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_AGENTS_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'create', + check_str=base.SYSTEM_ADMIN, + description="""Create guest agent builds +This is XenAPI driver specific. +It is used to force the upgrade of the XenAPI guest agent on instance boot. +""", + operations=[ + { 'path': '/os-agents', 'method': 'POST' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_AGENTS_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'update', + check_str=base.SYSTEM_ADMIN, + description="""Update guest agent builds +This is XenAPI driver specific. +It is used to force the upgrade of the XenAPI guest agent on instance boot. +""", + operations=[ { 'path': '/os-agents/{agent_build_id}', 'method': 'PUT' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_AGENTS_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'delete', + check_str=base.SYSTEM_ADMIN, + description="""Delete guest agent builds +This is XenAPI driver specific. +It is used to force the upgrade of the XenAPI guest agent on instance boot. 
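The os-agents hunk above shows the pattern this series applies across the policy tree: one blanket rule is split into per-operation rules, each carrying a ``policy.DeprecatedRule`` pointing at the old name so existing operator overrides keep working during the deprecation window. A minimal sketch of that shape using a hypothetical ``os-example`` policy (names, check strings, and reason text here are illustrative, not from the patch):

.. code:: python

   from oslo_policy import policy

   # The old blanket rule that deployments may have overridden.
   DEPRECATED_RULE = policy.DeprecatedRule(
       'os_compute_api:os-example', 'rule:admin_api')

   rules = [
       policy.DocumentedRuleDefault(
           name='os_compute_api:os-example:list',
           check_str='role:reader and system_scope:all',
           description='List example resources.',
           operations=[{'path': '/os-example', 'method': 'GET'}],
           scope_types=['system'],
           # An override of the old name is still honoured (ORed in)
           # until the deprecated rule is removed.
           deprecated_rule=DEPRECATED_RULE,
           deprecated_reason='New granular defaults with scope types.',
           deprecated_since='21.0.0'),
   ]

With this in place oslo.policy logs a deprecation warning when the old rule is overridden, rather than silently dropping the override.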
+""", + operations=[ { 'path': '/os-agents/{agent_build_id}', 'method': 'DELETE' } - ]), + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_AGENTS_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/aggregates.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/aggregates.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/aggregates.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/aggregates.py 2020-04-10 17:57:57.000000000 +0000 @@ -24,95 +24,104 @@ aggregates_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'set_metadata', - base.RULE_ADMIN_API, - "Create or replace metadata for an aggregate", - [ + name=POLICY_ROOT % 'set_metadata', + check_str=base.RULE_ADMIN_API, + description="Create or replace metadata for an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (set_metadata)', 'method': 'POST' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'add_host', - base.RULE_ADMIN_API, - "Add a host to an aggregate", - [ + name=POLICY_ROOT % 'add_host', + check_str=base.RULE_ADMIN_API, + description="Add a host to an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (add_host)', 'method': 'POST' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_API, - "Create an aggregate", - [ + name=POLICY_ROOT % 'create', + check_str=base.RULE_ADMIN_API, + description="Create an aggregate", + operations=[ { 'path': '/os-aggregates', 'method': 'POST' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'remove_host', - base.RULE_ADMIN_API, - "Remove a host from an aggregate", - [ + name=POLICY_ROOT % 'remove_host', + check_str=base.RULE_ADMIN_API, + description="Remove a host from an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (remove_host)', 'method': 'POST' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update', - base.RULE_ADMIN_API, - "Update name and/or availability zone for an aggregate", - [ + name=POLICY_ROOT % 'update', + check_str=base.RULE_ADMIN_API, + description="Update name and/or availability zone for an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'PUT' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_API, - "List all aggregates", - [ + name=POLICY_ROOT % 'index', + check_str=base.RULE_ADMIN_API, + description="List all aggregates", + operations=[ { 'path': '/os-aggregates', 'method': 'GET' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_API, - "Delete an aggregate", - [ + name=POLICY_ROOT % 'delete', + check_str=base.RULE_ADMIN_API, + description="Delete an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'DELETE' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_API, - "Show details for an aggregate", - [ + name=POLICY_ROOT % 'show', + check_str=base.RULE_ADMIN_API, + description="Show details for an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'GET' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - NEW_POLICY_ROOT % 'images', - base.RULE_ADMIN_API, - "Request 
image caching for an aggregate", - [ + name=NEW_POLICY_ROOT % 'images', + check_str=base.RULE_ADMIN_API, + description="Request image caching for an aggregate", + operations=[ { 'path': '/os-aggregates/{aggregate_id}/images', 'method': 'POST' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/assisted_volume_snapshots.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/assisted_volume_snapshots.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/assisted_volume_snapshots.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/assisted_volume_snapshots.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,27 @@ assisted_volume_snapshots_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_API, - "Create an assisted volume snapshot", - [ + name=POLICY_ROOT % 'create', + check_str=base.SYSTEM_ADMIN, + description="Create an assisted volume snapshot", + operations=[ { 'path': '/os-assisted-volume-snapshots', 'method': 'POST' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_API, - "Delete an assisted volume snapshot", - [ + name=POLICY_ROOT % 'delete', + check_str=base.SYSTEM_ADMIN, + description="Delete an assisted volume snapshot", + operations=[ { 'path': '/os-assisted-volume-snapshots/{snapshot_id}', 'method': 'DELETE' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/attach_interfaces.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/attach_interfaces.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/attach_interfaces.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/attach_interfaces.py 2020-04-10 17:57:57.000000000 +0000 @@ -20,44 +20,74 @@ BASE_POLICY_NAME = 'os_compute_api:os-attach-interfaces' POLICY_ROOT = 'os_compute_api:os-attach-interfaces:%s' +DEPRECATED_INTERFACES_POLICY = policy.DeprecatedRule( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, +) +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
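Since os-attach-interfaces now declares both ``system`` and ``project`` scope types with project-reader defaults, a rough sketch of how such a default behaves at enforcement time may help; the rule name comes from the hunk above, the check string stands in for PROJECT_READER_OR_SYSTEM_READER, and the snippet is illustrative rather than nova's actual wiring:

.. code:: python

   from oslo_config import cfg
   from oslo_policy import policy

   enforcer = policy.Enforcer(cfg.CONF)
   enforcer.register_default(policy.RuleDefault(
       'os_compute_api:os-attach-interfaces:list',
       'role:reader and project_id:%(project_id)s'))

   creds = {'roles': ['reader'], 'project_id': 'demo'}
   # A reader may list interfaces on servers in their own project ...
   print(enforcer.authorize('os_compute_api:os-attach-interfaces:list',
                            {'project_id': 'demo'}, creds))   # True
   # ... but not on servers owned by another project.
   print(enforcer.authorize('os_compute_api:os-attach-interfaces:list',
                            {'project_id': 'other'}, creds))  # False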
+""" attach_interfaces_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "List port interfaces or show details of a port interface attached " - "to a server", - [ + name=POLICY_ROOT % 'list', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List port interfaces attached to a server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-interface' }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INTERFACES_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=POLICY_ROOT % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Show details of a port interface attached to a server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-interface/{port_id}' } - ]), + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INTERFACES_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_OR_OWNER, - "Attach an interface to a server", - [ + name=POLICY_ROOT % 'create', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Attach an interface to a server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/os-interface' } - ]), + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INTERFACES_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_OR_OWNER, - "Detach an interface from a server", - [ + name=POLICY_ROOT % 'delete', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Detach an interface from a server", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-interface/{port_id}' } - ]) + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INTERFACES_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0') ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/availability_zone.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/availability_zone.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/availability_zone.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/availability_zone.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,29 @@ availability_zone_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'list', - base.RULE_ADMIN_OR_OWNER, - "List availability zone information without host information", - [ + name=POLICY_ROOT % 'list', + check_str=base.RULE_ANY, + description="List availability zone information without host " + "information", + operations=[ { 'method': 'GET', 'path': '/os-availability-zone' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'detail', - base.RULE_ADMIN_API, - "List detailed availability zone information with host information", - [ + name=POLICY_ROOT % 'detail', + check_str=base.SYSTEM_READER, + description="List detailed availability zone information with host " + "information", + operations=[ { 'method': 'GET', 'path': '/os-availability-zone/detail' } - ]) + ], + scope_types=['system']) ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/base.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/base.py 2020-02-10 08:50:32.000000000 +0000 +++ 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/base.py 2020-04-10 17:57:57.000000000 +0000 @@ -17,6 +17,22 @@ RULE_ANY = '@' # Any user is allowed to perform the action. RULE_NOBODY = '!' # No users are allowed to perform the action. +DEPRECATED_ADMIN_POLICY = policy.DeprecatedRule( + name=RULE_ADMIN_API, + check_str='is_admin:True', +) + +DEPRECATED_ADMIN_OR_OWNER_POLICY = policy.DeprecatedRule( + name=RULE_ADMIN_OR_OWNER, + check_str='is_admin:True or project_id:%(project_id)s', +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. +""" + # TODO(gmann): # Special string ``system_scope:all`` is added for system # scoped policies for backwards compatibility where ``nova.conf [oslo_policy] # enforce_scope = False``. @@ -71,19 +87,31 @@ policy.RuleDefault( "admin_or_owner", "is_admin:True or project_id:%(project_id)s", - "Default rule for most non-Admin APIs."), + "Default rule for most non-Admin APIs.", + deprecated_for_removal=True, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), policy.RuleDefault( "admin_api", "is_admin:True", - "Default rule for most Admin APIs."), - policy.RuleDefault( - "system_admin_api", - 'role:admin and system_scope:all', - "Default rule for System Admin APIs."), - policy.RuleDefault( - "system_reader_api", - "role:reader and system_scope:all", - "Default rule for System level read only APIs."), + "Default rule for most Admin APIs.", + deprecated_for_removal=True, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.RuleDefault( + name="system_admin_api", + check_str='role:admin and system_scope:all', + description="Default rule for System Admin APIs.", + deprecated_rule=DEPRECATED_ADMIN_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.RuleDefault( + name="system_reader_api", + check_str="role:reader and system_scope:all", + description="Default rule for System level read only APIs.", + deprecated_rule=DEPRECATED_ADMIN_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), policy.RuleDefault( "project_member_api", "role:member and project_id:%(project_id)s", @@ -93,13 +121,19 @@ "role:reader and project_id:%(project_id)s", "Default rule for Project level read only APIs."), policy.RuleDefault( - "system_admin_or_owner", - "rule:system_admin_api or rule:project_member_api", - "Default rule for System admin+owner APIs."), + name="system_admin_or_owner", + check_str="rule:system_admin_api or rule:project_member_api", + description="Default rule for System admin+owner APIs.", + deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), policy.RuleDefault( "system_or_project_reader", "rule:system_reader_api or rule:project_reader_api", - "Default rule for System+Project read only APIs.") + "Default rule for System+Project read only APIs.", + deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0') ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/console_auth_tokens.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/console_auth_tokens.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/console_auth_tokens.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/console_auth_tokens.py 2020-04-10 17:57:57.000000000 +0000 @@ 
-23,16 +23,17 @@ console_auth_tokens_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - "Show console connection information for a given console " + name=BASE_POLICY_NAME, + check_str=base.SYSTEM_READER, + description="Show console connection information for a given console " "authentication token", - [ + operations=[ { 'method': 'GET', 'path': '/os-console-auth-tokens/{console_token}' } - ]) + ], + scope_types=['system']) ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/console_output.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/console_output.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/console_output.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/console_output.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ console_output_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - 'Show console output for a server', - [ + name=BASE_POLICY_NAME, + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description='Show console output for a server', + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getConsoleOutput)' } - ]) + ], + scope_types=['system', 'project']) ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/create_backup.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/create_backup.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/create_backup.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/create_backup.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ create_backup_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - 'Create a back up of a server', - [ + name=BASE_POLICY_NAME, + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description='Create a backup of a server', + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (createBackup)' } - ]) + ], + scope_types=['system', 'project']) ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/deferred_delete.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/deferred_delete.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/deferred_delete.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/deferred_delete.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,25 +18,48 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-deferred-delete' +BASE_POLICY_NAME = 'os_compute_api:os-deferred-delete:%s' +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-deferred-delete', + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release.
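Each half of the deferred-delete split maps one action key in the ``POST /servers/{server_id}/action`` request body to one granular policy name. A hypothetical dispatch helper, purely to make that mapping explicit (this is not nova's actual code):

.. code:: python

   POLICY = 'os_compute_api:os-deferred-delete:%s'

   # Request-body action key -> granular policy enforced for it.
   ACTION_TO_POLICY = {
       'restore': POLICY % 'restore',
       'forceDelete': POLICY % 'force',
   }

   def policy_for(action_body):
       # e.g. {"restore": None} -> '...os-deferred-delete:restore'
       (action,) = action_body
       return ACTION_TO_POLICY[action]

   assert policy_for({'forceDelete': None}).endswith(':force')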
+""" deferred_delete_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Restore a soft deleted server or force delete a server before " - "deferred cleanup", - [ + name=BASE_POLICY_NAME % 'restore', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Restore a soft deleted server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (restore)' }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'force', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Force delete a server before deferred cleanup", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (forceDelete)' } - ]) + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0') ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/evacuate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/evacuate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/evacuate.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/evacuate.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ evacuate_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - "Evacuate a server from a failed host to a new host", - [ + name=BASE_POLICY_NAME, + check_str=base.RULE_ADMIN_API, + description="Evacuate a server from a failed host to a new host", + operations=[ { 'path': '/servers/{server_id}/action (evacuate)', 'method': 'POST' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/flavor_access.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/flavor_access.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/flavor_access.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/flavor_access.py 2020-04-10 17:57:57.000000000 +0000 @@ -22,42 +22,65 @@ BASE_POLICY_NAME = 'os_compute_api:os-flavor-access' POLICY_ROOT = 'os_compute_api:os-flavor-access:%s' +# NOTE(gmann): Deprecating this policy explicitly as old defaults +# admin or owner is not suitable for that which should be admin (Bug#1867840) +# but changing that will break old deployment so let's keep supporting +# the old default also and new default can be SYSTEM_READER +# SYSTEM_READER rule in base class is defined with the deprecated rule of admin +# not admin or owner which is the main reason that we need to explicitly +# deprecate this policy here. +DEPRECATED_FLAVOR_ACCESS_POLICY = policy.DeprecatedRule( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
+""" flavor_access_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'add_tenant_access', - base.RULE_ADMIN_API, - "Add flavor access to a tenant", - [ + name=POLICY_ROOT % 'add_tenant_access', + check_str=base.SYSTEM_ADMIN, + description="Add flavor access to a tenant", + operations=[ { 'method': 'POST', 'path': '/flavors/{flavor_id}/action (addTenantAccess)' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'remove_tenant_access', - base.RULE_ADMIN_API, - "Remove flavor access from a tenant", - [ + name=POLICY_ROOT % 'remove_tenant_access', + check_str=base.SYSTEM_ADMIN, + description="Remove flavor access from a tenant", + operations=[ { 'method': 'POST', 'path': '/flavors/{flavor_id}/action (removeTenantAccess)' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - """List flavor access information + name=BASE_POLICY_NAME, + check_str=base.SYSTEM_READER, + description="""List flavor access information Allows access to the full list of tenants that have access to a flavor via an os-flavor-access API. """, - [ + operations=[ { 'method': 'GET', 'path': '/flavors/{flavor_id}/os-flavor-access' }, - ]), + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_FLAVOR_ACCESS_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/flavor_manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/flavor_manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/flavor_manage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/flavor_manage.py 2020-04-10 17:57:57.000000000 +0000 @@ -24,35 +24,38 @@ flavor_manage_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_API, - "Create a flavor", - [ + name=POLICY_ROOT % 'create', + check_str=base.SYSTEM_ADMIN, + description="Create a flavor", + operations=[ { 'method': 'POST', 'path': '/flavors' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update', - base.RULE_ADMIN_API, - "Update a flavor", - [ + name=POLICY_ROOT % 'update', + check_str=base.SYSTEM_ADMIN, + description="Update a flavor", + operations=[ { 'method': 'PUT', 'path': '/flavors/{flavor_id}' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_API, - "Delete a flavor", - [ + name=POLICY_ROOT % 'delete', + check_str=base.SYSTEM_ADMIN, + description="Delete a flavor", + operations=[ { 'method': 'DELETE', 'path': '/flavors/{flavor_id}' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/hypervisors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/hypervisors.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/hypervisors.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/hypervisors.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,54 +18,121 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-hypervisors' +BASE_POLICY_NAME = 'os_compute_api:os-hypervisors:%s' +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-hypervisors', + base.RULE_ADMIN_API, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
+""" hypervisors_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - """Policy rule for hypervisor related APIs. - -This rule will be checked for the following APIs: - -List all hypervisors, list all hypervisors with details, show -summary statistics for all hypervisors over all compute nodes, -show details for a hypervisor, show the uptime of a hypervisor, -search hypervisor by hypervisor_hostname pattern and list all -servers on hypervisors that can match the provided -hypervisor_hostname pattern.""", - [ + name=BASE_POLICY_NAME % 'list', + check_str=base.SYSTEM_READER, + description="List all hypervisors.", + operations=[ { 'path': '/os-hypervisors', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'list-detail', + check_str=base.SYSTEM_READER, + description="List all hypervisors with details", + operations=[ { 'path': '/os-hypervisors/details', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'statistics', + check_str=base.SYSTEM_READER, + description="Show summary statistics for all hypervisors " + "over all compute nodes.", + operations=[ { 'path': '/os-hypervisors/statistics', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'show', + check_str=base.SYSTEM_READER, + description="Show details for a hypervisor.", + operations=[ { 'path': '/os-hypervisors/{hypervisor_id}', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'uptime', + check_str=base.SYSTEM_READER, + description="Show the uptime of a hypervisor.", + operations=[ { 'path': '/os-hypervisors/{hypervisor_id}/uptime', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'search', + check_str=base.SYSTEM_READER, + description="Search hypervisor by hypervisor_hostname pattern.", + operations=[ { 'path': '/os-hypervisors/{hypervisor_hostname_pattern}/search', 'method': 'GET' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'servers', + check_str=base.SYSTEM_READER, + description="List all servers on hypervisors that can match " + "the provided hypervisor_hostname pattern.", + operations=[ { 'path': '/os-hypervisors/{hypervisor_hostname_pattern}/servers', 'method': 'GET' } - ] + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0', ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/__init__.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/__init__.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/__init__.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/__init__.py 2020-04-10 
17:57:57.000000000 +0000 @@ -66,7 +66,6 @@ from nova.policies import simple_tenant_usage from nova.policies import suspend_server from nova.policies import tenant_networks -from nova.policies import used_limits from nova.policies import volumes from nova.policies import volumes_attachments @@ -126,7 +125,6 @@ simple_tenant_usage.list_rules(), suspend_server.list_rules(), tenant_networks.list_rules(), - used_limits.list_rules(), volumes.list_rules(), volumes_attachments.list_rules() ) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/instance_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/instance_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/instance_actions.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,43 +18,87 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-instance-actions' -POLICY_ROOT = 'os_compute_api:os-instance-actions:%s' +ROOT_POLICY = 'os_compute_api:os-instance-actions' +BASE_POLICY_NAME = 'os_compute_api:os-instance-actions:%s' +DEPRECATED_INSTANCE_ACTION_POLICY = policy.DeprecatedRule( + ROOT_POLICY, + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. +""" instance_actions_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'events', - base.RULE_ADMIN_API, - """Add events details in action details for a server. + name=BASE_POLICY_NAME % 'events:details', + check_str=base.SYSTEM_READER, + description="""Add "details" key in action events for a server. This check is performed only after the check -os_compute_api:os-instance-actions passes. Beginning with -Microversion 2.51, events details are always included; traceback -information is provided per event if policy enforcement passes. -Beginning with Microversion 2.62, each event includes a hashed -host identifier and, if policy enforcement passes, the name of -the host.""", - [ +os_compute_api:os-instance-actions:show passes. Beginning with Microversion +2.84, a new field 'details' is exposed via the API which can have more details +about the event failure. That field is controlled by this policy, which is +system reader by default. Making the 'details' field visible to the non-admin +user helps to understand the nature of the problem (i.e. if the action can be +retried), but on the other hand it might leak information about the deployment +(e.g. the type of the hypervisor). +""", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions/{request_id}' } - ]), + ], + scope_types=['system']), policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - """List actions and show action details for a server.""", - [ + name=BASE_POLICY_NAME % 'events', + check_str=base.SYSTEM_READER, + description="""Add events details in action details for a server. +This check is performed only after the check +os_compute_api:os-instance-actions:show passes. Beginning with Microversion +2.51, events details are always included; traceback information is provided +per event if policy enforcement passes.
Beginning with Microversion 2.62, +each event includes a hashed host identifier and, if policy enforcement +passes, the name of the host.""", + operations=[ + { + 'method': 'GET', + 'path': '/servers/{server_id}/os-instance-actions/{request_id}' + } + ], + scope_types=['system']), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'list', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="""List actions for a server.""", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions' - }, + } + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="""Show action details for a server.""", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions/{request_id}' } - ]), + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/instance_usage_audit_log.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/instance_usage_audit_log.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/instance_usage_audit_log.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/instance_usage_audit_log.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,26 +18,51 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-instance-usage-audit-log' +BASE_POLICY_NAME = 'os_compute_api:os-instance-usage-audit-log:%s' +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-instance-usage-audit-log', + base.RULE_ADMIN_API, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
+""" instance_usage_audit_log_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - "List all usage audits and that occurred before a specified time " - "for all servers on all compute hosts where usage auditing is " - "configured", - [ + name=BASE_POLICY_NAME % 'list', + check_str=base.SYSTEM_READER, + description="List all usage audits.", + operations=[ { 'method': 'GET', 'path': '/os-instance_usage_audit_log' }, + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'show', + check_str=base.SYSTEM_READER, + description="List all usage audits occurred before " + "a specified time for all servers on all compute hosts where " + "usage auditing is configured", + operations=[ + { 'method': 'GET', 'path': '/os-instance_usage_audit_log/{before_timestamp}' } - ]), + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/ips.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/ips.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/ips.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/ips.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,28 @@ ips_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Show IP addresses details for a network label of a server", - [ + name=POLICY_ROOT % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Show IP addresses details for a network label of a " + " server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/ips/{network_label}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_OR_OWNER, - "List IP addresses that are assigned to a server", - [ + name=POLICY_ROOT % 'index', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List IP addresses that are assigned to a server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/ips' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/limits.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/limits.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/limits.py 2020-04-10 17:57:57.000000000 +0000 @@ -19,19 +19,49 @@ BASE_POLICY_NAME = 'os_compute_api:limits' - +OTHER_PROJECT_LIMIT_POLICY_NAME = 'os_compute_api:limits:other_project' +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-used-limits', + base.RULE_ADMIN_API, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
+""" limits_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Show rate and absolute limits for the project", - [ + name=BASE_POLICY_NAME, + check_str=base.RULE_ANY, + description="Show rate and absolute limits for the current user " + "project", + operations=[ + { + 'method': 'GET', + 'path': '/limits' + } + ], + scope_types=['system', 'project']), + policy.DocumentedRuleDefault( + name=OTHER_PROJECT_LIMIT_POLICY_NAME, + check_str=base.SYSTEM_READER, + description="""Show rate and absolute limits of other project. + +This policy only checks if the user has access to the requested +project limits. And this check is performed only after the check +os_compute_api:limits passes""", + operations=[ { 'method': 'GET', 'path': '/limits' } - ]), + ], + scope_types=['system'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/lock_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/lock_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/lock_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/lock_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,40 +23,43 @@ lock_server_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'lock', - base.RULE_ADMIN_OR_OWNER, - "Lock a server", - [ + name=POLICY_ROOT % 'lock', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Lock a server", + operations=[ { 'path': '/servers/{server_id}/action (lock)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'unlock', - base.RULE_ADMIN_OR_OWNER, - "Unlock a server", - [ + name=POLICY_ROOT % 'unlock', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Unlock a server", + operations=[ { 'path': '/servers/{server_id}/action (unlock)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'unlock:unlock_override', - base.RULE_ADMIN_API, - """Unlock a server, regardless who locked the server. + name=POLICY_ROOT % 'unlock:unlock_override', + check_str=base.SYSTEM_ADMIN, + description="""Unlock a server, regardless who locked the server. 
This check is performed only after the check os_compute_api:os-lock-server:unlock passes""", - [ + operations=[ { 'path': '/servers/{server_id}/action (unlock)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/migrate_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/migrate_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/migrate_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/migrate_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,27 @@ migrate_server_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'migrate', - base.RULE_ADMIN_API, - "Cold migrate a server to a host", - [ + name=POLICY_ROOT % 'migrate', + check_str=base.SYSTEM_ADMIN, + description="Cold migrate a server to a host", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (migrate)' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'migrate_live', - base.RULE_ADMIN_API, - "Live migrate a server to a new host without a reboot", - [ + name=POLICY_ROOT % 'migrate_live', + check_str=base.SYSTEM_ADMIN, + description="Live migrate a server to a new host without a reboot", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-migrateLive)' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/migrations.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/migrations.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ migrations_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_API, - "List migrations", - [ + name=POLICY_ROOT % 'index', + check_str=base.SYSTEM_READER, + description="List migrations", + operations=[ { 'method': 'GET', 'path': '/os-migrations' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/pause_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/pause_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/pause_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/pause_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,26 +23,28 @@ pause_server_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'pause', - base.RULE_ADMIN_OR_OWNER, - "Pause a server", - [ + name=POLICY_ROOT % 'pause', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Pause a server", + operations=[ { 'path': '/servers/{server_id}/action (pause)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'unpause', - base.RULE_ADMIN_OR_OWNER, - "Unpause a paused server", - [ + name=POLICY_ROOT % 'unpause', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Unpause a paused server", + operations=[ { 'path': '/servers/{server_id}/action (unpause)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/remote_consoles.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/remote_consoles.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/remote_consoles.py 2020-02-10 
08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/remote_consoles.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,10 +23,18 @@ remote_consoles_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Generate a URL to access remove server console", - [ + name=BASE_POLICY_NAME, + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="""Generate a URL to access remote server console. + +This policy is for the ``POST /remote-consoles`` API; the server action APIs +below are deprecated: + +- ``os-getRDPConsole`` +- ``os-getSerialConsole`` +- ``os-getSPICEConsole`` +- ``os-getVNCConsole``.""", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getRDPConsole)' @@ -47,7 +55,8 @@ 'method': 'POST', 'path': '/servers/{server_id}/remote-consoles' }, - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/rescue.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/rescue.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/rescue.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/rescue.py 2020-04-10 17:57:57.000000000 +0000 @@ -19,23 +19,45 @@ BASE_POLICY_NAME = 'os_compute_api:os-rescue' +UNRESCUE_POLICY_NAME = 'os_compute_api:os-unrescue' + +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-rescue', + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Rescue/Unrescue API policies are made granular, with a new policy +for unrescue while keeping the old policy for rescue. +""" rescue_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Rescue/unrescue a server", - [ + name=BASE_POLICY_NAME, + check_str=base.RULE_ADMIN_OR_OWNER, + description="Rescue a server", + operations=[ { 'path': '/servers/{server_id}/action (rescue)', 'method': 'POST' }, + ], + scope_types=['system', 'project']), + policy.DocumentedRuleDefault( + name=UNRESCUE_POLICY_NAME, + check_str=base.RULE_ADMIN_OR_OWNER, + description="Unrescue a server", + operations=[ { 'path': '/servers/{server_id}/action (unrescue)', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0' ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/security_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/security_groups.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/security_groups.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/security_groups.py 2020-04-10 17:57:57.000000000 +0000 @@ -20,56 +20,93 @@ BASE_POLICY_NAME = 'os_compute_api:os-security-groups' +POLICY_NAME = 'os_compute_api:os-security-groups:%s' + +DEPRECATED_POLICY = policy.DeprecatedRule( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release.
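For the remote-consoles hunk above: the four console server actions are deprecated in favour of the single ``POST /servers/{server_id}/remote-consoles`` endpoint (microversion 2.6+), which carries the protocol and type in the body. A sketch of the two request shapes, with bodies written as Python dicts (values are illustrative):

.. code:: python

   # Deprecated action-style request (os-getVNCConsole shown).
   legacy_path = '/servers/{server_id}/action'
   legacy_body = {'os-getVNCConsole': {'type': 'novnc'}}

   # Current endpoint covered by the same policy rule.
   current_path = '/servers/{server_id}/remote-consoles'
   current_body = {'remote_console': {'protocol': 'vnc', 'type': 'novnc'}}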
APIs which are directly related to security groups resource are deprecated: Lists, shows information for, creates, updates and deletes security groups. Creates and deletes security group rules. All these -APIs are deprecated. - -APIs which are related to server resource are not deprecated: -Lists Security Groups for a server. Add Security Group to a server -and remove security group from a server.""", - [ - { - 'method': 'GET', - 'path': '/os-security-groups' - }, - { - 'method': 'GET', - 'path': '/os-security-groups/{security_group_id}' - }, - { - 'method': 'POST', - 'path': '/os-security-groups' - }, - { - 'method': 'PUT', - 'path': '/os-security-groups/{security_group_id}' - }, - { - 'method': 'DELETE', - 'path': '/os-security-groups/{security_group_id}' - }, - { - 'method': 'GET', - 'path': '/servers/{server_id}/os-security-groups' - }, - { - 'method': 'POST', - 'path': '/servers/{server_id}/action (addSecurityGroup)' - }, - { - 'method': 'POST', - 'path': '/servers/{server_id}/action (removeSecurityGroup)' - }, - ], - ), +APIs are deprecated.""", + operations=[ + { + 'method': 'GET', + 'path': '/os-security-groups' + }, + { + 'method': 'GET', + 'path': '/os-security-groups/{security_group_id}' + }, + { + 'method': 'POST', + 'path': '/os-security-groups' + }, + { + 'method': 'PUT', + 'path': '/os-security-groups/{security_group_id}' + }, + { + 'method': 'DELETE', + 'path': '/os-security-groups/{security_group_id}' + }, + ]), + policy.DocumentedRuleDefault( + name=POLICY_NAME % 'list', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List security groups of server.", + operations=[ + { + 'method': 'GET', + 'path': '/servers/{server_id}/os-security-groups' + }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=POLICY_NAME % 'add', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Add security groups to server.", + operations=[ + { + 'method': 'POST', + 'path': '/servers/{server_id}/action (addSecurityGroup)' + }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=POLICY_NAME % 'remove', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Remove security groups from server.", + operations=[ + { + 'method': 'POST', + 'path': '/servers/{server_id}/action (removeSecurityGroup)' + }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_diagnostics.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_diagnostics.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_diagnostics.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_diagnostics.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ server_diagnostics_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - "Show the usage data for a server", - [ + name=BASE_POLICY_NAME, + check_str=base.SYSTEM_ADMIN, + description="Show the usage data for a server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/diagnostics' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_external_events.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_external_events.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_external_events.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_external_events.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,15 +23,16 @@ server_external_events_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_API, - "Create one or more external events", - [ + name=POLICY_ROOT % 'create', + check_str=base.RULE_ADMIN_API, + description="Create one or more external events", + operations=[ { 'method': 'POST', 'path': '/os-server-external-events' } - ]), + ], + scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_groups.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_groups.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_groups.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,48 +23,52 @@ server_groups_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_OR_OWNER, - "Create a new server group", - [ + name=POLICY_ROOT % 'create', + check_str=base.RULE_ADMIN_OR_OWNER, + description="Create a new server group", + operations=[ { 'path': '/os-server-groups', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_OR_OWNER, - "Delete a server group", - [ + name=POLICY_ROOT % 'delete', + check_str=base.RULE_ADMIN_OR_OWNER, + description="Delete a server group", + operations=[ { 'path': '/os-server-groups/{server_group_id}', 'method': 'DELETE' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_OR_OWNER, - "List all server groups", - [ + name=POLICY_ROOT % 'index', + check_str=base.RULE_ADMIN_OR_OWNER, + description="List all server groups", + operations=[ { 'path': '/os-server-groups', 'method': 'GET' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Show details of a server group", - [ + name=POLICY_ROOT % 'show', + check_str=base.RULE_ADMIN_OR_OWNER, + description="Show details of a server group", + operations=[ { 'path': '/os-server-groups/{server_group_id}', 'method': 'GET' } - ] + ], + scope_types=['system', 'project'] ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_metadata.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_metadata.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_metadata.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,70 +23,76 @@ server_metadata_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_OR_OWNER, - "List all metadata of a server", - [ + name=POLICY_ROOT % 'index', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List all metadata of a server", + operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'GET' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Show metadata for a server", - 
[ + name=POLICY_ROOT % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Show metadata for a server", + operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'GET' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_OR_OWNER, - "Create metadata for a server", - [ + name=POLICY_ROOT % 'create', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Create metadata for a server", + operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'POST' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update_all', - base.RULE_ADMIN_OR_OWNER, - "Replace metadata for a server", - [ + name=POLICY_ROOT % 'update_all', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Replace metadata for a server", + operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'PUT' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update', - base.RULE_ADMIN_OR_OWNER, - "Update metadata from a server", - [ + name=POLICY_ROOT % 'update', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Update metadata from a server", + operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'PUT' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_OR_OWNER, - "Delete metadata from a server", - [ + name=POLICY_ROOT % 'delete', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Delete metadata from a server", + operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'DELETE' } - ] + ], + scope_types=['system', 'project'] ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_password.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_password.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_password.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,24 +18,51 @@ from nova.policies import base -BASE_POLICY_NAME = 'os_compute_api:os-server-password' +BASE_POLICY_NAME = 'os_compute_api:os-server-password:%s' + +DEPRECATED_POLICY = policy.DeprecatedRule( + 'os_compute_api:os-server-password', + base.RULE_ADMIN_OR_OWNER, +) + +DEPRECATED_REASON = """ +Nova API policies are introducing new default roles with scope_type +capabilities. Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. 
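The server-password rules that follow split the old blanket policy into granular ``show`` and ``clear`` rules, each carrying a ``deprecated_rule`` fallback. A minimal sketch (mine, not part of the patch; the check string and credentials are illustrative stand-ins) of how oslo.policy evaluates such a rule:

.. code:: python

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.DocumentedRuleDefault(
        name='os_compute_api:os-server-password:show',
        check_str='role:reader',  # stand-in for the real scoped default
        description='Show the encrypted administrative password of a server',
        operations=[{'method': 'GET',
                     'path': '/servers/{server_id}/os-server-password'}],
        deprecated_rule=policy.DeprecatedRule(
            'os_compute_api:os-server-password', 'rule:admin_or_owner'),
        deprecated_reason='policy split into show/clear',
        deprecated_since='21.0.0'))

    # Until operators opt in to the new defaults only, a request passing
    # either the new check string or the old deprecated rule is allowed.
    allowed = enforcer.enforce('os_compute_api:os-server-password:show',
                               {'project_id': 'demo'},
                               {'roles': ['reader'], 'project_id': 'demo'})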
+""" server_password_policies = [ policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_OR_OWNER, - "Show and clear the encrypted administrative password of a server", - [ + name=BASE_POLICY_NAME % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Show the encrypted administrative " + "password of a server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-server-password' }, + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'clear', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Clear the encrypted administrative " + "password of a server", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-server-password' } - ]), + ], + scope_types=['system', 'project'], + deprecated_rule=DEPRECATED_POLICY, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/servers_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/servers_migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/servers_migrations.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/servers_migrations.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,46 +23,52 @@ servers_migrations_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_API, - "Show details for an in-progress live migration for a given server", - [ + name=POLICY_ROOT % 'show', + check_str=base.SYSTEM_READER, + description="Show details for an in-progress live migration for a " + "given server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/migrations/{migration_id}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'force_complete', - base.RULE_ADMIN_API, - "Force an in-progress live migration for a given server to complete", - [ + name=POLICY_ROOT % 'force_complete', + check_str=base.SYSTEM_ADMIN, + description="Force an in-progress live migration for a given server " + "to complete", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/migrations/{migration_id}' '/action (force_complete)' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_API, - "Delete(Abort) an in-progress live migration", - [ + name=POLICY_ROOT % 'delete', + check_str=base.SYSTEM_ADMIN, + description="Delete(Abort) an in-progress live migration", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/migrations/{migration_id}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_API, - "Lists in-progress live migrations for a given server", - [ + name=POLICY_ROOT % 'index', + check_str=base.SYSTEM_READER, + description="Lists in-progress live migrations for a given server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/migrations' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_tags.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_tags.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/server_tags.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/server_tags.py 2020-04-10 
17:57:57.000000000 +0000 @@ -23,68 +23,76 @@ server_tags_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete_all', - base.RULE_ADMIN_OR_OWNER, - "Delete all the server tags", - [ + name=POLICY_ROOT % 'delete_all', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Delete all the server tags", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/tags' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_OR_OWNER, - "List all tags for given server", - [ + name=POLICY_ROOT % 'index', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List all tags for given server", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/tags' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update_all', - base.RULE_ADMIN_OR_OWNER, - "Replace all tags on specified server with the new set of tags.", - [ + name=POLICY_ROOT % 'update_all', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Replace all tags on specified server with the new set " + "of tags.", + operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/tags' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_OR_OWNER, - "Delete a single tag from the specified server", - [ + name=POLICY_ROOT % 'delete', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Delete a single tag from the specified server", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/tags/{tag}' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update', - base.RULE_ADMIN_OR_OWNER, - "Add a single tag to the server if server has no specified tag", - [ + name=POLICY_ROOT % 'update', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Add a single tag to the server if server has no " + "specified tag", + operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/tags/{tag}' } - ] + ], + scope_types=['system', 'project'] ), policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Check tag existence on the server.", - [ + name=POLICY_ROOT % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Check tag existence on the server.", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/tags/{tag}' } - ] + ], + scope_types=['system', 'project'] ), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/services.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/services.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/services.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/services.py 2020-04-10 17:57:57.000000000 +0000 @@ -25,14 +25,9 @@ ) DEPRECATED_REASON = """ -Since Ussuri release, nova API policies are introducing new default roles -with scope_type capabilities. These new changes improve the security level -and manageability. New policies are more rich in term of handling access -at system and project level token with read, write roles. -Start using the new policies and enable the scope checks via config option -``nova.conf [oslo_policy] enforce_scope=True`` which is False by default. -Old policies are marked as deprecated and silently going to be ignored -in nova 23.0.0 (OpenStack W) release +Nova API policies are introducing new default roles with scope_type +capabilities. 
Old policies are deprecated and silently going to be ignored +in nova 23.0.0 release. """ services_policies = [ @@ -49,7 +44,7 @@ scope_types=['system'], deprecated_rule=DEPRECATED_SERVICE_POLICY, deprecated_reason=DEPRECATED_REASON, - deprecated_since='20.0.0'), + deprecated_since='21.0.0'), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.SYSTEM_ADMIN, @@ -64,7 +59,7 @@ scope_types=['system'], deprecated_rule=DEPRECATED_SERVICE_POLICY, deprecated_reason=DEPRECATED_REASON, - deprecated_since='20.0.0'), + deprecated_since='21.0.0'), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.SYSTEM_ADMIN, @@ -78,7 +73,7 @@ scope_types=['system'], deprecated_rule=DEPRECATED_SERVICE_POLICY, deprecated_reason=DEPRECATED_REASON, - deprecated_since='20.0.0'), + deprecated_since='21.0.0'), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/shelve.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/shelve.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/shelve.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/shelve.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,35 +23,38 @@ shelve_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'shelve', - base.RULE_ADMIN_OR_OWNER, - "Shelve server", - [ + name=POLICY_ROOT % 'shelve', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Shelve server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (shelve)' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'unshelve', - base.RULE_ADMIN_OR_OWNER, - "Unshelve (restore) shelved server", - [ + name=POLICY_ROOT % 'unshelve', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Unshelve (restore) shelved server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (unshelve)' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'shelve_offload', - base.RULE_ADMIN_API, - "Shelf-offload (remove) server", - [ + name=POLICY_ROOT % 'shelve_offload', + check_str=base.SYSTEM_ADMIN, + description="Shelf-offload (remove) server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (shelveOffload)' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/simple_tenant_usage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/simple_tenant_usage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/simple_tenant_usage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/simple_tenant_usage.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,27 @@ simple_tenant_usage_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Show usage statistics for a specific tenant", - [ + name=POLICY_ROOT % 'show', + check_str=base.RULE_ADMIN_OR_OWNER, + description="Show usage statistics for a specific tenant", + operations=[ { 'method': 'GET', 'path': '/os-simple-tenant-usage/{tenant_id}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'list', - base.RULE_ADMIN_API, - "List per tenant usage statistics for all tenants", - [ + name=POLICY_ROOT % 'list', + check_str=base.RULE_ADMIN_API, + description="List per tenant usage statistics for all tenants", + operations=[ { 'method': 'GET', 'path': '/os-simple-tenant-usage' } - ]), + ], 
+ scope_types=['system']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/suspend_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/suspend_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/suspend_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/suspend_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,25 +23,27 @@ suspend_server_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'resume', - base.RULE_ADMIN_OR_OWNER, - "Resume suspended server", - [ + name=POLICY_ROOT % 'resume', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Resume suspended server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (resume)' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'suspend', - base.RULE_ADMIN_OR_OWNER, - "Suspend server", - [ + name=POLICY_ROOT % 'suspend', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Suspend server", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (suspend)' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/used_limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/used_limits.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/used_limits.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/used_limits.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_policy import policy - -from nova.policies import base - - -BASE_POLICY_NAME = 'os_compute_api:os-used-limits' - - -used_limits_policies = [ - # TODO(aunnam): Remove this rule after we separate the scope check from - # policies, as this is only checking the scope. - policy.DocumentedRuleDefault( - BASE_POLICY_NAME, - base.RULE_ADMIN_API, - """Show rate and absolute limits for the project. - -This policy only checks if the user has access to the requested -project limits. 
And this check is performed only after the check -os_compute_api:limits passes""", - [ - { - 'method': 'GET', - 'path': '/limits' - } - ]), -] - - -def list_rules(): - return used_limits_policies diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/volumes_attachments.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/volumes_attachments.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policies/volumes_attachments.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policies/volumes_attachments.py 2020-04-10 17:57:57.000000000 +0000 @@ -23,57 +23,78 @@ volumes_attachments_policies = [ policy.DocumentedRuleDefault( - POLICY_ROOT % 'index', - base.RULE_ADMIN_OR_OWNER, - "List volume attachments for an instance", - [ + name=POLICY_ROOT % 'index', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="List volume attachments for an instance", + operations=[ {'method': 'GET', 'path': '/servers/{server_id}/os-volume_attachments' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'create', - base.RULE_ADMIN_OR_OWNER, - "Attach a volume to an instance", - [ + name=POLICY_ROOT % 'create', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Attach a volume to an instance", + operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/os-volume_attachments' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'show', - base.RULE_ADMIN_OR_OWNER, - "Show details of a volume attachment", - [ + name=POLICY_ROOT % 'show', + check_str=base.PROJECT_READER_OR_SYSTEM_READER, + description="Show details of a volume attachment", + operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'update', - base.RULE_ADMIN_API, - "Update a volume attachment", - [ + name=POLICY_ROOT % 'update', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="""Update a volume attachment. +New 'update' policy about 'swap + update' request (which is possible +only >2.85) only is checked. We expect to be +always superset of this policy permission. 
+""", + operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } - ]), + ], + scope_types=['system', 'project']), policy.DocumentedRuleDefault( - POLICY_ROOT % 'delete', - base.RULE_ADMIN_OR_OWNER, - "Detach a volume from an instance", - [ + name=POLICY_ROOT % 'swap', + check_str=base.SYSTEM_ADMIN, + description="Update a volume attachment with a different volumeId", + operations=[ + { + 'method': 'PUT', + 'path': + '/servers/{server_id}/os-volume_attachments/{volume_id}' + } + ], + scope_types=['system']), + policy.DocumentedRuleDefault( + name=POLICY_ROOT % 'delete', + check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + description="Detach a volume from an instance", + operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } - ]), + ], + scope_types=['system', 'project']), ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/policy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/policy.py 2020-04-10 17:57:57.000000000 +0000 @@ -49,8 +49,10 @@ _ENFORCER = None +# TODO(gmann): Make suppress_deprecation_warnings default to False, once +# we find the way to disable warning for default change on oslo side. def init(policy_file=None, rules=None, default_rule=None, use_conf=True, - suppress_deprecation_warnings=False): + suppress_deprecation_warnings=True): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/privsep/qemu.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/privsep/qemu.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/privsep/qemu.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/privsep/qemu.py 2020-04-10 17:57:57.000000000 +0000 @@ -16,14 +16,22 @@ Helpers for qemu tasks. """ +import os + from oslo_concurrency import processutils from oslo_log import log as logging +from oslo_utils import units +from nova import exception +from nova.i18n import _ import nova.privsep.utils - LOG = logging.getLogger(__name__) +QEMU_IMG_LIMITS = processutils.ProcessLimits( + cpu_time=30, + address_space=1 * units.Gi) + @nova.privsep.sys_admin_pctxt.entrypoint def convert_image(source, dest, in_format, out_format, instances_path, @@ -71,3 +79,54 @@ cmd = cmd + (source, dest) processutils.execute(*cmd) + + +@nova.privsep.sys_admin_pctxt.entrypoint +def privileged_qemu_img_info(path, format=None, output_format=None): + """Return an oject containing the parsed output from qemu-img info + + This is a privileged call to qemu-img info using the sys_admin_pctxt + entrypoint allowing host block devices etc to be accessed. 
+ """ + return unprivileged_qemu_img_info( + path, format=format, output_format=output_format) + + +def unprivileged_qemu_img_info(path, format=None, output_format=None): + """Return an object containing the parsed output from qemu-img info.""" + try: + # The following check is about ploop images that reside within + # directories and always have DiskDescriptor.xml file beside them + if (os.path.isdir(path) and + os.path.exists(os.path.join(path, "DiskDescriptor.xml"))): + path = os.path.join(path, "root.hds") + + cmd = ( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', + ) + if format is not None: + cmd = cmd + ('-f', format) + if output_format is not None: + cmd = cmd + ("--output=%s" % (output_format),) + out, err = processutils.execute(*cmd, prlimit=QEMU_IMG_LIMITS) + except processutils.ProcessExecutionError as exp: + if exp.exit_code == -9: + # this means we hit prlimits, make the exception more specific + msg = (_("qemu-img aborted by prlimits when inspecting " + "%(path)s : %(exp)s") % {'path': path, 'exp': exp}) + elif exp.exit_code == 1 and 'No such file or directory' in exp.stderr: + # The os.path.exists check above can race so this is a simple + # best effort at catching that type of failure and raising a more + # specific error. + raise exception.DiskNotFound(location=path) + else: + msg = (_("qemu-img failed to execute on %(path)s : %(exp)s") % + {'path': path, 'exp': exp}) + raise exception.InvalidDiskInfo(reason=msg) + + if not out: + msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") % + {'path': path, 'error': err}) + raise exception.InvalidDiskInfo(reason=msg) + return out diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/host_manager.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/host_manager.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/host_manager.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/host_manager.py 2020-04-10 17:57:57.000000000 +0000 @@ -411,7 +411,7 @@ """ def _async_init_instance_info(computes_by_cell): - context = context_module.RequestContext() + context = context_module.get_admin_context() LOG.debug("START:_async_init_instance_info") self._instance_info = {} @@ -442,7 +442,7 @@ "deleted": False} with context_module.target_cell(context, cell) as cctxt: result = objects.InstanceList.get_by_filters( - cctxt.elevated(), filters) + cctxt, filters) instances = result.objects LOG.debug("Adding %s instances for hosts %s-%s", len(instances), start_node, end_node) @@ -718,7 +718,7 @@ def refresh_cells_caches(self): # NOTE(tssurya): This function is called from the scheduler manager's # reset signal handler and also upon startup of the scheduler. - context = context_module.RequestContext() + context = context_module.get_admin_context() temp_cells = objects.CellMappingList.get_all(context) # NOTE(tssurya): filtering cell0 from the list since it need # not be considered for scheduling. 
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/manager.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/manager.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/manager.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/manager.py 2020-04-10 17:57:57.000000000 +0000 @@ -55,16 +55,17 @@ _sentinel = object() - def __init__(self, scheduler_driver=None, *args, **kwargs): + def __init__(self, *args, **kwargs): self.placement_client = report.SchedulerReportClient() - if not scheduler_driver: - scheduler_driver = CONF.scheduler.driver self.driver = driver.DriverManager( - "nova.scheduler.driver", - scheduler_driver, - invoke_on_load=True).driver - super(SchedulerManager, self).__init__(service_name='scheduler', - *args, **kwargs) + 'nova.scheduler.driver', + CONF.scheduler.driver, + invoke_on_load=True + ).driver + + super(SchedulerManager, self).__init__( + service_name='scheduler', *args, **kwargs + ) @periodic_task.periodic_task( spacing=CONF.scheduler.discover_hosts_in_cells_interval, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/request_filter.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/request_filter.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/request_filter.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/request_filter.py 2020-04-10 17:57:57.000000000 +0000 @@ -194,6 +194,47 @@ @trace_request_filter +def transform_image_metadata(ctxt, request_spec): + """Transform image metadata to required traits. + + This will modify the request_spec to request hosts that support + virtualisation capabilities based on the image metadata properties. + """ + if not CONF.scheduler.image_metadata_prefilter: + return False + + prefix_map = { + 'hw_cdrom_bus': 'COMPUTE_STORAGE_BUS', + 'hw_disk_bus': 'COMPUTE_STORAGE_BUS', + 'hw_video_model': 'COMPUTE_GRAPHICS_MODEL', + 'hw_vif_model': 'COMPUTE_NET_VIF_MODEL', + } + + trait_names = [] + + for key, prefix in prefix_map.items(): + if key in request_spec.image.properties: + value = request_spec.image.properties.get(key).replace( + '-', '_').upper() + trait_name = f'{prefix}_{value}' + if not hasattr(os_traits, trait_name): + LOG.error(('Computed trait name %r is not valid; ' + 'is os-traits up to date?'), trait_name) + return False + + trait_names.append(trait_name) + + for trait_name in trait_names: + LOG.debug( + 'transform_image_metadata request filter added required ' + 'trait %s', trait_name + ) + request_spec.root_required.add(trait_name) + + return True + + +@trace_request_filter def compute_status_filter(ctxt, request_spec): """Pre-filter compute node resource providers using COMPUTE_STATUS_DISABLED @@ -209,12 +250,30 @@ return True +@trace_request_filter +def accelerators_filter(ctxt, request_spec): + """Allow only compute nodes with accelerator support. + + This filter retains only nodes whose compute manager published the + COMPUTE_ACCELERATORS trait, thus indicating the version of n-cpu is + sufficient to handle accelerator requests. 
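Both prefilters added in this hunk work the same way: they translate a property of the request into a required placement trait on ``request_spec.root_required``. For example (values illustrative), an image with ``hw_disk_bus=virtio`` becomes a requirement for the ``COMPUTE_STORAGE_BUS_VIRTIO`` trait, and a flavor carrying ``accel:device_profile`` adds ``COMPUTE_ACCELERATORS``:

.. code:: python

    # Illustrative only: mirrors the trait-name derivation in
    # transform_image_metadata above.
    prefix_map = {'hw_disk_bus': 'COMPUTE_STORAGE_BUS'}
    image_props = {'hw_disk_bus': 'virtio'}  # hypothetical image metadata

    value = image_props['hw_disk_bus'].replace('-', '_').upper()
    trait_name = '%s_%s' % (prefix_map['hw_disk_bus'], value)
    assert trait_name == 'COMPUTE_STORAGE_BUS_VIRTIO'
    # The filter then calls: request_spec.root_required.add(trait_name)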
+ """ + trait_name = os_traits.COMPUTE_ACCELERATORS + if request_spec.flavor.extra_specs.get('accel:device_profile'): + request_spec.root_required.add(trait_name) + LOG.debug('accelerators_filter request filter added required ' + 'trait %s', trait_name) + return True + + ALL_REQUEST_FILTERS = [ require_tenant_aggregate, map_az_to_placement_aggregate, require_image_type_support, compute_status_filter, isolate_aggregates, + transform_image_metadata, + accelerators_filter, ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/scheduler/utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/scheduler/utils.py 2020-04-10 17:57:57.000000000 +0000 @@ -73,7 +73,7 @@ ...where ``$S`` is a string suffix as supported via Placement microversion 1.33 - https://docs.openstack.org/placement/train/specs/train/implemented/2005575-nested-magic-1.html#arbitrary-group-suffixes # noqa + https://docs.openstack.org/placement/train/specs/train/implemented/2005575-nested-magic-1.html#arbitrary-group-suffixes .. note:: @@ -82,7 +82,7 @@ The string suffix is used as the RequestGroup.requester_id to facilitate mapping of requests to allocation candidates using the ``mappings`` piece of the response added in Placement microversion 1.34 - https://docs.openstack.org/placement/train/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html # noqa + https://docs.openstack.org/placement/train/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html For image metadata, traits are extracted from the ``traits_required`` property, if present. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/service.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/service.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/service.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/service.py 2020-04-10 17:57:57.000000000 +0000 @@ -149,6 +149,20 @@ This includes starting an RPC service, initializing periodic tasks, etc. """ + # NOTE(melwitt): Clear the cell cache holding database transaction + # context manager objects. We do this to ensure we create new internal + # oslo.db locks to avoid a situation where a child process receives an + # already locked oslo.db lock when it is forked. When a child process + # inherits a locked oslo.db lock, database accesses through that + # transaction context manager will never be able to acquire the lock + # and requests will fail with CellTimeout errors. + # See https://bugs.python.org/issue6721 for more information. + # With python 3.7, it would be possible for oslo.db to make use of the + # os.register_at_fork() method to reinitialize its lock. Until we + # require python 3.7 as a mininum version, we must handle the situation + # outside of oslo.db. + context.CELL_CACHE = {} + assert_eventlet_uses_monotonic_clock() verstr = version.version_string_with_package() @@ -304,6 +318,8 @@ def reset(self): """reset the service.""" self.manager.reset() + # Reset the cell cache that holds database transaction context managers + context.CELL_CACHE = {} class WSGIService(service.Service): @@ -358,13 +374,18 @@ setup_profiler(name, self.host) def reset(self): - """Reset server greenpool size to default and service version cache. 
+ """Reset the following: + + * server greenpool size to default + * service version cache + * cell cache holding database transaction context managers :returns: None """ self.server.reset() service_obj.Service.clear_min_version_cache() + context.CELL_CACHE = {} def _get_manager(self): """Initialize a Manager object appropriate for this service. @@ -392,6 +413,20 @@ :returns: None """ + # NOTE(melwitt): Clear the cell cache holding database transaction + # context manager objects. We do this to ensure we create new internal + # oslo.db locks to avoid a situation where a child process receives an + # already locked oslo.db lock when it is forked. When a child process + # inherits a locked oslo.db lock, database accesses through that + # transaction context manager will never be able to acquire the lock + # and requests will fail with CellTimeout errors. + # See https://bugs.python.org/issue6721 for more information. + # With python 3.7, it would be possible for oslo.db to make use of the + # os.register_at_fork() method to reinitialize its lock. Until we + # require python 3.7 as a mininum version, we must handle the situation + # outside of oslo.db. + context.CELL_CACHE = {} + ctxt = context.get_admin_context() service_ref = objects.Service.get_by_host_and_binary(ctxt, self.host, self.binary) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/test.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/test.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/test.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/test.py 2020-04-10 17:57:57.000000000 +0000 @@ -76,6 +76,7 @@ CONF.set_override('use_stderr', False) logging.setup(CONF, 'nova') cache.configure(CONF) +LOG = logging.getLogger(__name__) _TRUE_VALUES = ('True', 'true', '1', 'yes') CELL1_NAME = 'cell1' @@ -96,6 +97,39 @@ mock_fixture.patch_mock_module() +def _poison_unfair_compute_resource_semaphore_locking(): + """Ensure that every locking on COMPUTE_RESOURCE_SEMAPHORE is called with + fair=True. + """ + orig_synchronized = utils.synchronized + + def poisoned_synchronized(*args, **kwargs): + # Only check fairness if the decorator is used with + # COMPUTE_RESOURCE_SEMAPHORE. But the name of the semaphore can be + # passed as args or as kwargs. + # Note that we cannot import COMPUTE_RESOURCE_SEMAPHORE as that would + # apply the decorators we want to poison here. + if len(args) >= 1: + name = args[0] + else: + name = kwargs.get("name") + if name == "compute_resources" and not kwargs.get("fair", False): + raise AssertionError( + 'Locking on COMPUTE_RESOURCE_SEMAPHORE should always be fair. 
' 'See bug 1864122.') + # go and act like the original decorator + return orig_synchronized(*args, **kwargs) + + # replace the synchronized decorator factory with our own that checks the + # params passed in + utils.synchronized = poisoned_synchronized + + +# NOTE(gibi): This poisoning needs to be done at import time as decorators are +# applied at import time on the ResourceTracker +_poison_unfair_compute_resource_semaphore_locking() + + class NovaExceptionReraiseFormatError(object): real_log_exception = exception.NovaException._log_exception @@ -200,6 +234,7 @@ context.CELL_CACHE = {} context.CELLS = [] + self.computes = {} self.cell_mappings = {} self.host_mappings = {} # NOTE(danms): If the test claims to want to set up the database @@ -343,7 +378,7 @@ for k, v in kw.items(): CONF.set_override(k, v, group) - def start_service(self, name, host=None, **kwargs): + def start_service(self, name, host=None, cell_name=None, **kwargs): # Disallow starting multiple scheduler services if name == 'scheduler' and self._service_fixture_count[name]: raise TestingException("Duplicate start_service(%s)!" % name) @@ -360,7 +395,7 @@ # otherwise we'll fail to update the scheduler while running # the compute node startup routines below. ctxt = context.get_context() - cell_name = kwargs.pop('cell', CELL1_NAME) or CELL1_NAME + cell_name = cell_name or CELL1_NAME cell = self.cell_mappings[cell_name] if (host or name) not in self.host_mappings: # NOTE(gibi): If the HostMapping does not exist then this is @@ -388,6 +423,36 @@ return svc.service + def _start_compute(self, host, cell_name=None): + """Start a nova compute service on the given host + + :param host: the name of the host that will be associated with the + compute service. + :param cell_name: optional name of the cell in which to start the + compute service + :return: the nova compute service object + """ + compute = self.start_service('compute', host=host, cell_name=cell_name) + self.computes[host] = compute + return compute + + def _run_periodics(self): + """Run the update_available_resource task on every compute manager + + This runs periodics on the computes in an undefined order; some child + classes redefine this function to force a specific order. + """ + + ctx = context.get_admin_context() + for host, compute in self.computes.items(): + LOG.info('Running periodic for compute (%s)', host) + # Make sure the context is targeted to the proper cell database + # for multi-cell tests.
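For context, a call site that satisfies the fairness check poisoned in ``nova/test.py`` above looks roughly like this (a sketch, assuming ``nova.utils.synchronized`` delegates to oslo.concurrency's ``lockutils``, which accepts ``fair=True``):

.. code:: python

    from nova import utils

    @utils.synchronized('compute_resources', fair=True)
    def _locked_update(*args, **kwargs):
        # Resource-tracker style critical section; fair=True hands the
        # lock to waiters in FIFO order instead of letting greenthreads
        # barge in ahead of earlier waiters.
        pass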
+ with context.target_cell( + ctx, self.host_mappings[host].cell_mapping) as cctxt: + compute.manager.update_available_resource(cctxt) + LOG.info('Finished with periodics') + def restart_compute_service(self, compute, keep_hypervisor_state=True): """Stops the service and starts a new one to have realistic restart @@ -431,10 +496,10 @@ 'nova.virt.driver.load_compute_driver') as load_driver: load_driver.return_value = old_driver new_compute = self.start_service( - 'compute', host=compute.host, cell=cell_name) + 'compute', host=compute.host, cell_name=cell_name) else: new_compute = self.start_service( - 'compute', host=compute.host, cell=cell_name) + 'compute', host=compute.host, cell_name=cell_name) return new_compute diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/fixtures.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/fixtures.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/fixtures.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/fixtures.py 2020-04-10 17:57:57.000000000 +0000 @@ -874,7 +874,7 @@ def __init__(self, api_version='v2', project_id='6f70656e737461636b20342065766572', - use_project_id_in_urls=False): + use_project_id_in_urls=False, stub_keystone=True): """Constructor :param api_version: the API version that we're interested in @@ -883,11 +883,14 @@ :param project_id: the project id to use on the API. :param use_project_id_in_urls: If True, act like the "endpoint" in the "service catalog" has the legacy format including the project_id. + :param stub_keystone: If True, stub keystonemiddleware and + NovaKeystoneContext to simulate (but not perform) real auth. """ super(OSAPIFixture, self).__init__() self.api_version = api_version self.project_id = project_id self.use_project_id_in_urls = use_project_id_in_urls + self.stub_keystone = stub_keystone def setUp(self): super(OSAPIFixture, self).setUp() @@ -903,22 +906,8 @@ } self.useFixture(ConfPatcher(**conf_overrides)) - # Stub out authentication middleware - # TODO(efried): Use keystonemiddleware.fixtures.AuthTokenFixture - self.useFixture(fixtures.MockPatch( - 'keystonemiddleware.auth_token.filter_factory', - return_value=lambda _app: _app)) - - # Stub out context middleware - def fake_ctx(env, **kwargs): - user_id = env['HTTP_X_AUTH_USER'] - project_id = env['HTTP_X_AUTH_PROJECT_ID'] - is_admin = user_id == 'admin' - return context.RequestContext( - user_id, project_id, is_admin=is_admin, **kwargs) - - self.useFixture(fixtures.MonkeyPatch( - 'nova.api.auth.NovaKeystoneContext._create_context', fake_ctx)) + if self.stub_keystone: + self._stub_keystone() # Turn off manipulation of socket_options in TCPKeepAliveAdapter # to keep wsgi-intercept happy. Replace it with the method @@ -950,6 +939,24 @@ # the fixture. self.app = app + def _stub_keystone(self): + # Stub out authentication middleware + # TODO(efried): Use keystonemiddleware.fixtures.AuthTokenFixture + self.useFixture(fixtures.MockPatch( + 'keystonemiddleware.auth_token.filter_factory', + return_value=lambda _app: _app)) + + # Stub out context middleware + def fake_ctx(env, **kwargs): + user_id = env['HTTP_X_AUTH_USER'] + project_id = env['HTTP_X_AUTH_PROJECT_ID'] + is_admin = user_id == 'admin' + return context.RequestContext( + user_id, project_id, is_admin=is_admin, **kwargs) + + self.useFixture(fixtures.MonkeyPatch( + 'nova.api.auth.NovaKeystoneContext._create_context', fake_ctx)) + class OSMetadataServer(fixtures.Fixture): """Create an OS Metadata API server as a fixture. 
@@ -2401,7 +2408,7 @@ def fake_get_availability_zones( ctxt, hostapi, get_only_available=False, - with_hosts=False, enabled_services=None): + with_hosts=False, services=None): # A 2-item tuple is returned if get_only_available=False. if not get_only_available: return self.zones, [] @@ -2510,3 +2517,160 @@ self.useFixture(ConfPatcher( weight_classes=[__name__ + '.HostNameWeigher'], group='filter_scheduler')) + + +def _get_device_profile(dp_name, trait): + dp = [ + {'name': dp_name, + 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', + 'groups': [{ + 'resources:FPGA': '1', + 'trait:' + trait: 'required', + }], + # Skipping links key in Cyborg API return value + } + ] + return dp + + +def get_arqs(dp_name): + arq = { + 'uuid': 'b59d34d3-787b-4fb0-a6b9-019cd81172f8', + 'device_profile_name': dp_name, + 'device_profile_group_id': 0, + 'state': 'Initial', + 'device_rp_uuid': None, + 'hostname': None, + 'instance_uuid': None, + 'attach_handle_info': {}, + 'attach_handle_type': '', + } + bound_arq = copy.deepcopy(arq) + bound_arq.update( + {'state': 'Bound', + 'attach_handle_type': 'TEST_PCI', + 'attach_handle_info': { + 'bus': '0c', + 'device': '0', + 'domain': '0000', + 'function': '0' + }, + }) + return [arq], [bound_arq] + + +class CyborgFixture(fixtures.Fixture): + """Fixture that mocks Cyborg APIs used by nova/accelerator/cyborg.py""" + + dp_name = 'fakedev-dp' + trait = 'CUSTOM_FAKE_DEVICE' + arq_list, bound_arq_list = get_arqs(dp_name) + + # NOTE(Sundar): The bindings passed to the fake_bind_arqs() from the + # conductor are indexed by ARQ UUID and include the host name, device + # RP UUID and instance UUID. (See params to fake_bind_arqs below.) + # + # Later, when the compute manager calls fake_get_arqs_for_instance() with + # the instance UUID, the returned ARQs must contain the host name and + # device RP UUID. But these can vary from test to test. + # + # So, fake_bind_arqs() below takes bindings indexed by ARQ UUID and + # converts them to bindings indexed by instance UUID, which are then + # stored in the dict below. This dict looks like: + # { $instance_uuid: [ + # {'hostname': $hostname, + # 'device_rp_uuid': $device_rp_uuid, + # 'arq_uuid': $arq_uuid + # } + # ] + # } + # Since it is indexed by instance UUID, and that is presumably unique + # across concurrently executing tests, this should be safe for + # concurrent access. + bindings_by_instance = {} + + @staticmethod + def fake_bind_arqs(bindings): + """Simulate Cyborg ARQ bindings. + + Since Nova calls Cyborg for binding on per-instance basis, the + instance UUIDs would be the same for all ARQs in a single call. + + This function converts bindings indexed by ARQ UUID to bindings + indexed by instance UUID, so that fake_get_arqs_for_instance can + retrieve them later. + + :param bindings: + { "$arq_uuid": { + "hostname": STRING + "device_rp_uuid": UUID + "instance_uuid": UUID + }, + ... + } + :returns: None + """ + binding_by_instance = collections.defaultdict(list) + for index, arq_uuid in enumerate(bindings): + arq_binding = bindings[arq_uuid] + # instance_uuid is same for all ARQs in a single call. 
+ instance_uuid = arq_binding['instance_uuid'] + newbinding = { + 'hostname': arq_binding['hostname'], + 'device_rp_uuid': arq_binding['device_rp_uuid'], + 'arq_uuid': arq_uuid, + } + binding_by_instance[instance_uuid].append(newbinding) + + CyborgFixture.bindings_by_instance.update(binding_by_instance) + + @staticmethod + def fake_get_arqs_for_instance(instance_uuid, only_resolved=False): + """Get list of bound ARQs for this instance. + + This function uses bindings indexed by instance UUID to + populate the bound ARQ templates in CyborgFixture.bound_arq_list. + """ + arq_host_rp_list = CyborgFixture.bindings_by_instance[instance_uuid] + # The above looks like: + # [{'hostname': $hostname, + # 'device_rp_uuid': $device_rp_uuid, + # 'arq_uuid': $arq_uuid + # }] + + bound_arq_list = copy.deepcopy(CyborgFixture.bound_arq_list) + for arq in bound_arq_list: + match = [(arq_host_rp['hostname'], + arq_host_rp['device_rp_uuid'], + instance_uuid) + for arq_host_rp in arq_host_rp_list + if arq_host_rp['arq_uuid'] == arq['uuid'] + ] + # Only 1 ARQ UUID would match, so len(match) == 1 + arq['hostname'], arq['device_rp_uuid'], arq['instance_uuid'] = ( + match[0][0], match[0][1], match[0][2]) + return bound_arq_list + + @staticmethod + def fake_delete_arqs_for_instance(instance_uuid): + return None + + def setUp(self): + super(CyborgFixture, self).setUp() + self.mock_get_dp = self.useFixture(fixtures.MockPatch( + 'nova.accelerator.cyborg._CyborgClient._get_device_profile_list', + return_value=_get_device_profile(self.dp_name, self.trait))).mock + self.mock_create_arqs = self.useFixture(fixtures.MockPatch( + 'nova.accelerator.cyborg._CyborgClient._create_arqs', + return_value=self.arq_list)).mock + self.mock_bind_arqs = self.useFixture(fixtures.MockPatch( + 'nova.accelerator.cyborg._CyborgClient.bind_arqs', + side_effect=self.fake_bind_arqs)).mock + self.mock_get_arqs = self.useFixture(fixtures.MockPatch( + 'nova.accelerator.cyborg._CyborgClient.' + 'get_arqs_for_instance', + side_effect=self.fake_get_arqs_for_instance)).mock + self.mock_del_arqs = self.useFixture(fixtures.MockPatch( + 'nova.accelerator.cyborg._CyborgClient.' 
+ 'delete_arqs_for_instance', + side_effect=self.fake_delete_arqs_for_instance)).mock diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api/client.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api/client.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api/client.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api/client.py 2020-04-10 17:57:57.000000000 +0000 @@ -314,9 +314,31 @@ def delete_flavor(self, flavor_id): return self.api_delete('/flavors/%s' % flavor_id) - def post_extra_spec(self, flavor_id, spec): - return self.api_post('/flavors/%s/os-extra_specs' % - flavor_id, spec) + def get_extra_specs(self, flavor_id): + return self.api_get( + '/flavors/%s/os-extra_specs' % flavor_id + ).body['extra_specs'] + + def get_extra_spec(self, flavor_id, spec_id): + return self.api_get( + '/flavors/%s/os-extra_specs/%s' % (flavor_id, spec_id), + ).body + + def post_extra_spec(self, flavor_id, body, **_params): + url = '/flavors/%s/os-extra_specs' % flavor_id + if _params: + query_string = '?%s' % parse.urlencode(list(_params.items())) + url += query_string + + return self.api_post(url, body) + + def put_extra_spec(self, flavor_id, spec_id, body, **_params): + url = '/flavors/%s/os-extra_specs/%s' % (flavor_id, spec_id) + if _params: + query_string = '?%s' % parse.urlencode(list(_params.items())) + url += query_string + + return self.api_put(url, body) def get_volume(self, volume_id): return self.api_get('/os-volumes/%s' % volume_id).body['volume'] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl 2020-04-10 17:57:57.000000000 +0000 @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "%(value1)s", - "key2": "%(value2)s" + "hw:cpu_policy": "%(value1)s", + "hw:numa_nodes": "%(value2)s" } } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000 @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "%(value1)s", - "key2": "%(value2)s" + "hw:cpu_policy": "%(value1)s", + "hw:numa_nodes": "%(value2)s" } } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -1,3 +1,3 @@
 {
-    "key1": "%(value1)s"
+    "hw:numa_nodes": "%(value1)s"
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -1,6 +1,6 @@
 {
     "extra_specs": {
-        "key1": "%(value1)s",
-        "key2": "%(value2)s"
+        "hw:cpu_policy": "%(value1)s",
+        "hw:numa_nodes": "%(value2)s"
     }
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -1,3 +1,3 @@
 {
-    "key1": "%(value1)s"
+    "hw:numa_nodes": "%(value1)s"
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -1,3 +1,3 @@
 {
-    "key1": "%(value1)s"
+    "hw:numa_nodes": "%(value1)s"
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavor-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavor-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavor-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavor-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -22,8 +22,8 @@
         "rxtx_factor": 1.0,
         "description": "test description",
         "extra_specs": {
-            "key1": "value1",
-            "key2": "value2"
+            "hw:cpu_policy": "shared",
+            "hw:numa_nodes": "1"
         }
     }
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavors-detail-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavors-detail-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavors-detail-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.61/flavors-detail-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -169,8 +169,8 @@
             "rxtx_factor": 1.0,
             "description": "test description",
             "extra_specs": {
-                "key1": "value1",
-                "key2": "value2"
+                "hw:cpu_policy": "shared",
+                "hw:numa_nodes": "1"
             }
         }
     ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavor-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavor-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavor-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavor-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -22,8 +22,8 @@
         "rxtx_factor": 1.0,
         "description": "test description",
         "extra_specs": {
-            "key1": "value1",
-            "key2": "value2"
+            "hw:cpu_policy": "shared",
+            "hw:numa_nodes": "1"
         }
     }
 }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavors-detail-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavors-detail-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavors-detail-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/flavors/v2.75/flavors-detail-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -169,8 +169,8 @@
             "rxtx_factor": 1.0,
             "description": "test description",
             "extra_specs": {
-                "key1": "value1",
-                "key2": "value2"
+                "hw:cpu_policy": "shared",
+                "hw:numa_nodes": "1"
             }
         }
     ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,13 +0,0 @@
-{
-    "keypair": {
-        "public_key": "%(public_key)s",
-        "name": "%(keypair_name)s",
-        "fingerprint": "%(fingerprint)s",
-        "user_id": "fake",
-        "deleted": false,
-        "created_at": "%(strtime)s",
-        "updated_at": null,
-        "deleted_at": null,
-        "id": 1
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,6 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "public_key": "%(public_key)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,8 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "public_key": "%(public_key)s",
-        "user_id": "fake"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,11 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "private_key": "%(private_key)s",
-        "public_key": "%(public_key)s",
-        "user_id": "fake"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-{
-    "keypair": {
-        "public_key": "%(public_key)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "fingerprint": "%(fingerprint)s",
-        "user_id": "%(user_id)s",
-        "deleted": false,
-        "created_at": "%(strtime)s",
-        "updated_at": null,
-        "deleted_at": null,
-        "id": 1
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,8 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "public_key": "%(public_key)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "public_key": "%(public_key)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,12 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "type": "%(keypair_type)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,10 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "private_key": "%(private_key)s",
-        "public_key": "%(public_key)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,14 +0,0 @@
-{
-    "keypair": {
-        "public_key": "%(public_key)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "fingerprint": "%(fingerprint)s",
-        "user_id": "fake",
-        "deleted": false,
-        "created_at": "%(strtime)s",
-        "updated_at": null,
-        "deleted_at": null,
-        "id": 1
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "public_key": "%(public_key)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "public_key": "%(public_key)s",
-        "user_id": "fake"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,12 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "type": "%(keypair_type)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,6 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,10 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "private_key": "%(private_key)s",
-        "public_key": "%(public_key)s",
-        "user_id": "fake"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "type": "%(keypair_type)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ],
-    "keypairs_links": [
-        {
-            "href": "%(versioned_compute_endpoint)s/keypairs?limit=1&marker=%(keypair_name)s",
-            "rel": "next"
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,12 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "type": "%(keypair_type)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,18 +0,0 @@
-{
-    "keypairs": [
-        {
-            "keypair": {
-                "fingerprint": "%(fingerprint)s",
-                "name": "%(keypair_name)s",
-                "type": "%(keypair_type)s",
-                "public_key": "%(public_key)s"
-            }
-        }
-    ],
-    "keypairs_links": [
-        {
-            "href": "%(versioned_compute_endpoint)s/keypairs?limit=1&marker=%(keypair_name)s&user_id=user2",
-            "rel": "next"
-        }
-    ]
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-{
-    "keypair": {
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
@@ -1,10 +0,0 @@
-{
-    "keypair": {
-        "fingerprint": "%(fingerprint)s",
-        "name": "%(keypair_name)s",
-        "type": "%(keypair_type)s",
-        "private_key": "%(private_key)s",
-        "public_key": "%(public_key)s",
-        "user_id": "%(user_id)s"
-    }
-}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -42,7 +42,7 @@
     ],
     "hypervisors_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=2",
            "rel": "next"
        }
    ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -9,7 +9,7 @@
     ],
     "hypervisors_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=2",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=2",
            "rel": "next"
        }
    ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -42,7 +42,7 @@
     ],
     "hypervisors_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=%(hypervisor_id)s",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=%(hypervisor_id)s",
            "rel": "next"
        }
    ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json.tpl 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -9,7 +9,7 @@
     ],
     "hypervisors_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=%(hypervisor_id)s",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=%(hypervisor_id)s",
            "rel": "next"
        }
    ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,21 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "instance_uuid": "%(uuid)s",
+        "request_id": "%(request_id)s",
+        "user_id": "%(user_id)s",
+        "project_id": "%(project_id)s",
+        "start_time": "%(strtime)s",
+        "updated_at": "%(strtime)s",
+        "message": null,
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "start_time": "%(strtime)s",
+                "finish_time": "%(strtime)s",
+                "result": "Success",
+                "hostId": "%(event_hostId)s"
+            }
+        ]
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,24 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "instance_uuid": "%(uuid)s",
+        "request_id": "%(request_id)s",
+        "user_id": "%(user_id)s",
+        "project_id": "%(project_id)s",
+        "start_time": "%(strtime)s",
+        "updated_at": "%(strtime)s",
+        "message": null,
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "start_time": "%(strtime)s",
+                "finish_time": "%(strtime)s",
+                "result": "Success",
+                "traceback": null,
+                "host": "%(event_host)s",
+                "hostId": "%(event_hostId)s",
+                "details": null
+            }
+        ]
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,24 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        },
+        {
+            "action": "create",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,24 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        },
+        {
+            "action": "create",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,14 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,20 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        }
+    ],
+    "links": [
+        {
+            "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s/os-instance-actions?limit=1&marker=%(request_id)s",
+            "rel": "next"
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,14 @@
+{
+    "instanceActions": [
+        {
+            "action": "create",
+            "instance_uuid": "%(uuid)s",
+            "request_id": "%(request_id)s",
+            "user_id": "%(user_id)s",
+            "project_id": "%(project_id)s",
+            "start_time": "%(strtime)s",
+            "updated_at": "%(strtime)s",
+            "message": null
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,13 @@
+{
+    "keypair": {
+        "public_key": "%(public_key)s",
+        "name": "%(keypair_name)s",
+        "fingerprint": "%(fingerprint)s",
+        "user_id": "fake",
+        "deleted": false,
+        "created_at": "%(strtime)s",
+        "updated_at": null,
+        "deleted_at": null,
+        "id": 1
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,6 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "public_key": "%(public_key)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,8 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "public_key": "%(public_key)s",
+        "user_id": "fake"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,11 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,5 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,9 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "private_key": "%(private_key)s",
+        "public_key": "%(public_key)s",
+        "user_id": "fake"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,14 @@
+{
+    "keypair": {
+        "public_key": "%(public_key)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "fingerprint": "%(fingerprint)s",
+        "user_id": "%(user_id)s",
+        "deleted": false,
+        "created_at": "%(strtime)s",
+        "updated_at": null,
+        "deleted_at": null,
+        "id": 1
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,8 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "public_key": "%(public_key)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,9 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "public_key": "%(public_key)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,12 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "type": "%(keypair_type)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,7 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "private_key": "%(private_key)s",
+        "public_key": "%(public_key)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-get-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-get-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-get-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-get-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,14 @@
+{
+    "keypair": {
+        "public_key": "%(public_key)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "fingerprint": "%(fingerprint)s",
+        "user_id": "fake",
+        "deleted": false,
+        "created_at": "%(strtime)s",
+        "updated_at": null,
+        "deleted_at": null,
+        "id": 1
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,7 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "public_key": "%(public_key)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,9 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "public_key": "%(public_key)s",
+        "user_id": "fake"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,12 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "type": "%(keypair_type)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,6 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "private_key": "%(private_key)s",
+        "public_key": "%(public_key)s",
+        "user_id": "fake"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,18 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "type": "%(keypair_type)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ],
+    "keypairs_links": [
+        {
+            "href": "%(versioned_compute_endpoint)s/os-keypairs?limit=1&marker=%(keypair_name)s",
+            "rel": "next"
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,12 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "type": "%(keypair_type)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,18 @@
+{
+    "keypairs": [
+        {
+            "keypair": {
+                "fingerprint": "%(fingerprint)s",
+                "name": "%(keypair_name)s",
+                "type": "%(keypair_type)s",
+                "public_key": "%(public_key)s"
+            }
+        }
+    ],
+    "keypairs_links": [
+        {
+            "href": "%(versioned_compute_endpoint)s/os-keypairs?limit=1&marker=%(keypair_name)s&user_id=user2",
+            "rel": "next"
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,7 @@
+{
+    "keypair": {
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "keypair": {
+        "fingerprint": "%(fingerprint)s",
+        "name": "%(keypair_name)s",
+        "type": "%(keypair_type)s",
+        "private_key": "%(private_key)s",
+        "public_key": "%(public_key)s",
+        "user_id": "%(user_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,7 @@
+{
+    "volumeAttachment": {
+        "volumeId": "%(volume_id)s",
+        "tag": "%(tag)s",
+        "delete_on_termination": true
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "volumeAttachment": {
+        "device": "%(device)s",
+        "id": "%(volume_id)s",
+        "serverId": "%(uuid)s",
+        "tag": "%(tag)s",
+        "volumeId": "%(volume_id)s",
+        "delete_on_termination": true
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,20 @@
+{
+    "volumeAttachments": [
+        {
+            "device": "%(device)s",
+            "id": "%(volume_id)s",
+            "serverId": "%(uuid)s",
+            "tag": "%(tag)s",
+            "volumeId": "%(volume_id)s",
+            "delete_on_termination": true
+        },
+        {
+            "device": "%(text)s",
+            "id": "%(volume_id2)s",
+            "serverId": "%(uuid)s",
+            "tag": null,
+            "volumeId": "%(volume_id2)s",
+            "delete_on_termination": false
+        }
+    ]
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "volumeAttachment": {
+        "volumeId": "%(volume_id)s",
+        "id": "%(volume_id)s",
+        "serverId": "%(server_id)s",
+        "device": "%(device)s",
+        "tag": "%(tag)s",
+        "delete_on_termination": true
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-req.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-req.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-req.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-req.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,5 @@
+{
+    "volumeAttachment": {
+        "volumeId": "%(new_volume_id)s"
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json.tpl nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json.tpl
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json.tpl 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json.tpl 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,10 @@
+{
+    "volumeAttachment": {
+        "device": "%(device)s",
+        "id": "%(volume_id)s",
+        "serverId": "%(uuid)s",
+        "tag": "%(tag)s",
+        "volumeId": "%(volume_id)s",
+        "delete_on_termination": true
+    }
+}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py 2020-04-10 17:57:57.000000000 +0000
@@ -21,8 +21,9 @@
     sample_dir = 'flavor-extra-specs'
 
     def _flavor_extra_specs_create(self):
-        subs = {'value1': 'value1',
-                'value2': 'value2'
+        subs = {
+            'value1': 'shared',
+            'value2': '1',
         }
         response = self._do_post('flavors/1/os-extra_specs',
                                  'flavor-extra-specs-create-req', subs)
@@ -30,15 +31,18 @@
                               subs, response, 200)
 
     def test_flavor_extra_specs_get(self):
-        subs = {'value1': 'value1'}
+        subs = {
+            'value1': '1',
+        }
         self._flavor_extra_specs_create()
-        response = self._do_get('flavors/1/os-extra_specs/key1')
+        response = self._do_get('flavors/1/os-extra_specs/hw:numa_nodes')
         self._verify_response('flavor-extra-specs-get-resp',
                               subs, response, 200)
 
     def test_flavor_extra_specs_list(self):
-        subs = {'value1': 'value1',
-                'value2': 'value2'
+        subs = {
+            'value1': 'shared',
+            'value2': '1',
         }
         self._flavor_extra_specs_create()
         response = self._do_get('flavors/1/os-extra_specs')
@@ -49,15 +53,17 @@
         self._flavor_extra_specs_create()
 
     def test_flavor_extra_specs_update(self):
-        subs = {'value1': 'new_value1'}
+        subs = {
+            'value1': '2',
+        }
         self._flavor_extra_specs_create()
-        response = self._do_put('flavors/1/os-extra_specs/key1',
+        response = self._do_put('flavors/1/os-extra_specs/hw:numa_nodes',
                                 'flavor-extra-specs-update-req', subs)
         self._verify_response('flavor-extra-specs-update-resp',
                               subs, response, 200)
 
     def test_flavor_extra_specs_delete(self):
         self._flavor_extra_specs_create()
-        response = self._do_delete('flavors/1/os-extra_specs/key1')
+        response = self._do_delete('flavors/1/os-extra_specs/hw:numa_nodes')
         self.assertEqual(200, response.status_code)
         self.assertEqual('', response.text)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_flavors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_flavors.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_flavors.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_flavors.py 2020-04-10 17:57:57.000000000 +0000
@@ -121,7 +121,10 @@
         new_flavor = objects.Flavor(
             ctxt, memory_mb=2048, vcpus=1, root_gb=20, flavorid=new_flavor_id,
             name='m1.small.description', description='test description',
-            extra_specs={"key1": "value1", "key2": "value2"})
+            extra_specs={
+                'hw:numa_nodes': '1',
+                'hw:cpu_policy': 'shared',
+            })
         new_flavor.create()
         self.flavor_show_id = new_flavor_id
         self.subs = {'flavorid': new_flavor_id}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_instance_actions.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_instance_actions.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_instance_actions.py 2020-04-10 17:57:57.000000000 +0000
@@ -15,6 +15,7 @@
 
 from nova.tests.functional.api_sample_tests import test_servers
 from nova.tests.functional import api_samples_test_base
+from nova.tests.unit import policy_fixture
 
 
 class ServerActionsSampleJsonTest(test_servers.ServersSampleBase):
@@ -34,6 +35,7 @@
         actions = api_samples_test_base.objectify(response_data)
         self.action_stop = actions['instanceActions'][0]
         self._wait_for_state_change({'id': self.uuid}, 'SHUTOFF')
+        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
 
     def _get_subs(self):
         return {
@@ -152,3 +154,12 @@
         self._verify_response(
             'instance-actions-list-with-changes-before', self._get_subs(),
             response, 200)
+
+
+class ServerActionsV284SampleJsonTest(ServerActionsV266SampleJsonTest):
+    microversion = '2.84'
+    scenarios = [('2.84', {'api_major_version': 'v2.1'})]
+
+
+class ServerActionsV284NonAdminSampleJsonTest(ServerActionsV284SampleJsonTest):
+    ADMIN_API = False
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_keypairs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_keypairs.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_keypairs.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_keypairs.py 2020-04-10 17:57:57.000000000 +0000
@@ -23,7 +23,7 @@
 
 class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
     microversion = None
-    sample_dir = "keypairs"
+    sample_dir = 'os-keypairs'
     expected_delete_status_code = 202
expected_post_status_code = 200 @@ -246,7 +246,7 @@ class KeyPairsV235SampleJsonTest(api_sample_base.ApiSampleTestBaseV21): ADMIN_API = True - sample_dir = "keypairs" + sample_dir = 'os-keypairs' microversion = '2.35' expected_post_status_code = 201 scenarios = [('v2_35', {'api_major_version': 'v2.1'})] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_versions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_versions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_versions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_versions.py 2020-04-10 17:57:57.000000000 +0000 @@ -12,33 +12,59 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import ddt +import fixtures +import webob from nova.api.openstack import api_version_request as avr from nova.tests.functional.api_sample_tests import api_sample_base +@ddt.ddt class VersionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21): + """Validate that proper version documents can be fetched without auth.""" + + # Here we want to avoid stubbing keystone middleware. That will cause + # "real" keystone middleware to run (and fail) if it's in the pipeline. + # (The point of this test is to prove we do version discovery through + # pipelines that *don't* authenticate.) + STUB_KEYSTONE = False + sample_dir = 'versions' _use_project_id = False # NOTE(gmann): Setting empty scenario for 'version' API testing # as those does not send request on particular endpoint and running # its tests alone is enough. scenarios = [] - max_api_version = avr.max_api_version().get_string() + max_api_version = {'max_api_version': avr.max_api_version().get_string()} - def test_versions_get(self): - response = self._do_get('', strip_version=True) - self._verify_response('versions-get-resp', - {'max_api_version': self.max_api_version}, + def setUp(self): + super(VersionsSampleJsonTest, self).setUp() + # Version documents are supposed to be available without auth, so make + # the auth middleware "fail" authentication. + self.useFixture(fixtures.MockPatch( + # [api]auth_strategy is set to noauth2 by the ConfFixture + 'nova.api.openstack.auth.NoAuthMiddlewareBase.base_call', + return_value=webob.Response(status=401))) + + def _get(self, url): + return self._do_get( + url, + # Since we're explicitly getting discovery endpoints, strip the + # automatic /v2[.1] added by the fixture. 
+            strip_version=True)
+
+    @ddt.data('', '/')
+    def test_versions_get_base(self, url):
+        response = self._get(url)
+        self._verify_response('versions-get-resp', self.max_api_version,
                               response, 200, update_links=False)
 
-    def test_versions_get_v2(self):
-        response = self._do_get('/v2', strip_version=True)
-        self._verify_response('v2-version-get-resp', {},
-                              response, 200, update_links=False)
-
-    def test_versions_get_v21(self):
-        response = self._do_get('/v2.1', strip_version=True)
-        self._verify_response('v21-version-get-resp',
-                              {'max_api_version': self.max_api_version},
-                              response, 200, update_links=False)
+    @ddt.data(('/v2', 'v2-version-get-resp', {}),
+              ('/v2/', 'v2-version-get-resp', {}),
+              ('/v2.1', 'v21-version-get-resp', max_api_version),
+              ('/v2.1/', 'v21-version-get-resp', max_api_version))
+    @ddt.unpack
+    def test_versions_get_versioned(self, url, tplname, subs):
+        response = self._get(url)
+        self._verify_response(tplname, subs, response, 200, update_links=False)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_volumes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_volumes.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/api_sample_tests/test_volumes.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/api_sample_tests/test_volumes.py 2020-04-10 17:57:57.000000000 +0000
@@ -287,3 +287,33 @@
     """
     microversion = '2.79'
     scenarios = [('v2_79', {'api_major_version': 'v2.1'})]
+
+
+class UpdateVolumeAttachmentsSampleV285(VolumeAttachmentsSampleV279):
+    """Microversion 2.85 adds support for ``PUT
+    /servers/{server_id}/os-volume_attachments/{volume_id}``, which accepts
+    a ``delete_on_termination`` field in the request body to reconfigure
+    whether the attached volume should be deleted when the instance
+    is deleted.
+ """ + microversion = '2.85' + scenarios = [('v2_85', {'api_major_version': 'v2.1'})] + + def test_volume_attachment_update(self): + subs = self.test_attach_volume_to_server() + attached_volume_id = subs['volume_id'] + subs['server_id'] = self.server_id + response = self._do_put('servers/%s/os-volume_attachments/%s' + % (self.server_id, attached_volume_id), + 'update-volume-attachment-delete-flag-req', + subs) + self.assertEqual(202, response.status_code) + self.assertEqual('', response.text) + + # Make sure the attached volume was changed + attachments = self.api.api_get( + '/servers/%s/os-volume_attachments' % self.server_id).body[ + 'volumeAttachments'] + self.assertEqual(1, len(attachments)) + self.assertEqual(self.server_id, attachments[0]['serverId']) + self.assertTrue(attachments[0]['delete_on_termination']) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/compute/test_cache_image.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/compute/test_cache_image.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/compute/test_cache_image.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/compute/test_cache_image.py 2020-04-10 17:57:57.000000000 +0000 @@ -34,11 +34,11 @@ self.compute1 = self.start_service('compute', host='compute1') self.compute2 = self.start_service('compute', host='compute2') self.compute3 = self.start_service('compute', host='compute3', - cell='cell2') + cell_name='cell2') self.compute4 = self.start_service('compute', host='compute4', - cell='cell2') + cell_name='cell2') self.compute5 = self.start_service('compute', host='compute5', - cell='cell2') + cell_name='cell2') cell2 = self.cell_mappings['cell2'] with context.target_cell(self.context, cell2) as cctxt: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/compute/test_resource_tracker.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/compute/test_resource_tracker.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/compute/test_resource_tracker.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/compute/test_resource_tracker.py 2020-04-10 17:57:57.000000000 +0000 @@ -22,9 +22,9 @@ from nova import conf from nova import context from nova import objects +from nova import test from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers -from nova.tests.functional import test_report_client as test_base from nova.virt import driver as virt_driver @@ -35,7 +35,7 @@ COMPUTE_HOST = 'compute-host' -class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase): +class IronicResourceTrackerTest(test.TestCase): """Tests the behaviour of the resource tracker with regards to the transitional period between adding support for custom resource classes in the placement API and integrating inventory and allocation records for diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/db/test_virtual_interface.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/db/test_virtual_interface.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/db/test_virtual_interface.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/db/test_virtual_interface.py 2020-04-10 17:57:57.000000000 +0000 @@ -70,9 +70,7 @@ fake_network.set_stub_network_methods(self) self.cells = 
objects.CellMappingList.get_all(self.context) - compute_cell0 = self.start_service( - 'compute', host='compute2', cell='cell0') - self.computes = [compute_cell0, self.compute] + self._start_compute('compute2') self.instances = [] def _create_instances(self, pre_newton=2, deleted=0, total=5, @@ -93,7 +91,7 @@ flavor=flavor, created_at=datetime.datetime(1985, 10, 25, 1, 21, 0), launched_at=datetime.datetime(1985, 10, 25, 1, 22, 0), - host=self.computes[0].host, + host=self.computes['compute2'].host, hostname='%s-inst%i' % (target_cell.name, i)) inst.create() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/fixtures.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/fixtures.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/fixtures.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/fixtures.py 2020-04-10 17:57:57.000000000 +0000 @@ -92,28 +92,27 @@ self.api = PlacementApiClient(self) @staticmethod - def _update_headers_with_version(headers, **kwargs): - version = kwargs.get("version") + def _update_headers_with_version(headers, version): if version is not None: # TODO(mriedem): Perform some version discovery at some point. headers.update({ 'OpenStack-API-Version': 'placement %s' % version }) - def _fake_get(self, *args, **kwargs): - (url,) = args[1:] + def _fake_get(self, client, url, version=None, global_request_id=None): # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 # in case a token is not provided. We should change that by creating # a fake token so we could remove adding the header below. headers = {'x-auth-token': self.token} - self._update_headers_with_version(headers, **kwargs) + self._update_headers_with_version(headers, version) return self._client.get( url, endpoint_override=self.endpoint, headers=headers) - def _fake_post(self, *args, **kwargs): - (url, data) = args[1:] + def _fake_post( + self, client, url, data, version=None, global_request_id=None + ): # NOTE(sdague): using json= instead of data= sets the # media type to application/json for us. Placement API is # more sensitive to this than other APIs in the OpenStack @@ -122,14 +121,15 @@ # in case a token is not provided. We should change that by creating # a fake token so we could remove adding the header below. headers = {'x-auth-token': self.token} - self._update_headers_with_version(headers, **kwargs) + self._update_headers_with_version(headers, version) return self._client.post( url, json=data, endpoint_override=self.endpoint, headers=headers) - def _fake_put(self, *args, **kwargs): - (url, data) = args[1:] + def _fake_put( + self, client, url, data, version=None, global_request_id=None + ): # NOTE(sdague): using json= instead of data= sets the # media type to application/json for us. Placement API is # more sensitive to this than other APIs in the OpenStack @@ -138,19 +138,20 @@ # in case a token is not provided. We should change that by creating # a fake token so we could remove adding the header below. headers = {'x-auth-token': self.token} - self._update_headers_with_version(headers, **kwargs) + self._update_headers_with_version(headers, version) return self._client.put( url, json=data, endpoint_override=self.endpoint, headers=headers) - def _fake_delete(self, *args, **kwargs): - (url,) = args[1:] + def _fake_delete( + self, client, url, version=None, global_request_id=None + ): # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 # in case a token is not provided. 
We should change that by creating # a fake token so we could remove adding the header below. headers = {'x-auth-token': self.token} - self._update_headers_with_version(headers, **kwargs) + self._update_headers_with_version(headers, version) return self._client.delete( url, endpoint_override=self.endpoint, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/integrated_helpers.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/integrated_helpers.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/integrated_helpers.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/integrated_helpers.py 2020-04-10 17:57:57.000000000 +0000 @@ -162,10 +162,12 @@ :param server: API response dict of the server being resized/migrated :param action: Either "resize" or "migrate" instance action. :param error_in_tb: Some expected part of the error event traceback. + :returns: The instance action event dict from the API response """ event = self._wait_for_action_fail_completion( server, action, 'conductor_migrate_server') self.assertIn(error_in_tb, event['traceback']) + return event def _wait_for_migration_status(self, server, expected_statuses): """Waits for a migration record with the given statuses to be found @@ -193,6 +195,21 @@ self.fail('The line "%(log_line)s" did not appear in the log') + def _wait_for_assert(self, assert_func, max_retries=10, sleep=0.5): + """Waits and retries the assert_func either until it does not raise + AssertionError any more or until the max_retries run out. + """ + last_error = None + for i in range(max_retries): + try: + return assert_func() + except AssertionError as e: + last_error = e + + time.sleep(sleep) + + raise last_error + def _create_aggregate(self, name, availability_zone=None): """Creates a host aggregate with the given name and optional AZ @@ -360,6 +377,10 @@ # This indicates whether to include the project ID in the URL for API # requests through OSAPIFixture. Overridden by subclasses. _use_project_id = False + # Override this in subclasses to avoid stubbing keystonemiddleware and + # NovaKeystoneContext, thus making those middlewares behave as they would + # in real life (i.e. try to do real authentication). + STUB_KEYSTONE = True def setUp(self): super(_IntegratedTestBase, self).setUp() @@ -377,7 +398,7 @@ self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) def _setup_compute_service(self): - return self.start_service('compute') + return self._start_compute('compute') def _setup_scheduler_service(self): return self.start_service('scheduler') @@ -396,7 +417,8 @@ self.api_fixture = self.useFixture( nova_fixtures.OSAPIFixture( api_version=self.api_major_version, - use_project_id_in_urls=self._use_project_id)) + use_project_id_in_urls=self._use_project_id, + stub_keystone=self.STUB_KEYSTONE)) # if the class needs to run as admin, make the api endpoint # the admin, otherwise it's safer to run as non admin user. 
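The `_wait_for_assert` helper added above retries a failing assertion until it passes or the retries run out, rather than sleeping for a fixed interval. A minimal usage sketch, assumed to run inside a test method; the assertion itself is illustrative only (`rp_uuid` is an assumed provider UUID, and `_get_provider_usages` is the placement helper added in the next hunk):

    # Poll placement until the periodic update has landed: the assertion
    # is retried up to 10 times with a 0.5 second sleep between attempts,
    # and the last AssertionError is re-raised if it never passes.
    def _assert_no_vcpu_usage():
        usages = self._get_provider_usages(rp_uuid)
        self.assertEqual(0, usages['VCPU'])

    self._wait_for_assert(_assert_no_vcpu_usage)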
@@ -427,7 +449,54 @@ ("The expected wsgi middlewares %s are not " "existed") % expected_middleware) + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _get_provider_uuid_by_name(self, name): + return self.placement_api.get( + '/resource_providers?name=%s' % name).body[ + 'resource_providers'][0]['uuid'] + + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid): + rps = self.placement_api.get( + '/resource_providers?in_tree=%s' % in_tree_rp_uuid, + version='1.20').body['resource_providers'] + return [rp['uuid'] for rp in rps] + + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _get_provider_inventory(self, rp_uuid): + return self.placement_api.get( + '/resource_providers/%s/inventories' % rp_uuid).body['inventories'] + + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _get_provider_usages(self, provider_uuid): + return self.placement_api.get( + '/resource_providers/%s/usages' % provider_uuid).body['usages'] + + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _create_trait(self, trait): + return self.placement_api.put('/traits/%s' % trait, {}, version='1.6') + + # TODO(sbauza): Drop this method once test classes inherit from a mixin + def _set_provider_traits(self, rp_uuid, traits): + """This will overwrite any existing traits. + + :param rp_uuid: UUID of the resource provider to update + :param traits: list of trait strings to set on the provider + :returns: APIResponse object with the results + """ + provider = self.placement_api.get( + '/resource_providers/%s' % rp_uuid).body + put_traits_req = { + 'resource_provider_generation': provider['generation'], + 'traits': traits + } + return self.placement_api.put( + '/resource_providers/%s/traits' % rp_uuid, + put_traits_req, version='1.6') + +# FIXME(sbauza): There is little value to have this be a whole base testclass +# instead of a mixin only providing methods for accessing Placement endpoint. class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin): """Base test class for functional tests that check provider usage and consumer allocations in Placement during various operations. @@ -445,6 +514,7 @@ # nova.virt.libvirt.driver.LibvirtDriver.capabilities expected_libvirt_driver_capability_traits = set([ six.u(trait) for trait in [ + os_traits.COMPUTE_ACCELERATORS, os_traits.COMPUTE_DEVICE_TAGGING, os_traits.COMPUTE_NET_ATTACH_INTERFACE, os_traits.COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG, @@ -464,6 +534,7 @@ # nova.virt.fake.FakeDriver.capabilities expected_fake_driver_capability_traits = set([ six.u(trait) for trait in [ + os_traits.COMPUTE_ACCELERATORS, os_traits.COMPUTE_IMAGE_TYPE_RAW, os_traits.COMPUTE_DEVICE_TAGGING, os_traits.COMPUTE_NET_ATTACH_INTERFACE, @@ -504,21 +575,6 @@ self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) - self.computes = {} - - def _start_compute(self, host, cell_name=None): - """Start a nova compute service on the given host - - :param host: the name of the host that will be associated to the - compute service. 
- :param cell_name: optional name of the cell in which to start the - compute service (defaults to cell1) - :return: the nova compute service object - """ - compute = self.start_service('compute', host=host, cell=cell_name) - self.computes[host] = compute - return compute - def _get_provider_uuid_by_host(self, host): # NOTE(gibi): the compute node id is the same as the compute node # provider uuid on that compute @@ -534,6 +590,19 @@ return self.placement_api.get( '/allocations/%s' % server_uuid).body['allocations'] + def _wait_for_server_allocations(self, consumer_id, max_retries=20): + retry_count = 0 + while True: + alloc = self._get_allocations_by_server_uuid(consumer_id) + if alloc: + break + retry_count += 1 + if retry_count == max_retries: + self.fail('Wait for server allocations failed, ' + 'server=%s' % (consumer_id)) + time.sleep(0.5) + return alloc + def _get_allocations_by_provider_uuid(self, rp_uuid): return self.placement_api.get( '/resource_providers/%s/allocations' % rp_uuid).body['allocations'] @@ -843,23 +912,6 @@ self.assertEqual(0, len(allocations)) return migration_uuid - def _run_periodics(self): - """Run the update_available_resource task on every compute manager - - This runs periodics on the computes in an undefined order; some child - class redefined this function to force a specific order. - """ - - ctx = context.get_admin_context() - for host, compute in self.computes.items(): - LOG.info('Running periodic for compute (%s)', host) - # Make sure the context is targeted to the proper cell database - # for multi-cell tests. - with context.target_cell( - ctx, self.host_mappings[host].cell_mapping) as cctxt: - compute.manager.update_available_resource(cctxt) - LOG.info('Finished with periodics') - def _move_and_check_allocations(self, server, request, old_flavor, new_flavor, source_rp_uuid, dest_rp_uuid): self.api.post_server_action(server['id'], request) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_numa_servers.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_numa_servers.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_numa_servers.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_numa_servers.py 2020-04-10 17:57:57.000000000 +0000 @@ -39,6 +39,8 @@ def setUp(self): super(NUMAServersTestBase, self).setUp() + self.ctxt = nova_context.get_admin_context() + # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect # this host_manager = self.scheduler.manager.driver.host_manager @@ -141,6 +143,64 @@ self._run_build_test(flavor_id, end_status='ERROR') + def test_create_server_with_hugepages(self): + """Create a server with huge pages. + + Configuring huge pages against a server also necessitates configuring a + NUMA topology. 
+ """ + host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=2, + cpu_cores=2, cpu_threads=2, + kB_mem=(1024 * 1024 * 16)) # GB + self.mock_conn.return_value = self._get_connection(host_info=host_info) + + # create 1024 * 2 MB huge pages, and allocate the rest of the 16 GB as + # small pages + for cell in host_info.numa_topology.cells: + huge_pages = 1024 + small_pages = (host_info.kB_mem - (2048 * huge_pages)) // 4 + cell.mempages = fakelibvirt.create_mempages([ + (4, small_pages), + (2048, huge_pages), + ]) + + extra_spec = {'hw:mem_page_size': 'large'} + flavor_id = self._create_flavor(memory_mb=2048, extra_spec=extra_spec) + expected_usage = {'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2} + + server = self._run_build_test(flavor_id, expected_usage=expected_usage) + + ctx = nova_context.get_admin_context() + inst = objects.Instance.get_by_uuid(ctx, server['id']) + self.assertEqual(1, len(inst.numa_topology.cells)) + self.assertEqual(2048, inst.numa_topology.cells[0].pagesize) # kB + self.assertEqual(2048, inst.numa_topology.cells[0].memory) # MB + + def test_create_server_with_hugepages_fails(self): + """Create a server with huge pages on a host that doesn't support them. + + This should fail because there are hugepages but not enough of them. + """ + host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=2, + cpu_cores=2, cpu_threads=2, + kB_mem=(1024 * 1024 * 16)) # GB + self.mock_conn.return_value = self._get_connection(host_info=host_info) + + # create 512 * 2 MB huge pages, and allocate the rest of the 16 GB as + # small pages + for cell in host_info.numa_topology.cells: + huge_pages = 512 + small_pages = (host_info.kB_mem - (2048 * huge_pages)) // 4 + cell.mempages = fakelibvirt.create_mempages([ + (4, small_pages), + (2048, huge_pages), + ]) + + extra_spec = {'hw:mem_page_size': 'large'} + flavor_id = self._create_flavor(memory_mb=2048, extra_spec=extra_spec) + + self._run_build_test(flavor_id, end_status='ERROR') + def test_create_server_with_legacy_pinning_policy(self): """Create a server using the legacy 'hw:cpu_policy' extra spec. @@ -166,8 +226,7 @@ server = self._run_build_test(flavor_id, expected_usage=expected_usage) - ctx = nova_context.get_admin_context() - inst = objects.Instance.get_by_uuid(ctx, server['id']) + inst = objects.Instance.get_by_uuid(self.ctxt, server['id']) self.assertEqual(1, len(inst.numa_topology.cells)) self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores) @@ -345,6 +404,111 @@ self.api.post_server, post) self.assertEqual(403, ex.response.status_code) + def _inspect_filter_numa_topology(self, cell_count): + """Helper function used by test_resize_server_with_numa* tests.""" + args, kwargs = self.mock_filter.call_args_list[0] + self.assertEqual(2, len(args)) + self.assertEqual({}, kwargs) + numa_topology = args[1].numa_topology + self.assertEqual(cell_count, len(numa_topology.cells), args) + + # We always reset mock_filter because we don't want these result + # fudging later tests + self.mock_filter.reset_mock() + self.assertEqual(0, len(self.mock_filter.call_args_list)) + + def _inspect_request_spec(self, server, cell_count): + """Helper function used by test_resize_server_with_numa* tests.""" + req_spec = objects.RequestSpec.get_by_instance_uuid( + self.ctxt, server['id']) + self.assertEqual(cell_count, len(req_spec.numa_topology.cells)) + + def test_resize_revert_server_with_numa(self): + """Create a single-node instance and resize it to a flavor with two + nodes, then revert to the old flavor rather than confirm. 
+ + Nothing too complicated going on here. We create an instance with a one + NUMA node guest topology and then attempt to resize this to use a + topology with two nodes. Once done, we revert this resize to ensure the + instance reverts to using the old NUMA topology as expected. + """ + # don't bother waiting for neutron events since we don't actually have + # neutron + self.flags(vif_plugging_timeout=0) + + host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1, + cpu_cores=2, cpu_threads=2, + kB_mem=15740000) + + # Start services + self.computes = {} + for host in ['test_compute0', 'test_compute1']: + fake_connection = self._get_connection( + host_info=host_info, hostname=host) + + # This is fun. Firstly we need to do a global'ish mock so we can + # actually start the service. + with mock.patch('nova.virt.libvirt.host.Host.get_connection', + return_value=fake_connection): + compute = self.start_service('compute', host=host) + + # Once that's done, we need to do some tweaks to each individual + # compute "service" to make sure they return unique objects + compute.driver._host.get_connection = lambda: fake_connection + self.computes[host] = compute + + # STEP ONE + + # Create server + extra_spec = {'hw:numa_nodes': '1'} + flavor_a_id = self._create_flavor(vcpu=4, extra_spec=extra_spec) + + server = self._create_server(flavor_id=flavor_a_id) + + # Ensure the filter saw the 'numa_topology' field and the request spec + # is as expected + self._inspect_filter_numa_topology(cell_count=1) + self._inspect_request_spec(server, cell_count=1) + + # STEP TWO + + # Create a new flavor with a different but still valid number of NUMA + # nodes + extra_spec = {'hw:numa_nodes': '2'} + flavor_b_id = self._create_flavor(vcpu=4, extra_spec=extra_spec) + + # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should + # probably be less...dumb + with mock.patch('nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}'): + post = {'resize': {'flavorRef': flavor_b_id}} + self.api.post_server_action(server['id'], post) + + server = self._wait_for_state_change(server, 'VERIFY_RESIZE') + + # Ensure the filter saw 'hw:numa_nodes=2' from flavor_b and the request + # spec has been updated + self._inspect_filter_numa_topology(cell_count=2) + self._inspect_request_spec(server, cell_count=2) + + # STEP THREE + + # Revert the instance rather than confirming it, and ensure we see the + # old NUMA topology + + # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should + # probably be less...dumb + with mock.patch('nova.virt.libvirt.driver.LibvirtDriver' + '.migrate_disk_and_power_off', return_value='{}'): + post = {'revertResize': {}} + self.api.post_server_action(server['id'], post) + + server = self._wait_for_state_change(server, 'ACTIVE') + + # We don't have a filter call to check, but we can check that the + # request spec changes were reverted + self._inspect_request_spec(server, cell_count=1) + def test_resize_vcpu_to_pcpu(self): """Create an unpinned instance and resize it to a flavor with pinning. 
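As a sanity check on the mempages arithmetic in the hugepages tests above, the split of the fake host's 16 GiB works out as follows (standalone sketch; the figures come straight from `test_create_server_with_hugepages`):

    # 16 GiB of host RAM in KiB, as passed to fakelibvirt.HostInfo.
    kB_mem = 1024 * 1024 * 16
    # 1024 huge pages of 2048 KiB each, i.e. 2 GiB reserved as huge pages,
    # which is enough for the 2048 MiB flavor used in the passing test.
    huge_pages = 1024
    # The remaining 14 GiB is carved into 4 KiB small pages.
    small_pages = (kB_mem - 2048 * huge_pages) // 4

    assert 2048 * huge_pages == 2 * 1024 * 1024  # 2 GiB in KiB
    assert small_pages == 3670016                # 14 GiB / 4 KiB

In the failing variant only 512 huge pages (1 GiB) are created, so the same 2048 MiB flavor cannot be satisfied and the build ends in ERROR.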
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_report_cpu_traits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_report_cpu_traits.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_report_cpu_traits.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_report_cpu_traits.py 2020-04-10 17:57:57.000000000 +0000 @@ -78,7 +78,8 @@ # The periodic restored the COMPUTE_NODE trait. u'COMPUTE_NODE'] ) - self.assertItemsEqual(expected_traits, traits) + for trait in expected_traits: + self.assertIn(trait, traits) class LibvirtReportNoSevTraitsTests(LibvirtReportTraitsTestBase): @@ -132,6 +133,9 @@ ) as (mock_exists, mock_open, mock_features): # Retrigger the detection code. In the real world this # would be a restart of the compute service. + # As we are changing the domain caps we need to clear the + # cache in the host object. + self.compute.driver._host._domain_caps = None self.compute.driver._host._set_amd_sev_support() self.assertTrue(self.compute.driver._host.supports_amd_sev) @@ -141,6 +145,8 @@ # However it won't disappear in the provider tree and get synced # back to placement until we force a reinventory: self.compute.manager.reset() + # reset cached traits so they are recalculated. + self.compute.driver._static_traits = None self._run_periodics() traits = self._get_provider_traits(self.host_uuid) @@ -199,6 +205,7 @@ with self.patch_exists(SEV_KERNEL_PARAM_FILE, False) as mock_exists: # Retrigger the detection code. In the real world this # would be a restart of the compute service. + self.compute.driver._host._domain_caps = None self.compute.driver._host._set_amd_sev_support() self.assertFalse(self.compute.driver._host.supports_amd_sev) @@ -207,6 +214,8 @@ # However it won't disappear in the provider tree and get synced # back to placement until we force a reinventory: self.compute.manager.reset() + # reset cached traits so they are recalculated. + self.compute.driver._static_traits = None self._run_periodics() traits = self._get_provider_traits(self.host_uuid) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_reshape.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_reshape.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_reshape.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_reshape.py 2020-04-10 17:57:57.000000000 +0000 @@ -59,10 +59,28 @@ # the old tree as that would be a bad time for reshape. Later when the # compute service is restarted the driver will do the reshape. 
+        mdevs = {
+            'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01':
+                fakelibvirt.FakeMdevDevice(
+                    dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c01',
+                    type_id=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+                    parent=fakelibvirt.PGPU1_PCI_ADDR),
+            'mdev_4b20d080_1b54_4048_85b3_a6a62d165c02':
+                fakelibvirt.FakeMdevDevice(
+                    dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c02',
+                    type_id=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+                    parent=fakelibvirt.PGPU2_PCI_ADDR),
+            'mdev_4b20d080_1b54_4048_85b3_a6a62d165c03':
+                fakelibvirt.FakeMdevDevice(
+                    dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c03',
+                    type_id=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+                    parent=fakelibvirt.PGPU3_PCI_ADDR),
+        }
+
         fake_connection = self._get_connection(
             # We need more RAM or the 3rd server won't be created
             host_info=fakelibvirt.HostInfo(kB_mem=8192),
-            mdev_info=fakelibvirt.HostMdevDevicesInfo())
+            mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs))
         self.mock_conn.return_value = fake_connection
 
         # start a compute with vgpu support disabled so the driver will
@@ -87,6 +105,16 @@
             '/resource_providers/%s/inventories' % compute_rp_uuid,
             inventories)
 
+        # enable vgpu support
+        self.flags(
+            enabled_vgpu_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+            group='devices')
+        # We don't want to restart the compute service or it would call for
+        # a reshape, but we still want to accept some vGPU types, so we call
+        # the needed method directly
+        self.compute.driver.supported_vgpu_types = (
+            self.compute.driver._get_supported_vgpu_types())
+
         # now we boot two servers with vgpu
         extra_spec = {"resources:VGPU": 1}
         flavor_id = self._create_flavor(extra_spec=extra_spec)
@@ -139,10 +167,6 @@
             {'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2, 'VGPU': 1},
             allocations[compute_rp_uuid]['resources'])
 
-        # enabled vgpu support
-        self.flags(
-            enabled_vgpu_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
-            group='devices')
         # restart compute which will trigger a reshape
         self.compute = self.restart_compute_service(self.compute)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_vgpu.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_vgpu.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/libvirt/test_vgpu.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/libvirt/test_vgpu.py 2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,234 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +import fixtures +import re + +import os_resource_classes as orc +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import uuidutils + +import nova.conf +from nova import context +from nova import objects +from nova.tests.functional.libvirt import base +from nova.tests.unit.virt.libvirt import fakelibvirt +from nova.virt.libvirt import utils as libvirt_utils + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class VGPUTestBase(base.ServersTestBase): + + FAKE_LIBVIRT_VERSION = 5000000 + FAKE_QEMU_VERSION = 3001000 + + def setUp(self): + super(VGPUTestBase, self).setUp() + self.useFixture(fixtures.MockPatch( + 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info', + return_value={'total': 128, + 'used': 44, + 'free': 84})) + self.useFixture(fixtures.MockPatch( + 'nova.privsep.libvirt.create_mdev', + side_effect=self._create_mdev)) + self.context = context.get_admin_context() + + def pci2libvirt_address(self, address): + return "pci_{}_{}_{}_{}".format(*re.split("[.:]", address)) + + def libvirt2pci_address(self, dev_name): + return "{}:{}:{}.{}".format(*dev_name[4:].split('_')) + + def _create_mdev(self, physical_device, mdev_type, uuid=None): + # We need to fake the newly created sysfs object by adding a new + # FakeMdevDevice in the existing persisted Connection object so + # when asking to get the existing mdevs, we would see it. + if not uuid: + uuid = uuidutils.generate_uuid() + mdev_name = libvirt_utils.mdev_uuid2name(uuid) + libvirt_parent = self.pci2libvirt_address(physical_device) + self.fake_connection.mdev_info.devices.update( + {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=mdev_name, + type_id=mdev_type, + parent=libvirt_parent)}) + return uuid + + def _start_compute_service(self, hostname): + self.fake_connection = self._get_connection( + host_info=fakelibvirt.HostInfo(cpu_nodes=2, kB_mem=8192), + # We want to create two pGPUs but no other PCI devices + pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=0, + num_pfs=0, + num_vfs=0, + num_mdevcap=2), + hostname=hostname) + + self.mock_conn.return_value = self.fake_connection + compute = self.start_service('compute', host=hostname) + rp_uuid = self._get_provider_uuid_by_name(hostname) + rp_uuids = self._get_all_rp_uuids_in_a_tree(rp_uuid) + for rp in rp_uuids: + inventory = self._get_provider_inventory(rp) + if orc.VGPU in inventory: + usage = self._get_provider_usages(rp) + self.assertEqual(16, inventory[orc.VGPU]['total']) + self.assertEqual(0, usage[orc.VGPU]) + # Since we haven't created any mdevs yet, we shouldn't find them + self.assertEqual([], compute.driver._get_mediated_devices()) + return compute + + +class VGPUTests(VGPUTestBase): + + def setUp(self): + super(VGPUTests, self).setUp() + extra_spec = {"resources:VGPU": "1"} + self.flavor = self._create_flavor(extra_spec=extra_spec) + + # Start compute1 supporting only nvidia-11 + self.flags( + enabled_vgpu_types=fakelibvirt.NVIDIA_11_VGPU_TYPE, + group='devices') + self.compute1 = self._start_compute_service('host1') + + def test_create_servers_with_vgpu(self): + self._create_server( + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + flavor_id=self.flavor, host=self.compute1.host, + expected_state='ACTIVE') + # Now we should find a new mdev + mdevs = self.compute1.driver._get_mediated_devices() + self.assertEqual(1, len(mdevs)) + + # Checking also the allocations for the parent pGPU + parent_name = mdevs[0]['parent'] + parent_rp_name = self.compute1.host + '_' + parent_name + parent_rp_uuid = 
self._get_provider_uuid_by_name(parent_rp_name)
+        usage = self._get_provider_usages(parent_rp_uuid)
+        self.assertEqual(1, usage[orc.VGPU])
+
+
+class VGPUMultipleTypesTests(VGPUTestBase):
+
+    def setUp(self):
+        super(VGPUMultipleTypesTests, self).setUp()
+        extra_spec = {"resources:VGPU": "1"}
+        self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+        self.flags(
+            enabled_vgpu_types=[fakelibvirt.NVIDIA_11_VGPU_TYPE,
+                                fakelibvirt.NVIDIA_12_VGPU_TYPE],
+            group='devices')
+        # we need to call the below again to ensure the updated
+        # 'device_addresses' value is read and the new groups created
+        nova.conf.devices.register_dynamic_opts(CONF)
+        # host1 will have 2 physical GPUs:
+        #  - 0000:81:00.0 will only support nvidia-11
+        #  - 0000:81:01.0 will only support nvidia-12
+        pgpu1_pci_addr = self.libvirt2pci_address(fakelibvirt.PGPU1_PCI_ADDR)
+        pgpu2_pci_addr = self.libvirt2pci_address(fakelibvirt.PGPU2_PCI_ADDR)
+        self.flags(device_addresses=[pgpu1_pci_addr], group='vgpu_nvidia-11')
+        self.flags(device_addresses=[pgpu2_pci_addr], group='vgpu_nvidia-12')
+
+        # Prepare traits for later on
+        self._create_trait('CUSTOM_NVIDIA_11')
+        self._create_trait('CUSTOM_NVIDIA_12')
+        self.compute1 = self._start_compute_service('host1')
+
+    def test_create_servers_with_vgpu(self):
+        self._create_server(
+            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+            flavor_id=self.flavor, host=self.compute1.host,
+            expected_state='ACTIVE')
+        mdevs = self.compute1.driver._get_mediated_devices()
+        self.assertEqual(1, len(mdevs))
+
+        # We can be deterministic: since 0000:81:01.0 is asked to only support
+        # nvidia-12 *BUT* doesn't actually have this type as a PCI capability,
+        # we are sure that only 0000:81:00.0 is used.
+        parent_name = mdevs[0]['parent']
+        self.assertEqual(fakelibvirt.PGPU1_PCI_ADDR, parent_name)
+
+        # We are also sure that there is no RP for 0000:81:01.0 since there
+        # is no inventory for nvidia-12
+        root_rp_uuid = self._get_provider_uuid_by_name(self.compute1.host)
+        rp_uuids = self._get_all_rp_uuids_in_a_tree(root_rp_uuid)
+        # We only have 2 RPs: the root RP and the pGPU1 RP...
+        self.assertEqual(2, len(rp_uuids))
+        # ... but we double-check by asking for the RP by its expected name
+        expected_pgpu2_rp_name = (self.compute1.host + '_' +
+                                  fakelibvirt.PGPU2_PCI_ADDR)
+        pgpu2_rp = self.placement_api.get(
+            '/resource_providers?name=' + expected_pgpu2_rp_name).body[
+            'resource_providers']
+        # The Placement API returned no RP for this name, as it doesn't exist.
+        self.assertEqual([], pgpu2_rp)
+
+    def test_create_servers_with_specific_type(self):
+        # Regenerate the PCI addresses so both pGPUs now support nvidia-12
+        self.fake_connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+            num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
+            multiple_gpu_types=True)
+        # Restart the compute service to update the resource providers
+        self.compute1 = self.restart_compute_service(self.compute1)
+        pgpu1_rp_uuid = self._get_provider_uuid_by_name(
+            self.compute1.host + '_' + fakelibvirt.PGPU1_PCI_ADDR)
+        pgpu2_rp_uuid = self._get_provider_uuid_by_name(
+            self.compute1.host + '_' + fakelibvirt.PGPU2_PCI_ADDR)
+
+        pgpu1_inventory = self._get_provider_inventory(pgpu1_rp_uuid)
+        self.assertEqual(16, pgpu1_inventory[orc.VGPU]['total'])
+        pgpu2_inventory = self._get_provider_inventory(pgpu2_rp_uuid)
+        self.assertEqual(8, pgpu2_inventory[orc.VGPU]['total'])
+
+        # Attach traits to the pGPU RPs
+        self._set_provider_traits(pgpu1_rp_uuid, ['CUSTOM_NVIDIA_11'])
+        self._set_provider_traits(pgpu2_rp_uuid, ['CUSTOM_NVIDIA_12'])
+
+        expected = {'CUSTOM_NVIDIA_11': fakelibvirt.PGPU1_PCI_ADDR,
+                    'CUSTOM_NVIDIA_12': fakelibvirt.PGPU2_PCI_ADDR}
+
+        for trait in expected.keys():
+            # Add a trait to the flavor
+            extra_spec = {"resources:VGPU": "1",
+                          "trait:%s" % trait: "required"}
+            flavor = self._create_flavor(extra_spec=extra_spec)
+
+            # Use the new flavor for booting
+            server = self._create_server(
+                image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+                flavor_id=flavor, host=self.compute1.host,
+                expected_state='ACTIVE')
+
+            # Get the instance we just created
+            inst = objects.Instance.get_by_uuid(self.context, server['id'])
+            # Get the mdevs that were allocated for this instance; we should
+            # only have one
+            mdevs = self.compute1.driver._get_all_assigned_mediated_devices(
+                inst)
+            self.assertEqual(1, len(mdevs))
+
+            # It's a dict of mdev_uuid/instance_uuid pairs; we only care about
+            # the keys
+            mdevs = list(mdevs.keys())
+            # Now get the detailed information about this single mdev
+            mdev_info = self.compute1.driver._get_mediated_device_information(
+                libvirt_utils.mdev_uuid2name(mdevs[0]))
+
+            # We can be deterministic: since we asked for a specific type,
+            # we know which pGPU we landed on.
+            self.assertEqual(expected[trait], mdev_info['parent'])
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/notification_sample_tests/test_flavor.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/notification_sample_tests/test_flavor.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/notification_sample_tests/test_flavor.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/notification_sample_tests/test_flavor.py 2020-04-10 17:57:57.000000000 +0000
@@ -67,8 +67,7 @@
 
         body = {
             "extra_specs": {
-                "key1": "value1",
-                "key2": "value2"
+                "hw:numa_nodes": "2",
             }
         }
         self.admin_api.api_post(
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1595962.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1595962.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1595962.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1595962.py 2020-04-10 17:57:57.000000000 +0000
@@ -73,6 +73,7 @@
         self.image_id = self.api.get_images()[0]['id']
         self.flavor_id = self.api.get_flavors()[0]['id']
 
+    @mock.patch.object(fakelibvirt.Domain, 'undefine')
     @mock.patch('nova.virt.libvirt.LibvirtDriver.get_volume_connector')
     @mock.patch('nova.virt.libvirt.guest.Guest.get_job_info')
     @mock.patch.object(fakelibvirt.Domain, 'migrateToURI3')
@@ -100,7 +101,8 @@
                           mock_host_get_connection,
                           mock_migrate_to_uri,
                           mock_get_job_info,
-                          mock_get_volume_connector):
+                          mock_get_volume_connector,
+                          mock_undefine):
         """Regression test for bug #1595962.
 
         If the graphical consoles VNC and SPICE are disabled, the
@@ -120,6 +122,12 @@
             version=fakelibvirt.FAKE_LIBVIRT_VERSION,
             hv_version=fakelibvirt.FAKE_QEMU_VERSION)
         mock_host_get_connection.return_value = fake_connection
+        # We invoke cleanup on the source host first, which currently calls
+        # the undefine method. Since the functional test links all compute
+        # services to the same connection, we need to mock the undefine
+        # method to avoid triggering a 'Domain not found' error in the
+        # subsequent post_live_migration_at_destination RPC call.
+ mock_undefine.return_value = True server_attr = dict(name='server1', imageRef=self.image_id, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1764883.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1764883.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1764883.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1764883.py 2020-04-10 17:57:57.000000000 +0000 @@ -60,11 +60,8 @@ self.start_service('scheduler') # Start two computes - self.computes = {} - - self.computes['host1'] = self.start_service('compute', host='host1') - - self.computes['host2'] = self.start_service('compute', host='host2') + self._start_compute('host1') + self._start_compute('host2') self.image_id = self.api.get_images()[0]['id'] self.flavor_id = self.api.get_flavors()[0]['id'] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1823370.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1823370.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1823370.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1823370.py 2020-04-10 17:57:57.000000000 +0000 @@ -49,9 +49,7 @@ """ host_to_cell = {'host1': 'cell1', 'host2': 'cell2', 'host3': 'cell1'} for host, cell in host_to_cell.items(): - svc = self.start_service('compute', host=host, cell=cell) - # Set an attribute so we can access this service later. - setattr(self, host, svc) + self._start_compute(host, cell_name=cell) def test_evacuate_multi_cell(self): # Create a server which should land on host1 since it has the highest @@ -62,7 +60,7 @@ self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host']) # Disable the host on which the server is now running. - self.host1.stop() + self.computes['host1'].stop() self.api.force_down_service('host1', 'nova-compute', forced_down=True) # Now evacuate the server which should send it to host3 since it is diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1830747.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1830747.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1830747.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1830747.py 2020-04-10 17:57:57.000000000 +0000 @@ -68,10 +68,8 @@ self.start_service('scheduler') # Start two computes, one where the server will be created and another # where we'll cold migrate it. - self.computes = {} # keep track of the compute services per host name - for host in ('host1', 'host2'): - compute_service = self.start_service('compute', host=host) - self.computes[host] = compute_service + self._start_compute('host1') + self._start_compute('host2') def test_cold_migrate_reschedule(self): # Create an anti-affinity group for the server. 
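The regression tests above now call the shared `_start_compute` helper instead of starting compute services and tracking `self.computes` by hand. Judging from the per-test definitions removed elsewhere in this patch, the shared helper is roughly equivalent to this sketch (the `cell_name` keyword mirrors the `cell` to `cell_name` rename visible in the test_cache_image changes; the actual shared definition lives outside this diff):

    def _start_compute(self, host, cell_name=None):
        # Start a nova-compute service on the given host (cell1 by
        # default) and record it so helpers such as _run_periodics can
        # iterate over every started compute.
        compute = self.start_service('compute', host=host,
                                     cell_name=cell_name)
        self.computes[host] = compute
        return compute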
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1831771.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1831771.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1831771.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1831771.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,96 @@ +# Copyright 2019, Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +import mock + +from nova.compute import task_states +from nova.compute import vm_states +from nova import objects +from nova import test +from nova.tests.functional import integrated_helpers + + +class TestDelete(integrated_helpers.ProviderUsageBaseTestCase): + compute_driver = 'fake.MediumFakeDriver' + + def test_delete_during_create(self): + compute = self._start_compute('compute1') + + def delete_race(instance): + self.api.delete_server(instance.uuid) + self._wait_for_server_parameter( + {'id': instance.uuid}, + {'OS-EXT-STS:task_state': task_states.DELETING}, + ) + + orig_save = objects.Instance.save + + # an in-memory record of the current instance task state as persisted + # to the db. + db_task_states = collections.defaultdict(str) + active_after_deleting_error = [False] + + # A wrapper round instance.save() which allows us to inject a race + # under specific conditions before calling the original instance.save() + def wrap_save(instance, *wrap_args, **wrap_kwargs): + # We're looking to inject the race before: + # instance.save(expected_task_state=task_states.SPAWNING) + # towards the end of _build_and_run_instance. + # + # At this point the driver has finished creating the instance, but + # we're still on the compute host and still holding the compute + # host instance lock. + # + # This is just a convenient interception point. In order to race + # the delete could have happened at any point prior to this since + # the previous instance.save() + expected_task_state = wrap_kwargs.get('expected_task_state') + if ( + expected_task_state == task_states.SPAWNING + ): + delete_race(instance) + + orig_save(instance, *wrap_args, **wrap_kwargs) + + if ( + db_task_states[instance.uuid] == task_states.DELETING and + instance.vm_state == vm_states.ACTIVE and + instance.task_state is None + ): + # the instance was in the DELETING task state in the db, and we + # overwrote that to set it to ACTIVE with no task state. + # Bug 1848666. 
+ active_after_deleting_error[0] = True + + db_task_states[instance.uuid] = instance.task_state + + with test.nested( + mock.patch('nova.objects.Instance.save', wrap_save), + mock.patch.object(compute.driver, 'spawn'), + mock.patch.object(compute.driver, 'unplug_vifs'), + ) as (_, mock_spawn, mock_unplug_vifs): + # the compute manager doesn't set the ERROR state in cleanup since + # it might race with delete, therefore we'll be left in BUILDING + server_req = self._build_server(networks='none') + created_server = self.api.post_server({'server': server_req}) + self._wait_until_deleted(created_server) + + # assert that we spawned the instance, and unplugged vifs on + # cleanup + mock_spawn.assert_called() + mock_unplug_vifs.assert_called() + # FIXME(mdbooth): Bug 1848666 + self.assertTrue(active_after_deleting_error[0]) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1862633.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1862633.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/regressions/test_bug_1862633.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/regressions/test_bug_1862633.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,89 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import mock +from neutronclient.common import exceptions as neutron_exception + +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit import fake_notifier +from nova.tests.unit.image import fake as fake_image + + +class UnshelveNeutronErrorTest( + test.TestCase, integrated_helpers.InstanceHelperMixin): + def setUp(self): + super(UnshelveNeutronErrorTest, self).setUp() + # Start standard fixtures. + placement = func_fixtures.PlacementFixture() + self.useFixture(placement) + self.placement_api = placement.api + self.neutron = nova_fixtures.NeutronFixture(self) + self.useFixture(self.neutron) + fake_image.stub_out_image_service(self) + self.addCleanup(fake_image.FakeImageService_reset) + # Start nova services. 
+        self.api = self.useFixture(nova_fixtures.OSAPIFixture(
+            api_version='v2.1')).admin_api
+        self.api.microversion = 'latest'
+        fake_notifier.stub_notifier(self)
+        self.addCleanup(fake_notifier.reset)
+
+        self.start_service('conductor')
+        self.start_service('scheduler')
+        self.start_service('compute', host='host1')
+        self.start_service('compute', host='host2')
+
+    def test_unshelve_offloaded_fails_due_to_neutron(self):
+        server = self._create_server(
+            networks=[{'port': self.neutron.port_1['id']}], az='nova:host1')
+
+        # with the default config, shelve immediately offloads as well
+        req = {
+            'shelve': {}
+        }
+        self.api.post_server_action(server['id'], req)
+        self._wait_for_server_parameter(
+            server, {'status': 'SHELVED_OFFLOADED',
+                     'OS-EXT-SRV-ATTR:host': None})
+        allocations = self.placement_api.get(
+            '/allocations/%s' % server['id']).body['allocations']
+        self.assertEqual(0, len(allocations))
+
+        # disable the original host of the instance to force a port update
+        # during unshelve
+        source_service_id = self.api.get_services(
+            host='host1', binary='nova-compute')[0]['id']
+        self.api.put_service(source_service_id, {"status": "disabled"})
+
+        # Simulate that the port update fails during unshelve because
+        # neutron is unavailable
+        with mock.patch(
+                'nova.tests.fixtures.NeutronFixture.'
+                'update_port') as mock_update_port:
+            mock_update_port.side_effect = neutron_exception.ConnectionFailed(
+                reason='test')
+            req = {'unshelve': None}
+            self.api.post_server_action(server['id'], req)
+            fake_notifier.wait_for_versioned_notifications(
+                'instance.unshelve.start')
+            self._wait_for_server_parameter(
+                server,
+                {'status': 'SHELVED_OFFLOADED',
+                 'OS-EXT-STS:task_state': None,
+                 'OS-EXT-SRV-ATTR:host': None})
+
+        # As the instance went back to the offloaded state, we expect no
+        # allocations
+        allocations = self.placement_api.get(
+            '/allocations/%s' % server['id']).body['allocations']
+        self.assertEqual(0, len(allocations))
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_aggregates.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_aggregates.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_aggregates.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_aggregates.py 2020-04-10 17:57:57.000000000 +0000
@@ -178,17 +178,6 @@
         # Aggregate with neither host
         self._create_aggregate('no-hosts')
 
-    def _start_compute(self, host):
-        """Start a nova compute service on the given host
-
-        :param host: the name of the host that will be associated to the
-            compute service.
-        :return: the nova compute service object
-        """
-        compute = self.start_service('compute', host=host)
-        self.computes[host] = compute
-        return compute
-
     def _create_aggregate(self, name):
         agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
         self.aggregates[name] = agg
@@ -840,9 +829,6 @@
 class TestAggregateMultiTenancyIsolationFilter(
         test.TestCase, integrated_helpers.InstanceHelperMixin):
 
-    def _start_compute(self, host):
-        self.start_service('compute', host=host)
-
     def setUp(self):
         super(TestAggregateMultiTenancyIsolationFilter, self).setUp()
         # Stub out glance, placement and neutron.
@@ -864,7 +850,7 @@ self.flags(enabled_filters=enabled_filters, group='filter_scheduler') self.start_service('scheduler') for host in ('host1', 'host2'): - self._start_compute(host) + self.start_service('compute', host=host) def test_aggregate_multitenancy_isolation_filter(self): """Tests common scenarios with the AggregateMultiTenancyIsolation diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_boot_from_volume.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_boot_from_volume.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_boot_from_volume.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_boot_from_volume.py 2020-04-10 17:57:57.000000000 +0000 @@ -194,7 +194,7 @@ self.useFixture(nova_fixtures.NoopConductorFixture()) # NOTE(gibi): Do not use 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' image # as that is defined with a separate kernel image, leading to one extra - # call to nova.image.api.API.get from compute.api + # call to nova.image.glance.API.get from compute.api # _handle_kernel_and_ramdisk() image1 = 'a2459075-d96c-40d5-893e-577ff92e721c' image2 = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' @@ -217,7 +217,7 @@ # Wrap the image service get method to check how many times it was # called. - with mock.patch('nova.image.api.API.get', + with mock.patch('nova.image.glance.API.get', wraps=self.image_service.show) as mock_image_get: self.api.post_server({'server': server}) # Assert that there was caching of the GET /v2/images/{image_id} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_compute_mgr.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_compute_mgr.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_compute_mgr.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_compute_mgr.py 2020-04-10 17:57:57.000000000 +0000 @@ -10,12 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. +from __future__ import absolute_import + +import fixtures import mock from nova import context +from nova.network import model as network_model from nova import objects from nova import test -from nova.tests import fixtures +from nova.tests import fixtures as nova_fixtures from nova.tests.unit import cast_as_call from nova.tests.unit import fake_network from nova.tests.unit import fake_server_actions @@ -24,7 +28,7 @@ class ComputeManagerTestCase(test.TestCase): def setUp(self): super(ComputeManagerTestCase, self).setUp() - self.useFixture(fixtures.SpawnIsSynchronousFixture()) + self.useFixture(nova_fixtures.SpawnIsSynchronousFixture()) self.useFixture(cast_as_call.CastAsCall(self)) self.conductor = self.start_service('conductor') self.start_service('scheduler') @@ -32,6 +36,10 @@ self.context = context.RequestContext('fake', 'fake') fake_server_actions.stub_out_action_events(self) fake_network.set_stub_network_methods(self) + self.useFixture(fixtures.MockPatch( + 'nova.network.neutron.API.get_instance_nw_info', + return_value=network_model.NetworkInfo(), + )) def test_instance_fault_message_no_traceback_with_retry(self): """This test simulates a spawn failure on the last retry attempt. 
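The test_boot_from_volume change above keeps the same call-counting pattern while following the rename of `nova.image.api` to `nova.image.glance`. The general technique, sketched standalone (the expected count of 1 is illustrative only; the real test asserts whatever its image caching behaviour implies):

    import mock

    # wraps= forwards every call to the real show() while recording it, so
    # the test can count how many GET /v2/images/{image_id} calls actually
    # reached the image service.
    with mock.patch('nova.image.glance.API.get',
                    wraps=self.image_service.show) as mock_image_get:
        self.api.post_server({'server': server})

    self.assertEqual(1, mock_image_get.call_count)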
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_cross_cell_migrate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_cross_cell_migrate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_cross_cell_migrate.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_cross_cell_migrate.py 2020-04-10 17:57:57.000000000 +0000 @@ -869,8 +869,11 @@ 'disk': old_flavor['disk'] } self.admin_api.post_flavor({'flavor': new_flavor}) - self.admin_api.post_extra_spec(new_flavor['id'], - {'extra_specs': {'foo': 'bar'}}) + # TODO(stephenfin): What do I do with this??? + self.admin_api.post_extra_spec( + new_flavor['id'], + {'extra_specs': {'aggregate_instance_extra_specs:foo': 'bar'}} + ) # Enable AggregateInstanceExtraSpecsFilter and restart the scheduler. enabled_filters = CONF.filter_scheduler.enabled_filters if 'AggregateInstanceExtraSpecsFilter' not in enabled_filters: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_flavor_extraspecs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_flavor_extraspecs.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_flavor_extraspecs.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_flavor_extraspecs.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,146 @@ +# Copyright 2020, Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for os-extra_specs API.""" + +from nova.tests.functional.api import client as api_client +from nova.tests.functional import integrated_helpers + + +class FlavorExtraSpecsTest(integrated_helpers._IntegratedTestBase): + api_major_version = 'v2' + + def setUp(self): + super(FlavorExtraSpecsTest, self).setUp() + self.flavor_id = self._create_flavor() + + def test_create(self): + """Test creating flavor extra specs with valid specs.""" + body = { + 'extra_specs': {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}, + } + self.admin_api.post_extra_spec(self.flavor_id, body) + self.assertEqual( + body['extra_specs'], self.admin_api.get_extra_specs(self.flavor_id) + ) + + def test_create_invalid_spec(self): + """Test creating flavor extra specs with invalid specs. + + This should pass because validation is not enabled in this API + microversion. + """ + body = {'extra_specs': {'hw:numa_nodes': 'foo', 'foo': 'bar'}} + self.admin_api.post_extra_spec(self.flavor_id, body) + self.assertEqual( + body['extra_specs'], self.admin_api.get_extra_specs(self.flavor_id) + ) + + def test_update(self): + """Test updating extra specs with valid specs.""" + spec_id = 'hw:numa_nodes' + body = {'hw:numa_nodes': '1'} + self.admin_api.put_extra_spec(self.flavor_id, spec_id, body) + self.assertEqual( + body, self.admin_api.get_extra_spec(self.flavor_id, spec_id) + ) + + def test_update_invalid_spec(self): + """Test updating extra specs with invalid specs. 
+ + This should pass because validation is not enabled in this API + microversion. + """ + spec_id = 'hw:foo' + body = {'hw:foo': 'bar'} + self.admin_api.put_extra_spec(self.flavor_id, spec_id, body) + self.assertEqual( + body, self.admin_api.get_extra_spec(self.flavor_id, spec_id) + ) + + +class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest): + api_major_version = 'v2.1' + microversion = '2.86' + + def test_create_invalid_spec(self): + """Test creating extra specs with invalid specs.""" + body = {'extra_specs': {'hw:numa_nodes': 'foo', 'foo': 'bar'}} + + # this should fail because 'foo' is not a suitable value for + # 'hw:numa_nodes' + exc = self.assertRaises( + api_client.OpenStackApiException, + self.admin_api.post_extra_spec, + self.flavor_id, body, + ) + self.assertEqual(400, exc.response.status_code) + + # ...and the extra specs should not be saved + self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id)) + + def test_create_unknown_spec(self): + """Test creating extra specs with unknown specs.""" + body = {'extra_specs': {'hw:numa_nodes': '1', 'foo': 'bar'}} + + # this should pass because we don't recognize the extra spec but it's + # not in a namespace we care about + self.admin_api.post_extra_spec(self.flavor_id, body) + + body = {'extra_specs': {'hw:numa_nodes': '1', 'hw:foo': 'bar'}} + + # ...but this should fail because we do recognize the namespace + exc = self.assertRaises( + api_client.OpenStackApiException, + self.admin_api.post_extra_spec, + self.flavor_id, body, + ) + self.assertEqual(400, exc.response.status_code) + + def test_update_invalid_spec(self): + """Test updating extra specs with invalid specs.""" + spec_id = 'hw:foo' + body = {'hw:foo': 'bar'} + + # this should fail because we don't recognize the extra spec + exc = self.assertRaises( + api_client.OpenStackApiException, + self.admin_api.put_extra_spec, + self.flavor_id, spec_id, body, + ) + self.assertEqual(400, exc.response.status_code) + + spec_id = 'hw:numa_nodes' + body = {'hw:numa_nodes': 'foo'} + + # ...while this should fail because the value is not valid + exc = self.assertRaises( + api_client.OpenStackApiException, + self.admin_api.put_extra_spec, + self.flavor_id, spec_id, body, + ) + self.assertEqual(400, exc.response.status_code) + + # ...and neither extra spec should be saved + self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id)) + + def test_update_unknown_spec(self): + """Test updating extra specs with unknown specs.""" + spec_id = 'foo:bar' + body = {'foo:bar': 'baz'} + + # this should pass because we don't recognize the extra spec but it's + # not in a namespace we care about + self.admin_api.put_extra_spec(self.flavor_id, spec_id, body) + self.assertEqual(body, self.admin_api.get_extra_specs(self.flavor_id)) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_instance_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_instance_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_instance_actions.py 2020-04-10 17:57:57.000000000 +0000 @@ -12,8 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. 
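The os-extra_specs tests above hinge on the namespace rule that microversion 2.86 introduces: a spec whose key falls in a recognized namespace (such as ``hw:``) must be a known spec with a well-formed value, while keys outside any recognized namespace pass through untouched. A rough sketch of that rule, using hand-rolled validators rather than nova's actual schema machinery:

.. code::

   # Illustrative namespace-based validation, not nova's implementation.
   KNOWN = {
       'hw:numa_nodes': lambda v: v.isdigit() and int(v) >= 1,
       'hw:cpu_policy': lambda v: v in ('shared', 'dedicated'),
   }
   KNOWN_NAMESPACES = ('hw:',)


   def validate(extra_specs):
       for key, value in extra_specs.items():
           if not key.startswith(KNOWN_NAMESPACES):
               continue  # e.g. 'foo:bar' is accepted, as in the tests
           if key not in KNOWN:
               raise ValueError('unknown extra spec %s' % key)  # HTTP 400
           if not KNOWN[key](value):
               raise ValueError('invalid value for %s: %s' % (key, value))


   validate({'hw:numa_nodes': '1', 'foo': 'bar'})  # passes
   try:
       validate({'hw:numa_nodes': 'foo'})  # rejected: not a positive integer
   except ValueError as exc:
       print(exc)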
+import mock +from oslo_policy import policy as oslo_policy + +from nova import exception +from nova import policy +from nova import test +from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers from nova.tests.functional import test_servers +from nova.tests.unit.image import fake as fake_image +from nova.tests.unit import policy_fixture class InstanceActionsTestV2(test_servers.ServersTestBase): @@ -49,3 +60,214 @@ actions = self.api.get_instance_actions(server['id']) self.assertEqual('delete', actions[0]['action']) self.assertEqual('create', actions[1]['action']) + + +class HypervisorError(Exception): + """This is just used to make sure the exception type is in the events.""" + pass + + +class InstanceActionEventFaultsTestCase( + test.TestCase, integrated_helpers.InstanceHelperMixin): + """Tests for the instance action event details reporting from the API""" + + def setUp(self): + super(InstanceActionEventFaultsTestCase, self).setUp() + # Setup the standard fixtures. + fake_image.stub_out_image_service(self) + self.addCleanup(fake_image.FakeImageService_reset) + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(func_fixtures.PlacementFixture()) + self.useFixture(policy_fixture.RealPolicyFixture()) + + # Start the compute services. + self.start_service('conductor') + self.start_service('scheduler') + self.compute = self.start_service('compute') + api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')) + self.api = api_fixture.api + self.admin_api = api_fixture.admin_api + + def _set_policy_rules(self, overwrite=True): + rules = {'os_compute_api:os-instance-actions:show': '', + 'os_compute_api:os-instance-actions:events:details': + 'project_id:%(project_id)s'} + policy.set_rules(oslo_policy.Rules.from_dict(rules), + overwrite=overwrite) + + def test_instance_action_event_details_non_nova_exception(self): + """Creates a server using the non-admin user, then reboot it which + will generate a non-NovaException fault and put the instance into + ERROR status. Then checks that fault details are visible. + """ + + # Create the server with the non-admin user. + server = self._build_server( + networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}]) + server = self.api.post_server({'server': server}) + server = self._wait_for_state_change(server, 'ACTIVE') + + # Stop the server before rebooting it so that after the driver.reboot + # method raises an exception, the fake driver does not report the + # instance power state as running - that will make the compute manager + # set the instance vm_state to error. + self.api.post_server_action(server['id'], {'os-stop': None}) + server = self._wait_for_state_change(server, 'SHUTOFF') + + # Stub out the compute driver reboot method to raise a non-nova + # exception to simulate some error from the underlying hypervisor + # which in this case we are going to say has sensitive content. + error_msg = 'sensitive info' + with mock.patch.object( + self.compute.manager.driver, 'reboot', + side_effect=HypervisorError(error_msg)) as mock_reboot: + reboot_request = {'reboot': {'type': 'HARD'}} + self.api.post_server_action(server['id'], reboot_request) + # In this case we wait for the status to change to ERROR using + # the non-admin user so we can assert the fault details. 
We also + # wait for the task_state to be None since the wrap_instance_fault + # decorator runs before the reverts_task_state decorator so we will + # be sure the fault is set on the server. + server = self._wait_for_server_parameter( + server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None}, + api=self.api) + mock_reboot.assert_called_once() + + self._set_policy_rules(overwrite=False) + + server_id = server['id'] + # Calls GET on the server actions and verifies that the reboot + # action expected in the response. + response = self.api.api_get('/servers/%s/os-instance-actions' % + server_id) + server_actions = response.body['instanceActions'] + for actions in server_actions: + if actions['action'] == 'reboot': + reboot_request_id = actions['request_id'] + # non admin shows instance actions details and verifies the 'details' + # in the action events via 'request_id', since microversion 2.51 that + # we can show events, but in microversion 2.84 that we can show + # 'details' for non-admin. + self.api.microversion = '2.84' + action_events_response = self.api.api_get( + '/servers/%s/os-instance-actions/%s' % (server_id, + reboot_request_id)) + reboot_action = action_events_response.body['instanceAction'] + # Since reboot action failed, the 'message' property in reboot action + # should be 'Error', otherwise it's None. + self.assertEqual('Error', reboot_action['message']) + reboot_action_events = reboot_action['events'] + # The instance action events from the non-admin user API response + # should not have 'traceback' in it. + self.assertNotIn('traceback', reboot_action_events[0]) + # And the sensitive details from the non-nova exception should not be + # in the details. + self.assertIn('details', reboot_action_events[0]) + self.assertNotIn(error_msg, reboot_action_events[0]['details']) + # The exception type class name should be in the details. + self.assertIn('HypervisorError', reboot_action_events[0]['details']) + + # Get the server fault details for the admin user. + self.admin_api.microversion = '2.84' + action_events_response = self.admin_api.api_get( + '/servers/%s/os-instance-actions/%s' % (server_id, + reboot_request_id)) + reboot_action = action_events_response.body['instanceAction'] + self.assertEqual('Error', reboot_action['message']) + reboot_action_events = reboot_action['events'] + # The admin can see the fault details which includes the traceback, + # and make sure the traceback is there by looking for part of it. + self.assertIn('traceback', reboot_action_events[0]) + self.assertIn('in reboot_instance', + reboot_action_events[0]['traceback']) + # The exception type class name should be in the details for the admin + # user as well since the fault handling code cannot distinguish who + # is going to see the message so it only sets class name. + self.assertIn('HypervisorError', reboot_action_events[0]['details']) + + def test_instance_action_event_details_with_nova_exception(self): + """Creates a server using the non-admin user, then reboot it which + will generate a nova exception fault and put the instance into + ERROR status. Then checks that fault details are visible. + """ + + # Create the server with the non-admin user. 
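Both of these reboot-fault tests assert the same visibility rule behind microversion 2.84: any user who can read the action sees the exception class name in ``details``, curated nova-exception messages are included as well, and the ``traceback`` is admin-only. A hedged sketch of that rule as standalone logic (an illustration, not nova's fault-formatting code):

.. code::

   import traceback


   def format_event(exc, is_admin, message_is_safe):
       # The exception class name alone is always safe to expose.
       details = type(exc).__name__
       if message_is_safe:
           # e.g. NovaException messages are operator-curated.
           details += ': %s' % exc
       event = {'details': details}
       if is_admin:
           event['traceback'] = ''.join(traceback.format_exception(
               type(exc), exc, exc.__traceback__))
       return event


   try:
       raise RuntimeError('sensitive info')
   except RuntimeError as exc:
       event = format_event(exc, is_admin=False, message_is_safe=False)
   assert event == {'details': 'RuntimeError'}  # no message, no traceback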
+ server = self._build_server( + networks=[{'port': nova_fixtures.NeutronFixture.port_1['id']}]) + server = self.api.post_server({'server': server}) + server = self._wait_for_state_change(server, 'ACTIVE') + + # Stop the server before rebooting it so that after the driver.reboot + # method raises an exception, the fake driver does not report the + # instance power state as running - that will make the compute manager + # set the instance vm_state to error. + self.api.post_server_action(server['id'], {'os-stop': None}) + server = self._wait_for_state_change(server, 'SHUTOFF') + + # Stub out the compute driver reboot method to raise a nova + # exception 'InstanceRebootFailure' to simulate some error. + exc_reason = 'reboot failure' + with mock.patch.object( + self.compute.manager.driver, 'reboot', + side_effect=exception.InstanceRebootFailure(reason=exc_reason) + ) as mock_reboot: + reboot_request = {'reboot': {'type': 'HARD'}} + self.api.post_server_action(server['id'], reboot_request) + # In this case we wait for the status to change to ERROR using + # the non-admin user so we can assert the fault details. We also + # wait for the task_state to be None since the wrap_instance_fault + # decorator runs before the reverts_task_state decorator so we will + # be sure the fault is set on the server. + server = self._wait_for_server_parameter( + server, {'status': 'ERROR', 'OS-EXT-STS:task_state': None}, + api=self.api) + mock_reboot.assert_called_once() + + self._set_policy_rules(overwrite=False) + + server_id = server['id'] + # Calls GET on the server actions and verifies that the reboot + # action expected in the response. + response = self.api.api_get('/servers/%s/os-instance-actions' % + server_id) + server_actions = response.body['instanceActions'] + for actions in server_actions: + if actions['action'] == 'reboot': + reboot_request_id = actions['request_id'] + + # non admin shows instance actions details and verifies the 'details' + # in the action events via 'request_id', since microversion 2.51 that + # we can show events, but in microversion 2.84 that we can show + # 'details' for non-admin. + self.api.microversion = '2.84' + action_events_response = self.api.api_get( + '/servers/%s/os-instance-actions/%s' % (server_id, + reboot_request_id)) + reboot_action = action_events_response.body['instanceAction'] + # Since reboot action failed, the 'message' property in reboot action + # should be 'Error', otherwise it's None. + self.assertEqual('Error', reboot_action['message']) + reboot_action_events = reboot_action['events'] + # The instance action events from the non-admin user API response + # should not have 'traceback' in it. + self.assertNotIn('traceback', reboot_action_events[0]) + # The nova exception format message should be in the details. + self.assertIn('details', reboot_action_events[0]) + self.assertIn(exc_reason, reboot_action_events[0]['details']) + + # Get the server fault details for the admin user. + self.admin_api.microversion = '2.84' + action_events_response = self.admin_api.api_get( + '/servers/%s/os-instance-actions/%s' % (server_id, + reboot_request_id)) + reboot_action = action_events_response.body['instanceAction'] + self.assertEqual('Error', reboot_action['message']) + reboot_action_events = reboot_action['events'] + # The admin can see the fault details which includes the traceback, + # and make sure the traceback is there by looking for part of it. 
+ self.assertIn('traceback', reboot_action_events[0]) + self.assertIn('in reboot_instance', + reboot_action_events[0]['traceback']) + # The nova exception format message should be in the details. + self.assertIn(exc_reason, reboot_action_events[0]['details']) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_legacy_v2_compatible_wrapper.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_legacy_v2_compatible_wrapper.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_legacy_v2_compatible_wrapper.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_legacy_v2_compatible_wrapper.py 2020-04-10 17:57:57.000000000 +0000 @@ -14,7 +14,6 @@ # under the License. from nova.api import openstack -from nova.api.openstack import compute from nova.api.openstack import wsgi from nova.tests.functional.api import client from nova.tests.functional import test_servers @@ -25,8 +24,7 @@ def setUp(self): super(LegacyV2CompatibleTestBase, self).setUp() - self._check_api_endpoint('/v2', [compute.APIRouterV21, - openstack.LegacyV2CompatibleWrapper]) + self._check_api_endpoint('/v2', [openstack.LegacyV2CompatibleWrapper]) def test_request_with_microversion_headers(self): self.api.microversion = '2.100' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_metadata.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_metadata.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_metadata.py 2020-04-10 17:57:57.000000000 +0000 @@ -15,15 +15,18 @@ from __future__ import absolute_import import fixtures +import jsonschema +import os import requests from oslo_serialization import jsonutils from oslo_utils import uuidutils -from nova import context -from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit.image import fake as fake_image class fake_result(object): @@ -45,45 +48,38 @@ return real_request(method, url, **kwargs) -class MetadataTest(test.TestCase): +class MetadataTest(test.TestCase, integrated_helpers.InstanceHelperMixin): def setUp(self): super(MetadataTest, self).setUp() - self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer()) - self.md_url = self.api_fixture.md_url - - ctxt = context.RequestContext('fake', 'fake') - flavor = objects.Flavor( - id=1, name='flavor1', memory_mb=256, vcpus=1, root_gb=1, - ephemeral_gb=1, flavorid='1', swap=0, rxtx_factor=1.0, - vcpu_weight=1, disabled=False, is_public=True, extra_specs={}, - projects=[]) - instance = objects.Instance(ctxt, flavor=flavor, vcpus=1, - memory_mb=256, root_gb=0, ephemeral_gb=0, - project_id='fake', hostname='test') - instance.create() - # The NeutronFixture is needed to provide the fixed IP for the metadata - # service + fake_image.stub_out_image_service(self) + self.addCleanup(fake_image.FakeImageService_reset) self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(func_fixtures.PlacementFixture()) + self.start_service('conductor') + self.start_service('scheduler') + self.api = self.useFixture( + nova_fixtures.OSAPIFixture(api_version='v2.1')).api + self.start_service('compute') + + # create a server for the tests + server = 
self._build_server(name='test') + server = self.api.post_server({'server': server}) + self.server = self._wait_for_state_change(server, 'ACTIVE') + + self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer()) + self.md_url = self.api_fixture.md_url + # make sure that the metadata service returns information about the + # server we created above def fake_get_fixed_ip_by_address(self, ctxt, address): - return {'instance_uuid': instance.uuid} + return {'instance_uuid': server['id']} self.useFixture( fixtures.MonkeyPatch( 'nova.network.neutron.API.get_fixed_ip_by_address', fake_get_fixed_ip_by_address)) - def fake_get_ec2_ip_info(nw_info): - return {'fixed_ips': ['127.0.0.2'], - 'fixed_ip6s': [], - 'floating_ips': []} - - self.useFixture( - fixtures.MonkeyPatch( - 'nova.virt.netutils.get_ec2_ip_info', - fake_get_ec2_ip_info)) - def test_lookup_metadata_root_url(self): res = requests.request('GET', self.md_url, timeout=5) self.assertEqual(200, res.status_code) @@ -174,7 +170,25 @@ self.assertIn('instance-id', j['testing']) self.assertTrue(uuidutils.is_uuid_like(j['testing']['instance-id'])) self.assertIn('hostname', j['testing']) - self.assertEqual('fake', j['testing']['project-id']) + self.assertEqual(self.server['tenant_id'], j['testing']['project-id']) self.assertIn('metadata', j['testing']) self.assertIn('image-id', j['testing']) self.assertIn('user-data', j['testing']) + + def test_network_data_matches_schema(self): + self.useFixture(fixtures.MonkeyPatch( + 'keystoneauth1.session.Session.request', fake_request)) + + url = '%sopenstack/latest/network_data.json' % self.md_url + + res = requests.request('GET', url, timeout=5) + self.assertEqual(200, res.status_code) + + # load the jsonschema for network_data + schema_file = os.path.normpath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "../../../doc/api_schemas/network_data.json")) + with open(schema_file, 'rb') as f: + schema = jsonutils.load(f) + + jsonschema.validate(res.json(), schema) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_nova_manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_nova_manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_nova_manage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_nova_manage.py 2020-04-10 17:57:57.000000000 +0000 @@ -746,6 +746,45 @@ self.assertIn('Unable to find cell for instance %s, is it mapped?' % server['id'], output) + def test_heal_allocations_specific_cell(self): + """Tests the case that a specific cell is processed and only that + cell even though there are two which require processing. + """ + # Create one that we won't process. + server1, rp_uuid1 = self._boot_and_assert_no_allocations( + self.flavor, 'cell1') + # Create another that we will process specifically. + server2, rp_uuid2 = self._boot_and_assert_no_allocations( + self.flavor, 'cell2') + + # Get Cell_id of cell2 + cell2_id = self.cell_mappings['cell2'].uuid + + # First do a dry run to make sure two instances need processing. + result = self.cli.heal_allocations( + max_count=2, verbose=True, dry_run=True) + # Nothing changed so the return code should be 4. + self.assertEqual(4, result, self.output.getvalue()) + output = self.output.getvalue() + self.assertIn('Found 1 candidate instances', output) + + # Now run with our specific cell and it should be the only one + # processed. 
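A side note on the return codes asserted throughout these nova-manage tests: ``heal_allocations`` returns 0 when instances were processed and 4 when nothing needed healing, while ``audit`` returns 3 when orphaned allocations are merely reported and 4 once they are deleted. A hypothetical wrapper (not part of nova) that shells out and branches on those codes:

.. code::

   import subprocess


   def audit_and_clean():
       found = subprocess.call(
           ['nova-manage', 'placement', 'audit', '--verbose'])
       if found == 3:
           # Orphans were reported; run again with --delete to remove them.
           return subprocess.call(
               ['nova-manage', 'placement', 'audit', '--delete', '--verbose'])
       return found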
+ result = self.cli.heal_allocations(verbose=True, + cell_uuid=cell2_id) + output = self.output.getvalue() + self.assertEqual(0, result, self.output.getvalue()) + self.assertIn('Found 1 candidate instances', output) + self.assertIn('Processed 1 instances.', output) + + # Now run it again on the specific cell and it should be done. + result = self.cli.heal_allocations( + verbose=True, cell_uuid=cell2_id) + output = self.output.getvalue() + self.assertEqual(4, result, self.output.getvalue()) + self.assertIn('Found 1 candidate instances', output) + self.assertIn('Processed 0 instances.', output) + class TestNovaManagePlacementHealPortAllocations( test_servers.PortResourceRequestBasedSchedulingTestBase): @@ -1393,6 +1432,223 @@ '%s should be in two provider aggregates' % host) +class TestNovaManagePlacementAudit( + integrated_helpers.ProviderUsageBaseTestCase): + """Functional tests for nova-manage placement audit""" + + # Let's just use a simple fake driver + compute_driver = 'fake.SmallFakeDriver' + + def setUp(self): + super(TestNovaManagePlacementAudit, self).setUp() + self.cli = manage.PlacementCommands() + # Make sure we have two computes for migrations + self.compute1 = self._start_compute('host1') + self.compute2 = self._start_compute('host2') + + # Make sure we have two hypervisors reported in the API. + hypervisors = self.admin_api.api_get( + '/os-hypervisors').body['hypervisors'] + self.assertEqual(2, len(hypervisors)) + + self.output = StringIO() + self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output)) + + self.flavor = self.api.get_flavors()[0] + + def test_audit_orphaned_allocation_from_instance_delete(self): + """Creates a server and deletes it by retaining its allocations so the + audit command can find it. + """ + target_hostname = self.compute1.host + rp_uuid = self._get_provider_uuid_by_host(target_hostname) + + server = self._boot_and_check_allocations(self.flavor, target_hostname) + + # let's mock the allocation delete call to placement + with mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'delete_allocation_for_instance'): + self.api.delete_server(server['id']) + self._wait_until_deleted(server) + + # make sure the allocation is still around + self.assertFlavorMatchesUsage(rp_uuid, self.flavor) + + # Don't ask to delete the orphaned allocations, just audit them + ret = self.cli.audit(verbose=True) + # The allocation should still exist + self.assertFlavorMatchesUsage(rp_uuid, self.flavor) + + output = self.output.getvalue() + self.assertIn( + 'Allocations for consumer UUID %(consumer_uuid)s on ' + 'Resource Provider %(rp_uuid)s can be deleted' % + {'consumer_uuid': server['id'], + 'rp_uuid': rp_uuid}, + output) + self.assertIn('Processed 1 allocation.', output) + # Here we don't want to delete the found allocations + self.assertNotIn( + 'Deleted allocations for consumer UUID %s' % server['id'], output) + self.assertEqual(3, ret) + + # Now ask the audit command to delete the rogue allocations. 
+        ret = self.cli.audit(delete=True, verbose=True)
+
+        # The allocations are now deleted
+        self.assertRequestMatchesUsage(
+            {'VCPU': 0, 'MEMORY_MB': 0, 'DISK_GB': 0}, rp_uuid)
+
+        output = self.output.getvalue()
+        self.assertIn(
+            'Deleted allocations for consumer UUID %s' % server['id'], output)
+        self.assertIn('Processed 1 allocation.', output)
+        self.assertEqual(4, ret)
+
+    def test_audit_orphaned_allocations_from_confirmed_resize(self):
+        """Resize a server but when confirming it, leave the migration
+        allocation there so the audit command can find it.
+        """
+        source_hostname = self.compute1.host
+        dest_hostname = self.compute2.host
+
+        source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+        dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
+
+        old_flavor = self.flavor
+        new_flavor = self.api.get_flavors()[1]
+        # we want to make sure we resize to compute2
+        self.flags(allow_resize_to_same_host=False)
+
+        server = self._boot_and_check_allocations(self.flavor, source_hostname)
+
+        # Do a resize
+        post = {
+            'resize': {
+                'flavorRef': new_flavor['id']
+            }
+        }
+        self._move_and_check_allocations(
+            server, request=post, old_flavor=old_flavor,
+            new_flavor=new_flavor, source_rp_uuid=source_rp_uuid,
+            dest_rp_uuid=dest_rp_uuid)
+
+        # Retain the migration UUID record for later usage
+        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
+
+        # Confirm the resize, which should in theory delete the source
+        # allocations, but mock out the allocation delete for the source
+        post = {'confirmResize': None}
+        with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                        'delete_allocation_for_instance'):
+            self.api.post_server_action(
+                server['id'], post, check_response_status=[204])
+            self._wait_for_state_change(server, 'ACTIVE')
+
+        # The target host usage should be according to the new flavor...
+        self.assertFlavorMatchesUsage(dest_rp_uuid, new_flavor)
+        # ...but we should still see allocations for the source compute
+        self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
+
+        # Now, run the audit command that will find this orphaned allocation
+        ret = self.cli.audit(verbose=True)
+        output = self.output.getvalue()
+        self.assertIn(
+            'Allocations for consumer UUID %(consumer_uuid)s on '
+            'Resource Provider %(rp_uuid)s can be deleted' %
+            {'consumer_uuid': migration_uuid, 'rp_uuid': source_rp_uuid},
+            output)
+        self.assertIn('Processed 1 allocation.', output)
+        self.assertEqual(3, ret)
+
+        # Now we want to delete the orphaned allocation that is a duplicate
+        ret = self.cli.audit(delete=True, verbose=True)
+
+        # There should no longer be usage for the source host since the
+        # allocation disappeared
+        self.assertRequestMatchesUsage({'VCPU': 0,
+                                        'MEMORY_MB': 0,
+                                        'DISK_GB': 0}, source_rp_uuid)
+
+        output = self.output.getvalue()
+        self.assertIn(
+            'Deleted allocations for consumer UUID %(consumer_uuid)s on '
+            'Resource Provider %(rp_uuid)s' %
+            {'consumer_uuid': migration_uuid,
+             'rp_uuid': source_rp_uuid},
+            output)
+        self.assertIn('Processed 1 allocation.', output)
+        self.assertEqual(4, ret)
+
+    # TODO(sbauza): Remove this test once bug #1829479 is fixed
+    def test_audit_orphaned_allocations_from_deleted_compute_evacuate(self):
+        """Evacuate a server and then delete the source node so that it will
+        leave a source allocation that the audit command will find.
+ """ + + source_hostname = self.compute1.host + dest_hostname = self.compute2.host + + source_rp_uuid = self._get_provider_uuid_by_host(source_hostname) + dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname) + + server = self._boot_and_check_allocations(self.flavor, source_hostname) + + # Stop the service and fake it down + self.compute1.stop() + source_service_id = self.admin_api.get_services( + host=source_hostname, binary='nova-compute')[0]['id'] + self.admin_api.put_service(source_service_id, {'forced_down': 'true'}) + + # evacuate the instance to the target + post = {'evacuate': {"host": dest_hostname}} + self.admin_api.post_server_action(server['id'], post) + self._wait_for_server_parameter(server, + {'OS-EXT-SRV-ATTR:host': dest_hostname, + 'status': 'ACTIVE'}) + + # Now the instance is gone, we can delete the compute service + self.admin_api.api_delete('/os-services/%s' % source_service_id) + + # Since the compute is deleted, we should have in theory a single + # allocation against the destination resource provider, but evacuated + # instances are not having their allocations deleted. See bug #1829479. + # We have two allocations for the same consumer, source and destination + self._check_allocation_during_evacuate( + self.flavor, server['id'], source_rp_uuid, dest_rp_uuid) + + # Now, run the audit command that will find this orphaned allocation + ret = self.cli.audit(verbose=True) + output = self.output.getvalue() + self.assertIn( + 'Allocations for consumer UUID %(consumer_uuid)s on ' + 'Resource Provider %(rp_uuid)s can be deleted' % + {'consumer_uuid': server['id'], + 'rp_uuid': source_rp_uuid}, + output) + self.assertIn('Processed 1 allocation.', output) + self.assertEqual(3, ret) + + # Now we want to delete the orphaned allocation that is duplicate + ret = self.cli.audit(delete=True, verbose=True) + + # We finally should only have the target allocations + self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor) + self.assertRequestMatchesUsage({'VCPU': 0, + 'MEMORY_MB': 0, + 'DISK_GB': 0}, source_rp_uuid) + + output = self.output.getvalue() + self.assertIn( + 'Deleted allocations for consumer UUID %(consumer_uuid)s on ' + 'Resource Provider %(rp_uuid)s' % + {'consumer_uuid': server['id'], + 'rp_uuid': source_rp_uuid}, + output) + self.assertIn('Processed 1 allocation.', output) + self.assertEqual(4, ret) + + class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase): """Functional tests for the "nova-manage db archive_deleted_rows" CLI.""" api_major_version = 'v2.1' @@ -1467,9 +1723,9 @@ # Start two compute services, one per cell self.compute1 = self.start_service('compute', host='host1', - cell='cell1') + cell_name='cell1') self.compute2 = self.start_service('compute', host='host2', - cell='cell2') + cell_name='cell2') def test_archive_deleted_rows(self): admin_context = context.get_admin_context(read_deleted='yes') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_report_client.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_report_client.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_report_client.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_report_client.py 2020-04-10 17:57:57.000000000 +0000 @@ -17,12 +17,8 @@ import mock import os_resource_classes as orc import os_traits as ot -from oslo_config import cfg -from oslo_config import fixture as config_fixture from oslo_utils.fixture import uuidsentinel as uuids 
import pkg_resources -from placement import direct -from placement.tests.functional.fixtures import placement from nova.cmd import status from nova.compute import provider_tree @@ -38,6 +34,7 @@ from nova.scheduler.client import report from nova.scheduler import utils from nova import test +from nova.tests.functional import fixtures as func_fixtures CONF = conf.CONF @@ -86,68 +83,11 @@ return super(VersionCheckingReportClient, self).delete(*args, **kwargs) -class SchedulerReportClientTestBase(test.TestCase): - - def setUp(self): - super(SchedulerReportClientTestBase, self).setUp() - # Because these tests use PlacementDirect we need to manage - # the database and other config ourselves. - config = cfg.ConfigOpts() - placement_conf = self.useFixture(config_fixture.Config(config)) - self.useFixture( - placement.PlacementFixture(conf_fixture=placement_conf, db=True, - use_intercept=False)) - self.placement_conf = placement_conf.conf - - def _interceptor(self, app=None, latest_microversion=True): - """Set up an intercepted placement API to test against. - - Use as e.g. - - with interceptor() as client: - ret = client.get_provider_tree_and_ensure_root(...) - - :param app: An optional wsgi app loader. - :param latest_microversion: If True (the default), API requests will - use the latest microversion if not - otherwise specified. If False, the base - microversion is the default. - :return: Context manager, which in turn returns a direct - SchedulerReportClient. - """ - class ReportClientInterceptor(direct.PlacementDirect): - """A shim around PlacementDirect that wraps the Adapter in a - SchedulerReportClient. - """ - def __enter__(inner_self): - adap = super(ReportClientInterceptor, inner_self).__enter__() - client = VersionCheckingReportClient(adapter=adap) - # NOTE(efried): This `self` is the TestCase! - self._set_client(client) - return client - - interceptor = ReportClientInterceptor( - self.placement_conf, latest_microversion=latest_microversion) - if app: - interceptor.app = app - return interceptor - - def _set_client(self, client): - """Set report client attributes on the TestCase instance. - - Override this to do things like: - self.mocked_thingy.report_client = client - - :param client: A direct SchedulerReportClient. - """ - pass - - @ddt.ddt @mock.patch('nova.compute.utils.is_volume_backed_instance', new=mock.Mock(return_value=False)) @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock()) -class SchedulerReportClientTests(SchedulerReportClientTestBase): +class SchedulerReportClientTests(test.TestCase): def setUp(self): super(SchedulerReportClientTests, self).setUp() @@ -176,9 +116,11 @@ extra_specs={})) self.context = context.get_admin_context() - def _set_client(self, client): - # TODO(efried): Rip this out and just use `as client` throughout. - self.client = client + # The ksa adapter used by the PlacementFixture, for mocking purposes. + self.placement_client = self.useFixture( + func_fixtures.PlacementFixture())._client + + self.client = VersionCheckingReportClient() def compute_node_to_inventory_dict(self): result = {} @@ -219,138 +161,135 @@ # tests that when allocation or inventory errors happen, we # are resilient. res_class = orc.VCPU - with self._interceptor(): - # When we start out there are no resource providers. 
- rp = self.client._get_resource_provider(self.context, - self.compute_uuid) - self.assertIsNone(rp) - rps = self.client.get_providers_in_tree(self.context, - self.compute_uuid) - self.assertEqual([], rps) - # But get_provider_tree_and_ensure_root creates one (via - # _ensure_resource_provider) - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # Now let's update status for our compute node. - self.client._ensure_resource_provider( - self.context, self.compute_uuid, name=self.compute_name) - self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) + # When we start out there are no resource providers. + rp = self.client._get_resource_provider(self.context, + self.compute_uuid) + self.assertIsNone(rp) + rps = self.client.get_providers_in_tree(self.context, + self.compute_uuid) + self.assertEqual([], rps) + # But get_provider_tree_and_ensure_root creates one (via + # _ensure_resource_provider) + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # So now we have a resource provider - rp = self.client._get_resource_provider(self.context, - self.compute_uuid) - self.assertIsNotNone(rp) - rps = self.client.get_providers_in_tree(self.context, - self.compute_uuid) - self.assertEqual(1, len(rps)) + # Now let's update status for our compute node. + self.client._ensure_resource_provider( + self.context, self.compute_uuid, name=self.compute_name) + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) + + # So now we have a resource provider + rp = self.client._get_resource_provider(self.context, + self.compute_uuid) + self.assertIsNotNone(rp) + rps = self.client.get_providers_in_tree(self.context, + self.compute_uuid) + self.assertEqual(1, len(rps)) + + # We should also have empty sets of aggregate and trait + # associations + self.assertEqual( + [], self.client._get_sharing_providers(self.context, + [uuids.agg])) + self.assertFalse( + self.client._provider_tree.have_aggregates_changed( + self.compute_uuid, [])) + self.assertFalse( + self.client._provider_tree.have_traits_changed( + self.compute_uuid, [])) + + # TODO(cdent): change this to use the methods built in + # to the report client to retrieve inventory? + inventory_url = ('/resource_providers/%s/inventories' % + self.compute_uuid) + resp = self.client.get(inventory_url) + inventory_data = resp.json()['inventories'] + self.assertEqual(self.compute_node.vcpus, + inventory_data[res_class]['total']) - # We should also have empty sets of aggregate and trait - # associations - self.assertEqual( - [], self.client._get_sharing_providers(self.context, - [uuids.agg])) - self.assertFalse( - self.client._provider_tree.have_aggregates_changed( - self.compute_uuid, [])) - self.assertFalse( - self.client._provider_tree.have_traits_changed( - self.compute_uuid, [])) - - # TODO(cdent): change this to use the methods built in - # to the report client to retrieve inventory? 
- inventory_url = ('/resource_providers/%s/inventories' % - self.compute_uuid) - resp = self.client.get(inventory_url) - inventory_data = resp.json()['inventories'] - self.assertEqual(self.compute_node.vcpus, - inventory_data[res_class]['total']) - - # Providers and inventory show up nicely in the provider tree - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - self.assertTrue(ptree.has_inventory(self.compute_uuid)) - - # Update allocations with our instance - alloc_dict = utils.resources_from_flavor(self.instance, - self.instance.flavor) - payload = { - "allocations": { - self.compute_uuid: {"resources": alloc_dict} - }, - "project_id": self.instance.project_id, - "user_id": self.instance.user_id, - "consumer_generation": None - } - self.client.put_allocations( - self.context, self.instance_uuid, payload) + # Providers and inventory show up nicely in the provider tree + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) + self.assertTrue(ptree.has_inventory(self.compute_uuid)) + + # Update allocations with our instance + alloc_dict = utils.resources_from_flavor(self.instance, + self.instance.flavor) + payload = { + "allocations": { + self.compute_uuid: {"resources": alloc_dict} + }, + "project_id": self.instance.project_id, + "user_id": self.instance.user_id, + "consumer_generation": None + } + self.client.put_allocations( + self.context, self.instance_uuid, payload) - # Check that allocations were made - resp = self.client.get('/allocations/%s' % self.instance_uuid) - alloc_data = resp.json()['allocations'] - vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class] - self.assertEqual(2, vcpu_data) - - # Check that usages are up to date - resp = self.client.get('/resource_providers/%s/usages' % - self.compute_uuid) - usage_data = resp.json()['usages'] - vcpu_data = usage_data[res_class] - self.assertEqual(2, vcpu_data) - - # Delete allocations with our instance - self.client.delete_allocation_for_instance(self.context, - self.instance.uuid) - - # No usage - resp = self.client.get('/resource_providers/%s/usages' % - self.compute_uuid) - usage_data = resp.json()['usages'] - vcpu_data = usage_data[res_class] - self.assertEqual(0, vcpu_data) - - # Allocation bumped the generation, so refresh to get the latest - self.client._refresh_and_get_inventory(self.context, - self.compute_uuid) - - # Trigger the reporting client deleting all inventory by setting - # the compute node's CPU, RAM and disk amounts to 0. 
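All of the inventory payloads in these report client tests share placement's standard fields; the capacity placement actually schedules against is ``(total - reserved) * allocation_ratio``, with ``min_unit``, ``max_unit`` and ``step_size`` constraining each individual allocation. A quick check of that arithmetic:

.. code::

   def capacity(inv):
       # Effective schedulable capacity of one inventory record.
       return int((inv['total'] - inv['reserved']) * inv['allocation_ratio'])


   vf = {'total': 24, 'reserved': 1, 'min_unit': 1, 'max_unit': 24,
         'step_size': 1, 'allocation_ratio': 1.0}
   disk = {'total': 100, 'reserved': 1, 'min_unit': 1, 'max_unit': 10,
           'step_size': 2, 'allocation_ratio': 10.0}
   assert capacity(vf) == 23
   assert capacity(disk) == 990  # over-allocation via the 10.0 ratio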
- self.compute_node.vcpus = 0 - self.compute_node.memory_mb = 0 - self.compute_node.local_gb = 0 - self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) + # Check that allocations were made + resp = self.client.get('/allocations/%s' % self.instance_uuid) + alloc_data = resp.json()['allocations'] + vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class] + self.assertEqual(2, vcpu_data) + + # Check that usages are up to date + resp = self.client.get('/resource_providers/%s/usages' % + self.compute_uuid) + usage_data = resp.json()['usages'] + vcpu_data = usage_data[res_class] + self.assertEqual(2, vcpu_data) + + # Delete allocations with our instance + self.client.delete_allocation_for_instance(self.context, + self.instance.uuid) + + # No usage + resp = self.client.get('/resource_providers/%s/usages' % + self.compute_uuid) + usage_data = resp.json()['usages'] + vcpu_data = usage_data[res_class] + self.assertEqual(0, vcpu_data) + + # Allocation bumped the generation, so refresh to get the latest + self.client._refresh_and_get_inventory(self.context, + self.compute_uuid) + + # Trigger the reporting client deleting all inventory by setting + # the compute node's CPU, RAM and disk amounts to 0. + self.compute_node.vcpus = 0 + self.compute_node.memory_mb = 0 + self.compute_node.local_gb = 0 + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) + + # Check there's no more inventory records + resp = self.client.get(inventory_url) + inventory_data = resp.json()['inventories'] + self.assertEqual({}, inventory_data) - # Check there's no more inventory records - resp = self.client.get(inventory_url) - inventory_data = resp.json()['inventories'] - self.assertEqual({}, inventory_data) - - # Build the provider tree afresh. - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - # The compute node is still there - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # But the inventory is gone - self.assertFalse(ptree.has_inventory(self.compute_uuid)) + # Build the provider tree afresh. + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + # The compute node is still there + self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) + # But the inventory is gone + self.assertFalse(ptree.has_inventory(self.compute_uuid)) def test_global_request_id(self): global_request_id = 'req-%s' % uuids.global_request_id - def assert_app(environ, start_response): - # Assert the 'X-Openstack-Request-Id' header in the request. 
- self.assertIn('HTTP_X_OPENSTACK_REQUEST_ID', environ) + def fake_request(*args, **kwargs): self.assertEqual(global_request_id, - environ['HTTP_X_OPENSTACK_REQUEST_ID']) - start_response('204 OK', []) - return [] + kwargs['headers']['X-OpenStack-Request-ID']) - with self._interceptor(app=lambda: assert_app): + with mock.patch.object(self.client._client, 'request', + side_effect=fake_request): self.client._delete_provider(self.compute_uuid, global_request_id=global_request_id) payload = { @@ -377,245 +316,81 @@ sip(IP) x """ - with self._interceptor(): - # Register the compute node and its inventory + # Register the compute node and its inventory + self.client._ensure_resource_provider( + self.context, self.compute_uuid, name=self.compute_name) + self.client.set_inventory_for_provider( + self.context, self.compute_uuid, + self.compute_node_to_inventory_dict()) + # The compute node is associated with two of the shared storages + self.client.set_aggregates_for_provider( + self.context, self.compute_uuid, + set([uuids.agg_disk_1, uuids.agg_disk_2])) + + # Register two SR-IOV PFs with VF and bandwidth inventory + for x in (1, 2): + name = 'pf%d' % x + uuid = getattr(uuids, name) self.client._ensure_resource_provider( - self.context, self.compute_uuid, name=self.compute_name) + self.context, uuid, name=name, + parent_provider_uuid=self.compute_uuid) self.client.set_inventory_for_provider( - self.context, self.compute_uuid, - self.compute_node_to_inventory_dict()) - # The compute node is associated with two of the shared storages - self.client.set_aggregates_for_provider( - self.context, self.compute_uuid, - set([uuids.agg_disk_1, uuids.agg_disk_2])) - - # Register two SR-IOV PFs with VF and bandwidth inventory - for x in (1, 2): - name = 'pf%d' % x - uuid = getattr(uuids, name) - self.client._ensure_resource_provider( - self.context, uuid, name=name, - parent_provider_uuid=self.compute_uuid) - self.client.set_inventory_for_provider( - self.context, uuid, { - orc.SRIOV_NET_VF: { - 'total': 24 * x, - 'reserved': x, - 'min_unit': 1, - 'max_unit': 24 * x, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - 'CUSTOM_BANDWIDTH': { - 'total': 125000 * x, - 'reserved': 1000 * x, - 'min_unit': 5000, - 'max_unit': 25000 * x, - 'step_size': 5000, - 'allocation_ratio': 1.0, - }, - }) - # They're associated with an IP address aggregate - self.client.set_aggregates_for_provider(self.context, uuid, - [uuids.agg_ip]) - # Set some traits on 'em - self.client.set_traits_for_provider( - self.context, uuid, ['CUSTOM_PHYSNET_%d' % x]) - - # Register three shared storage pools with disk inventory - for x in (1, 2, 3): - name = 'ss%d' % x - uuid = getattr(uuids, name) - self.client._ensure_resource_provider(self.context, uuid, - name=name) - self.client.set_inventory_for_provider( - self.context, uuid, { - orc.DISK_GB: { - 'total': 100 * x, - 'reserved': x, - 'min_unit': 1, - 'max_unit': 10 * x, - 'step_size': 2, - 'allocation_ratio': 10.0, - }, - }) - # Mark as a sharing provider - self.client.set_traits_for_provider( - self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE']) - # Associate each with its own aggregate. The compute node is - # associated with the first two (agg_disk_1 and agg_disk_2). 
- agg = getattr(uuids, 'agg_disk_%d' % x) - self.client.set_aggregates_for_provider(self.context, uuid, - [agg]) - - # Register a shared IP address provider with IP address inventory - self.client._ensure_resource_provider(self.context, uuids.sip, - name='sip') - self.client.set_inventory_for_provider( - self.context, uuids.sip, { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, + self.context, uuid, { + orc.SRIOV_NET_VF: { + 'total': 24 * x, + 'reserved': x, 'min_unit': 1, - 'max_unit': 8, + 'max_unit': 24 * x, 'step_size': 1, 'allocation_ratio': 1.0, }, - }) - # Mark as a sharing provider, and add another trait - self.client.set_traits_for_provider( - self.context, uuids.sip, - set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) - # It's associated with the same aggregate as both PFs - self.client.set_aggregates_for_provider(self.context, uuids.sip, - [uuids.agg_ip]) - - # Register a shared network bandwidth provider - self.client._ensure_resource_provider(self.context, uuids.sbw, - name='sbw') - self.client.set_inventory_for_provider( - self.context, uuids.sbw, { 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, + 'total': 125000 * x, + 'reserved': 1000 * x, 'min_unit': 5000, - 'max_unit': 250000, + 'max_unit': 25000 * x, 'step_size': 5000, - 'allocation_ratio': 8.0, + 'allocation_ratio': 1.0, }, }) - # Mark as a sharing provider + # They're associated with an IP address aggregate + self.client.set_aggregates_for_provider(self.context, uuid, + [uuids.agg_ip]) + # Set some traits on 'em self.client.set_traits_for_provider( - self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) - # It's associated with some other aggregate. - self.client.set_aggregates_for_provider(self.context, uuids.sbw, - [uuids.agg_bw]) - - # Setup is done. Grab the ProviderTree - prov_tree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - - # All providers show up because we used _ensure_resource_provider - self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2, - uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3, - uuids.sbw]), - set(prov_tree.get_provider_uuids())) - # Narrow the field to just our compute subtree. - self.assertEqual( - set([self.compute_uuid, uuids.pf1, uuids.pf2]), - set(prov_tree.get_provider_uuids(self.compute_uuid))) + self.context, uuid, ['CUSTOM_PHYSNET_%d' % x]) - # Validate traits for a couple of providers - self.assertFalse(prov_tree.have_traits_changed( - uuids.pf2, ['CUSTOM_PHYSNET_2'])) - self.assertFalse(prov_tree.have_traits_changed( - uuids.sip, ['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) - - # Validate aggregates for a couple of providers - self.assertFalse(prov_tree.have_aggregates_changed( - uuids.sbw, [uuids.agg_bw])) - self.assertFalse(prov_tree.have_aggregates_changed( - self.compute_uuid, [uuids.agg_disk_1, uuids.agg_disk_2])) - - def test__set_inventory_reserved_eq_total(self): - with self._interceptor(latest_microversion=False): - # Create the provider - self.client._ensure_resource_provider(self.context, uuids.cn) - - # Make sure we can set reserved value equal to total - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 24, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - def test_set_inventory_for_provider(self): - """Tests for SchedulerReportClient.set_inventory_for_provider. 
- """ - with self._interceptor(): - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - # Provider doesn't exist in our cache - self.assertRaises( - ValueError, - self.client.set_inventory_for_provider, - self.context, uuids.cn, inv) - self.assertIsNone(self.client._get_inventory( - self.context, uuids.cn)) - - # Create the provider - self.client._ensure_resource_provider(self.context, uuids.cn) - # Still no inventory, but now we don't get a 404 - self.assertEqual( - {}, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Now set the inventory - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Make sure we can change it - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } + # Register three shared storage pools with disk inventory + for x in (1, 2, 3): + name = 'ss%d' % x + uuid = getattr(uuids, name) + self.client._ensure_resource_provider(self.context, uuid, + name=name) self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Create custom resource classes on the fly - self.assertFalse( - self.client.get('/resource_classes/CUSTOM_BANDWIDTH')) - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, + self.context, uuid, { + orc.DISK_GB: { + 'total': 100 * x, + 'reserved': x, + 'min_unit': 1, + 'max_unit': 10 * x, + 'step_size': 2, + 'allocation_ratio': 10.0, + }, + }) + # Mark as a sharing provider + self.client.set_traits_for_provider( + self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE']) + # Associate each with its own aggregate. The compute node is + # associated with the first two (agg_disk_1 and agg_disk_2). 
+ agg = getattr(uuids, 'agg_disk_%d' % x) + self.client.set_aggregates_for_provider(self.context, uuid, + [agg]) + + # Register a shared IP address provider with IP address inventory + self.client._ensure_resource_provider(self.context, uuids.sip, + name='sip') + self.client.set_inventory_for_provider( + self.context, uuids.sip, { orc.IPV4_ADDRESS: { 'total': 128, 'reserved': 0, @@ -624,6 +399,20 @@ 'step_size': 1, 'allocation_ratio': 1.0, }, + }) + # Mark as a sharing provider, and add another trait + self.client.set_traits_for_provider( + self.context, uuids.sip, + set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + # It's associated with the same aggregate as both PFs + self.client.set_aggregates_for_provider(self.context, uuids.sip, + [uuids.agg_ip]) + + # Register a shared network bandwidth provider + self.client._ensure_resource_provider(self.context, uuids.sbw, + name='sbw') + self.client.set_inventory_for_provider( + self.context, uuids.sbw, { 'CUSTOM_BANDWIDTH': { 'total': 1250000, 'reserved': 10000, @@ -632,154 +421,302 @@ 'step_size': 5000, 'allocation_ratio': 8.0, }, - } - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - # The custom resource class got created. - self.assertTrue( - self.client.get('/resource_classes/CUSTOM_BANDWIDTH')) - - # Creating a bogus resource class raises the appropriate exception. - bogus_inv = dict(inv) - bogus_inv['CUSTOM_BOGU$$'] = { - 'total': 1, - 'reserved': 1, + }) + # Mark as a sharing provider + self.client.set_traits_for_provider( + self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE']) + # It's associated with some other aggregate. + self.client.set_aggregates_for_provider(self.context, uuids.sbw, + [uuids.agg_bw]) + + # Setup is done. Grab the ProviderTree + prov_tree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + + # All providers show up because we used _ensure_resource_provider + self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2, + uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3, + uuids.sbw]), + set(prov_tree.get_provider_uuids())) + # Narrow the field to just our compute subtree. 
+ self.assertEqual( + set([self.compute_uuid, uuids.pf1, uuids.pf2]), + set(prov_tree.get_provider_uuids(self.compute_uuid))) + + # Validate traits for a couple of providers + self.assertFalse(prov_tree.have_traits_changed( + uuids.pf2, ['CUSTOM_PHYSNET_2'])) + self.assertFalse(prov_tree.have_traits_changed( + uuids.sip, ['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO'])) + + # Validate aggregates for a couple of providers + self.assertFalse(prov_tree.have_aggregates_changed( + uuids.sbw, [uuids.agg_bw])) + self.assertFalse(prov_tree.have_aggregates_changed( + self.compute_uuid, [uuids.agg_disk_1, uuids.agg_disk_2])) + + def test__set_inventory_reserved_eq_total(self): + # Create the provider + self.client._ensure_resource_provider(self.context, uuids.cn) + + # Make sure we can set reserved value equal to total + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 24, 'min_unit': 1, - 'max_unit': 1, + 'max_unit': 24, 'step_size': 1, 'allocation_ratio': 1.0, - } - self.assertRaises( - exception.InvalidResourceClass, - self.client.set_inventory_for_provider, - self.context, uuids.cn, bogus_inv) - self.assertFalse( - self.client.get('/resource_classes/BOGUS')) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + }, + } + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) - # Create a generation conflict by doing an "out of band" update - oob_inv = { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - gen = self.client._provider_tree.data(uuids.cn).generation - self.assertTrue( - self.client.put( - '/resource_providers/%s/inventories' % uuids.cn, - {'resource_provider_generation': gen, - 'inventories': oob_inv})) - self.assertEqual( - oob_inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Now try to update again. - inv = { - orc.SRIOV_NET_VF: { - 'total': 24, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 24, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - }, - } - # Cached generation is off, so this will bounce with a conflict. - self.assertRaises( - exception.ResourceProviderUpdateConflict, - self.client.set_inventory_for_provider, - self.context, uuids.cn, inv) - # Inventory still corresponds to the out-of-band update - self.assertEqual( - oob_inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - # Force refresh to get the latest generation - self.client._refresh_and_get_inventory(self.context, uuids.cn) - # Now the update should work - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - payload = { - "allocations": { - uuids.cn: {"resources": {orc.SRIOV_NET_VF: 1}} - }, - "project_id": uuids.proj, - "user_id": uuids.user, - "consumer_generation": None - } - # Now set up an InventoryInUse case by creating a VF allocation... 
- self.assertTrue( - self.client.put_allocations( - self.context, uuids.consumer, payload)) - # ...and trying to delete the provider's VF inventory - bad_inv = { - 'CUSTOM_BANDWIDTH': { - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - }, - } - # Allocation bumped the generation, so refresh to get the latest - self.client._refresh_and_get_inventory(self.context, uuids.cn) - msgre = (".*update conflict: Inventory for 'SRIOV_NET_VF' on " - "resource provider '%s' in use..*" % uuids.cn) - with self.assertRaisesRegex(exception.InventoryInUse, msgre): - self.client.set_inventory_for_provider(self.context, uuids.cn, - bad_inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Same result if we try to clear all the inventory - bad_inv = {} - with self.assertRaisesRegex(exception.InventoryInUse, msgre): - self.client.set_inventory_for_provider(self.context, uuids.cn, - bad_inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) - - # Remove the allocation to make it work - self.client.delete('/allocations/' + uuids.consumer) - # Force refresh to get the latest generation - self.client._refresh_and_get_inventory(self.context, uuids.cn) - inv = {} - self.client.set_inventory_for_provider( - self.context, uuids.cn, inv) - self.assertEqual( - inv, - self.client._get_inventory( - self.context, uuids.cn)['inventories']) + def test_set_inventory_for_provider(self): + """Tests for SchedulerReportClient.set_inventory_for_provider.""" + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + # Provider doesn't exist in our cache + self.assertRaises( + ValueError, + self.client.set_inventory_for_provider, + self.context, uuids.cn, inv) + self.assertIsNone(self.client._get_inventory( + self.context, uuids.cn)) + + # Create the provider + self.client._ensure_resource_provider(self.context, uuids.cn) + # Still no inventory, but now we don't get a 404 + self.assertEqual( + {}, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Now set the inventory + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Make sure we can change it + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Create custom resource classes on the fly + self.assertFalse( + self.client.get('/resource_classes/CUSTOM_BANDWIDTH', + version='1.2')) + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + 
self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + # The custom resource class got created. + self.assertTrue( + self.client.get('/resource_classes/CUSTOM_BANDWIDTH', + version='1.2')) + + # Creating a bogus resource class raises the appropriate exception. + bogus_inv = dict(inv) + bogus_inv['CUSTOM_BOGU$$'] = { + 'total': 1, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 1, + 'step_size': 1, + 'allocation_ratio': 1.0, + } + self.assertRaises( + exception.InvalidResourceClass, + self.client.set_inventory_for_provider, + self.context, uuids.cn, bogus_inv) + self.assertFalse( + self.client.get('/resource_classes/BOGUS')) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Create a generation conflict by doing an "out of band" update + oob_inv = { + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + gen = self.client._provider_tree.data(uuids.cn).generation + self.assertTrue( + self.client.put( + '/resource_providers/%s/inventories' % uuids.cn, + {'resource_provider_generation': gen, + 'inventories': oob_inv})) + self.assertEqual( + oob_inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Now try to update again. + inv = { + orc.SRIOV_NET_VF: { + 'total': 24, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 24, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + # Cached generation is off, so this will bounce with a conflict. + self.assertRaises( + exception.ResourceProviderUpdateConflict, + self.client.set_inventory_for_provider, + self.context, uuids.cn, inv) + # Inventory still corresponds to the out-of-band update + self.assertEqual( + oob_inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + # Force refresh to get the latest generation + self.client._refresh_and_get_inventory(self.context, uuids.cn) + # Now the update should work + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + payload = { + "allocations": { + uuids.cn: {"resources": {orc.SRIOV_NET_VF: 1}} + }, + "project_id": uuids.proj, + "user_id": uuids.user, + "consumer_generation": None + } + # Now set up an InventoryInUse case by creating a VF allocation... 
+ self.assertTrue( + self.client.put_allocations( + self.context, uuids.consumer, payload)) + # ...and trying to delete the provider's VF inventory + bad_inv = { + 'CUSTOM_BANDWIDTH': { + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + }, + } + # Allocation bumped the generation, so refresh to get the latest + self.client._refresh_and_get_inventory(self.context, uuids.cn) + msgre = (".*update conflict: Inventory for 'SRIOV_NET_VF' on " + "resource provider '%s' in use..*" % uuids.cn) + with self.assertRaisesRegex(exception.InventoryInUse, msgre): + self.client.set_inventory_for_provider(self.context, uuids.cn, + bad_inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Same result if we try to clear all the inventory + bad_inv = {} + with self.assertRaisesRegex(exception.InventoryInUse, msgre): + self.client.set_inventory_for_provider(self.context, uuids.cn, + bad_inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) + + # Remove the allocation to make it work + self.client.delete('/allocations/' + uuids.consumer) + # Force refresh to get the latest generation + self.client._refresh_and_get_inventory(self.context, uuids.cn) + inv = {} + self.client.set_inventory_for_provider( + self.context, uuids.cn, inv) + self.assertEqual( + inv, + self.client._get_inventory( + self.context, uuids.cn)['inventories']) def test_update_from_provider_tree(self): """A "realistic" walk through the lifecycle of a compute node provider @@ -804,205 +741,206 @@ self.assertFalse( new_tree.have_aggregates_changed(uuid, cdata.aggregates)) - # Do these with a failing interceptor to prove no API calls are made. - with self._interceptor(app=lambda: 'nuke') as client: + # Do these with a failing request method to prove no API calls are made + with mock.patch.object(self.placement_client, 'request', + mock.NonCallableMock()): # To begin with, the cache should be empty - self.assertEqual([], client._provider_tree.get_provider_uuids()) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) # When new_tree is empty, it's a no-op. 
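Earlier in test_set_inventory_for_provider, the out-of-band PUT demonstrates placement's optimistic concurrency: writes carry the provider generation, a stale cached generation bounces with ResourceProviderUpdateConflict, and a refresh-and-retry then succeeds. A hedged sketch of that retry pattern built from the same client methods (the wrapper itself is invented; nova's real retry handling lives elsewhere):

    from nova import exception

    def set_inventory_with_retry(client, ctx, rp_uuid, inv, attempts=2):
        # Sketch only: refresh the cached generation on conflict, retry,
        # and let the final conflict propagate.
        for attempt in range(attempts):
            try:
                client.set_inventory_for_provider(ctx, rp_uuid, inv)
                return
            except exception.ResourceProviderUpdateConflict:
                if attempt == attempts - 1:
                    raise
                # The cached generation is stale; refresh it and retry.
                client._refresh_and_get_inventory(ctx, rp_uuid)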
- client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - with self._interceptor(): - # Populate with a provider with no inventories, aggregates, traits - new_tree.new_root('root', uuids.root) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Throw in some more providers, in various spots in the tree, with - # some sub-properties - new_tree.new_child('child1', uuids.root, uuid=uuids.child1) - new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2]) - new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1) - new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2']) - new_tree.new_root('ssp', uuids.ssp) - new_tree.update_inventory('ssp', { - orc.DISK_GB: { - 'total': 100, - 'reserved': 1, - 'min_unit': 1, - 'max_unit': 10, - 'step_size': 2, - 'allocation_ratio': 10.0, - }, - }) self.client.update_from_provider_tree(self.context, new_tree) assert_ptrees_equal() - # Swizzle properties - # Give the root some everything - new_tree.update_inventory(uuids.root, { - orc.VCPU: { - 'total': 10, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 2, - 'step_size': 1, - 'allocation_ratio': 10.0, - }, - orc.MEMORY_MB: { - 'total': 1048576, - 'reserved': 2048, - 'min_unit': 1024, - 'max_unit': 131072, - 'step_size': 1024, - 'allocation_ratio': 1.0, - }, - }) - new_tree.update_aggregates(uuids.root, [uuids.agg1]) - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2']) - # Take away the child's aggregates - new_tree.update_aggregates(uuids.child1, []) - # Grandchild gets some inventory - ipv4_inv = { - orc.IPV4_ADDRESS: { - 'total': 128, - 'reserved': 0, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - }, - } - new_tree.update_inventory('grandchild1_1', ipv4_inv) - # Shared storage provider gets traits - new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE', - 'STORAGE_DISK_SSD'])) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Let's go for some error scenarios. - # Add inventory in an invalid resource class - new_tree.update_inventory( - 'grandchild1_1', - dict(ipv4_inv, - MOTSUC_BANDWIDTH={ - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - })) - self.assertRaises( - exception.ResourceProviderSyncFailed, - self.client.update_from_provider_tree, self.context, new_tree) - # The inventory update didn't get synced. 
- self.assertIsNone(self.client._get_inventory( - self.context, uuids.grandchild1_1)) - # We invalidated the cache for the entire tree around grandchild1_1 - # but did not invalidate the other root (the SSP) - self.assertEqual([uuids.ssp], - self.client._provider_tree.get_provider_uuids()) - # This is a little under-the-hood-looking, but make sure we cleared - # the association refresh timers for everything in the grandchild's - # tree - self.assertEqual(set([uuids.ssp]), - set(self.client._association_refresh_time)) - - # Fix that problem so we can try the next one - new_tree.update_inventory( - 'grandchild1_1', - dict(ipv4_inv, - CUSTOM_BANDWIDTH={ - 'total': 1250000, - 'reserved': 10000, - 'min_unit': 5000, - 'max_unit': 250000, - 'step_size': 5000, - 'allocation_ratio': 8.0, - })) - - # Add a bogus trait - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2', - 'MOTSUC_FOO']) - self.assertRaises( - exception.ResourceProviderSyncFailed, - self.client.update_from_provider_tree, self.context, new_tree) - # Placement didn't get updated - self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']), - self.client.get_provider_traits( - self.context, uuids.root).traits) - # ...and the root was removed from the cache - self.assertFalse(self.client._provider_tree.exists(uuids.root)) - - # Fix that problem - new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', - 'HW_CPU_X86_AVX2', - 'CUSTOM_FOO']) - - # Now the sync should work - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Let's cause a conflict error by doing an "out-of-band" update - gen = self.client._provider_tree.data(uuids.ssp).generation - self.assertTrue(self.client.put( - '/resource_providers/%s/traits' % uuids.ssp, - {'resource_provider_generation': gen, - 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']}, - version='1.6')) - - # Now if we try to modify the traits, we should fail and invalidate - # the cache... 
- new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE', - 'STORAGE_DISK_SSD', - 'CUSTOM_FAST']) + # Populate with a provider with no inventories, aggregates, traits + new_tree.new_root('root', uuids.root) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Throw in some more providers, in various spots in the tree, with + # some sub-properties + new_tree.new_child('child1', uuids.root, uuid=uuids.child1) + new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2]) + new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1) + new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2']) + new_tree.new_root('ssp', uuids.ssp) + new_tree.update_inventory('ssp', { + orc.DISK_GB: { + 'total': 100, + 'reserved': 1, + 'min_unit': 1, + 'max_unit': 10, + 'step_size': 2, + 'allocation_ratio': 10.0, + }, + }) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Swizzle properties + # Give the root some everything + new_tree.update_inventory(uuids.root, { + orc.VCPU: { + 'total': 10, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 2, + 'step_size': 1, + 'allocation_ratio': 10.0, + }, + orc.MEMORY_MB: { + 'total': 1048576, + 'reserved': 2048, + 'min_unit': 1024, + 'max_unit': 131072, + 'step_size': 1024, + 'allocation_ratio': 1.0, + }, + }) + new_tree.update_aggregates(uuids.root, [uuids.agg1]) + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2']) + # Take away the child's aggregates + new_tree.update_aggregates(uuids.child1, []) + # Grandchild gets some inventory + ipv4_inv = { + orc.IPV4_ADDRESS: { + 'total': 128, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 8, + 'step_size': 1, + 'allocation_ratio': 1.0, + }, + } + new_tree.update_inventory('grandchild1_1', ipv4_inv) + # Shared storage provider gets traits + new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE', + 'STORAGE_DISK_SSD'])) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Let's go for some error scenarios. + # Add inventory in an invalid resource class + new_tree.update_inventory( + 'grandchild1_1', + dict(ipv4_inv, + MOTSUC_BANDWIDTH={ + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + })) + self.assertRaises( + exception.ResourceProviderSyncFailed, + self.client.update_from_provider_tree, self.context, new_tree) + # The inventory update didn't get synced. 
+ self.assertIsNone(self.client._get_inventory( + self.context, uuids.grandchild1_1)) + # We invalidated the cache for the entire tree around grandchild1_1 + # but did not invalidate the other root (the SSP) + self.assertEqual([uuids.ssp], + self.client._provider_tree.get_provider_uuids()) + # This is a little under-the-hood-looking, but make sure we cleared + # the association refresh timers for everything in the grandchild's + # tree + self.assertEqual(set([uuids.ssp]), + set(self.client._association_refresh_time)) + + # Fix that problem so we can try the next one + new_tree.update_inventory( + 'grandchild1_1', + dict(ipv4_inv, + CUSTOM_BANDWIDTH={ + 'total': 1250000, + 'reserved': 10000, + 'min_unit': 5000, + 'max_unit': 250000, + 'step_size': 5000, + 'allocation_ratio': 8.0, + })) + + # Add a bogus trait + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2', + 'MOTSUC_FOO']) + self.assertRaises( + exception.ResourceProviderSyncFailed, + self.client.update_from_provider_tree, self.context, new_tree) + # Placement didn't get updated + self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']), + self.client.get_provider_traits( + self.context, uuids.root).traits) + # ...and the root was removed from the cache + self.assertFalse(self.client._provider_tree.exists(uuids.root)) + + # Fix that problem + new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX', + 'HW_CPU_X86_AVX2', + 'CUSTOM_FOO']) + + # Now the sync should work + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Let's cause a conflict error by doing an "out-of-band" update + gen = self.client._provider_tree.data(uuids.ssp).generation + self.assertTrue(self.client.put( + '/resource_providers/%s/traits' % uuids.ssp, + {'resource_provider_generation': gen, + 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']}, + version='1.6')) + + # Now if we try to modify the traits, we should fail and invalidate + # the cache... + new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE', + 'STORAGE_DISK_SSD', + 'CUSTOM_FAST']) + self.assertRaises( + exception.ResourceProviderUpdateConflict, + self.client.update_from_provider_tree, self.context, new_tree) + # ...but the next iteration will refresh the cache with the latest + # generation and so the next attempt should succeed. + self.client.update_from_provider_tree(self.context, new_tree) + # The out-of-band change is blown away, as it should be. + assert_ptrees_equal() + + # Let's delete some stuff + new_tree.remove(uuids.ssp) + self.assertFalse(new_tree.exists('ssp')) + + # Verify that placement communication failure raises through + with mock.patch.object(self.client, '_delete_provider', + side_effect=kse.EndpointNotFound): self.assertRaises( - exception.ResourceProviderUpdateConflict, - self.client.update_from_provider_tree, self.context, new_tree) - # ...but the next iteration will refresh the cache with the latest - # generation and so the next attempt should succeed. - self.client.update_from_provider_tree(self.context, new_tree) - # The out-of-band change is blown away, as it should be. 
- assert_ptrees_equal() - - # Let's delete some stuff - new_tree.remove(uuids.ssp) - self.assertFalse(new_tree.exists('ssp')) - - # Verify that placement communication failure raises through - with mock.patch.object(self.client, '_delete_provider', - side_effect=kse.EndpointNotFound): - self.assertRaises( - kse.ClientException, - self.client.update_from_provider_tree, - self.context, new_tree) - # The provider didn't get deleted (this doesn't raise - # ResourceProviderNotFound) - self.client.get_provider_by_name(self.context, 'ssp') - - # Continue removing stuff - new_tree.remove('child1') - self.assertFalse(new_tree.exists('child1')) - # Removing a node removes its descendants too - self.assertFalse(new_tree.exists('grandchild1_1')) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Remove the last provider - new_tree.remove(uuids.root) - self.assertEqual([], new_tree.get_provider_uuids()) - self.client.update_from_provider_tree(self.context, new_tree) - assert_ptrees_equal() - - # Having removed the providers this way, they ought to be gone - # from placement - for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1, - uuids.ssp): - resp = self.client.get('/resource_providers/%s' % uuid) - self.assertEqual(404, resp.status_code) + kse.ClientException, + self.client.update_from_provider_tree, + self.context, new_tree) + # The provider didn't get deleted (this doesn't raise + # ResourceProviderNotFound) + self.client.get_provider_by_name(self.context, 'ssp') + + # Continue removing stuff + new_tree.remove('child1') + self.assertFalse(new_tree.exists('child1')) + # Removing a node removes its descendants too + self.assertFalse(new_tree.exists('grandchild1_1')) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Remove the last provider + new_tree.remove(uuids.root) + self.assertEqual([], new_tree.get_provider_uuids()) + self.client.update_from_provider_tree(self.context, new_tree) + assert_ptrees_equal() + + # Having removed the providers this way, they ought to be gone + # from placement + for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1, + uuids.ssp): + resp = self.client.get('/resource_providers/%s' % uuid) + self.assertEqual(404, resp.status_code) def test_non_tree_aggregate_membership(self): """There are some methods of the reportclient that interact with the @@ -1014,63 +952,61 @@ sure it never gets populated (and we don't raise ValueError). """ agg_uuid = uuids.agg - with self._interceptor(): - # get_provider_tree_and_ensure_root creates a resource provider - # record for us - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid, name=self.compute_name) - self.assertEqual([self.compute_uuid], ptree.get_provider_uuids()) - # Now blow away the cache so we can ensure the use_cache=False - # behavior of aggregate_{add|remove}_host correctly ignores and/or - # doesn't attempt to populate/update it. 
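For orientation before the next test: the happy path that test_update_from_provider_tree keeps returning to is to build the desired ProviderTree in memory and hand it to update_from_provider_tree, which diffs it against placement and syncs inventories, traits and aggregates. A minimal sketch, assuming `client` and `ctx` are the same fixtures used above:

    import os_resource_classes as orc
    from nova.compute import provider_tree
    from oslo_utils.fixture import uuidsentinel as uuids

    def sync_minimal_tree(client, ctx):
        tree = provider_tree.ProviderTree()
        tree.new_root('cn', uuids.cn)
        tree.new_child('pf1', uuids.cn, uuid=uuids.pf1)
        tree.update_inventory('pf1', {orc.SRIOV_NET_VF: {'total': 4}})
        tree.update_traits('pf1', ['CUSTOM_PHYSNET_1'])
        # One call diffs the desired tree against placement; failures
        # invalidate the cache for the affected subtree, as the error
        # scenarios above demonstrate.
        client.update_from_provider_tree(ctx, tree)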
-            self.client._provider_tree.remove(self.compute_uuid)
-            self.assertEqual(
-                [], self.client._provider_tree.get_provider_uuids())
-
-            # Use the reportclient's _get_provider_aggregates() private method
-            # to verify no aggregates are yet associated with this provider
-            aggs = self.client._get_provider_aggregates(
-                self.context, self.compute_uuid).aggregates
-            self.assertEqual(set(), aggs)
-
-            # Now associate the compute **host name** with an aggregate and
-            # ensure the aggregate association is saved properly
-            self.client.aggregate_add_host(
-                self.context, agg_uuid, host_name=self.compute_name)
-
-            # Check that the ProviderTree cache hasn't been modified (since
-            # the aggregate_add_host() method is only called from nova-api and
-            # we don't want to have a ProviderTree cache at that layer.
-            self.assertEqual(
-                [], self.client._provider_tree.get_provider_uuids())
-            aggs = self.client._get_provider_aggregates(
-                self.context, self.compute_uuid).aggregates
-            self.assertEqual(set([agg_uuid]), aggs)
-
-            # Finally, remove the association and verify it's removed in
-            # placement
-            self.client.aggregate_remove_host(
-                self.context, agg_uuid, self.compute_name)
-            self.assertEqual(
-                [], self.client._provider_tree.get_provider_uuids())
-            aggs = self.client._get_provider_aggregates(
-                self.context, self.compute_uuid).aggregates
-            self.assertEqual(set(), aggs)
-
-            # Try removing the same host and verify no error
-            self.client.aggregate_remove_host(
-                self.context, agg_uuid, self.compute_name)
-            self.assertEqual(
-                [], self.client._provider_tree.get_provider_uuids())
+        # get_provider_tree_and_ensure_root creates a resource provider
+        # record for us
+        ptree = self.client.get_provider_tree_and_ensure_root(
+            self.context, self.compute_uuid, name=self.compute_name)
+        self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
+        # Now blow away the cache so we can ensure the use_cache=False
+        # behavior of aggregate_{add|remove}_host correctly ignores and/or
+        # doesn't attempt to populate/update it.
+        self.client._provider_tree.remove(self.compute_uuid)
+        self.assertEqual(
+            [], self.client._provider_tree.get_provider_uuids())
+
+        # Use the reportclient's _get_provider_aggregates() private method
+        # to verify no aggregates are yet associated with this provider
+        aggs = self.client._get_provider_aggregates(
+            self.context, self.compute_uuid).aggregates
+        self.assertEqual(set(), aggs)
+
+        # Now associate the compute **host name** with an aggregate and
+        # ensure the aggregate association is saved properly
+        self.client.aggregate_add_host(
+            self.context, agg_uuid, host_name=self.compute_name)
+
+        # Check that the ProviderTree cache hasn't been modified (since
+        # the aggregate_add_host() method is only called from nova-api and
+        # we don't want to have a ProviderTree cache at that layer).
+ self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) + aggs = self.client._get_provider_aggregates( + self.context, self.compute_uuid).aggregates + self.assertEqual(set([agg_uuid]), aggs) + + # Finally, remove the association and verify it's removed in + # placement + self.client.aggregate_remove_host( + self.context, agg_uuid, self.compute_name) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) + aggs = self.client._get_provider_aggregates( + self.context, self.compute_uuid).aggregates + self.assertEqual(set(), aggs) + + # Try removing the same host and verify no error + self.client.aggregate_remove_host( + self.context, agg_uuid, self.compute_name) + self.assertEqual( + [], self.client._provider_tree.get_provider_uuids()) def test_alloc_cands_smoke(self): """Simple call to get_allocation_candidates for version checking.""" flavor = objects.Flavor( vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0) req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) - with self._interceptor(): - self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec)) + self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec)) def _set_up_provider_tree(self): r"""Create two compute nodes in placement ("this" one, and another one) @@ -1092,8 +1028,6 @@ | SRIOV_NET_VF=4| | SRIOV_NET_VF=4| |aggs: [uuids.agg1] | +-------------------+ +-------------------+ +-------------------+ - Must be invoked from within an _interceptor() context. - Returns a dict, keyed by provider UUID, of the expected shape of the provider tree, as expected by the expected_dict param of assertProviderTree. @@ -1210,10 +1144,7 @@ (k, uuid)) def _set_up_provider_tree_allocs(self): - """Create some allocations on our compute (with sharing). - - Must be invoked from within an _interceptor() context. - """ + """Create some allocations on our compute (with sharing).""" ret = { uuids.cn_inst1: { 'allocations': { @@ -1288,19 +1219,18 @@ 'resources_DISK:DISK_GB': 10 }) req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) - with self._interceptor(): - self._set_up_provider_tree() - acs = self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec))[0] - # We're not going to validate all the allocations - Placement has - # tests for that - just make sure they're there. - self.assertEqual(3, len(acs)) - # We're not going to validate all the mappings - Placement has - # tests for that - just make sure they're there. - for ac in acs: - self.assertIn('allocations', ac) - self.assertEqual({'_CPU', '_MEM', '_DISK'}, - set(ac['mappings'])) + self._set_up_provider_tree() + acs = self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec))[0] + # We're not going to validate all the allocations - Placement has + # tests for that - just make sure they're there. + self.assertEqual(3, len(acs)) + # We're not going to validate all the mappings - Placement has + # tests for that - just make sure they're there. 
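A note on the mapping keys asserted just below: each resources<SUFFIX>: extra spec in the flavor forms its own numbered request group, and placement returns a mappings key per candidate telling which provider satisfied each group. An illustrative sketch, with invented values and `client`/`ctx` assumed to be the test fixtures:

    from nova import objects
    from nova.scheduler import utils

    def granular_request(client, ctx):
        # Mirrors the granular extra-spec syntax the surrounding test
        # uses; each suffix becomes a separate request group.
        flavor = objects.Flavor(
            vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
            extra_specs={
                'resources_CPU:VCPU': 1,          # group "_CPU"
                'resources_MEM:MEMORY_MB': 1024,  # group "_MEM"
                'resources_DISK:DISK_GB': 10,     # group "_DISK"
            })
        req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False)
        # Returns (candidates, provider_summaries, microversion).
        return client.get_allocation_candidates(
            ctx, utils.ResourceRequest(req_spec))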
+ for ac in acs: + self.assertIn('allocations', ac) + self.assertEqual({'_CPU', '_MEM', '_DISK'}, + set(ac['mappings'])) # One data element is: # root_required: set of traits for root_required @@ -1348,246 +1278,242 @@ req_spec = objects.RequestSpec(flavor=flavor, is_bfv=False) req_spec.root_required.update(data['root_required']) req_spec.root_forbidden.update(data['root_forbidden']) - with self._interceptor(): - self._set_up_provider_tree() - self.client.set_traits_for_provider( - self.context, self.compute_uuid, - (ot.COMPUTE_STATUS_DISABLED, ot.COMPUTE_VOLUME_EXTEND, - 'CUSTOM_FOO')) - acs, _, ver = self.client.get_allocation_candidates( - self.context, utils.ResourceRequest(req_spec)) - self.assertEqual('1.35', ver) - # This prints which ddt permutation we're using if it fails. - self.assertEqual(data['expected_acs'], len(acs), data) + self._set_up_provider_tree() + self.client.set_traits_for_provider( + self.context, self.compute_uuid, + (ot.COMPUTE_STATUS_DISABLED, ot.COMPUTE_VOLUME_EXTEND, + 'CUSTOM_FOO')) + acs, _, ver = self.client.get_allocation_candidates( + self.context, utils.ResourceRequest(req_spec)) + self.assertEqual('1.35', ver) + # This prints which ddt permutation we're using if it fails. + self.assertEqual(data['expected_acs'], len(acs), data) def test_get_allocations_for_provider_tree(self): - with self._interceptor(): - # When the provider tree cache is empty (or we otherwise supply a - # bogus node name), we get ValueError. - self.assertRaises(ValueError, - self.client.get_allocations_for_provider_tree, - self.context, 'bogus') - - self._set_up_provider_tree() - - # At this point, there are no allocations - self.assertEqual({}, self.client.get_allocations_for_provider_tree( - self.context, self.compute_name)) - - expected = self._set_up_provider_tree_allocs() - - # And now we should get all the right allocations. Note that we see - # nothing from othercn_inst. - actual = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertAllocations(expected, actual) + # When the provider tree cache is empty (or we otherwise supply a + # bogus node name), we get ValueError. + self.assertRaises(ValueError, + self.client.get_allocations_for_provider_tree, + self.context, 'bogus') + + self._set_up_provider_tree() + + # At this point, there are no allocations + self.assertEqual({}, self.client.get_allocations_for_provider_tree( + self.context, self.compute_name)) + + expected = self._set_up_provider_tree_allocs() + + # And now we should get all the right allocations. Note that we see + # nothing from othercn_inst. + actual = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertAllocations(expected, actual) def test_reshape(self): """Smoke test the report client shim for the reshaper API.""" - with self._interceptor(): - # Simulate placement API communication failure - with mock.patch.object( - self.client, 'post', side_effect=kse.MissingAuthPlugin): - self.assertRaises(kse.ClientException, - self.client._reshape, self.context, {}, {}) - - # Invalid payload (empty inventories) results in a 409, which the - # report client converts to ReshapeFailed - try: - self.client._reshape(self.context, {}, {}) - except exception.ReshapeFailed as e: - self.assertIn('JSON does not validate: {} does not have ' - 'enough properties', e.kwargs['error']) - - # Okay, do some real stuffs. 
We're just smoke-testing that we can
-            # hit a good path to the API here; real testing of the API happens
-            # in gabbits and via update_from_provider_tree.
-            self._set_up_provider_tree()
-            self._set_up_provider_tree_allocs()
-            # Updating allocations bumps generations for affected providers.
-            # In real life, the subsequent update_from_provider_tree will
-            # bounce 409, the cache will be cleared, and the operation will be
-            # retried. We don't care about any of that retry logic in the scope
-            # of this test case, so just clear the cache so
-            # get_provider_tree_and_ensure_root repopulates it and we avoid the
-            # conflict exception.
-            self.client.clear_provider_cache()
-
-            ptree = self.client.get_provider_tree_and_ensure_root(
-                self.context, self.compute_uuid)
-            inventories = {}
-            for rp_uuid in ptree.get_provider_uuids():
-                data = ptree.data(rp_uuid)
-                # Add a new resource class to the inventories
-                inventories[rp_uuid] = {
-                    "inventories": dict(data.inventory,
-                                        CUSTOM_FOO={'total': 10}),
-                    "resource_provider_generation": data.generation
-                }
-
-            allocs = self.client.get_allocations_for_provider_tree(
-                self.context, self.compute_name)
-            for alloc in allocs.values():
-                for res in alloc['allocations'].values():
-                    res['resources']['CUSTOM_FOO'] = 1
+        # Simulate placement API communication failure
+        with mock.patch.object(
+                self.client, 'post', side_effect=kse.MissingAuthPlugin):
+            self.assertRaises(kse.ClientException,
+                              self.client._reshape, self.context, {}, {})
+
+        # Invalid payload (empty inventories) results in a 409, which the
+        # report client converts to ReshapeFailed
+        try:
+            self.client._reshape(self.context, {}, {})
+        except exception.ReshapeFailed as e:
+            self.assertIn('JSON does not validate: {} does not have '
+                          'enough properties', e.kwargs['error'])
+
+        # Okay, do some real stuff. We're just smoke-testing that we can
+        # hit a good path to the API here; real testing of the API happens
+        # in gabbits and via update_from_provider_tree.
+        self._set_up_provider_tree()
+        self._set_up_provider_tree_allocs()
+        # Updating allocations bumps generations for affected providers.
+        # In real life, the subsequent update_from_provider_tree will
+        # bounce 409, the cache will be cleared, and the operation will be
+        # retried. We don't care about any of that retry logic in the scope
+        # of this test case, so just clear the cache so
+        # get_provider_tree_and_ensure_root repopulates it and we avoid the
+        # conflict exception.
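The _reshape shim being exercised here posts to placement's /reshaper endpoint, which atomically replaces inventories and moves allocations in a single transaction. The payload it sends, as built above with the added CUSTOM_FOO class, is roughly the following shape (all identifiers invented):

    # Approximate /reshaper payload: one generation-checked inventory
    # blob per provider plus the full replacement allocation set.
    payload = {
        'inventories': {
            '<rp_uuid>': {
                'resource_provider_generation': 1,
                'inventories': {'CUSTOM_FOO': {'total': 10}},
            },
        },
        'allocations': {
            '<consumer_uuid>': {
                'allocations': {
                    '<rp_uuid>': {'resources': {'CUSTOM_FOO': 1}},
                },
                'project_id': '<project>',
                'user_id': '<user>',
                'consumer_generation': 1,
            },
        },
    }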
+ self.client.clear_provider_cache() + + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + inventories = {} + for rp_uuid in ptree.get_provider_uuids(): + data = ptree.data(rp_uuid) + # Add a new resource class to the inventories + inventories[rp_uuid] = { + "inventories": dict(data.inventory, + CUSTOM_FOO={'total': 10}), + "resource_provider_generation": data.generation + } - resp = self.client._reshape(self.context, inventories, allocs) - self.assertEqual(204, resp.status_code) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + for alloc in allocs.values(): + for res in alloc['allocations'].values(): + res['resources']['CUSTOM_FOO'] = 1 + + resp = self.client._reshape(self.context, inventories, allocs) + self.assertEqual(204, resp.status_code) def test_update_from_provider_tree_reshape(self): """Run update_from_provider_tree with reshaping.""" - with self._interceptor(): - exp_ptree = self._set_up_provider_tree() - # Save a copy of this for later - orig_exp_ptree = copy.deepcopy(exp_ptree) - - # A null reshape: no inv changes, empty allocs - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations({}, allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - - exp_allocs = self._set_up_provider_tree_allocs() - # Save a copy of this for later - orig_exp_allocs = copy.deepcopy(exp_allocs) - # Updating allocations bumps generations for affected providers. - # In real life, the subsequent update_from_provider_tree will - # bounce 409, the cache will be cleared, and the operation will be - # retried. We don't care about any of that retry logic in the scope - # of this test case, so just clear the cache so - # get_provider_tree_and_ensure_root repopulates it and we avoid the - # conflict exception. - self.client.clear_provider_cache() - # Another null reshape: no inv changes, no alloc changes - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - - # Now a reshape that adds an inventory item to all the providers in - # the provider tree (i.e. the "local" ones and the shared one, but - # not the othercn); and an allocation of that resource only for the - # local instances, and only on providers that already have - # allocations (i.e. the compute node and sharing provider for both - # cn_inst*, and numa1 for cn_inst1 and numa2 for cn_inst2). 
- ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - for rp_uuid in ptree.get_provider_uuids(): - # Add a new resource class to the inventories - ptree.update_inventory( - rp_uuid, dict(ptree.data(rp_uuid).inventory, - CUSTOM_FOO={'total': 10})) - exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = { - 'total': 10} - for c_uuid, alloc in allocs.items(): - for rp_uuid, res in alloc['allocations'].items(): - res['resources']['CUSTOM_FOO'] = 1 - exp_allocs[c_uuid]['allocations'][rp_uuid][ - 'resources']['CUSTOM_FOO'] = 1 - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - - # Let's do a big transform that stuffs everything back onto the - # compute node - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - cum_inv = {} - for rp_uuid in ptree.get_provider_uuids(): - # Accumulate all the inventory amounts for each RC - for rc, inv in ptree.data(rp_uuid).inventory.items(): - if rc not in cum_inv: - cum_inv[rc] = {'total': 0} - cum_inv[rc]['total'] += inv['total'] - # Remove all the providers except the compute node and the - # shared storage provider, which still has (and shall - # retain) allocations from the "other" compute node. - # TODO(efried): But is that right? I should be able to - # remove the SSP from *this* tree and have it continue to - # exist in the world. But how would ufpt distinguish? - if rp_uuid not in (self.compute_uuid, uuids.ssp): - ptree.remove(rp_uuid) - # Put the accumulated inventory onto the compute RP - ptree.update_inventory(self.compute_uuid, cum_inv) - # Cause trait and aggregate transformations too. - ptree.update_aggregates(self.compute_uuid, set()) - ptree.update_traits(self.compute_uuid, ['CUSTOM_ALL_IN_ONE']) - exp_ptree = { - self.compute_uuid: dict( - parent_uuid = None, - inventory = cum_inv, - aggregates=set(), - traits = set(['CUSTOM_ALL_IN_ONE']), - ), - uuids.ssp: dict( - # Don't really care about the details - parent_uuid=None, - ), - } + exp_ptree = self._set_up_provider_tree() + # Save a copy of this for later + orig_exp_ptree = copy.deepcopy(exp_ptree) - # Let's inject an error path test here: attempting to reshape - # inventories without having moved their allocations should fail. 
- ex = self.assertRaises( - exception.ReshapeFailed, - self.client.update_from_provider_tree, self.context, ptree, - allocations=allocs) - self.assertIn('placement.inventory.inuse', ex.format_message()) - - # Move all the allocations off their existing providers and - # onto the compute node - for c_uuid, alloc in allocs.items(): - cum_allocs = {} - for rp_uuid, resources in alloc['allocations'].items(): - # Accumulate all the allocations for each RC - for rc, amount in resources['resources'].items(): - if rc not in cum_allocs: - cum_allocs[rc] = 0 - cum_allocs[rc] += amount - alloc['allocations'] = { - # Put the accumulated allocations on the compute RP - self.compute_uuid: {'resources': cum_allocs}} - exp_allocs = copy.deepcopy(allocs) - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - - # Okay, let's transform back now - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(exp_ptree, ptree) - self.assertAllocations(exp_allocs, allocs) - for rp_uuid, data in orig_exp_ptree.items(): - if not ptree.exists(rp_uuid): - # This should only happen for children, because the CN - # and SSP are already there. - ptree.new_child(data['name'], data['parent_uuid'], - uuid=rp_uuid) - ptree.update_inventory(rp_uuid, data['inventory']) - ptree.update_traits(rp_uuid, data['traits']) - ptree.update_aggregates(rp_uuid, data['aggregates']) - for c_uuid, orig_allocs in orig_exp_allocs.items(): - allocs[c_uuid]['allocations'] = orig_allocs['allocations'] - self.client.update_from_provider_tree(self.context, ptree, - allocations=allocs) - ptree = self.client.get_provider_tree_and_ensure_root( - self.context, self.compute_uuid) - allocs = self.client.get_allocations_for_provider_tree( - self.context, self.compute_name) - self.assertProviderTree(orig_exp_ptree, ptree) - self.assertAllocations(orig_exp_allocs, allocs) + # A null reshape: no inv changes, empty allocs + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations({}, allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + + exp_allocs = self._set_up_provider_tree_allocs() + # Save a copy of this for later + orig_exp_allocs = copy.deepcopy(exp_allocs) + # Updating allocations bumps generations for affected providers. + # In real life, the subsequent update_from_provider_tree will + # bounce 409, the cache will be cleared, and the operation will be + # retried. We don't care about any of that retry logic in the scope + # of this test case, so just clear the cache so + # get_provider_tree_and_ensure_root repopulates it and we avoid the + # conflict exception. + self.client.clear_provider_cache() + # Another null reshape: no inv changes, no alloc changes + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + + # Now a reshape that adds an inventory item to all the providers in + # the provider tree (i.e. 
the "local" ones and the shared one, but + # not the othercn); and an allocation of that resource only for the + # local instances, and only on providers that already have + # allocations (i.e. the compute node and sharing provider for both + # cn_inst*, and numa1 for cn_inst1 and numa2 for cn_inst2). + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + for rp_uuid in ptree.get_provider_uuids(): + # Add a new resource class to the inventories + ptree.update_inventory( + rp_uuid, dict(ptree.data(rp_uuid).inventory, + CUSTOM_FOO={'total': 10})) + exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = { + 'total': 10} + for c_uuid, alloc in allocs.items(): + for rp_uuid, res in alloc['allocations'].items(): + res['resources']['CUSTOM_FOO'] = 1 + exp_allocs[c_uuid]['allocations'][rp_uuid][ + 'resources']['CUSTOM_FOO'] = 1 + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + + # Let's do a big transform that stuffs everything back onto the + # compute node + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + cum_inv = {} + for rp_uuid in ptree.get_provider_uuids(): + # Accumulate all the inventory amounts for each RC + for rc, inv in ptree.data(rp_uuid).inventory.items(): + if rc not in cum_inv: + cum_inv[rc] = {'total': 0} + cum_inv[rc]['total'] += inv['total'] + # Remove all the providers except the compute node and the + # shared storage provider, which still has (and shall + # retain) allocations from the "other" compute node. + # TODO(efried): But is that right? I should be able to + # remove the SSP from *this* tree and have it continue to + # exist in the world. But how would ufpt distinguish? + if rp_uuid not in (self.compute_uuid, uuids.ssp): + ptree.remove(rp_uuid) + # Put the accumulated inventory onto the compute RP + ptree.update_inventory(self.compute_uuid, cum_inv) + # Cause trait and aggregate transformations too. + ptree.update_aggregates(self.compute_uuid, set()) + ptree.update_traits(self.compute_uuid, ['CUSTOM_ALL_IN_ONE']) + exp_ptree = { + self.compute_uuid: dict( + parent_uuid = None, + inventory = cum_inv, + aggregates=set(), + traits = set(['CUSTOM_ALL_IN_ONE']), + ), + uuids.ssp: dict( + # Don't really care about the details + parent_uuid=None, + ), + } + + # Let's inject an error path test here: attempting to reshape + # inventories without having moved their allocations should fail. 
+ ex = self.assertRaises( + exception.ReshapeFailed, + self.client.update_from_provider_tree, self.context, ptree, + allocations=allocs) + self.assertIn('placement.inventory.inuse', ex.format_message()) + + # Move all the allocations off their existing providers and + # onto the compute node + for c_uuid, alloc in allocs.items(): + cum_allocs = {} + for rp_uuid, resources in alloc['allocations'].items(): + # Accumulate all the allocations for each RC + for rc, amount in resources['resources'].items(): + if rc not in cum_allocs: + cum_allocs[rc] = 0 + cum_allocs[rc] += amount + alloc['allocations'] = { + # Put the accumulated allocations on the compute RP + self.compute_uuid: {'resources': cum_allocs}} + exp_allocs = copy.deepcopy(allocs) + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + + # Okay, let's transform back now + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(exp_ptree, ptree) + self.assertAllocations(exp_allocs, allocs) + for rp_uuid, data in orig_exp_ptree.items(): + if not ptree.exists(rp_uuid): + # This should only happen for children, because the CN + # and SSP are already there. + ptree.new_child(data['name'], data['parent_uuid'], + uuid=rp_uuid) + ptree.update_inventory(rp_uuid, data['inventory']) + ptree.update_traits(rp_uuid, data['traits']) + ptree.update_aggregates(rp_uuid, data['aggregates']) + for c_uuid, orig_allocs in orig_exp_allocs.items(): + allocs[c_uuid]['allocations'] = orig_allocs['allocations'] + self.client.update_from_provider_tree(self.context, ptree, + allocations=allocs) + ptree = self.client.get_provider_tree_and_ensure_root( + self.context, self.compute_uuid) + allocs = self.client.get_allocations_for_provider_tree( + self.context, self.compute_name) + self.assertProviderTree(orig_exp_ptree, ptree) + self.assertAllocations(orig_exp_allocs, allocs) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_scheduler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_scheduler.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_scheduler.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_scheduler.py 2020-04-10 17:57:57.000000000 +0000 @@ -64,8 +64,8 @@ in different cells and make sure that migration fails with NoValidHost. """ # Hosts in different cells - self.start_service('compute', host='compute1', cell=CELL1_NAME) - self.start_service('compute', host='compute2', cell=CELL2_NAME) + self.start_service('compute', host='compute1', cell_name=CELL1_NAME) + self.start_service('compute', host='compute2', cell_name=CELL2_NAME) _, server = self._test_create_and_migrate(expected_status=202) # The instance action should have failed with details. @@ -79,11 +79,11 @@ migration is allowed. 
""" # Hosts in the same cell - self.start_service('compute', host='compute1', cell=CELL1_NAME) - self.start_service('compute', host='compute2', cell=CELL1_NAME) + self.start_service('compute', host='compute1', cell_name=CELL1_NAME) + self.start_service('compute', host='compute2', cell_name=CELL1_NAME) # Create another host just so it looks like we have hosts in # both cells - self.start_service('compute', host='compute3', cell=CELL2_NAME) + self.start_service('compute', host='compute3', cell_name=CELL2_NAME) # Force the server onto compute1 in cell1 so we do not accidentally # land on compute3 in cell2 and fail to migrate. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_server_group.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_server_group.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_server_group.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_server_group.py 2020-04-10 17:57:57.000000000 +0000 @@ -867,9 +867,9 @@ super(ServerGroupTestMultiCell, self).setUp() # Start two compute services, one per cell self.compute1 = self.start_service('compute', host='host1', - cell='cell1') + cell_name='cell1') self.compute2 = self.start_service('compute', host='host2', - cell='cell2') + cell_name='cell2') # This is needed to find a server that is still booting with multiple # cells, while waiting for the state change to ACTIVE. See the # _get_instance method in the compute/api for details. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_servers.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_servers.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_servers.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_servers.py 2020-04-10 17:57:57.000000000 +0000 @@ -22,6 +22,7 @@ from keystoneauth1 import adapter import mock +from neutronclient.common import exceptions as neutron_exception import os_resource_classes as orc from oslo_config import cfg from oslo_log import log as logging @@ -51,8 +52,9 @@ from nova.tests.unit import fake_block_device from nova.tests.unit import fake_notifier from nova.tests.unit import fake_requests -import nova.tests.unit.image.fake +from nova.tests.unit.image import fake as fake_image from nova.tests.unit.objects import test_instance_info_cache +from nova import utils as nova_utils from nova.virt import fake from nova import volume @@ -72,7 +74,6 @@ _min_count_parameter = 'min_count' def setUp(self): - self.computes = {} super(ServersTestBase, self).setUp() def _get_access_ips_params(self): @@ -100,23 +101,6 @@ node.hypervisor_hostname: int(node.stats.get('failed_builds', 0)) for node in computes} - def _run_periodics(self): - """Run the update_available_resource task on every compute manager - - This runs periodics on the computes in an undefined order; some child - class redefined this function to force a specific order. - """ - - if self.compute.host not in self.computes: - self.computes[self.compute.host] = self.compute - - ctx = context.get_admin_context() - for compute in self.computes.values(): - LOG.info('Running periodic for compute (%s)', - compute.manager.host) - compute.manager.update_available_resource(ctx) - LOG.info('Finished with periodics') - def test_create_server_with_error(self): # Create a server which will enter error state. 
@@ -141,8 +125,18 @@ # We should have no (persisted) build failures until we update # resources, after which we should have one self.assertEqual([0], list(self._get_node_build_failures().values())) - self._run_periodics() - self.assertEqual([1], list(self._get_node_build_failures().values())) + + # BuildAbortException will not trigger a reschedule and the build + # failure update is the last step in the compute manager after + # instance state setting, fault recording and notification sending. So + # we have no other way than simply wait to ensure the node build + # failure counter updated before we assert it. + def failed_counter_updated(): + self._run_periodics() + self.assertEqual( + [1], list(self._get_node_build_failures().values())) + + self._wait_for_assert(failed_counter_updated) def test_create_server_with_image_type_filter(self): self.flags(query_placement_for_image_type_support=True, @@ -168,8 +162,7 @@ def _test_create_server_with_error_with_retries(self): # Create a server which will enter error state. - self.compute2 = self.start_service('compute', host='host2') - self.computes['compute2'] = self.compute2 + self._start_compute('host2') fails = [] @@ -205,9 +198,18 @@ self.flags(max_attempts=1, group='scheduler') fails = self._test_create_server_with_error_with_retries() self.assertEqual(1, fails) - self._run_periodics() - self.assertEqual( - [0, 1], list(sorted(self._get_node_build_failures().values()))) + + # The build failure update is the last step in build_and_run_instance + # in the compute manager after instance state setting, fault + # recording and notification sending. So we have no other way than + # simply wait to ensure the node build failure counter updated + # before we assert it. + def failed_counter_updated(): + self._run_periodics() + self.assertEqual( + [0, 1], list(sorted(self._get_node_build_failures().values()))) + + self._wait_for_assert(failed_counter_updated) def test_create_and_delete_server(self): # Creates and deletes a server. @@ -1371,8 +1373,7 @@ # Now update the image metadata to be something that won't work with # the fake compute driver we're using since the fake driver has an # "x86_64" architecture. - rebuild_image_ref = ( - nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID) + rebuild_image_ref = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID self.api.put_image_meta_key( rebuild_image_ref, 'hw_architecture', 'unicore32') # Now rebuild the server with that updated image and it should result @@ -1485,8 +1486,7 @@ allocs = allocs[rp_uuid]['resources'] assertFlavorMatchesAllocation(flavor, allocs) - rebuild_image_ref = ( - nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID) + rebuild_image_ref = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID # Now rebuild the server with a different image. rebuild_req_body = { 'rebuild': { @@ -1539,8 +1539,7 @@ # Now rebuild the server with a different image than was used to create # our fake volume. - rebuild_image_ref = ( - nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID) + rebuild_image_ref = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID rebuild_req_body = { 'rebuild': { 'imageRef': rebuild_image_ref @@ -1932,10 +1931,16 @@ self.api.post_server_action( server['id'], resize_req, check_response_status=[202]) - self._assert_resize_migrate_action_fail( + event = self._assert_resize_migrate_action_fail( server, instance_actions.RESIZE, 'NoValidHost') + self.assertIn('details', event) + # This test case works in microversion 2.84. 
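The failed_counter_updated closures above are handed to _wait_for_assert, introduced because the failure counter is only bumped after the API has already responded. The helper's implementation is not part of this diff; a minimal poll-until-timeout equivalent, with invented names and timings, could look like:

    import time

    def wait_for_assert(assert_fn, timeout=10, interval=0.5):
        # Retry the assertion until it passes or the deadline expires;
        # the final attempt lets the AssertionError propagate.
        deadline = time.time() + timeout
        while True:
            try:
                return assert_fn()
            except AssertionError:
                if time.time() > deadline:
                    raise
                time.sleep(interval)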
+ self.assertIn('No valid host was found', event['details']) server = self.admin_api.get_server(server['id']) self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host']) + # The server is still ACTIVE and thus there is no fault message. + self.assertEqual('ACTIVE', server['status']) + self.assertNotIn('fault', server) # only the source host shall have usages after the failed resize self.assertFlavorMatchesUsage(source_rp_uuid, self.flavor1) @@ -3655,11 +3660,68 @@ failed_hostname = self.compute1.manager.host - failed_rp_uuid = self._get_provider_uuid_by_host(failed_hostname) - # Expects no allocation records on the failed host. - self.assertRequestMatchesUsage({'VCPU': 0, - 'MEMORY_MB': 0, - 'DISK_GB': 0}, failed_rp_uuid) + # BuildAbortException coming from the FakeBuildAbortDriver will not + # trigger a reschedule and the placement cleanup is the last step in + # the compute manager after instance state setting, fault recording + # and notification sending. So we have no other way than to simply + # wait until the placement cleanup happens before we assert it. + def placement_cleanup(): + failed_rp_uuid = self._get_provider_uuid_by_host(failed_hostname) + # Expects no allocation records on the failed host. + self.assertRequestMatchesUsage({'VCPU': 0, + 'MEMORY_MB': 0, + 'DISK_GB': 0}, failed_rp_uuid) + self._wait_for_assert(placement_cleanup) + + +class ServerDeleteBuildTests(integrated_helpers.ProviderUsageBaseTestCase): + """Tests deleting a server while it is still building and validates that + its allocations in Placement are properly cleaned up. + """ + compute_driver = 'fake.SmallFakeDriver' + + def setUp(self): + super(ServerDeleteBuildTests, self).setUp() + self.compute1 = self._start_compute(host='host1') + flavors = self.api.get_flavors() + self.flavor1 = flavors[0] + + def test_delete_stuck_build_instance_after_claim(self): + """Test for bug 1859496 where an instance allocation can leak after + deletion if the build process has been interrupted after the resource + claim. + """ + + # To reproduce the issue we need to interrupt the instance build + # once the build request has already reached the scheduler service, + # so that the instance resources get claimed. + # A real-world case is typically a conductor restart during the + # instance claim. + # To emulate a conductor restart we raise an exception in the + # filter_scheduler after the instance is claimed, and mock + # _bury_in_cell0 so that the conductor thread simply returns. + # Then we delete the server after ensuring the allocation was made, + # and check that there is no leak. + # Note that because the deletion occurs early, the conductor has not + # populated the instance DB entries in the cells, which prevents the + # compute update_available_resource periodic task from healing leaked + # allocations. + + server_req = self._build_server( + 'interrupted-server', flavor_id=self.flavor1['id'], + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + networks='none') + + with test.nested( + mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.' + '_ensure_sufficient_hosts'), + mock.patch('nova.conductor.manager.ComputeTaskManager.'
+ '_bury_in_cell0') + ) as (mock_suff_hosts, mock_bury): + mock_suff_hosts.side_effect = test.TestingException('oops') + server = self.api.post_server({'server': server_req}) + self._wait_for_server_allocations(server['id']) + self.api.api_delete('/servers/%s' % server['id']) + allocations = self._get_allocations_by_server_uuid(server['id']) + self.assertEqual({}, allocations) class ServerBuildAbortTestsWithNestedResourceRequest(ServerBuildAbortTests): @@ -4031,7 +4093,7 @@ # Note that we're using v2.35 explicitly as the api returns 404 # starting with 2.36 - with nova.utils.temporary_mutation(self.api, microversion='2.35'): + with nova_utils.temporary_mutation(self.api, microversion='2.35'): images = self.api.get_images() self.image_id_with_trait = images[0]['id'] self.api.api_put('/images/%s/metadata' % self.image_id_with_trait, @@ -4531,7 +4593,7 @@ 'host2': 'cell2'} for host in sorted(host_to_cell_mappings): self.start_service('compute', host=host, - cell=host_to_cell_mappings[host]) + cell_name=host_to_cell_mappings[host]) def test_migrate_server_to_host_in_different_cell(self): # We target host1 specifically so that we have a predictable target for @@ -5956,6 +6018,76 @@ "until microversion 2.72.", six.text_type(ex)) + def test_live_migrate_server_with_port_resource_request_old_version( + self): + server = self._create_server( + flavor=self.flavor, + networks=[{'port': self.neutron.port_1['id']}]) + self._wait_for_state_change(server, 'ACTIVE') + + # We need to simulate that the above server has a port that has + # resource request; we cannot boot with such a port but legacy servers + # can exist with such a port. + self._add_resource_request_to_a_bound_port(self.neutron.port_1['id']) + + post = { + 'os-migrateLive': { + 'host': None, + 'block_migration': False, + } + } + with mock.patch( + "nova.objects.service.get_minimum_version_all_cells", + return_value=48, + ): + ex = self.assertRaises( + client.OpenStackApiException, + self.api.post_server_action, server['id'], post) + + self.assertEqual(400, ex.response.status_code) + self.assertIn( + "The os-migrateLive action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported by this cluster right now", + six.text_type(ex)) + self.assertIn( + "The os-migrateLive action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported until every nova-compute is upgraded to Ussuri", + self.stdlog.logger.output) + + def test_evacuate_server_with_port_resource_request_old_version( + self): + server = self._create_server( + flavor=self.flavor, + networks=[{'port': self.neutron.port_1['id']}]) + self._wait_for_state_change(server, 'ACTIVE') + + # We need to simulate that the above server has a port that has + # resource request; we cannot boot with such a port but legacy servers + # can exist with such a port. 
+ self._add_resource_request_to_a_bound_port(self.neutron.port_1['id']) + + with mock.patch( + "nova.objects.service.get_minimum_version_all_cells", + return_value=48, + ): + ex = self.assertRaises( + client.OpenStackApiException, + self.api.post_server_action, server['id'], {'evacuate': {}}) + + self.assertEqual(400, ex.response.status_code) + self.assertIn( + "The evacuate action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported by this cluster right now", + six.text_type(ex)) + self.assertIn( + "The evacuate action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported until every nova-compute is upgraded to Ussuri", + self.stdlog.logger.output) + def test_unshelve_offloaded_server_with_port_resource_request_old_version( self): server = self._create_server( @@ -5976,14 +6108,25 @@ # can exist with such a port. self._add_resource_request_to_a_bound_port(self.neutron.port_1['id']) - ex = self.assertRaises( - client.OpenStackApiException, - self.api.post_server_action, server['id'], {'unshelve': None}) + with mock.patch( + "nova.objects.service.get_minimum_version_all_cells", + return_value=48, + ): + ex = self.assertRaises( + client.OpenStackApiException, + self.api.post_server_action, server['id'], {'unshelve': None}) self.assertEqual(400, ex.response.status_code) self.assertIn( - 'The unshelve action on a server with ports having resource ' - 'requests', six.text_type(ex)) + "The unshelve action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported by this cluster right now", + six.text_type(ex)) + self.assertIn( + "The unshelve action on a server with ports having resource " + "requests, like a port with a QoS minimum bandwidth policy, is " + "not supported until every nova-compute is upgraded to Ussuri", + self.stdlog.logger.output) def test_unshelve_not_offloaded_server_with_port_resource_request( self): @@ -6027,7 +6170,6 @@ # allow non-admin to call the operations self.policy.set_rules({ - 'os_compute_api:os-evacuate': '@', 'os_compute_api:servers:create': '@', 'os_compute_api:servers:create:attach_network': '@', 'os_compute_api:servers:show': '@', @@ -6036,6 +6178,7 @@ 'os_compute_api:os-shelve:shelve': '@', 'os_compute_api:os-shelve:unshelve': '@', 'os_compute_api:os-migrate-server:migrate_live': '@', + 'os_compute_api:os-evacuate': '@', }) @@ -6473,7 +6616,7 @@ server = self._create_server_with_ports_and_check_allocation( non_qos_normal_port, qos_normal_port, qos_sriov_port) - orig_get_service = nova.objects.Service.get_by_host_and_binary + orig_get_service = objects.Service.get_by_host_and_binary def fake_get_service(context, host, binary): # host2 is the only migration target, let's make it too old so the @@ -6527,7 +6670,7 @@ server = self._create_server_with_ports_and_check_allocation( non_qos_normal_port, qos_normal_port, qos_sriov_port) - orig_get_service = nova.objects.Service.get_by_host_and_binary + orig_get_service = objects.Service.get_by_host_and_binary def fake_get_service(context, host, binary): # host2 is the first migration target, let's make it too old so the @@ -7228,8 +7371,7 @@ server = self._create_server_with_ports_and_check_allocation( non_qos_normal_port, qos_normal_port, qos_sriov_port) - orig_check = nova.virt.fake.FakeDriver.\ - check_can_live_migrate_destination + orig_check = fake.FakeDriver.check_can_live_migrate_destination def 
fake_check_can_live_migrate_destination( context, instance, src_compute_info, dst_compute_info, @@ -7361,6 +7503,167 @@ self._delete_server_and_check_allocations( server, qos_normal_port, qos_sriov_port) + def test_unshelve_offloaded_server_with_qos_port(self): + non_qos_normal_port = self.neutron.port_1 + qos_normal_port = self.neutron.port_with_resource_request + qos_sriov_port = self.neutron.port_with_sriov_resource_request + + server = self._create_server_with_ports_and_check_allocation( + non_qos_normal_port, qos_normal_port, qos_sriov_port) + + # with default config shelve means immediate offload as well + req = { + 'shelve': {} + } + self.api.post_server_action(server['id'], req) + self._wait_for_server_parameter( + server, {'status': 'SHELVED_OFFLOADED'}) + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + self.api.post_server_action(server['id'], {'unshelve': None}) + self._wait_for_server_parameter( + server, + {'OS-EXT-SRV-ATTR:host': 'host1', + 'status': 'ACTIVE'}) + self._check_allocation( + server, self.compute1_rp_uuid, non_qos_normal_port, + qos_normal_port, qos_sriov_port, self.flavor_with_group_policy) + + self._assert_pci_request_pf_device_name(server, 'host1-ens2') + + # shelve offload again and then make host1 unusable so the subsequent + # unshelve needs to select host2 + req = { + 'shelve': {} + } + self.api.post_server_action(server['id'], req) + self._wait_for_server_parameter( + server, {'status': 'SHELVED_OFFLOADED'}) + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + self.admin_api.put_service( + self.compute1_service_id, {"status": "disabled"}) + + self.api.post_server_action(server['id'], {'unshelve': None}) + self._wait_for_server_parameter( + server, + {'OS-EXT-SRV-ATTR:host': 'host2', + 'status': 'ACTIVE'}) + + self._check_allocation( + server, self.compute2_rp_uuid, non_qos_normal_port, + qos_normal_port, qos_sriov_port, self.flavor_with_group_policy) + + self._assert_pci_request_pf_device_name(server, 'host2-ens2') + + self._delete_server_and_check_allocations( + server, qos_normal_port, qos_sriov_port) + + def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self): + # Update the name of the network device RP of PF2 on host2 to something + # unexpected. This will cause + # update_pci_request_spec_with_allocated_interface_name() to raise + # when the instance is unshelved to the host2. 
+ rsp = self.placement_api.put( + '/resource_providers/%s' + % self.sriov_dev_rp_per_host[self.compute2_rp_uuid][self.PF2], + {"name": "invalid-device-rp-name"}) + self.assertEqual(200, rsp.status) + + non_qos_normal_port = self.neutron.port_1 + qos_normal_port = self.neutron.port_with_resource_request + qos_sriov_port = self.neutron.port_with_sriov_resource_request + + server = self._create_server_with_ports_and_check_allocation( + non_qos_normal_port, qos_normal_port, qos_sriov_port) + + # with default config shelve means immediate offload as well + req = { + 'shelve': {} + } + self.api.post_server_action(server['id'], req) + self._wait_for_server_parameter( + server, {'status': 'SHELVED_OFFLOADED'}) + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + # make host1 unusable so the subsequent unshelve needs to select host2 + self.admin_api.put_service( + self.compute1_service_id, {"status": "disabled"}) + + self.api.post_server_action(server['id'], {'unshelve': None}) + + # Unshelve fails on host2 because + # update_pci_request_spec_with_allocated_interface_name fails, so the + # instance goes back to the shelved offloaded state + fake_notifier.wait_for_versioned_notifications( + 'instance.unshelve.start') + error_notification = fake_notifier.wait_for_versioned_notifications( + 'compute.exception')[0] + self.assertEqual( + 'UnexpectedResourceProviderNameForPCIRequest', + error_notification['payload']['nova_object.data']['exception']) + server = self._wait_for_server_parameter( + server, + {'OS-EXT-STS:task_state': None, + 'status': 'SHELVED_OFFLOADED'}) + + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + self._delete_server_and_check_allocations( + server, qos_normal_port, qos_sriov_port) + + def test_unshelve_offloaded_server_with_qos_port_fails_due_to_neutron( + self): + non_qos_normal_port = self.neutron.port_1 + qos_normal_port = self.neutron.port_with_resource_request + qos_sriov_port = self.neutron.port_with_sriov_resource_request + + server = self._create_server_with_ports_and_check_allocation( + non_qos_normal_port, qos_normal_port, qos_sriov_port) + + # with default config shelve means immediate offload as well + req = { + 'shelve': {} + } + self.api.post_server_action(server['id'], req) + self._wait_for_server_parameter( + server, {'status': 'SHELVED_OFFLOADED'}) + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + # Simulate that the port update fails during unshelve because neutron + # is unavailable + with mock.patch( + 'nova.tests.fixtures.NeutronFixture.'
+ 'update_port') as mock_update_port: + mock_update_port.side_effect = neutron_exception.ConnectionFailed( + reason='test') + req = {'unshelve': None} + self.api.post_server_action(server['id'], req) + fake_notifier.wait_for_versioned_notifications( + 'instance.unshelve.start') + self._wait_for_server_parameter( + server, + {'status': 'SHELVED_OFFLOADED', + 'OS-EXT-STS:task_state': None}) + + # As the instance went back to offloaded state we expect no allocation + allocations = self.placement_api.get( + '/allocations/%s' % server['id']).body['allocations'] + self.assertEqual(0, len(allocations)) + + self._delete_server_and_check_allocations( + server, qos_normal_port, qos_sriov_port) + class LiveMigrateAbortWithPortResourceRequestTest( PortResourceRequestBasedSchedulingTestBase): @@ -7506,7 +7809,7 @@ # First call is during boot, we want that to succeed normally. Then the # fake virt driver triggers a re-schedule. During that re-schedule the # fill is called again, and we simulate that call raises. - original_fill = nova.scheduler.utils.fill_provider_mapping + original_fill = utils.fill_provider_mapping def stub_fill_provider_mapping(*args, **kwargs): if not mock_fill.called: @@ -7532,3 +7835,424 @@ updated_port = self.neutron.show_port(port['id'])['port'] binding_profile = neutronapi.get_binding_profile(updated_port) self.assertNotIn('allocation', binding_profile) + + +class AcceleratorServerBase(integrated_helpers.ProviderUsageBaseTestCase): + + compute_driver = 'fake.SmallFakeDriver' + + def setUp(self): + super(AcceleratorServerBase, self).setUp() + self.cyborg = self.useFixture(nova_fixtures.CyborgFixture()) + # dict of form {$compute_rp_uuid: $device_rp_uuid} + self.device_rp_map = {} + # self.NUM_HOSTS should be set up by derived classes + if not hasattr(self, 'NUM_HOSTS'): + self.NUM_HOSTS = 1 + self._setup_compute_nodes_and_device_rps() + + def _setup_compute_nodes_and_device_rps(self): + self.compute_services = [] + for i in range(self.NUM_HOSTS): + svc = self._start_compute(host='accel_host' + str(i)) + self.compute_services.append(svc) + self.compute_rp_uuids = [ + rp['uuid'] for rp in self._get_all_providers() + if rp['uuid'] == rp['root_provider_uuid']] + for index, uuid in enumerate(self.compute_rp_uuids): + device_rp_uuid = self._create_device_rp(index, uuid) + self.device_rp_map[uuid] = device_rp_uuid + self.device_rp_uuids = list(self.device_rp_map.values()) + + def _create_device_rp(self, index, compute_rp_uuid, + resource='FPGA', res_amt=2): + """Created nested RP for a device. There is one per host. + + :param index: Number of the device rp uuid for this setup + :param compute_rp_uuid: Resource provider UUID of the host. + :param resource: Placement resource name. + Assumed to be a standard resource class. + :param res_amt: Amount of the resource. 
+ :returns: Device RP UUID + """ + resp = self._post_nested_resource_provider( + 'FakeDevice' + str(index), parent_rp_uuid=compute_rp_uuid) + device_rp_uuid = resp['uuid'] + inventory = { + 'resource_provider_generation': 0, + 'inventories': { + resource: { + 'total': res_amt, + 'allocation_ratio': 1.0, + 'max_unit': res_amt, + 'min_unit': 1, + 'reserved': 0, + 'step_size': 1, + } + }, + } + self._update_inventory(device_rp_uuid, inventory) + self._create_trait(self.cyborg.trait) + self._set_provider_traits(device_rp_uuid, [self.cyborg.trait]) + return device_rp_uuid + + def _post_nested_resource_provider(self, rp_name, parent_rp_uuid): + body = {'name': rp_name, 'parent_provider_uuid': parent_rp_uuid} + return self.placement_api.post( + url='/resource_providers', version='1.20', body=body).body + + def _create_acc_flavor(self): + extra_specs = {'accel:device_profile': self.cyborg.dp_name} + flavor_id = self._create_flavor(name='acc.tiny', + extra_spec=extra_specs) + return flavor_id + + def _check_allocations_usage(self, server, check_other_host_alloc=True): + # Check allocations on host where instance is running + server_uuid = server['id'] + + hostname = server['OS-EXT-SRV-ATTR:host'] + server_host_rp_uuid = self._get_provider_uuid_by_host(hostname) + expected_host_alloc = { + 'resources': {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20}, + } + expected_device_alloc = {'resources': {'FPGA': 1}} + + for i in range(self.NUM_HOSTS): + compute_uuid = self.compute_rp_uuids[i] + device_uuid = self.device_rp_map[compute_uuid] + host_alloc = self._get_allocations_by_provider_uuid(compute_uuid) + device_alloc = self._get_allocations_by_provider_uuid(device_uuid) + if compute_uuid == server_host_rp_uuid: + self.assertEqual(expected_host_alloc, host_alloc[server_uuid]) + self.assertEqual(expected_device_alloc, + device_alloc[server_uuid]) + else: + if check_other_host_alloc: + self.assertEqual({}, host_alloc) + self.assertEqual({}, device_alloc) + + # NOTE(Sundar): ARQs for an instance could come from different + # devices in the same host, in general. But, in this test case, + # there is only one device in the host. So, we check for that. + device_rp_uuid = self.device_rp_map[server_host_rp_uuid] + expected_arq_bind_info = set([('Bound', hostname, + device_rp_uuid, server_uuid)]) + arqs = nova_fixtures.CyborgFixture.fake_get_arqs_for_instance( + server_uuid) + # The state is hardcoded but other fields come from the test case. 
+ arq_bind_info = {(arq['state'], arq['hostname'], + arq['device_rp_uuid'], arq['instance_uuid']) + for arq in arqs} + self.assertSetEqual(expected_arq_bind_info, arq_bind_info) + + def _check_no_allocs_usage(self, server_uuid): + allocs = self._get_allocations_by_server_uuid(server_uuid) + self.assertEqual({}, allocs) + + for i in range(self.NUM_HOSTS): + host_alloc = self._get_allocations_by_provider_uuid( + self.compute_rp_uuids[i]) + self.assertEqual({}, host_alloc) + device_alloc = self._get_allocations_by_provider_uuid( + self.device_rp_uuids[i]) + self.assertEqual({}, device_alloc) + usage = self._get_provider_usages( + self.device_rp_uuids[i]).get('FPGA') + self.assertEqual(0, usage) + + +class AcceleratorServerTest(AcceleratorServerBase): + def setUp(self): + self.NUM_HOSTS = 1 + super(AcceleratorServerTest, self).setUp() + + def _get_server(self, expected_state='ACTIVE'): + flavor_id = self._create_acc_flavor() + server_name = 'accel_server1' + server = self._create_server( + server_name, flavor_id=flavor_id, + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + networks='none', expected_state=expected_state) + return server + + def test_create_delete_server_ok(self): + server = self._get_server() + + # Verify that the host name and the device rp UUID are set properly. + # Other fields in the ARQ are hardcoded data from the fixture. + arqs = self.cyborg.fake_get_arqs_for_instance(server['id']) + self.assertEqual(self.device_rp_uuids[0], arqs[0]['device_rp_uuid']) + self.assertEqual(server['OS-EXT-SRV-ATTR:host'], arqs[0]['hostname']) + + # Check allocations and usage + self._check_allocations_usage(server) + + # Delete server and check that ARQs got deleted + self.api.delete_server(server['id']) + self._wait_until_deleted(server) + self.cyborg.mock_del_arqs.assert_called_once_with(server['id']) + + # Check that resources are freed + self._check_no_allocs_usage(server['id']) + + def test_create_server_with_error(self): + + def throw_error(*args, **kwargs): + raise exception.BuildAbortException(reason='', + instance_uuid='fake') + + self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error) + + server = self._get_server(expected_state='ERROR') + server_uuid = server['id'] + # Check that Cyborg was called to delete ARQs + self.cyborg.mock_del_arqs.assert_called_once_with(server_uuid) + + # BuildAbortException will not trigger a reschedule and the placement + # cleanup is the last step in the compute manager after instance state + # setting, fault recording and notification sending. So we have no + # other way than to simply wait until the placement cleanup happens + # before we assert it. + def placement_cleanup(): + # An instance in error state should consume no resources + self._check_no_allocs_usage(server_uuid) + + self._wait_for_assert(placement_cleanup) + + self.api.delete_server(server_uuid) + self._wait_until_deleted(server) + # Verify that there is one more call to delete ARQs + self.cyborg.mock_del_arqs.assert_has_calls( + [mock.call(server_uuid), mock.call(server_uuid)]) + + # Verify that no allocations/usages remain after deletion + self._check_no_allocs_usage(server_uuid) + + def test_create_server_with_local_delete(self): + """Delete the server when the compute service is down.""" + server = self._get_server() + server_uuid = server['id'] + + # Stop the server. + self.api.post_server_action(server_uuid, {'os-stop': {}}) + self._wait_for_state_change(server, 'SHUTOFF') + self._check_allocations_usage(server) + # Stop and force down the compute service.
+ compute_id = self.admin_api.get_services( + host='accel_host0', binary='nova-compute')[0]['id'] + self.compute_services[0].stop() + self.admin_api.put_service(compute_id, {'forced_down': 'true'}) + + # Delete the server with compute service down. + self.api.delete_server(server_uuid) + self.cyborg.mock_del_arqs.assert_called_once_with(server_uuid) + self._check_no_allocs_usage(server_uuid) + + # Restart the compute service to see if anything fails. + self.admin_api.put_service(compute_id, {'forced_down': 'false'}) + self.compute_services[0].start() + + +class AcceleratorServerReschedTest(AcceleratorServerBase): + + def setUp(self): + self.NUM_HOSTS = 2 + super(AcceleratorServerReschedTest, self).setUp() + + def test_resched(self): + orig_spawn = fake.FakeDriver.spawn + + def fake_spawn(*args, **kwargs): + fake_spawn.count += 1 + if fake_spawn.count == 1: + raise exception.ComputeResourcesUnavailable( + reason='First host fake fail.', instance_uuid='fake') + else: + orig_spawn(*args, **kwargs) + fake_spawn.count = 0 + + with mock.patch('nova.virt.fake.FakeDriver.spawn', new=fake_spawn): + flavor_id = self._create_acc_flavor() + server_name = 'accel_server1' + server = self._create_server( + server_name, flavor_id=flavor_id, + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + networks='none', expected_state='ACTIVE') + + self.assertEqual(2, fake_spawn.count) + self._check_allocations_usage(server) + self.cyborg.mock_del_arqs.assert_called_once_with(server['id']) + + def test_resched_fails(self): + + def throw_error(*args, **kwargs): + raise exception.ComputeResourcesUnavailable(reason='', + instance_uuid='fake') + + self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error) + + flavor_id = self._create_acc_flavor() + server_name = 'accel_server1' + server = self._create_server( + server_name, flavor_id=flavor_id, + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + networks='none', expected_state='ERROR') + + server_uuid = server['id'] + self._check_no_allocs_usage(server_uuid) + self.cyborg.mock_del_arqs.assert_has_calls( + [mock.call(server_uuid), + mock.call(server_uuid), + mock.call(server_uuid)]) + + +class AcceleratorServerOpsTest(AcceleratorServerBase): + + def setUp(self): + self.NUM_HOSTS = 2 # 2nd host needed for evacuate + super(AcceleratorServerOpsTest, self).setUp() + flavor_id = self._create_acc_flavor() + server_name = 'accel_server1' + self.server = self._create_server( + server_name, flavor_id=flavor_id, + image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6', + networks='none', expected_state='ACTIVE') + + def test_soft_reboot_ok(self): + params = {'reboot': {'type': 'SOFT'}} + self.api.post_server_action(self.server['id'], params) + self._wait_for_state_change(self.server, 'ACTIVE') + self._check_allocations_usage(self.server) + + def test_hard_reboot_ok(self): + params = {'reboot': {'type': 'HARD'}} + self.api.post_server_action(self.server['id'], params) + self._wait_for_state_change(self.server, 'HARD_REBOOT') + self._wait_for_state_change(self.server, 'ACTIVE') + self._check_allocations_usage(self.server) + + def test_pause_unpause_ok(self): + # Pause and unpause should work with accelerators. + # This is not a general test of un/pause functionality. + self.api.post_server_action(self.server['id'], {'pause': {}}) + self._wait_for_state_change(self.server, 'PAUSED') + self._check_allocations_usage(self.server) + # ARQs didn't get deleted (and so didn't have to be re-created). 
+ self.cyborg.mock_del_arqs.assert_not_called() + + self.api.post_server_action(self.server['id'], {'unpause': {}}) + self._wait_for_state_change(self.server, 'ACTIVE') + self._check_allocations_usage(self.server) + + def test_stop_start_ok(self): + # Stop and start should work with accelerators. + # This is not a general test of start/stop functionality. + self.api.post_server_action(self.server['id'], {'os-stop': {}}) + self._wait_for_state_change(self.server, 'SHUTOFF') + self._check_allocations_usage(self.server) + # ARQs didn't get deleted (and so didn't have to be re-created). + self.cyborg.mock_del_arqs.assert_not_called() + + self.api.post_server_action(self.server['id'], {'os-start': {}}) + self._wait_for_state_change(self.server, 'ACTIVE') + self._check_allocations_usage(self.server) + + def test_lock_unlock_ok(self): + # Lock/unlock are no-ops for accelerators. + self.api.post_server_action(self.server['id'], {'lock': {}}) + server = self.api.get_server(self.server['id']) + self.assertTrue(server['locked']) + self._check_allocations_usage(self.server) + + self.api.post_server_action(self.server['id'], {'unlock': {}}) + server = self.api.get_server(self.server['id']) + self.assertTrue(not server['locked']) + self._check_allocations_usage(self.server) + + def test_backup_ok(self): + self.api.post_server_action(self.server['id'], + {'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1}}) + self._check_allocations_usage(self.server) + + def test_create_image_ok(self): # snapshot + self.api.post_server_action(self.server['id'], + {'createImage': { + 'name': 'foo-image', + 'metadata': {'meta_var': 'meta_val'}}}) + self._check_allocations_usage(self.server) + + def test_rescue_unrescue_ok(self): + self.api.post_server_action(self.server['id'], + {'rescue': { + 'adminPass': 'MySecretPass', + 'rescue_image_ref': '70a599e0-31e7-49b7-b260-868f441e862b'}}) + self._check_allocations_usage(self.server) + # ARQs didn't get deleted (and so didn't have to be re-created). 
+ self.cyborg.mock_del_arqs.assert_not_called() + self._wait_for_state_change(self.server, 'RESCUE') + + self.api.post_server_action(self.server['id'], {'unrescue': {}}) + self._check_allocations_usage(self.server) + + def test_resize_fails(self): + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], + {'resize': {'flavorRef': '2', 'OS-DCF:diskConfig': 'AUTO'}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) + + def test_suspend_fails(self): + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], {'suspend': {}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) + + def test_migrate_fails(self): + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], {'migrate': {}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) + + def test_live_migrate_fails(self): + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], + {'migrate': {'host': 'accel_host1'}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) + + def test_evacuate_fails(self): + server_hostname = self.server['OS-EXT-SRV-ATTR:host'] + for i in range(self.NUM_HOSTS): + hostname = 'accel_host' + str(i) + if hostname != server_hostname: + other_hostname = hostname + if self.compute_services[i].host == server_hostname: + compute_to_stop = self.compute_services[i] + + # Stop and force down the compute service. + compute_id = self.admin_api.get_services( + host=server_hostname, binary='nova-compute')[0]['id'] + compute_to_stop.stop() + self.admin_api.put_service(compute_id, {'forced_down': 'true'}) + + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], + {'evacuate': { + 'host': other_hostname, + 'adminPass': 'MySecretPass'}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) + + def test_rebuild_fails(self): + rebuild_image_ref = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID + ex = self.assertRaises(client.OpenStackApiException, + self.api.post_server_action, self.server['id'], + {'rebuild': { + 'imageRef': rebuild_image_ref, + 'OS-DCF:diskConfig': 'AUTO'}}) + self.assertEqual(403, ex.response.status_code) + self._check_allocations_usage(self.server) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_service.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_service.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/functional/test_service.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/functional/test_service.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,100 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
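Several hunks above replace a direct assertion with a self._wait_for_assert(func) call, because the asserted side effect (the build failure counter update, the placement cleanup) is the last step in the compute manager after the externally visible state change, so there is nothing else to wait on. The helper itself is not part of this diff; a minimal sketch of such a retry-until-the-assertion-passes helper, with an assumed signature and defaults:

    import time

    def _wait_for_assert(self, assert_func, max_retries=10, sleep=0.5):
        # Re-run the asserting callable until it stops raising
        # AssertionError; re-raise the last failure once the retry
        # budget is exhausted.
        for attempt in range(max_retries):
            try:
                return assert_func()
            except AssertionError:
                if attempt == max_retries - 1:
                    raise
                time.sleep(sleep)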
+ +from nova import context as nova_context +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures +from nova.tests.functional import integrated_helpers +from nova.tests.unit.image import fake as fake_image +from nova.tests.unit import policy_fixture + + +class ServiceTestCase(test.TestCase, + integrated_helpers.InstanceHelperMixin): + """Contains scenarios for testing services. + """ + + def setUp(self): + super(ServiceTestCase, self).setUp() + # Use the standard fixtures. + self.useFixture(policy_fixture.RealPolicyFixture()) + self.useFixture(nova_fixtures.NeutronFixture(self)) + self.useFixture(func_fixtures.PlacementFixture()) + fake_image.stub_out_image_service(self) + self.addCleanup(fake_image.FakeImageService_reset) + # Start nova controller services. + self.api = self.useFixture(nova_fixtures.OSAPIFixture( + api_version='v2.1')).api + self.start_service('conductor') + self.scheduler = self.start_service('scheduler') + # Our OSAPIFixture does not use a WSGIService, so just use the metadata + # server fixture (which uses WSGIService) for testing. + self.metadata = self.useFixture( + nova_fixtures.OSMetadataServer()).metadata + # Start one compute service. + self.start_service('compute') + + def test_service_reset_resets_cell_cache(self): + """Tests that the cell cache for database transaction context managers + is cleared after a service reset (example scenario: SIGHUP). + """ + server_req = self._build_server() + server = self.api.post_server({'server': server_req}) + self._wait_for_state_change(server, 'ACTIVE') + # Cell cache should be populated after creating a server. + self.assertTrue(nova_context.CELL_CACHE) + self.scheduler.reset() + # Cell cache should be empty after the service reset. + self.assertEqual({}, nova_context.CELL_CACHE) + + # Now test the WSGI service. + server = self.api.post_server({'server': server_req}) + self._wait_for_state_change(server, 'ACTIVE') + # Cell cache should be populated after creating a server. + self.assertTrue(nova_context.CELL_CACHE) + self.metadata.reset() + # Cell cache should be empty after the service reset. + self.assertEqual({}, nova_context.CELL_CACHE) + + def test_service_start_resets_cell_cache(self): + """Tests that the cell cache for database transaction context managers + is cleared upon a service start (example scenario: service start after + a SIGTERM and the parent process forks child process workers). + """ + server_req = self._build_server() + server = self.api.post_server({'server': server_req}) + self._wait_for_state_change(server, 'ACTIVE') + # Cell cache should be populated after creating a server. + self.assertTrue(nova_context.CELL_CACHE) + self.scheduler.stop() + # NOTE(melwitt): Simulate a service starting after being stopped. The + # scenario we want to handle is one where during start, the parent + # process forks child process workers while one or more of its cached + # database transaction context managers is inside a locked code + # section. If the child processes are forked while the lock is locked, + # the child processes begin with an already locked lock that can never + # be acquired again. The result is that requests get stuck and fail + # with a CellTimeout error. + self.scheduler.start() + # Cell cache should be empty after the service start. + self.assertEqual({}, nova_context.CELL_CACHE) + + # Now test the WSGI service.
+ server = self.api.post_server({'server': server_req}) + self._wait_for_state_change(server, 'ACTIVE') + # Cell cache should be populated after creating a server. + self.assertTrue(nova_context.CELL_CACHE) + self.metadata.stop() + self.metadata.start() + # Cell cache should be empty after the service reset. + self.assertEqual({}, nova_context.CELL_CACHE) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/accelerator/test_cyborg.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/accelerator/test_cyborg.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/accelerator/test_cyborg.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/accelerator/test_cyborg.py 2020-04-10 17:57:57.000000000 +0000 @@ -0,0 +1,370 @@ +# Copyright 2019 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import itertools +import mock + +from keystoneauth1 import exceptions as ks_exc +from requests.models import Response + +from oslo_serialization import jsonutils + +from nova.accelerator import cyborg +from nova import context +from nova import exception +from nova.objects import request_spec +from nova import test +from nova.tests.unit import fake_requests + + +class CyborgTestCase(test.NoDBTestCase): + def setUp(self): + super(CyborgTestCase, self).setUp() + self.context = context.get_admin_context() + self.client = cyborg.get_client(self.context) + + def test_get_client(self): + # Set up some ksa conf options + region = 'MyRegion' + endpoint = 'http://example.com:1234' + self.flags(group='cyborg', + region_name=region, + endpoint_override=endpoint) + ctxt = context.get_admin_context() + client = cyborg.get_client(ctxt) + + # Dig into the ksa adapter a bit to ensure the conf options got through + # We don't bother with a thorough test of get_ksa_adapter - that's done + # elsewhere - this is just sanity-checking that we spelled things right + # in the conf setup. + self.assertEqual('accelerator', client._client.service_type) + self.assertEqual(region, client._client.region_name) + self.assertEqual(endpoint, client._client.endpoint_override) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_call_cyborg(self, mock_ksa_get): + mock_ksa_get.return_value = 1 # dummy value + resp, err_msg = self.client._call_cyborg( + self.client._client.get, self.client.DEVICE_PROFILE_URL) + self.assertEqual(resp, 1) + self.assertIsNone(err_msg) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_call_cyborg_keystone_error(self, mock_ksa_get): + mock_ksa_get.side_effect = ks_exc.ClientException + resp, err_msg = self.client._call_cyborg( + self.client._client.get, self.client.DEVICE_PROFILE_URL) + + self.assertIsNone(resp) + expected_err = 'Could not communicate with Cyborg.' 
+ self.assertIn(expected_err, err_msg) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_call_cyborg_bad_response(self, mock_ksa_get): + mock_ksa_get.return_value = None + resp, err_msg = self.client._call_cyborg( + self.client._client.get, self.client.DEVICE_PROFILE_URL) + + self.assertIsNone(resp) + expected_err = 'Invalid response from Cyborg:' + self.assertIn(expected_err, err_msg) + + @mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg') + @mock.patch.object(Response, 'json') + def test_get_device_profile_list(self, mock_resp_json, mock_call_cyborg): + mock_call_cyborg.return_value = Response(), None + mock_resp_json.return_value = {'device_profiles': 1} # dummy value + ret = self.client._get_device_profile_list(dp_name='mydp') + self.assertEqual(ret, 1) + + @mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg') + def test_get_device_profile_list_bad_response(self, mock_call_cyborg): + "If Cyborg cannot be reached or returns bad response, raise exception." + mock_call_cyborg.return_value = (None, 'Some error') + self.assertRaises(exception.DeviceProfileError, + self.client._get_device_profile_list, + dp_name='mydp') + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + '_get_device_profile_list') + def test_get_device_profile_groups(self, mock_get_dp_list): + mock_get_dp_list.return_value = [{ + "groups": [{ + "resources:FPGA": "1", + "trait:CUSTOM_FPGA_CARD": "required" + }], + "name": "mydp", + "uuid": "307076c2-5aed-4f72-81e8-1b42f9aa2ec6" + }] + rg = request_spec.RequestGroup(requester_id='device_profile_0') + rg.add_resource(rclass='FPGA', amount='1') + rg.add_trait(trait_name='CUSTOM_FPGA_CARD', trait_type='required') + expected_groups = [rg] + + actual_groups = self.client.get_device_profile_groups('mydp') + self.assertEqual(len(expected_groups), len(actual_groups)) + self.assertEqual(expected_groups[0].__dict__, + actual_groups[0].__dict__) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + '_get_device_profile_list') + def test_get_device_profile_groups_no_dp(self, mock_get_dp_list): + # If the return value has no device profiles, raise exception + mock_get_dp_list.return_value = None + self.assertRaises(exception.DeviceProfileError, + self.client.get_device_profile_groups, + dp_name='mydp') + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + '_get_device_profile_list') + def test_get_device_profile_groups_many_dp(self, mock_get_dp_list): + # If the returned list has more than one dp, raise exception + mock_get_dp_list.return_value = [1, 2] + self.assertRaises(exception.DeviceProfileError, + self.client.get_device_profile_groups, + dp_name='mydp') + + def _get_arqs_and_request_groups(self): + arq_common = { + # All ARQs for an instance have same device profile name. 
+ "device_profile_name": "noprog-dp", + "device_rp_uuid": "", + "hostname": "", + "instance_uuid": "", + "state": "Initial", + } + arq_variants = [ + {"device_profile_group_id": 0, + "uuid": "edbba496-3cc8-4256-94ca-dfe3413348eb"}, + {"device_profile_group_id": 1, + "uuid": "20125bcb-9f55-4e13-8e8c-3fee30e54cca"}, + ] + arqs = [dict(arq_common, **variant) for variant in arq_variants] + rg_rp_map = { + 'device_profile_0': ['c532cf11-02ed-4b03-9dd8-3e9a454131dc'], + 'device_profile_1': ['2c332d7b-daaf-4726-a80d-ecf5212da4b8'], + } + return arqs, rg_rp_map + + def _get_bound_arqs(self): + arqs, rg_rp_map = self._get_arqs_and_request_groups() + common = { + 'host_name': 'myhost', + 'instance_uuid': '15d3acf8-df76-400b-bfc9-484a5208daa1', + } + bindings = { + arqs[0]['uuid']: dict( + common, device_rp_uuid=rg_rp_map['device_profile_0'][0]), + arqs[1]['uuid']: dict( + common, device_rp_uuid=rg_rp_map['device_profile_1'][0]), + } + bound_arq_common = { + "attach_handle_info": { + "bus": "01", + "device": "00", + "domain": "0000", + "function": "0" # will vary function ID later + }, + "attach_handle_type": "PCI", + "state": "Bound", + # Devic eprofile name is common to all bound ARQs + "device_profile_name": arqs[0]["device_profile_name"], + **common + } + bound_arqs = [ + {'uuid': arq['uuid'], + 'device_profile_group_id': arq['device_profile_group_id'], + 'device_rp_uuid': bindings[arq['uuid']]['device_rp_uuid'], + **bound_arq_common} for arq in arqs] + for index, bound_arq in enumerate(bound_arqs): + bound_arq['attach_handle_info']['function'] = index # fix func ID + return bindings, bound_arqs + + @mock.patch('keystoneauth1.adapter.Adapter.post') + def test_create_arqs_failure(self, mock_cyborg_post): + # If Cyborg returns invalid response, raise exception. + mock_cyborg_post.return_value = None + self.assertRaises(exception.AcceleratorRequestOpFailed, + self.client._create_arqs, + dp_name='mydp') + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + '_create_arqs') + def test_create_arq_and_match_rps(self, mock_create_arqs): + # Happy path + arqs, rg_rp_map = self._get_arqs_and_request_groups() + dp_name = arqs[0]["device_profile_name"] + + mock_create_arqs.return_value = arqs + + ret_arqs = self.client.create_arqs_and_match_resource_providers( + dp_name, rg_rp_map) + + # Each value in rg_rp_map is a list. We merge them into a single list. + expected_rp_uuids = sorted(list( + itertools.chain.from_iterable(rg_rp_map.values()))) + ret_rp_uuids = sorted([arq['device_rp_uuid'] for arq in ret_arqs]) + self.assertEqual(expected_rp_uuids, ret_rp_uuids) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' 
+ '_create_arqs') + def test_create_arq_and_match_rps_exception(self, mock_create_arqs): + # If Cyborg response does not contain ARQs, raise + arqs, rg_rp_map = self._get_arqs_and_request_groups() + dp_name = arqs[0]["device_profile_name"] + + mock_create_arqs.return_value = None + self.assertRaises( + exception.AcceleratorRequestOpFailed, + self.client.create_arqs_and_match_resource_providers, + dp_name, rg_rp_map) + + @mock.patch('keystoneauth1.adapter.Adapter.patch') + def test_bind_arqs(self, mock_cyborg_patch): + bindings, bound_arqs = self._get_bound_arqs() + arq_uuid = bound_arqs[0]['uuid'] + + patch_list = {} + for arq_uuid, binding in bindings.items(): + patch = [{"path": "/" + field, + "op": "add", + "value": value + } for field, value in binding.items()] + patch_list[arq_uuid] = patch + + self.client.bind_arqs(bindings) + + mock_cyborg_patch.assert_called_once_with( + self.client.ARQ_URL, json=mock.ANY) + called_params = mock_cyborg_patch.call_args.kwargs['json'] + self.assertEqual(sorted(called_params), sorted(patch_list)) + + @mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg') + def test_bind_arqs_exception(self, mock_call_cyborg): + # If Cyborg returns invalid response, raise exception. + bindings, _ = self._get_bound_arqs() + mock_call_cyborg.return_value = None, 'Some error' + self.assertRaises(exception.AcceleratorRequestOpFailed, + self.client.bind_arqs, bindings=bindings) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_get_arqs_for_instance(self, mock_cyborg_get): + # Happy path, without only_resolved=True + _, bound_arqs = self._get_bound_arqs() + instance_uuid = bound_arqs[0]['instance_uuid'] + + query = {"instance": instance_uuid} + content = jsonutils.dumps({'arqs': bound_arqs}) + resp = fake_requests.FakeResponse(200, content) + mock_cyborg_get.return_value = resp + + ret_arqs = self.client.get_arqs_for_instance(instance_uuid) + + mock_cyborg_get.assert_called_once_with( + self.client.ARQ_URL, params=query) + + bound_arqs.sort(key=lambda x: x['uuid']) + ret_arqs.sort(key=lambda x: x['uuid']) + for ret_arq, bound_arq in zip(ret_arqs, bound_arqs): + self.assertDictEqual(ret_arq, bound_arq) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_get_arqs_for_instance_exception(self, mock_cyborg_get): + # If Cyborg returns an error code, raise exception + _, bound_arqs = self._get_bound_arqs() + instance_uuid = bound_arqs[0]['instance_uuid'] + + resp = fake_requests.FakeResponse(404, content='') + mock_cyborg_get.return_value = resp + self.assertRaises( + exception.AcceleratorRequestOpFailed, + self.client.get_arqs_for_instance, instance_uuid) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_get_arqs_for_instance_exception_no_resp(self, mock_cyborg_get): + # If Cyborg returns an error code, raise exception + _, bound_arqs = self._get_bound_arqs() + instance_uuid = bound_arqs[0]['instance_uuid'] + + content = jsonutils.dumps({'noarqs': 'oops'}) + resp = fake_requests.FakeResponse(200, content) + mock_cyborg_get.return_value = resp + self.assertRaisesRegex( + exception.AcceleratorRequestOpFailed, + 'Cyborg returned no accelerator requests for ', + self.client.get_arqs_for_instance, instance_uuid) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_get_arqs_for_instance_all_resolved(self, mock_cyborg_get): + # If all ARQs are resolved, return full list + _, bound_arqs = self._get_bound_arqs() + instance_uuid = bound_arqs[0]['instance_uuid'] + + query = {"instance": instance_uuid} + content = 
jsonutils.dumps({'arqs': bound_arqs}) + resp = fake_requests.FakeResponse(200, content) + mock_cyborg_get.return_value = resp + + ret_arqs = self.client.get_arqs_for_instance( + instance_uuid, only_resolved=True) + + mock_cyborg_get.assert_called_once_with( + self.client.ARQ_URL, params=query) + + bound_arqs.sort(key=lambda x: x['uuid']) + ret_arqs.sort(key=lambda x: x['uuid']) + for ret_arq, bound_arq in zip(ret_arqs, bound_arqs): + self.assertDictEqual(ret_arq, bound_arq) + + @mock.patch('keystoneauth1.adapter.Adapter.get') + def test_get_arqs_for_instance_some_resolved(self, mock_cyborg_get): + # If only some ARQs are resolved, return just the resolved ones + unbound_arqs, _ = self._get_arqs_and_request_groups() + _, bound_arqs = self._get_bound_arqs() + # Create a mixture of unbound and bound ARQs + arqs = [unbound_arqs[0], bound_arqs[0]] + instance_uuid = bound_arqs[0]['instance_uuid'] + + query = {"instance": instance_uuid} + content = jsonutils.dumps({'arqs': arqs}) + resp = fake_requests.FakeResponse(200, content) + mock_cyborg_get.return_value = resp + + ret_arqs = self.client.get_arqs_for_instance( + instance_uuid, only_resolved=True) + + mock_cyborg_get.assert_called_once_with( + self.client.ARQ_URL, params=query) + self.assertEqual(ret_arqs, [bound_arqs[0]]) + + @mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg') + def test_delete_arqs_for_instance(self, mock_call_cyborg): + # Happy path + mock_call_cyborg.return_value = ('Some Value', None) + instance_uuid = 'edbba496-3cc8-4256-94ca-dfe3413348eb' + self.client.delete_arqs_for_instance(instance_uuid) + mock_call_cyborg.assert_called_once_with(mock.ANY, + self.client.ARQ_URL, params={'instance': instance_uuid}) + + @mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg') + def test_delete_arqs_for_instance_exception(self, mock_call_cyborg): + # If Cyborg returns invalid response, raise exception.
+ err_msg = 'Some error' + mock_call_cyborg.return_value = (None, err_msg) + instance_uuid = 'edbba496-3cc8-4256-94ca-dfe3413348eb' + exc = self.assertRaises(exception.AcceleratorRequestOpFailed, + self.client.delete_arqs_for_instance, instance_uuid) + expected_msg = ('Failed to delete accelerator requests: ' + + err_msg + ' Instance ' + instance_uuid) + self.assertEqual(expected_msg, exc.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_admin_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_admin_password.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_admin_password.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_admin_password.py 2020-04-10 17:57:57.000000000 +0000 @@ -158,72 +158,3 @@ self.assertRaises(webob.exc.HTTPConflict, self._get_action(), self.fake_req, fakes.FAKE_UUID, body=body) - - -class AdminPasswordPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(AdminPasswordPolicyEnforcementV21, self).setUp() - self.controller = admin_password_v21.AdminPasswordController() - self.req = fakes.HTTPRequest.blank('') - req_context = self.req.environ['nova.context'] - - def fake_get_instance(self, context, id): - return fake_instance.fake_instance_obj( - req_context, - uuid=id, - project_id=req_context.project_id, - user_id=req_context.user_id) - - self.stub_out( - 'nova.api.openstack.common.get_instance', fake_get_instance) - - def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg): - self.policy.set_rules(rules) - exc = self.assertRaises( - exception.PolicyNotAuthorized, func, *arg, **kwarg) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - def test_change_password_policy_failed_with_other_project(self): - rule_name = "os_compute_api:os-admin-password" - rule = {rule_name: "project_id:%(project_id)s"} - body = {'changePassword': {'adminPass': '1234pass'}} - # Change the project_id in request context. - req = fakes.HTTPRequest.blank('') - req.environ['nova.context'].project_id = 'other-project' - self._common_policy_check( - rule, rule_name, self.controller.change_password, - req, fakes.FAKE_UUID, body=body) - - @mock.patch('nova.compute.api.API.set_admin_password') - def test_change_password_overridden_policy_pass_with_same_project( - self, password_mock): - rule_name = "os_compute_api:os-admin-password" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - body = {'changePassword': {'adminPass': '1234pass'}} - self.controller.change_password(self.req, fakes.FAKE_UUID, body=body) - password_mock.assert_called_once_with(self.req.environ['nova.context'], - mock.ANY, '1234pass') - - def test_change_password_overridden_policy_failed_with_other_user(self): - rule_name = "os_compute_api:os-admin-password" - rule = {rule_name: "user_id:%(user_id)s"} - # Change the user_id in request context. 
-        req = fakes.HTTPRequest.blank('')
-        req.environ['nova.context'].user_id = 'other-user'
-        body = {'changePassword': {'adminPass': '1234pass'}}
-        self._common_policy_check(
-            rule, rule_name, self.controller.change_password,
-            req, fakes.FAKE_UUID, body=body)
-
-    @mock.patch('nova.compute.api.API.set_admin_password')
-    def test_change_password_overridden_policy_pass_with_same_user(
-            self, password_mock):
-        rule_name = "os_compute_api:os-admin-password"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        body = {'changePassword': {'adminPass': '1234pass'}}
-        self.controller.change_password(self.req, fakes.FAKE_UUID, body=body)
-        password_mock.assert_called_once_with(self.req.environ['nova.context'],
-                                              mock.ANY, '1234pass')
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_agents.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_agents.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_agents.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_agents.py 2020-04-10 17:57:57.000000000 +0000
@@ -429,60 +429,3 @@
                                           version=self.microversion)
         self.assertRaises(exception.ValidationError,
                           self.controller.index, req)
-
-
-class AgentsPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(AgentsPolicyEnforcementV21, self).setUp()
-        self.controller = agents_v21.AgentController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_create_policy_failed(self):
-        rule_name = "os_compute_api:os-agents"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.create, self.req,
-            body={'agent': {'hypervisor': 'kvm',
-                            'os': 'win',
-                            'architecture': 'x86',
-                            'version': '7.0',
-                            'url': 'xxx://xxxx/xxx/xxx',
-                            'md5hash': 'add6bb58e139be103324d04d82d8f545'}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_index_policy_failed(self):
-        rule_name = "os_compute_api:os-agents"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_delete_policy_failed(self):
-        rule_name = "os_compute_api:os-agents"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.delete, self.req, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_update_policy_failed(self):
-        rule_name = "os_compute_api:os-agents"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.update, self.req, fakes.FAKE_UUID,
-            body={'para': {'version': '7.0',
-                           'url': 'xxx://xxxx/xxx/xxx',
-                           'md5hash': 'add6bb58e139be103324d04d82d8f545'}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_aggregates.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_aggregates.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_aggregates.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_aggregates.py 2020-04-10 17:57:57.000000000 +0000
@@ -299,7 +299,7 @@
                           self.controller.show, self.user_req, "1")
 
-    def test_show_with_invalid_id(self):
+    def test_show_with_bad_aggregate(self):
         side_effect = exception.AggregateNotFound(aggregate_id='2')
         with mock.patch.object(self.controller.api, 'get_aggregate',
                                side_effect=side_effect) as mock_get:
@@ -307,6 +307,10 @@
                               self.req, "2")
             mock_get.assert_called_once_with(self.context, '2')
 
+    def test_show_with_invalid_id(self):
+        self.assertRaises(exc.HTTPBadRequest, self.controller.show,
+                          self.req, 'foo')
+
     def test_update(self):
         body = {"aggregate": {"name": "new_name",
                               "availability_zone": "nova1"}}
@@ -390,10 +394,11 @@
 
     @mock.patch('nova.compute.api.AggregateAPI.update_aggregate')
     def test_update_with_none_availability_zone(self, mock_update_agg):
-        agg_id = uuidsentinel.aggregate
+        agg_id = 173
         mock_update_agg.return_value = objects.Aggregate(self.context,
                                                          name='test',
-                                                         uuid=agg_id,
+                                                         uuid=uuidsentinel.agg,
+                                                         id=agg_id,
                                                          hosts=[],
                                                          metadata={})
         body = {"aggregate": {"name": "test",
@@ -414,6 +419,11 @@
         mock_update.assert_called_once_with(self.context, '2',
                                             body["aggregate"])
 
+    def test_update_with_invalid_id(self):
+        body = {"aggregate": {"name": "test_name"}}
+        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
+                          self.req, 'foo', body=body)
+
     def test_update_with_duplicated_name(self):
         body = {"aggregate": {"name": "test_name"}}
         side_effect = exception.AggregateNameExists(aggregate_name="test_name")
@@ -433,7 +443,7 @@
     def test_update_with_invalid_action(self):
         with mock.patch.object(self.controller.api, "update_aggregate",
                 side_effect=exception.InvalidAggregateAction(
-                    action='invalid', aggregate_id='agg1', reason= "not empty")):
+                    action='invalid', aggregate_id='1', reason= "not empty")):
             body = {"aggregate": {"availability_zone": "nova"}}
             self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                               self.req, "1", body=body)
@@ -467,15 +477,20 @@
 
     def test_add_host_with_bad_aggregate(self):
         side_effect = exception.AggregateNotFound(
-            aggregate_id="bogus_aggregate")
+            aggregate_id="2")
         with mock.patch.object(self.controller.api, 'add_host_to_aggregate',
                                side_effect=side_effect) as mock_add:
             self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
-                              self.req, "bogus_aggregate",
+                              self.req, "2",
                               body={"add_host": {"host": "host1"}})
-            mock_add.assert_called_once_with(self.context, "bogus_aggregate",
+            mock_add.assert_called_once_with(self.context, "2",
                                              "host1")
 
+    def test_add_host_with_invalid_id(self):
+        body = {"add_host": {"host": "host1"}}
+        self.assertRaises(exc.HTTPBadRequest, eval(self.add_host),
+                          self.req, 'foo', body=body)
+
     def test_add_host_with_bad_host(self):
         side_effect = exception.ComputeHostNotFound(host="bogus_host")
         with mock.patch.object(self.controller.api, 'add_host_to_aggregate',
@@ -534,16 +549,21 @@
 
     def test_remove_host_with_bad_aggregate(self):
         side_effect = exception.AggregateNotFound(
-            aggregate_id="bogus_aggregate")
+            aggregate_id="2")
         with mock.patch.object(self.controller.api,
                                'remove_host_from_aggregate',
                                side_effect=side_effect) as mock_rem:
             self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
-                              self.req, "bogus_aggregate",
+                              self.req, "2",
                               body={"remove_host": {"host": "host1"}})
-            mock_rem.assert_called_once_with(self.context, "bogus_aggregate",
+            mock_rem.assert_called_once_with(self.context, "2",
                                              "host1")
 
+    def test_remove_host_with_invalid_id(self):
+        body = {"remove_host": {"host": "host1"}}
+        self.assertRaises(exc.HTTPBadRequest, eval(self.remove_host),
+                          self.req, 'foo', body=body)
+
     def test_remove_host_with_host_not_in_aggregate(self):
         side_effect = exception.AggregateHostNotFound(aggregate_id="1",
                                                       host="host1")
@@ -639,16 +659,21 @@
 
     def test_set_metadata_with_bad_aggregate(self):
         body = {"set_metadata": {"metadata": {"foo": "bar"}}}
-        side_effect = exception.AggregateNotFound(aggregate_id="bad_aggregate")
+        side_effect = exception.AggregateNotFound(aggregate_id="2")
         with mock.patch.object(self.controller.api,
                                'update_aggregate_metadata',
                                side_effect=side_effect) as mock_update:
             self.assertRaises(exc.HTTPNotFound, eval(self.set_metadata),
-                              self.req, "bad_aggregate", body=body)
-            mock_update.assert_called_once_with(self.context, "bad_aggregate",
+                              self.req, "2", body=body)
+            mock_update.assert_called_once_with(self.context, "2",
                                                 body["set_metadata"]['metadata'])
 
+    def test_set_metadata_with_invalid_id(self):
+        body = {"set_metadata": {"metadata": {"foo": "bar"}}}
+        self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+                          self.req, 'foo', body=body)
+
     def test_set_metadata_with_missing_metadata(self):
         body = {"asdf": {"foo": "bar"}}
         self.assertRaises(self.bad_request, eval(self.set_metadata),
@@ -697,21 +722,25 @@
 
     def test_delete_aggregate_with_bad_aggregate(self):
         side_effect = exception.AggregateNotFound(
-            aggregate_id="bogus_aggregate")
+            aggregate_id="2")
         with mock.patch.object(self.controller.api, 'delete_aggregate',
                                side_effect=side_effect) as mock_del:
             self.assertRaises(exc.HTTPNotFound, self.controller.delete,
-                              self.req, "bogus_aggregate")
-            mock_del.assert_called_once_with(self.context, "bogus_aggregate")
+                              self.req, "2")
+            mock_del.assert_called_once_with(self.context, "2")
+
+    def test_delete_with_invalid_id(self):
+        self.assertRaises(exc.HTTPBadRequest, self.controller.delete,
+                          self.req, 'foo')
 
     def test_delete_aggregate_with_host(self):
         with mock.patch.object(self.controller.api, "delete_aggregate",
                                side_effect=exception.InvalidAggregateAction(
-                               action="delete", aggregate_id="agg1",
+                               action="delete", aggregate_id="2",
                                reason="not empty")):
             self.assertRaises(exc.HTTPBadRequest,
                               self.controller.delete,
-                              self.req, "agg1")
+                              self.req, "2")
 
     def test_marshall_aggregate(self):
         # _marshall_aggregate() just basically turns the aggregate returned
@@ -746,3 +775,11 @@
     def _assert_agg_data(self, expected, actual):
         self.assertTrue(obj_base.obj_equal_prims(expected, actual),
                         "The aggregate objects were not equal")
+
+    def test_images_with_invalid_id(self):
+        body = {'cache': [{'id': uuidsentinel.cache}]}
+        req = fakes.HTTPRequest.blank('/v2/os-aggregates',
+                                      use_admin_context=True,
+                                      version='2.81')
+        self.assertRaises(exc.HTTPBadRequest, self.controller.images,
+                          req, 'foo', body=body)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py 2020-04-10 17:57:57.000000000 +0000
@@ -25,6 +25,7 @@
 from nova import objects
 from nova import test
 from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
 from nova.tests.unit import fake_network_cache_model
 
 
@@ -109,8 +110,10 @@
         raise exception.PortNotFound(port_id=port_id)
 
 
-def fake_get_instance(self, *args, **kwargs):
-    return objects.Instance(uuid=FAKE_UUID1)
+def fake_get_instance(self, context, instance_id, expected_attrs=None,
+                      cell_down_support=False):
+    return fake_instance.fake_instance_obj(
+        context, id=1, uuid=instance_id, project_id=context.project_id)
 
 
 class InterfaceAttachTestsV21(test.NoDBTestCase):
@@ -178,8 +181,9 @@
     def test_delete(self):
         self.stub_out('nova.compute.api.API.detach_interface',
                       fake_detach_interface)
-
-        inst = objects.Instance(uuid=FAKE_UUID1)
+        req_context = self.req.environ['nova.context']
+        inst = objects.Instance(uuid=FAKE_UUID1,
+                                project_id=req_context.project_id)
         with mock.patch.object(common, 'get_instance',
                                return_value=inst) as mock_get_instance:
             result = self.attachments.delete(self.req, FAKE_UUID1,
@@ -360,7 +364,9 @@
     def test_attach_interface_fixed_ip_already_in_use(self,
                                                       attach_mock,
                                                       get_mock):
-        fake_instance = objects.Instance(uuid=FAKE_UUID1)
+        req_context = self.req.environ['nova.context']
+        fake_instance = objects.Instance(uuid=FAKE_UUID1,
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = exception.FixedIpAlreadyInUse(
             address='10.0.2.2', instance_uuid=FAKE_UUID1)
@@ -380,7 +386,9 @@
     def test_attach_interface_port_in_use(self,
                                           attach_mock,
                                           get_mock):
-        fake_instance = objects.Instance(uuid=FAKE_UUID1)
+        req_context = self.req.environ['nova.context']
+        fake_instance = objects.Instance(uuid=FAKE_UUID1,
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = exception.PortInUse(
             port_id=FAKE_PORT_ID1)
@@ -400,7 +408,9 @@
     def test_attach_interface_port_not_usable(self,
                                               attach_mock,
                                               get_mock):
-        fake_instance = objects.Instance(uuid=FAKE_UUID1)
+        req_context = self.req.environ['nova.context']
+        fake_instance = objects.Instance(uuid=FAKE_UUID1,
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = exception.PortNotUsable(
             port_id=FAKE_PORT_ID1,
@@ -419,8 +429,9 @@
     @mock.patch.object(compute_api.API, 'get')
     @mock.patch.object(compute_api.API, 'attach_interface')
     def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
+        req_context = self.req.environ['nova.context']
         fake_instance = objects.Instance(uuid=FAKE_UUID1,
-                                         project_id=FAKE_UUID2)
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = (
             exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
@@ -438,7 +449,9 @@
     def test_attach_interface_no_more_fixed_ips(self,
                                                 attach_mock,
                                                 get_mock):
-        fake_instance = objects.Instance(uuid=FAKE_UUID1)
+        req_context = self.req.environ['nova.context']
+        fake_instance = objects.Instance(uuid=FAKE_UUID1,
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = exception.NoMoreFixedIps(
             net=FAKE_NET_ID1)
@@ -457,8 +470,9 @@
     @mock.patch.object(compute_api.API, 'attach_interface')
     def test_attach_interface_failed_securitygroup_cannot_be_applied(
            self, attach_mock, get_mock):
+        req_context = self.req.environ['nova.context']
         fake_instance = objects.Instance(uuid=FAKE_UUID1,
-                                         project_id=FAKE_UUID2)
+                                         project_id=req_context.project_id)
         get_mock.return_value = fake_instance
         attach_mock.side_effect = (
             exception.SecurityGroupCannotBeApplied())
@@ -568,68 +582,3 @@
         list_ports.assert_called_once_with(ctxt, device_id=FAKE_UUID1)
         mock_get_by_instance_uuid.assert_called_once_with(
             self.req.environ['nova.context'], FAKE_UUID1)
-
-
-class AttachInterfacesPolicyEnforcementv21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(AttachInterfacesPolicyEnforcementv21, self).setUp()
-        self.controller = \
-            attach_interfaces_v21.InterfaceAttachmentController()
-        self.req = fakes.HTTPRequest.blank('')
-        self.rule_name = "os_compute_api:os-attach-interfaces"
-        self.policy.set_rules({self.rule_name: "project:non_fake"})
-
-    def test_index_attach_interfaces_policy_failed(self):
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % self.rule_name,
-            exc.format_message())
-
-    def test_show_attach_interfaces_policy_failed(self):
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.show, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % self.rule_name,
-            exc.format_message())
-
-    def test_create_attach_interfaces_policy_failed(self):
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.create, self.req, fakes.FAKE_UUID, body={})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % self.rule_name,
-            exc.format_message())
-
-    def test_delete_attach_interfaces_policy_failed(self):
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.delete, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % self.rule_name,
-            exc.format_message())
-
-    def test_attach_interfaces_create_policy_failed(self):
-        self.policy.set_rules({self.rule_name: "@",
-                               'os_compute_api:os-attach-interfaces:create':
-                               "!"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.create, self.req, fakes.FAKE_UUID, body={})
-        self.assertEqual(
-            "Policy doesn't allow os_compute_api:os-attach-interfaces:create "
-            "to be performed.", exc.format_message())
-
-    def test_attach_interfaces_delete_policy_failed(self):
-        self.policy.set_rules({self.rule_name: "@",
-                               'os_compute_api:os-attach-interfaces:delete':
-                               "!"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.delete, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
-        self.assertEqual(
-            "Policy doesn't allow os_compute_api:os-attach-interfaces:delete "
-            "to be performed.", exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_availability_zone.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_availability_zone.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_availability_zone.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_availability_zone.py 2020-04-10 17:57:57.000000000 +0000
@@ -38,7 +38,6 @@
 
 
 def fake_service_get_all(context, filters=None, **kwargs):
-    disabled = filters.get('disabled') if filters else None
 
     def __fake_service(binary, availability_zone, created_at,
                        updated_at, host, disabled):
@@ -54,30 +53,32 @@
             db_s.pop('version', None)
         return objects.Service(context, **db_s)
 
-    if disabled:
-        svcs = [__fake_service("nova-compute", "zone-2",
-                               datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
-                               datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
-                               "fake_host-1", True),
-                __fake_service("nova-scheduler", "internal",
-                               datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
-                               datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
-                               "fake_host-1", True)]
-    else:
-        svcs = [__fake_service("nova-compute", "zone-1",
-                               datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
-                               datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
-                               "fake_host-1", False),
-                __fake_service("nova-sched", "internal",
-                               datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
-                               datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
-                               "fake_host-1", False),
-                # nova-conductor is in the same zone and host as nova-sched
-                # and is here to make sure /detail filters out duplicates.
-                __fake_service("nova-conductor", "internal",
-                               datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
-                               datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
-                               "fake_host-1", False)]
+    svcs = [__fake_service("nova-compute", "zone-2",
+                           datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+                           datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+                           "fake_host-1", True),
+            __fake_service("nova-scheduler", "internal",
+                           datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+                           datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+                           "fake_host-1", True),
+            __fake_service("nova-compute", "zone-1",
+                           datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+                           datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+                           "fake_host-1", False),
+            __fake_service("nova-sched", "internal",
+                           datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+                           datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+                           "fake_host-1", False),
+            # nova-conductor is in the same zone and host as nova-sched
+            # and is here to make sure /detail filters out duplicates.
+ __fake_service("nova-conductor", "internal", + datetime.datetime(2012, 11, 14, 9, 57, 3, 0), + datetime.datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", False)] + + if filters and 'disabled' in filters: + svcs = [svc for svc in svcs if svc.disabled == filters['disabled']] + return objects.ServiceList(objects=svcs) @@ -137,11 +138,16 @@ timestamp = iso8601.parse_date("2012-12-26T14:45:25Z") expected = [ { - 'zoneName': 'zone-1', + 'zoneName': 'internal', 'zoneState': {'available': True}, 'hosts': { 'fake_host-1': { - 'nova-compute': { + 'nova-sched': { + 'active': True, + 'available': True, + 'updated_at': timestamp + }, + 'nova-conductor': { 'active': True, 'available': True, 'updated_at': timestamp @@ -150,16 +156,11 @@ } }, { - 'zoneName': 'internal', + 'zoneName': 'zone-1', 'zoneState': {'available': True}, 'hosts': { 'fake_host-1': { - 'nova-sched': { - 'active': True, - 'available': True, - 'updated_at': timestamp - }, - 'nova-conductor': { + 'nova-compute': { 'active': True, 'available': True, 'updated_at': timestamp @@ -174,9 +175,7 @@ } ] self.assertEqual(expected, zones) - # We get both enabled and disabled services per cell (just one in this - # test case) so we'll query the services table twice. - self.assertEqual(2, self.mock_service_get_all.call_count, + self.assertEqual(1, self.mock_service_get_all.call_count, self.mock_service_get_all.call_args_list) @mock.patch.object(availability_zones, 'get_availability_zones', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_console_output.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_console_output.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_console_output.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_console_output.py 2020-04-10 17:57:57.000000000 +0000 @@ -149,23 +149,3 @@ def test_get_console_output_not_available(self, mock_get_console_output): body = {'os-getConsoleOutput': {}} self._check_console_output_failure(webob.exc.HTTPNotFound, body) - - -class ConsoleOutputPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(ConsoleOutputPolicyEnforcementV21, self).setUp() - self.controller = console_output_v21.ConsoleOutputController() - - def test_get_console_output_policy_failed(self): - rule_name = "os_compute_api:os-console-output" - self.policy.set_rules({rule_name: "project:non_fake"}) - req = fakes.HTTPRequest.blank('') - body = {'os-getConsoleOutput': {}} - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller.get_console_output, req, fakes.FAKE_UUID, - body=body) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_create_backup.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_create_backup.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_create_backup.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_create_backup.py 2020-04-10 17:57:57.000000000 +0000 @@ -340,8 +340,7 @@ self.assertIn("Cannot 'createBackup' instance %(id)s" % {'id': instance.uuid}, ex.explanation) - @mock.patch.object(common, 'check_img_metadata_properties_quota') - def test_create_backup_with_non_existed_instance(self, mock_check_image): + def test_create_backup_with_non_existed_instance(self): body_map = { 'createBackup': { 'name': 'Backup 1', @@ -355,7 +354,6 @@ self.assertRaises(webob.exc.HTTPNotFound, self.controller._create_backup, self.req, uuid, body=body_map) - mock_check_image.assert_called_once_with(self.context, {}) def test_create_backup_with_invalid_create_backup(self): body = { @@ -398,34 +396,6 @@ six.text_type(ex)) -class CreateBackupPolicyEnforcementv21(test.NoDBTestCase): - - def setUp(self): - super(CreateBackupPolicyEnforcementv21, self).setUp() - self.controller = create_backup_v21.CreateBackupController() - self.req = fakes.HTTPRequest.blank('') - - def test_create_backup_policy_failed(self): - rule_name = "os_compute_api:os-create-backup" - self.policy.set_rules({rule_name: "project:non_fake"}) - metadata = {'123': 'asdf'} - body = { - 'createBackup': { - 'name': 'Backup 1', - 'backup_type': 'daily', - 'rotation': 1, - 'metadata': metadata, - }, - } - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._create_backup, self.req, fakes.FAKE_UUID, - body=body) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - class CreateBackupTestsV239(test.NoDBTestCase): def setUp(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_deferred_delete.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_deferred_delete.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_deferred_delete.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_deferred_delete.py 2020-04-10 17:57:57.000000000 +0000 @@ -166,86 +166,3 @@ mock_restore.assert_called_once_with(self.fake_context, instance) - - -class DeferredDeletePolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(DeferredDeletePolicyEnforcementV21, self).setUp() - self.controller = dd_v21.DeferredDeleteController() - self.req = fakes.HTTPRequest.blank('') - - def test_restore_policy_failed(self): - rule_name = "os_compute_api:os-deferred-delete" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._restore, self.req, fakes.FAKE_UUID, - body={'restore': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - @mock.patch('nova.api.openstack.common.get_instance') - def test_force_delete_policy_failed_with_other_project( - self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rule_name = "os_compute_api:os-deferred-delete" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - # Change the project_id in request context. - self.req.environ['nova.context'].project_id = 'other-project' - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._force_delete, self.req, fakes.FAKE_UUID, - body={'forceDelete': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.force_delete') - @mock.patch('nova.api.openstack.common.get_instance') - def test_force_delete_overridden_policy_pass_with_same_project( - self, get_instance_mock, force_delete_mock): - instance = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - project_id=self.req.environ['nova.context'].project_id) - get_instance_mock.return_value = instance - rule_name = "os_compute_api:os-deferred-delete" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - self.controller._force_delete(self.req, fakes.FAKE_UUID, - body={'forceDelete': {}}) - force_delete_mock.assert_called_once_with( - self.req.environ['nova.context'], instance) - - @mock.patch('nova.api.openstack.common.get_instance') - def test_force_delete_overridden_policy_failed_with_other_user( - self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rule_name = "os_compute_api:os-deferred-delete" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - # Change the user_id in request context. - self.req.environ['nova.context'].user_id = 'other-user' - exc = self.assertRaises(exception.PolicyNotAuthorized, - self.controller._force_delete, self.req, - fakes.FAKE_UUID, body={'forceDelete': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.force_delete') - @mock.patch('nova.api.openstack.common.get_instance') - def test_force_delete_overridden_policy_pass_with_same_user(self, - get_instance_mock, - force_delete_mock): - instance = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - user_id=self.req.environ['nova.context'].user_id) - get_instance_mock.return_value = instance - rule_name = "os_compute_api:os-deferred-delete" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - self.controller._force_delete(self.req, fakes.FAKE_UUID, - body={'forceDelete': {}}) - force_delete_mock.assert_called_once_with( - self.req.environ['nova.context'], instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_evacuate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_evacuate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_evacuate.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_evacuate.py 2020-04-10 17:57:57.000000000 +0000 @@ -243,92 +243,6 @@ self.assertIsNone(res) -class EvacuatePolicyEnforcementv21(test.NoDBTestCase): - - def setUp(self): - super(EvacuatePolicyEnforcementv21, self).setUp() - self.controller = evacuate_v21.EvacuateController() - self.req = fakes.HTTPRequest.blank('') - req_context = self.req.environ['nova.context'] - self.stub_out('nova.compute.api.HostAPI.service_get_by_compute_host', - fake_service_get_by_compute_host) - - def fake_get_instance(self, context, id): - return fake_instance.fake_instance_obj( - req_context, - project_id=req_context.project_id, - user_id=req_context.user_id) - - self.stub_out( - 'nova.api.openstack.common.get_instance', fake_get_instance) - self.mock_list_port = self.useFixture( - fixtures.MockPatch('nova.network.neutron.API.list_ports')).mock - self.mock_list_port.return_value = {'ports': []} - - def test_evacuate_policy_failed_with_other_project(self): - rule_name = "os_compute_api:os-evacuate" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - req = fakes.HTTPRequest.blank('') - # Change the project_id in request context. - req.environ['nova.context'].project_id = 'other-project' - body = {'evacuate': {'host': 'my-host', - 'onSharedStorage': 'False', - 'adminPass': 'MyNewPass' - }} - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._evacuate, req, fakes.FAKE_UUID, - body=body) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.evacuate') - def test_evacuate_overridden_policy_pass_with_same_project(self, - evacuate_mock): - rule_name = "os_compute_api:os-evacuate" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - body = {'evacuate': {'host': 'my-host', - 'onSharedStorage': 'False', - 'adminPass': 'MyNewPass' - }} - self.controller._evacuate(self.req, fakes.FAKE_UUID, body=body) - evacuate_mock.assert_called_once_with(self.req.environ['nova.context'], - mock.ANY, 'my-host', False, - 'MyNewPass', None) - - def test_evacuate_overridden_policy_failed_with_other_user(self): - rule_name = "os_compute_api:os-evacuate" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - req = fakes.HTTPRequest.blank('') - # Change the user_id in request context. 
-        req.environ['nova.context'].user_id = 'other-user'
-        body = {'evacuate': {'host': 'my-host',
-                             'onSharedStorage': 'False',
-                             'adminPass': 'MyNewPass'
-                             }}
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller._evacuate, req,
-                                fakes.FAKE_UUID, body=body)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.compute.api.API.evacuate')
-    def test_evacuate_overridden_policy_pass_with_same_user(self,
-                                                            evacuate_mock):
-        rule_name = "os_compute_api:os-evacuate"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        body = {'evacuate': {'host': 'my-host',
-                             'onSharedStorage': 'False',
-                             'adminPass': 'MyNewPass'
-                             }}
-        self.controller._evacuate(self.req, fakes.FAKE_UUID, body=body)
-        evacuate_mock.assert_called_once_with(self.req.environ['nova.context'],
-                                              mock.ANY, 'my-host', False,
-                                              'MyNewPass', None)
-
-
 class EvacuateTestV214(EvacuateTestV21):
     def setUp(self):
         super(EvacuateTestV214, self).setUp()
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavor_access.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavor_access.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavor_access.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavor_access.py 2020-04-10 17:57:57.000000000 +0000
@@ -364,47 +364,3 @@
                           req, '2', body=body)
         mock_verify.assert_called_once_with(
             req.environ['nova.context'], 'proj2')
-
-
-class FlavorAccessPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(FlavorAccessPolicyEnforcementV21, self).setUp()
-        self.act_controller = flavor_access_v21.FlavorActionController()
-        self.access_controller = flavor_access_v21.FlavorAccessController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_add_tenant_access_policy_failed(self):
-        rule_name = "os_compute_api:os-flavor-access:add_tenant_access"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.act_controller._add_tenant_access, self.req, fakes.FAKE_UUID,
-            body={'addTenantAccess': {'tenant': fakes.FAKE_UUID}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_remove_tenant_access_policy_failed(self):
-        rule_name = ("os_compute_api:os-flavor-access:"
-                     "remove_tenant_access")
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.act_controller._remove_tenant_access, self.req,
-            fakes.FAKE_UUID,
-            body={'removeTenantAccess': {'tenant': fakes.FAKE_UUID}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_index_policy_failed(self):
-        rule_name = "os_compute_api:os-flavor-access"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.access_controller.index, self.req,
-            fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavor_manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavor_manage.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavor_manage.py 2020-04-10 17:57:57.000000000 +0000
@@ -20,7 +20,6 @@
 import six
 import webob
 
-from nova.api.openstack import api_version_request
 from nova.api.openstack.compute import flavor_access as flavor_access_v21
 from nova.api.openstack.compute import flavor_manage as flavormanage_v21
 from nova.compute import flavors
@@ -595,60 +594,3 @@
         body = self._get_response()
         for key in self.expected["flavor"]:
             self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
-
-
-class FlavorManagerPolicyEnforcementV21(test.TestCase):
-
-    def setUp(self):
-        super(FlavorManagerPolicyEnforcementV21, self).setUp()
-        self.controller = flavormanage_v21.FlavorManageController()
-        self.adm_req = fakes.HTTPRequest.blank('', use_admin_context=True)
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_create_policy_failed(self):
-        rule_name = "os_compute_api:os-flavor-manage:create"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._create, self.req,
-            body={"flavor": {
-                "name": "test",
-                "ram": 512,
-                "vcpus": 2,
-                "disk": 1,
-                "swap": 512,
-                "rxtx_factor": 1,
-            }})
-        # The deprecated action is being enforced since the rule that is
-        # configured is different than the default rule
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_delete_policy_failed(self):
-        rule_name = "os_compute_api:os-flavor-manage:delete"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._delete, self.req,
-            fakes.FAKE_UUID)
-        # The deprecated action is being enforced since the rule that is
-        # configured is different than the default rule
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_flavor_update_non_admin_fails(self):
-        """Tests that trying to update a flavor as a non-admin fails.
-        """
-        rule_name = "os_compute_api:os-flavor-manage:update"
-        self.policy.set_rules({rule_name: "is_admin:True"})
-        self.req.api_version_request = api_version_request.APIVersionRequest(
-            '2.55')
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._update, self.req, 'fake_id',
-            body={"flavor": {"description": "not authorized"}})
-        self.assertEqual(
-            "Policy doesn't allow os_compute_api:os-flavor-manage:update to "
-            "be performed.", exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py 2020-04-10 17:57:57.000000000 +0000
@@ -14,6 +14,7 @@
 # under the License.
 
 import mock
+import testtools
 import webob
 
 from nova.api.openstack.compute import flavors_extraspecs \
@@ -48,11 +49,9 @@
 
 def stub_flavor_extra_specs():
     specs = {
-        "key1": "value1",
-        "key2": "value2",
-        "key3": "value3",
-        "key4": "value4",
-        "key5": "value5"}
+        'hw:cpu_policy': 'shared',
+        'hw:numa_nodes': '1',
+    }
     return specs
 
 
@@ -60,10 +59,14 @@
     bad_request = exception.ValidationError
     flavorextraspecs = flavorextraspecs_v21
 
-    def _get_request(self, url, use_admin_context=False):
+    def _get_request(self, url, use_admin_context=False, version=None):
+        kwargs = {}
+        if version:
+            kwargs['version'] = version
         req_url = '/v2/%s/flavors/%s' % (fakes.FAKE_PROJECT_ID, url)
-        return fakes.HTTPRequest.blank(req_url,
-                                       use_admin_context=use_admin_context)
+        return fakes.HTTPRequest.blank(
+            req_url, use_admin_context=use_admin_context, **kwargs,
+        )
 
     def setUp(self):
         super(FlavorsExtraSpecsTestV21, self).setUp()
@@ -72,15 +75,16 @@
 
     def test_index(self):
         flavor = dict(test_flavor.fake_flavor,
-                      extra_specs={'key1': 'value1'})
+                      extra_specs={'hw:numa_nodes': '1'})
 
         req = self._get_request('1/os-extra_specs')
-        with mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db'
-                        ) as mock_get:
+        with mock.patch(
+            'nova.objects.Flavor._flavor_get_by_flavor_id_from_db'
+        ) as mock_get:
             mock_get.return_value = flavor
             res_dict = self.controller.index(req, 1)
 
-        self.assertEqual('value1', res_dict['extra_specs']['key1'])
+        self.assertEqual('1', res_dict['extra_specs']['hw:numa_nodes'])
 
     @mock.patch('nova.objects.Flavor.get_by_flavor_id')
     def test_index_no_data(self, mock_get):
@@ -101,57 +105,61 @@
                           req, 1)
 
     def test_show(self):
-        flavor = objects.Flavor(flavorid='1', extra_specs={'key5': 'value5'})
-        req = self._get_request('1/os-extra_specs/key5')
+        flavor = objects.Flavor(
+            flavorid='1', extra_specs={'hw:numa_nodes': '1'}
+        )
+        req = self._get_request('1/os-extra_specs/hw:numa_nodes')
         with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
             mock_get.return_value = flavor
-            res_dict = self.controller.show(req, 1, 'key5')
+            res_dict = self.controller.show(req, 1, 'hw:numa_nodes')
 
-        self.assertEqual('value5', res_dict['key5'])
+        self.assertEqual('1', res_dict['hw:numa_nodes'])
 
     @mock.patch('nova.objects.Flavor.get_by_flavor_id')
     def test_show_spec_not_found(self, mock_get):
         mock_get.return_value = objects.Flavor(extra_specs={})
 
-        req = self._get_request('1/os-extra_specs/key6')
+        req = self._get_request('1/os-extra_specs/hw:cpu_thread_policy')
         self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
-                          req, 1, 'key6')
+                          req, 1, 'hw:cpu_thread_policy')
 
     def test_not_found_because_flavor(self):
-        req = self._get_request('1/os-extra_specs/key5',
+        req = self._get_request('1/os-extra_specs/hw:numa_nodes',
                                 use_admin_context=True)
         with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
             mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
             self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
-                              req, 1, 'key5')
+                              req, 1, 'hw:numa_nodes')
             self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
-                              req, 1, 'key5', body={'key5': 'value5'})
+                              req, 1, 'hw:numa_nodes',
+                              body={'hw:numa_nodes': '1'})
             self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
-                              req, 1, 'key5')
+                              req, 1, 'hw:numa_nodes')
         req = self._get_request('1/os-extra_specs', use_admin_context=True)
         with mock.patch('nova.objects.Flavor.get_by_flavor_id') as mock_get:
             mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
             self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
-                              req, 1, body={'extra_specs': {'key5': 'value5'}})
+                              req, 1, body={'extra_specs': {
+                                  'hw:numa_nodes': '1'}})
 
     @mock.patch('nova.objects.Flavor._flavor_get_by_flavor_id_from_db')
     def test_delete(self, mock_get):
         flavor = dict(test_flavor.fake_flavor,
-                      extra_specs={'key5': 'value5'})
-        req = self._get_request('1/os-extra_specs/key5',
+                      extra_specs={'hw:numa_nodes': '1'})
+        req = self._get_request('1/os-extra_specs/hw:numa_nodes',
                                 use_admin_context=True)
         mock_get.return_value = flavor
         with mock.patch('nova.objects.Flavor.save'):
-            self.controller.delete(req, 1, 'key5')
+            self.controller.delete(req, 1, 'hw:numa_nodes')
 
     def test_delete_no_admin(self):
         self.stub_out('nova.objects.flavor._flavor_extra_specs_del',
                       delete_flavor_extra_specs)
 
-        req = self._get_request('1/os-extra_specs/key5')
+        req = self._get_request('1/os-extra_specs/hw:numa_nodes')
         self.assertRaises(exception.Forbidden, self.controller.delete,
-                          req, 1, 'key 5')
+                          req, 1, 'hw numa nodes')
 
     def test_delete_spec_not_found(self):
         req = self._get_request('1/os-extra_specs/key6',
@@ -160,24 +168,28 @@
                           req, 1, 'key6')
 
     def test_create(self):
-        body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
+        body = {
+            'extra_specs': {
+                'hw:cpu_policy': 'shared',
+                'hw:numa_nodes': '1',
+            }
+        }
 
         req = self._get_request('1/os-extra_specs', use_admin_context=True)
         res_dict = self.controller.create(req, 1, body=body)
 
-        self.assertEqual('value1', res_dict['extra_specs']['key1'])
-        self.assertEqual(0.5, res_dict['extra_specs']['key2'])
-        self.assertEqual(5, res_dict['extra_specs']['key3'])
+        self.assertEqual('shared', res_dict['extra_specs']['hw:cpu_policy'])
+        self.assertEqual('1', res_dict['extra_specs']['hw:numa_nodes'])
 
     def test_create_no_admin(self):
-        body = {"extra_specs": {"key1": "value1"}}
+        body = {'extra_specs': {'hw:numa_nodes': '1'}}
 
         req = self._get_request('1/os-extra_specs')
         self.assertRaises(exception.Forbidden, self.controller.create,
                           req, 1, body=body)
 
     def test_create_flavor_not_found(self):
-        body = {"extra_specs": {"key1": "value1"}}
+        body = {'extra_specs': {'hw:numa_nodes': '1'}}
         req = self._get_request('1/os-extra_specs', use_admin_context=True)
         with mock.patch('nova.objects.Flavor.save',
                         side_effect=exception.FlavorNotFound(flavor_id='')):
@@ -185,7 +197,7 @@
                               req, 1, body=body)
 
     def test_create_flavor_db_duplicate(self):
-        body = {"extra_specs": {"key1": "value1"}}
+        body = {'extra_specs': {'hw:numa_nodes': '1'}}
         req = self._get_request('1/os-extra_specs', use_admin_context=True)
         with mock.patch(
                 'nova.objects.Flavor.save',
@@ -212,7 +224,7 @@
         self._test_create_bad_request({"extra_specs": {None: "value1"}})
 
     def test_create_non_string_value(self):
-        self._test_create_bad_request({"extra_specs": {"key1": None}})
+        self._test_create_bad_request({"extra_specs": {"hw:numa_nodes": None}})
 
     def test_create_zero_length_key(self):
         self._test_create_bad_request({"extra_specs": {"": "value1"}})
@@ -223,7 +235,9 @@
 
     def test_create_long_value(self):
         value = "a" * 256
-        self._test_create_bad_request({"extra_specs": {"key1": value}})
+        self._test_create_bad_request(
+            {"extra_specs": {"hw_numa_nodes": value}}
+        )
 
     def test_create_really_long_integer_value(self):
         value = 10 ** 1000
@@ -232,55 +246,107 @@
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                           req, 1, body={"extra_specs": {"key1": value}})
 
-    def test_create_invalid_specs_key(self):
-        invalid_keys = ("key1/", "<script>alert('xss');</script>",
-                        "$$akey$", "!akey", "")
+    def test_create_invalid_specs(self):
+        """Test generic invalid specs.
+
+        These are invalid regardless of the validation scheme, if any, in use.
+        """
+        invalid_specs = {
+            'key1/': 'value1',
+            '<script>alert("xss");</script>': 'value1',
+            '$$akey$': 'value1',
+            '!akey': 'value1',
+            '': 'value1',
+        }
 
-        for key in invalid_keys:
-            body = {"extra_specs": {key: "value1"}}
+        for key, value in invalid_specs.items():
+            body = {"extra_specs": {key: value}}
             req = self._get_request('1/os-extra_specs',
                                     use_admin_context=True)
             self.assertRaises(self.bad_request, self.controller.create,
                               req, 1, body=body)
 
+    def test_create_invalid_known_namespace(self):
+        """Test behavior of validator with specs from known namespace."""
+        invalid_specs = {
+            'hw:numa_nodes': 'foo',
+            'hw:cpu_policy': 'sharrred',
+            'hw:cpu_policyyyyyyy': 'shared',
+            'hw:foo': 'bar',
+            'trait:STORAGE_DISK_SSD': 'forbiden',
+            'trait_foo:HW_CPU_X86_AVX2': 'foo',
+            'trait:bar': 'required',
+            'trait_foo:bar': 'required',
+        }
+        for key, value in invalid_specs.items():
+            body = {'extra_specs': {key: value}}
+            req = self._get_request(
+                '1/os-extra_specs', use_admin_context=True, version='2.86',
+            )
+            with testtools.ExpectedException(
+                self.bad_request, 'Validation failed; .*'
+            ):
+                self.controller.create(req, 1, body=body)
+
+    def test_create_invalid_unknown_namespace(self):
+        """Test behavior of validator with specs from unknown namespace."""
+        unknown_specs = {
+            'foo': 'bar',
+            'foo:bar': 'baz',
+            'hww:cpu_policy': 'sharrred',
+        }
+        for key, value in unknown_specs.items():
+            body = {'extra_specs': {key: value}}
+            req = self._get_request(
+                '1/os-extra_specs', use_admin_context=True, version='2.86',
+            )
+            self.controller.create(req, 1, body=body)
+
     @mock.patch('nova.objects.flavor._flavor_extra_specs_add')
-    def test_create_valid_specs_key(self, mock_flavor_extra_specs):
-        valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
+    def test_create_valid_specs(self, mock_flavor_extra_specs):
+        valid_specs = {
+            'hide_hypervisor_id': 'true',
+            'hw:numa_nodes': '1',
+            'hw:numa_cpus.0': '0-3,8-9,11,10',
+            'trait:STORAGE_DISK_SSD': 'forbidden',
+            'trait_foo:HW_CPU_X86_AVX2': 'required',
+        }
         mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
 
-        for key in valid_keys:
-            body = {"extra_specs": {key: "value1"}}
+        for key, value in valid_specs.items():
+            body = {"extra_specs": {key: value}}
             req = self._get_request('1/os-extra_specs',
                                     use_admin_context=True)
             res_dict = self.controller.create(req, 1, body=body)
-            self.assertEqual('value1', res_dict['extra_specs'][key])
+            self.assertEqual(value, res_dict['extra_specs'][key])
 
     @mock.patch('nova.objects.flavor._flavor_extra_specs_add')
     def test_update_item(self, mock_add):
         mock_add.side_effect = return_create_flavor_extra_specs
-        body = {"key1": "value1"}
+        body = {'hw:cpu_policy': 'shared'}
 
-        req = self._get_request('1/os-extra_specs/key1',
+        req = self._get_request('1/os-extra_specs/hw:cpu_policy',
                                 use_admin_context=True)
-        res_dict = self.controller.update(req, 1, 'key1', body=body)
+        res_dict = self.controller.update(req, 1, 'hw:cpu_policy', body=body)
 
-        self.assertEqual('value1', res_dict['key1'])
+        self.assertEqual('shared', res_dict['hw:cpu_policy'])
 
     def test_update_item_no_admin(self):
-        body = {"key1": "value1"}
+        body = {'hw:cpu_policy': 'shared'}
 
-        req = self._get_request('1/os-extra_specs/key1')
+        req = self._get_request('1/os-extra_specs/hw:cpu_policy')
         self.assertRaises(exception.Forbidden, self.controller.update,
                           req, 1, 'key1', body=body)
 
     def _test_update_item_bad_request(self, body):
-        req = self._get_request('1/os-extra_specs/key1',
+        req = self._get_request('1/os-extra_specs/hw:cpu_policy',
                                 use_admin_context=True)
         self.assertRaises(self.bad_request, self.controller.update,
-                          req, 1, 'key1', body=body)
+                          req, 1, 'hw:cpu_policy', body=body)
 
     def test_update_item_empty_body(self):
         self._test_update_item_bad_request('')
 
     def test_update_item_too_many_keys(self):
-        body = {"key1": "value1", "key2": "value2"}
+        body = {"hw:cpu_policy": "dedicated", "hw:numa_nodes": "2"}
         self._test_update_item_bad_request(body)
 
     def test_update_item_non_dict_extra_specs(self):
@@ -290,7 +356,7 @@
         self._test_update_item_bad_request({None: "value1"})
 
     def test_update_item_non_string_value(self):
-        self._test_update_item_bad_request({"key1": None})
+        self._test_update_item_bad_request({"hw:cpu_policy": None})
 
     def test_update_item_zero_length_key(self):
         self._test_update_item_bad_request({"": "value1"})
@@ -304,38 +370,90 @@
         self._test_update_item_bad_request({"key1": value})
 
     def test_update_item_body_uri_mismatch(self):
-        body = {"key1": "value1"}
+        body = {'hw:cpu_policy': 'shared'}
 
         req = self._get_request('1/os-extra_specs/bad',
                                 use_admin_context=True)
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                           req, 1, 'bad', body=body)
 
     def test_update_flavor_not_found(self):
-        body = {"key1": "value1"}
+        body = {'hw:cpu_policy': 'shared'}
 
-        req = self._get_request('1/os-extra_specs/key1',
+        req = self._get_request('1/os-extra_specs/hw:cpu_policy',
                                 use_admin_context=True)
         with mock.patch('nova.objects.Flavor.save',
                         side_effect=exception.FlavorNotFound(flavor_id='')):
             self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
-                              req, 1, 'key1', body=body)
+                              req, 1, 'hw:cpu_policy', body=body)
 
     def test_update_flavor_db_duplicate(self):
-        body = {"key1": "value1"}
+        body = {'hw:cpu_policy': 'shared'}
 
-        req = self._get_request('1/os-extra_specs/key1',
+        req = self._get_request('1/os-extra_specs/hw:cpu_policy',
                                 use_admin_context=True)
         with mock.patch(
                 'nova.objects.Flavor.save',
                 side_effect=exception.FlavorExtraSpecUpdateCreateFailed(
                     id=1, retries=5)):
             self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
-                              req, 1, 'key1', body=body)
+                              req, 1, 'hw:cpu_policy', body=body)
 
     def test_update_really_long_integer_value(self):
-        value = 10 ** 1000
+        body = {'hw:numa_nodes': 10 ** 1000}
 
-        req = self._get_request('1/os-extra_specs/key1',
+        req = self._get_request('1/os-extra_specs/hw:numa_nodes',
                                 use_admin_context=True)
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-                          req, 1, 'key1', body={"key1": value})
+                          req, 1, 'hw:numa_nodes', body=body)
+
+    def test_update_invalid_specs_known_namespace(self):
+        """Test behavior of validator with specs from known namespace."""
+        invalid_specs = {
+            'hw:numa_nodes': 'foo',
+            'hw:cpu_policy': 'sharrred',
+            'hw:cpu_policyyyyyyy': 'shared',
+            'hw:foo': 'bar',
+        }
+        for key, value in invalid_specs.items():
+            body = {key: value}
+            req = self._get_request(
+                '1/os-extra_specs/{key}',
+                use_admin_context=True, version='2.86',
+            )
+            with testtools.ExpectedException(
+                self.bad_request, 'Validation failed; .*'
+            ):
+                self.controller.update(req, 1, key, body=body)
+
+    def test_update_invalid_specs_unknown_namespace(self):
+        """Test behavior of validator with specs from unknown namespace."""
+        unknown_specs = {
+            'foo': 'bar',
+            'foo:bar': 'baz',
+            'hww:cpu_policy': 'sharrred',
+        }
+        for key, value in unknown_specs.items():
+            body = {key: value}
+            req = self._get_request(
+                f'1/os-extra_specs/{key}',
+                use_admin_context=True, version='2.86',
+            )
+            self.controller.update(req, 1, key, body=body)
+
+    @mock.patch('nova.objects.flavor._flavor_extra_specs_add')
+    def test_update_valid_specs(self, mock_flavor_extra_specs):
+        valid_specs = {
+            'hide_hypervisor_id': 'true',
+            'hw:numa_nodes': '1',
+            'hw:numa_cpus.0': '0-3,8-9,11,10',
+        }
+        mock_flavor_extra_specs.side_effect = return_create_flavor_extra_specs
+
+        for key, value in valid_specs.items():
+            body = {key: value}
+            req = self._get_request(
+                f'1/os-extra_specs/{key}', use_admin_context=True,
+                version='2.86',
+            )
+            res_dict = self.controller.update(req, 1, key, body=body)
+            self.assertEqual(value, res_dict[key])
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_floating_ips.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_floating_ips.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_floating_ips.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_floating_ips.py 2020-04-10 17:57:57.000000000 +0000
@@ -105,12 +105,6 @@
                                                         expected_exc)
 
     def test_floatingip_delete_error_disassociate_2(self):
-        raised_exc = exception.CannotDisassociateAutoAssignedFloatingIP
-        expected_exc = webob.exc.HTTPForbidden
-        self._test_floatingip_delete_error_disassociate(raised_exc,
-                                                        expected_exc)
-
-    def test_floatingip_delete_error_disassociate_3(self):
         raised_exc = exception.FloatingIpNotFoundForAddress(address='1.1.1.1')
         expected_exc = webob.exc.HTTPNotFound
         self._test_floatingip_delete_error_disassociate(raised_exc,
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_hypervisors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_hypervisors.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_hypervisors.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_hypervisors.py 2020-04-10 17:57:57.000000000 +0000
@@ -236,7 +236,6 @@
     def setUp(self):
         super(HypervisorsTestV21, self).setUp()
         self._set_up_controller()
-        self.rule_hyp_show = "os_compute_api:os-hypervisors"
 
         host_api = self.controller.host_api
         host_api.compute_node_get_all = mock.MagicMock(
@@ -306,11 +305,6 @@
 
         self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
 
-    def test_index_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.index, req)
-
     def test_index_compute_host_not_found(self):
         """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
@@ -387,11 +381,6 @@
 
         self.assertEqual(dict(hypervisors=self.DETAIL_HYPERS_DICTS), result)
 
-    def test_detail_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.detail, req)
-
     def test_detail_compute_host_not_found(self):
         """Tests that if a service is deleted but the compute node is not we
        don't fail when listing hypervisors.
@@ -514,12 +503,6 @@
 
         self.assertEqual(dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]), result)
 
-    def test_show_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.show, req,
-                          self._get_hyper_id())
-
     def test_uptime_noid(self):
         req = self._get_request(True)
         hyper_id = uuids.hyper3 if self.expect_uuid_for_id else '3'
@@ -553,12 +536,6 @@
         req = self._get_request(True)
         self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
 
-    def test_uptime_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.uptime, req,
-                          self.TEST_HYPERS_OBJ[0].id)
-
     def test_uptime_hypervisor_down(self):
         with mock.patch.object(self.controller.host_api, 'get_host_uptime',
                 side_effect=exception.ComputeServiceUnavailable(host='dummy')
@@ -603,12 +580,6 @@
 
         self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
 
-    def test_search_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.search, req,
-                          self.TEST_HYPERS_OBJ[0].id)
-
     def test_search_non_exist(self):
         with mock.patch.object(self.controller.host_api,
                                'compute_node_search_by_hypervisor',
@@ -673,12 +644,6 @@
                           req, '115')
         self.assertEqual(1, mock_node_search.call_count)
 
-    def test_servers_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.servers, req,
-                          self.TEST_HYPERS_OBJ[0].id)
-
     def test_servers_with_non_integer_hypervisor_id(self):
         with mock.patch.object(self.controller.host_api,
                                'compute_node_search_by_hypervisor',
@@ -716,11 +681,6 @@
                               running_vms=4,
                               disk_available_least=200)),
                          result)
 
-    def test_statistics_non_admin(self):
-        req = self._get_request(False)
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.statistics, req)
-
 
 class HypervisorsTestV228(HypervisorsTestV21):
     api_version = '2.28'
@@ -746,7 +706,7 @@
              'status': 'enabled'}
         ],
         'hypervisors_links': [
-            {'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
+            {'href': 'http://localhost/v2/os-hypervisors?limit=1&marker=2',
              'rel': 'next'}
         ]
     }
@@ -807,7 +767,7 @@
              'status': 'enabled'}
         ],
        'hypervisors_links': [
-            {'href': 'http://localhost/v2/hypervisors?limit=1&marker=2',
+            {'href': 'http://localhost/v2/os-hypervisors?limit=1&marker=2',
             'rel': 'next'}
        ]
    }
@@ -820,7 +780,7 @@
         req = self._get_request(
             True, '/v2/1234/os-hypervisors/detail?limit=1&marker=1')
         result = self.controller.detail(req)
-        link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
+        link = 'http://localhost/v2/os-hypervisors/detail?limit=1&marker=2'
         expected = {
             'hypervisors': [
                 {'cpu_info': {'arch': 'x86_64',
@@ -922,7 +882,7 @@
         self.assertEqual(expected, result['hypervisors'])
 
     def test_detail_pagination_with_additional_filter(self):
-        link = 'http://localhost/v2/hypervisors/detail?limit=1&marker=2'
+        link = 'http://localhost/v2/os-hypervisors/detail?limit=1&marker=2'
         expected = {
             'hypervisors': [
                 {'cpu_info': {'arch': 'x86_64',
@@ -1080,12 +1040,6 @@
             self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
             s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
 
-    def test_servers_non_admin(self):
-        """There is no reason to test this for 2.53 since the
-        /os-hypervisors/servers route is deprecated.
-        """
-        pass
-
     def test_servers_non_id(self):
         """There is no reason to test this for 2.53 since the
         /os-hypervisors/servers route is deprecated.
@@ -1164,12 +1118,6 @@
         """
         pass
 
-    def test_search_non_admin(self):
-        """There is no reason to test this for 2.53 since the
-        /os-hypervisors/search route is deprecated.
-        """
-        pass
-
     def test_search_unmapped(self):
         """This is already tested with test_index_compute_host_not_mapped."""
         pass
@@ -1256,7 +1204,7 @@
             url='/os-hypervisors/detail?limit=1&marker=%s' %
                 TEST_HYPERS_OBJ[0].uuid)
         result = self.controller.detail(req)
-        link = ('http://localhost/v2/hypervisors/detail?limit=1&marker=%s' %
+        link = ('http://localhost/v2/os-hypervisors/detail?limit=1&marker=%s' %
                 TEST_HYPERS_OBJ[1].uuid)
         expected = {
             'hypervisors': [
@@ -1332,7 +1280,7 @@
             url='/os-hypervisors?limit=1&marker=%s' %
                 TEST_HYPERS_OBJ[0].uuid)
         result = self.controller.index(req)
-        link = ('http://localhost/v2/hypervisors?limit=1&marker=%s' %
+        link = ('http://localhost/v2/os-hypervisors?limit=1&marker=%s' %
                 TEST_HYPERS_OBJ[1].uuid)
         expected = {
             'hypervisors': [{
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_image_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_image_metadata.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_image_metadata.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_image_metadata.py 2020-04-10 17:57:57.000000000 +0000
@@ -42,7 +42,7 @@
         super(ImageMetaDataTestV21, self).setUp()
         self.controller = self.controller_class()
 
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_index(self, get_all_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
         res_dict = self.controller.index(req, '123')
@@ -50,7 +50,7 @@
         self.assertEqual(res_dict, expected)
         get_all_mocked.assert_called_once_with(mock.ANY, '123')
 
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_show(self, get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
         res_dict = self.controller.show(req, '123', 'key1')
@@ -59,13 +59,13 @@
         self.assertEqual('value1', res_dict['meta']['key1'])
         get_mocked.assert_called_once_with(mock.ANY, '123')
 
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_show_not_found(self, _get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key9')
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.show, req, '123', 'key9')
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id='100'))
     def test_show_image_not_found(self, _get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
@@ -73,8 +73,8 @@
                           self.controller.show, req, '100', 'key9')
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_create(self, get_mocked, update_mocked, quota_mocked):
         mock_result = copy.deepcopy(get_image_123())
         mock_result['properties']['key7'] = 'value7'
@@ -99,8 +99,8 @@
         self.assertEqual(expected_output, res)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id='100'))
     def test_create_image_not_found(self, _get_mocked, update_mocked,
                                     quota_mocked):
@@ -116,8 +116,8 @@
         self.assertFalse(update_mocked.called)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_update_all(self, get_mocked, update_mocked, quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
         req.method = 'PUT'
@@ -138,7 +138,7 @@
         self.assertEqual(expected_output, res)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id='100'))
     def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
@@ -152,8 +152,8 @@
         self.assertFalse(quota_mocked.called)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
         req.method = 'PUT'
@@ -173,7 +173,7 @@
         self.assertEqual(res, expected_output)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id='100'))
     def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
@@ -188,8 +188,8 @@
         self.assertFalse(quota_mocked.called)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get')
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get')
     def test_update_item_bad_body(self, get_mocked, update_mocked,
                                   quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
@@ -207,8 +207,8 @@
 
     @mock.patch(CHK_QUOTA_STR,
                 side_effect=webob.exc.HTTPBadRequest())
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get')
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get')
     def test_update_item_too_many_keys(self, get_mocked, update_mocked,
                                        _quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
@@ -224,8 +224,8 @@
         self.assertFalse(update_mocked.called)
 
     @mock.patch(CHK_QUOTA_STR)
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
                                            quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/bad')
@@ -240,8 +240,8 @@
         self.assertFalse(quota_mocked.called)
         self.assertFalse(update_mocked.called)
 
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_delete(self, _get_mocked, update_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
         req.method = 'DELETE'
@@ -253,7 +253,7 @@
 
         self.assertIsNone(res)
 
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_delete_not_found(self, _get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
         req.method = 'DELETE'
@@ -261,7 +261,7 @@
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.delete, req, '123', 'blah')
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id='100'))
     def test_delete_image_not_found(self, _get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
@@ -272,8 +272,8 @@
 
     @mock.patch(CHK_QUOTA_STR,
                 side_effect=webob.exc.HTTPForbidden(explanation=''))
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_too_many_metadata_items_on_create(self, _get_mocked,
                                                update_mocked, _quota_mocked):
         body = {"metadata": {"foo": "bar"}}
@@ -288,8 +288,8 @@
 
     @mock.patch(CHK_QUOTA_STR,
                 side_effect=webob.exc.HTTPForbidden(explanation=''))
-    @mock.patch('nova.image.api.API.update')
-    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
+    @mock.patch('nova.image.glance.API.update')
+    @mock.patch('nova.image.glance.API.get', return_value=get_image_123())
     def test_too_many_metadata_items_on_put(self, _get_mocked, update_mocked,
                                             _quota_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
@@ -303,7 +303,7 @@
                           body=body)
         self.assertFalse(update_mocked.called)
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotAuthorized(image_id='123'))
     def test_image_not_authorized_update(self, _get_mocked):
         req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
@@ -316,7 +316,7 @@
                           self.controller.update, req, '123', 'key1',
                           body=body)
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotAuthorized(image_id='123'))
     def test_image_not_authorized_update_all(self, _get_mocked):
         image_id = 131
@@ -333,7 +333,7 @@
                           self.controller.update_all, req, image_id,
                           body=body)
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotAuthorized(image_id='123'))
     def test_image_not_authorized_create(self, _get_mocked):
         image_id = 131
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_images.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_images.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_images.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_images.py 2020-04-10 17:57:57.000000000 +0000
@@ -144,7 +144,7 @@
         },
     }
 
-    @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[0])
+    @mock.patch('nova.image.glance.API.get', return_value=IMAGE_FIXTURES[0])
     def test_get_image(self, get_mocked):
         request = self.http_request.blank(self.url_base + 'images/123')
         actual_image = self.controller.show(request, '123')
@@ -152,7 +152,7 @@
                         matchers.DictMatches(self.expected_image_123))
         get_mocked.assert_called_once_with(mock.ANY, '123')
 
-    @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[1])
+    @mock.patch('nova.image.glance.API.get', return_value=IMAGE_FIXTURES[1])
     def test_get_image_with_custom_prefix(self, _get_mocked):
         self.flags(compute_link_prefix='https://zoo.com:42',
                    glance_link_prefix='http://circus.com:34',
@@ -176,14 +176,14 @@
         self.assertThat(actual_image,
                         matchers.DictMatches(expected_image))
 
-    @mock.patch('nova.image.api.API.get',
+    @mock.patch('nova.image.glance.API.get',
                 side_effect=exception.ImageNotFound(image_id=''))
     def test_get_image_404(self, _get_mocked):
         fake_req = self.http_request.blank(self.url_base + 'images/unknown')
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.show, fake_req, 'unknown')
 
-    @mock.patch('nova.image.api.API.get_all', return_value=IMAGE_FIXTURES)
+    @mock.patch('nova.image.glance.API.get_all', return_value=IMAGE_FIXTURES)
     def test_get_image_details(self, get_all_mocked):
         request = self.http_request.blank(self.url_base + 'images/detail')
         response = self.controller.detail(request)
@@ -270,14 +270,14 @@
         self.assertThat(expected,
                         matchers.DictListMatches(response_list))
 
-    @mock.patch('nova.image.api.API.get_all')
+    @mock.patch('nova.image.glance.API.get_all')
     def test_get_image_details_with_limit(self, get_all_mocked):
         request = self.http_request.blank(self.url_base +
                                           'images/detail?limit=2')
         self.controller.detail(request)
         get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={})
 
-    @mock.patch('nova.image.api.API.get_all')
+    @mock.patch('nova.image.glance.API.get_all')
     def test_get_image_details_with_limit_and_page_size(self, get_all_mocked):
         request = self.http_request.blank(
             self.url_base + 'images/detail?limit=2&page_size=1')
@@ -285,7 +285,7 @@
         get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={},
                                                page_size=1)
 
-    @mock.patch('nova.image.api.API.get_all')
+    @mock.patch('nova.image.glance.API.get_all')
     def _detail_request(self, filters, request, get_all_mocked):
         self.controller.detail(request)
         get_all_mocked.assert_called_once_with(mock.ANY, filters=filters)
@@ -344,7 +344,7 @@
         request = self.http_request.blank(self.url_base + 'images/detail')
         self._detail_request(filters, request)
 
-    @mock.patch('nova.image.api.API.get_all', side_effect=exception.Invalid)
+    @mock.patch('nova.image.glance.API.get_all', side_effect=exception.Invalid)
     def test_image_detail_invalid_marker(self, _get_all_mocked):
         request = self.http_request.blank(self.url_base + '?marker=invalid')
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
@@ -360,7 +360,7 @@
     def _check_response(self, controller_method, response, expected_code):
         self.assertEqual(expected_code, controller_method.wsgi_code)
 
-    @mock.patch('nova.image.api.API.delete')
+    @mock.patch('nova.image.glance.API.delete')
     def test_delete_image(self, delete_mocked):
         request = self.http_request.blank(self.url_base + 'images/124')
         request.method = 'DELETE'
@@ -369,7 +369,7 @@
         self._check_response(delete_method, response, 204)
         delete_mocked.assert_called_once_with(mock.ANY, '124')
 
-    @mock.patch('nova.image.api.API.delete',
+    @mock.patch('nova.image.glance.API.delete',
                 side_effect=exception.ImageNotAuthorized(image_id='123'))
     def test_delete_deleted_image(self, _delete_mocked):
         # If you try to delete a deleted image, you get back 403 Forbidden.
@@ -378,7 +378,7 @@
         self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                           request, '123')
 
-    @mock.patch('nova.image.api.API.delete',
+    @mock.patch('nova.image.glance.API.delete',
                 side_effect=exception.ImageNotFound(image_id='123'))
     def test_delete_image_not_found(self, _delete_mocked):
         request = self.http_request.blank(self.url_base + 'images/300')
@@ -386,7 +386,8 @@
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.delete, request, '300')
 
-    @mock.patch('nova.image.api.API.get_all', return_value=[IMAGE_FIXTURES[0]])
+    @mock.patch('nova.image.glance.API.get_all',
+                return_value=[IMAGE_FIXTURES[0]])
     def test_get_image_next_link(self, get_all_mocked):
         request = self.http_request.blank(
             self.url_base + 'imagesl?limit=1')
@@ -398,7 +399,8 @@
         self.assertThat({'limit': ['1'], 'marker': [IMAGE_FIXTURES[0]['id']]},
                         matchers.DictMatches(params))
 
-    @mock.patch('nova.image.api.API.get_all', return_value=[IMAGE_FIXTURES[0]])
+    @mock.patch('nova.image.glance.API.get_all',
+                return_value=[IMAGE_FIXTURES[0]])
     def test_get_image_details_next_link(self, get_all_mocked):
         request = self.http_request.blank(
             self.url_base + 'images/detail?limit=1')
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_instance_actions.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_instance_actions.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_instance_actions.py 2020-04-10 17:57:57.000000000 +0000
@@ -65,7 +65,7 @@
                   expect_hostId=False):
     '''Remove keys that aren't serialized.'''
     to_delete = ['id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
-                 'action_id']
+                 'action_id', 'details']
     if not expect_traceback:
         to_delete.append('traceback')
     if not expect_host:
@@ -84,45 +84,6 @@
     return event
 
 
-class InstanceActionsPolicyTestV21(test.NoDBTestCase):
-    instance_actions = instance_actions_v21
-
-    def setUp(self):
-        super(InstanceActionsPolicyTestV21, self).setUp()
-        self.controller = self.instance_actions.InstanceActionsController()
-
-    def _get_http_req(self, action):
-        fake_url = '/%s/servers/12/%s' % (fakes.FAKE_PROJECT_ID, action)
-        return fakes.HTTPRequest.blank(fake_url)
-
-    def _get_instance_other_project(self, req):
-        context = req.environ['nova.context']
-        project_id = '%s_unequal' % context.project_id
-        return objects.Instance(project_id=project_id)
-
-    def _set_policy_rules(self):
-        rules = {'compute:get': '',
-                 'os_compute_api:os-instance-actions':
-                     'project_id:%(project_id)s'}
-        policy.set_rules(oslo_policy.Rules.from_dict(rules))
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_list_actions_restricted_by_project(self, mock_instance_get):
-        self._set_policy_rules()
-        req = self._get_http_req('os-instance-actions')
-        mock_instance_get.return_value = self._get_instance_other_project(req)
-        self.assertRaises(exception.Forbidden, self.controller.index, req,
-                          uuids.fake)
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_get_action_restricted_by_project(self, mock_instance_get):
-        self._set_policy_rules()
-        req = self._get_http_req('os-instance-actions/1')
-        mock_instance_get.return_value = self._get_instance_other_project(req)
-        self.assertRaises(exception.Forbidden, self.controller.show, req,
-                          uuids.fake, '1')
-
-
 class InstanceActionsTestV21(test.NoDBTestCase):
     instance_actions = instance_actions_v21
     wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
@@ -132,7 +93,8 @@
     def fake_get(self, context, instance_uuid, expected_attrs=None,
                  cell_down_support=False):
-        return objects.Instance(uuid=instance_uuid)
+        return objects.Instance(
+            context, id=1, uuid=instance_uuid, project_id=context.project_id)
 
     def setUp(self):
         super(InstanceActionsTestV21, self).setUp()
@@ -159,7 +121,7 @@
     def _set_policy_rules(self):
         rules = {'compute:get': '',
-                 'os_compute_api:os-instance-actions': '',
+                 'os_compute_api:os-instance-actions:show': '',
                  'os_compute_api:os-instance-actions:events':
                      'is_admin:True'}
         policy.set_rules(oslo_policy.Rules.from_dict(rules))
@@ -273,7 +235,8 @@
     def fake_get(self, context, instance_uuid, expected_attrs=None,
                  cell_down_support=False):
         self.assertEqual('yes', context.read_deleted)
-        return objects.Instance(uuid=instance_uuid)
+        return objects.Instance(
+            context, id=1, uuid=instance_uuid, project_id=context.project_id)
 
 
 class InstanceActionsTestV251(InstanceActionsTestV221):
@@ -452,3 +415,47 @@
                                self.controller.index, req)
         detail = 'Additional properties are not allowed'
         self.assertIn(detail, six.text_type(ex))
+
+
+class InstanceActionsTestV284(InstanceActionsTestV266):
+    wsgi_api_version = "2.84"
+
+    def _set_policy_rules(self, overwrite=True):
+        rules = {'os_compute_api:os-instance-actions:show': '',
+                 'os_compute_api:os-instance-actions:events:details':
+                     'project_id:%(project_id)s'}
+        policy.set_rules(oslo_policy.Rules.from_dict(rules),
+                         overwrite=overwrite)
+
+    def test_show_action_with_details(self):
+        def fake_get_action(context, uuid, request_id):
+            return self.fake_actions[uuid][request_id]
+
+        def fake_get_events(context, action_id):
+            return self.fake_events[action_id]
+
+        self.stub_out('nova.db.api.action_get_by_request_id', fake_get_action)
+        self.stub_out('nova.db.api.action_events_get', fake_get_events)
+
+        self._set_policy_rules(overwrite=False)
+        req = self._get_http_req('os-instance-actions/1')
+        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+        for event in res_dict['instanceAction']['events']:
+            self.assertIn('details', event)
+
+    def test_show_action_with_details_old_microversion(self):
+        """Before microversion 2.84, we cannot get the details in events."""
+        def fake_get_action(context, uuid, request_id):
+            return self.fake_actions[uuid][request_id]
+
+        def fake_get_events(context, action_id):
+            return self.fake_events[action_id]
+
+        self.stub_out('nova.db.api.action_get_by_request_id', fake_get_action)
+        self.stub_out('nova.db.api.action_events_get', fake_get_events)
+
+        req = self._get_http_req_with_version('os-instance-actions/1',
+                                              version="2.83")
+        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+        for event in res_dict['instanceAction']['events']:
+            self.assertNotIn('details', event)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py 2020-04-10 17:57:57.000000000 +0000
@@ -19,7 +19,6 @@
 from nova.api.openstack.compute import instance_usage_audit_log as v21_ial
 from nova import context
-from nova import exception
 from nova import test
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit.objects import test_service
@@ -183,31 +182,3 @@
         self.assertEqual(0, logs['num_hosts_not_run'])
         self.assertEqual("ALL hosts done. 3 errors.",
                          logs['overall_status'])
-
-
-class InstanceUsageAuditPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(InstanceUsageAuditPolicyEnforcementV21, self).setUp()
-        self.controller = v21_ial.InstanceUsageAuditLogController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_index_policy_failed(self):
-        rule_name = "os_compute_api:os-instance-usage-audit-log"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_show_policy_failed(self):
-        rule_name = "os_compute_api:os-instance-usage-audit-log"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.show, self.req, '2012-07-05 10:00:00')
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_limits.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_limits.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_limits.py 2020-04-10 17:57:57.000000000 +0000
@@ -28,7 +28,7 @@
 from nova.api.openstack import wsgi
 import nova.context
 from nova import exception
-from nova.policies import used_limits as ul_policies
+from nova.policies import limits as l_policies
 from nova import quota
 from nova import test
 from nova.tests.unit.api.openstack import fakes
@@ -202,10 +202,6 @@
         project_id = "123456"
         user_id = "A1234"
         tenant_id = 'abcd'
-        target = {
-            "project_id": tenant_id,
-            "user_id": user_id
-        }
         fake_req = self._get_index_request(tenant_id=tenant_id,
                                            user_id=user_id,
                                            project_id=project_id)
@@ -214,8 +210,9 @@
                                return_value={}) as mock_get_quotas:
             fake_req.get_response(self.controller)
             self.assertEqual(2, self.mock_can.call_count)
-            self.mock_can.assert_called_with(ul_policies.BASE_POLICY_NAME,
-                                             target)
+            self.mock_can.assert_called_with(
+                l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME,
+                target={"project_id": tenant_id})
             mock_get_quotas.assert_called_once_with(context, tenant_id,
                                                     usages=True)
@@ -349,9 +346,6 @@
         self.view_builder = views.limits.ViewBuilder()
         self.req = fakes.HTTPRequest.blank('/?tenant_id=None')
         self.rate_limits = []
-        patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
-        self.mock_can = patcher.start()
-        self.addCleanup(patcher.stop)
         self.absolute_limits = {"metadata_items": {'limit': 1, 'in_use': 1},
                                 "injected_files": {'limit': 5, 'in_use': 1},
                                 "injected_file_content_bytes":
@@ -376,45 +370,6 @@
         output = self.view_builder.build(self.req, quotas)
         self.assertThat(output, matchers.DictMatches(expected_limits))
 
-    def test_non_admin_cannot_fetch_used_limits_for_any_other_project(self):
-        project_id = "123456"
-        user_id = "A1234"
-        tenant_id = "abcd"
-        target = {
-            "project_id": tenant_id,
-            "user_id": user_id
-        }
-        req = fakes.HTTPRequest.blank('/?tenant_id=%s' % tenant_id)
-        context = nova.context.RequestContext(user_id, project_id)
-        req.environ["nova.context"] = context
-
-        self.mock_can.side_effect = exception.PolicyNotAuthorized(
-            action="os_compute_api:os-used-limits")
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.view_builder.build,
-                          req, self.absolute_limits)
-
-        self.mock_can.assert_called_with(ul_policies.BASE_POLICY_NAME,
-                                         target)
-
-
-class LimitsPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(LimitsPolicyEnforcementV21, self).setUp()
-        self.controller = limits_v21.LimitsController()
-
-    def test_limits_index_policy_failed(self):
-        rule_name = "os_compute_api:limits"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        req = fakes.HTTPRequest.blank('')
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, req=req)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
 
 class LimitsControllerTestV236(BaseLimitTestSuite):
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_lock_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_lock_server.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_lock_server.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_lock_server.py 2020-04-10 17:57:57.000000000 +0000
@@ -19,18 +19,14 @@
 from nova.api.openstack import api_version_request
 from nova.api.openstack import common
 from nova.api.openstack.compute import lock_server as lock_server_v21
-from nova import context
 from nova import exception
-from nova import test
 from nova.tests.unit.api.openstack.compute import admin_only_action_common
-from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import fake_instance
 
 
 class LockServerTestsV21(admin_only_action_common.CommonTests):
     lock_server = lock_server_v21
     controller_name = 'LockServerController'
-    authorization_error = exception.PolicyNotAuthorized
     _api_version = '2.1'
 
     def setUp(self):
@@ -52,44 +48,6 @@
         self._test_actions_with_non_existed_instance(['_lock', '_unlock'],
                                                      body_map=body_map)
 
-    def test_unlock_not_authorized(self):
-        instance = self._stub_instance_get()
-
-        body = {}
-        with mock.patch.object(
-                self.compute_api, 'unlock',
-                side_effect=exception.PolicyNotAuthorized(
-                    action='unlock')) as mock_unlock:
-            self.assertRaises(self.authorization_error,
-                              self.controller._unlock,
-                              self.req, instance.uuid, body)
-            mock_unlock.assert_called_once_with(self.context, instance)
-        self.mock_get.assert_called_once_with(self.context, instance.uuid,
-                                              expected_attrs=None,
-                                              cell_down_support=False)
-
-    @mock.patch.object(common, 'get_instance')
-    def test_unlock_override_not_authorized_with_non_admin_user(
-            self, mock_get_instance):
-        instance = fake_instance.fake_instance_obj(self.context)
-        instance.locked_by = "owner"
-        mock_get_instance.return_value = instance
-        self.assertRaises(self.authorization_error,
-                          self.controller._unlock, self.req,
-                          instance.uuid,
-                          {'unlock': None})
-
-    @mock.patch.object(common, 'get_instance')
-    def test_unlock_override_with_admin_user(self, mock_get_instance):
-        admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
-        admin_ctxt = admin_req.environ['nova.context']
-        instance = fake_instance.fake_instance_obj(admin_ctxt)
-        instance.locked_by = "owner"
-        mock_get_instance.return_value = instance
-        with mock.patch.object(self.compute_api, 'unlock') as mock_unlock:
-            self.controller._unlock(admin_req, instance.uuid, {'unlock': None})
-            mock_unlock.assert_called_once_with(admin_ctxt, instance)
-
     @mock.patch.object(common, 'get_instance')
     def test_unlock_with_any_body(self, get_instance_mock):
         instance = fake_instance.fake_instance_obj(
@@ -167,91 +125,3 @@
         exp = self.assertRaises(exception.ValidationError,
                                 self.controller._lock,
                                 self.req, instance.uuid, body=body)
         self.assertIn("('blah' was unexpected)", six.text_type(exp))
-
-
-class LockServerPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(LockServerPolicyEnforcementV21, self).setUp()
-        self.controller = lock_server_v21.LockServerController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_lock_policy_failed_with_other_project(self, get_instance_mock):
-        get_instance_mock.return_value = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            project_id=self.req.environ['nova.context'].project_id)
-        rule_name = "os_compute_api:os-lock-server:lock"
-        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
-        # Change the project_id in request context.
-        self.req.environ['nova.context'].project_id = 'other-project'
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._lock, self.req,
-            fakes.FAKE_UUID,
-            body={'lock': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_lock_overridden_policy_failed_with_other_user_in_same_project(
-            self, get_instance_mock):
-        get_instance_mock.return_value = (
-            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
-        rule_name = "os_compute_api:os-lock-server:lock"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        # Change the user_id in request context.
-        self.req.environ['nova.context'].user_id = 'other-user'
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller._lock, self.req,
-                                fakes.FAKE_UUID, body={'lock': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.compute.api.API.lock')
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_lock_overridden_policy_pass_with_same_user(self,
-                                                        get_instance_mock,
-                                                        lock_mock):
-        instance = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            user_id=self.req.environ['nova.context'].user_id)
-        get_instance_mock.return_value = instance
-        rule_name = "os_compute_api:os-lock-server:lock"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        self.controller._lock(self.req, fakes.FAKE_UUID, body={'lock': {}})
-        lock_mock.assert_called_once_with(self.req.environ['nova.context'],
-                                          instance, reason=None)
-
-    def test_unlock_policy_failed(self):
-        rule_name = "os_compute_api:os-lock-server:unlock"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._unlock, self.req,
-            fakes.FAKE_UUID,
-            body={'unlock': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch.object(common, 'get_instance')
-    def test_unlock_policy_failed_with_unlock_override(self,
-                                                       get_instance_mock):
-        ctxt = context.RequestContext('fake', 'fake')
-        instance = fake_instance.fake_instance_obj(ctxt)
-        instance.locked_by = "fake"
-        get_instance_mock.return_value = instance
-        rule_name = ("os_compute_api:os-lock-server:"
-                     "unlock:unlock_override")
-        rules = {"os_compute_api:os-lock-server:unlock": "@",
-                 rule_name: "project:non_fake"}
-        self.policy.set_rules(rules)
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized, self.controller._unlock,
-            self.req, fakes.FAKE_UUID, body={'unlock': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_migrate_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_migrate_server.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_migrate_server.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_migrate_server.py 2020-04-10 17:57:57.000000000 +0000
@@ -25,7 +25,6 @@
     migrate_server_v21
 from nova import exception
 from nova import objects
-from nova import test
 from nova.tests.unit.api.openstack.compute import admin_only_action_common
 from nova.tests.unit.api.openstack import fakes
 
@@ -295,14 +294,11 @@
             expected_exc=webob.exc.HTTPInternalServerError,
             check_response=False)
 
-    @mock.patch('nova.api.openstack.common.'
-                'supports_port_resource_request_during_move',
-                return_value=True)
     @mock.patch('nova.objects.Service.get_by_host_and_binary')
     @mock.patch('nova.api.openstack.common.'
                 'instance_has_port_with_resource_request', return_value=True)
     def test_migrate_with_bandwidth_from_old_compute_not_supported(
-            self, mock_has_res_req, mock_get_service, mock_support):
+            self, mock_has_res_req, mock_get_service):
         instance = self._stub_instance_get()
         mock_get_service.return_value = objects.Service(host=instance['host'])
@@ -625,38 +621,3 @@
                                self.controller._migrate_live, self.req,
                                fakes.FAKE_UUID, body=body)
         self.assertIn('force', six.text_type(ex))
-
-
-class MigrateServerPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(MigrateServerPolicyEnforcementV21, self).setUp()
-        self.controller = migrate_server_v21.MigrateServerController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_migrate_policy_failed(self):
-        rule_name = "os_compute_api:os-migrate-server:migrate"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._migrate, self.req,
-            fakes.FAKE_UUID,
-            body={'migrate': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_migrate_live_policy_failed(self):
-        rule_name = "os_compute_api:os-migrate-server:migrate_live"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        body_args = {'os-migrateLive': {'host': 'hostname',
-                                        'block_migration': False,
-                                        'disk_over_commit': False}}
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._migrate_live, self.req,
-            fakes.FAKE_UUID,
-            body=body_args)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_migrations.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_migrations.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_migrations.py 2020-04-10 17:57:57.000000000 +0000
@@ -504,36 +504,3 @@
                                self.controller.index, req)
         self.assertIn('Additional properties are not allowed',
                       six.text_type(ex))
-
-
-class MigrationsPolicyEnforcement(test.NoDBTestCase):
-    def setUp(self):
-        super(MigrationsPolicyEnforcement, self).setUp()
-        self.controller = migrations_v21.MigrationsController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_list_policy_failed(self):
-        rule_name = "os_compute_api:os-migrations:index"
-        self.policy.set_rules({rule_name: "project_id:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-
-class MigrationsPolicyEnforcementV223(MigrationsPolicyEnforcement):
-    wsgi_api_version = '2.23'
-
-    def setUp(self):
-        super(MigrationsPolicyEnforcementV223, self).setUp()
-        self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
-
-
-class MigrationsPolicyEnforcementV259(MigrationsPolicyEnforcementV223):
-    wsgi_api_version = '2.59'
-
-
-class MigrationsPolicyEnforcementV280(MigrationsPolicyEnforcementV259):
-    wsgi_api_version = '2.80'
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_multinic.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_multinic.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_multinic.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_multinic.py 2020-04-10 17:57:57.000000000 +0000
@@ -134,7 +134,7 @@
                           UUID, body=body)
 
     @mock.patch.object(compute.api.API, 'remove_fixed_ip',
-        side_effect=exception.FixedIpNotFoundForSpecificInstance(
+        side_effect=exception.FixedIpNotFoundForInstance(
             instance_uuid=UUID, ip='10.10.10.1'))
     def test_remove_fixed_ip_not_found(self, _remove_fixed_ip):
 
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_pause_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_pause_server.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_pause_server.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_pause_server.py 2020-04-10 17:57:57.000000000 +0000
@@ -13,15 +13,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-import mock
-
 from nova.api.openstack.compute import pause_server as \
     pause_server_v21
-from nova import exception
-from nova import test
 from nova.tests.unit.api.openstack.compute import admin_only_action_common
-from nova.tests.unit.api.openstack import fakes
-from nova.tests.unit import fake_instance
 
 
 class PauseServerTestsV21(admin_only_action_common.CommonTests):
@@ -57,70 +51,3 @@
     def test_actions_with_locked_instance(self):
         self._test_actions_with_locked_instance(['_pause', '_unpause'])
-
-
-class PauseServerPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(PauseServerPolicyEnforcementV21, self).setUp()
-        self.controller = pause_server_v21.PauseServerController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_pause_policy_failed_with_other_project(self, get_instance_mock):
-        get_instance_mock.return_value = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            project_id=self.req.environ['nova.context'].project_id)
-        rule_name = "os_compute_api:os-pause-server:pause"
-        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
-        # Change the project_id in request context.
-        self.req.environ['nova.context'].project_id = 'other-project'
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._pause, self.req, fakes.FAKE_UUID,
-            body={'pause': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_pause_overridden_policy_failed_with_other_user_in_same_project(
-            self, get_instance_mock):
-        get_instance_mock.return_value = (
-            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
-        rule_name = "os_compute_api:os-pause-server:pause"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        # Change the user_id in request context.
-        self.req.environ['nova.context'].user_id = 'other-user'
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller._pause, self.req,
-                                fakes.FAKE_UUID, body={'pause': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.compute.api.API.pause')
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_pause_overridden_policy_pass_with_same_user(self,
-                                                         get_instance_mock,
-                                                         pause_mock):
-        instance = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            user_id=self.req.environ['nova.context'].user_id)
-        get_instance_mock.return_value = instance
-        rule_name = "os_compute_api:os-pause-server:pause"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        self.controller._pause(self.req, fakes.FAKE_UUID, body={'pause': {}})
-        pause_mock.assert_called_once_with(self.req.environ['nova.context'],
-                                           instance)
-
-    def test_unpause_policy_failed(self):
-        rule_name = "os_compute_api:os-pause-server:unpause"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._unpause, self.req, fakes.FAKE_UUID,
-            body={'unpause': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_remote_consoles.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_remote_consoles.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_remote_consoles.py 2020-04-10 17:57:57.000000000 +0000
@@ -24,6 +24,7 @@
 from nova import objects
 from nova import test
 from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
 
 
 class ConsolesExtensionTestV21(test.NoDBTestCase):
@@ -366,10 +367,12 @@
         self.context = self.req.environ['nova.context']
         self.req.api_version_request = api_version_request.APIVersionRequest(
             '2.6')
+        self.instance = fake_instance.fake_instance_obj(self.context)
+        self.stub_out('nova.compute.api.API.get',
+                      lambda *a, **kw: self.instance)
         self.controller = console_v21.RemoteConsolesController()
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_vnc_console(self, mock_get):
+    def test_create_vnc_console(self):
         mock_handler = mock.MagicMock()
         mock_handler.return_value = {'url': "http://fake"}
         self.controller.handlers['vnc'] = mock_handler
@@ -379,11 +382,10 @@
         self.assertEqual({'remote_console': {'protocol': 'vnc',
                                              'type': 'novnc',
                                              'url': 'http://fake'}}, output)
-        mock_handler.assert_called_once_with(self.context, 'fake_instance',
+        mock_handler.assert_called_once_with(self.context, self.instance,
                                              'novnc')
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_spice_console(self, mock_get):
+    def test_create_spice_console(self):
         mock_handler = mock.MagicMock()
         mock_handler.return_value = {'url': "http://fake"}
         self.controller.handlers['spice'] = mock_handler
@@ -394,11 +396,10 @@
         self.assertEqual({'remote_console': {'protocol': 'spice',
                                              'type': 'spice-html5',
                                              'url': 'http://fake'}}, output)
-        mock_handler.assert_called_once_with(self.context, 'fake_instance',
+        mock_handler.assert_called_once_with(self.context, self.instance,
                                              'spice-html5')
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_rdp_console(self, mock_get):
+    def test_create_rdp_console(self):
         mock_handler = mock.MagicMock()
         mock_handler.return_value = {'url': "http://fake"}
         self.controller.handlers['rdp'] = mock_handler
@@ -408,11 +409,10 @@
         self.assertEqual({'remote_console': {'protocol': 'rdp',
                                              'type': 'rdp-html5',
                                              'url': 'http://fake'}}, output)
-        mock_handler.assert_called_once_with(self.context, 'fake_instance',
+        mock_handler.assert_called_once_with(self.context, self.instance,
                                              'rdp-html5')
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_serial_console(self, mock_get):
+    def test_create_serial_console(self):
         mock_handler = mock.MagicMock()
         mock_handler.return_value = {'url': "ws://fake"}
         self.controller.handlers['serial'] = mock_handler
@@ -422,11 +422,10 @@
         self.assertEqual({'remote_console': {'protocol': 'serial',
                                              'type': 'serial',
                                              'url': 'ws://fake'}}, output)
-        mock_handler.assert_called_once_with(self.context, 'fake_instance',
+        mock_handler.assert_called_once_with(self.context, self.instance,
                                              'serial')
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_instance_not_ready(self, mock_get):
+    def test_create_console_instance_not_ready(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = exception.InstanceNotReady(
             instance_id='xxx')
@@ -436,8 +435,7 @@
         self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_unavailable(self, mock_get):
+    def test_create_console_unavailable(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = exception.ConsoleTypeUnavailable(
             console_type='vnc')
@@ -448,8 +446,7 @@
                           self.req, fakes.FAKE_UUID, body=body)
         self.assertTrue(mock_handler.called)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_not_found(self, mock_get):
+    def test_create_console_not_found(self,):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = exception.InstanceNotFound(
             instance_id='xxx')
@@ -459,8 +456,7 @@
         self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_not_implemented(self, mock_get):
+    def test_create_console_not_implemented(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = NotImplementedError()
         self.controller.handlers['vnc'] = mock_handler
@@ -469,8 +465,7 @@
         self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_nport_invalid(self, mock_get):
+    def test_create_console_nport_invalid(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = exception.ImageSerialPortNumberInvalid(
             num_ports='x', property="hw_serial_port_count")
@@ -480,8 +475,7 @@
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_nport_exceed(self, mock_get):
+    def test_create_console_nport_exceed(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = (
             exception.ImageSerialPortNumberExceedFlavorValue())
@@ -491,8 +485,7 @@
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_socket_exhausted(self, mock_get):
+    def test_create_console_socket_exhausted(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = (
             exception.SocketPortRangeExhaustedException(host='127.0.0.1'))
@@ -502,8 +495,7 @@
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                           self.req, fakes.FAKE_UUID, body=body)
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_console_invalid_type(self, mock_get):
+    def test_create_console_invalid_type(self):
         mock_handler = mock.MagicMock()
         mock_handler.side_effect = (
             exception.ConsoleTypeInvalid(console_type='invalid_type'))
@@ -522,8 +514,7 @@
             '2.8')
         self.controller = console_v21.RemoteConsolesController()
 
-    @mock.patch.object(compute_api.API, 'get', return_value='fake_instance')
-    def test_create_mks_console(self, mock_get):
+    def test_create_mks_console(self):
         mock_handler = mock.MagicMock()
         mock_handler.return_value = {'url': "http://fake"}
         self.controller.handlers['mks'] = mock_handler
@@ -533,43 +524,5 @@
         self.assertEqual({'remote_console': {'protocol': 'mks',
                                              'type': 'webmks',
                                              'url': 'http://fake'}}, output)
-        mock_handler.assert_called_once_with(self.context, 'fake_instance',
+        mock_handler.assert_called_once_with(self.context, self.instance,
                                              'webmks')
-
-
-class TestRemoteConsolePolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(TestRemoteConsolePolicyEnforcementV21, self).setUp()
-        self.controller = console_v21.RemoteConsolesController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def _common_policy_check(self, func, *arg, **kwarg):
-        rule_name = "os_compute_api:os-remote-consoles"
-        rule = {rule_name: "project:non_fake"}
-        self.policy.set_rules(rule)
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized, func, *arg, **kwarg)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_remote_vnc_console_policy_failed(self):
-        body = {'os-getVNCConsole': {'type': 'novnc'}}
-        self._common_policy_check(self.controller.get_vnc_console, self.req,
-                                  fakes.FAKE_UUID, body=body)
-
-    def test_remote_splice_console_policy_failed(self):
-        body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
-        self._common_policy_check(self.controller.get_spice_console, self.req,
-                                  fakes.FAKE_UUID, body=body)
-
-    def test_remote_rdp_console_policy_failed(self):
-        body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
-        self._common_policy_check(self.controller.get_rdp_console, self.req,
-                                  fakes.FAKE_UUID, body=body)
-
-    def test_remote_serial_console_policy_failed(self):
-        body = {'os-getSerialConsole': {'type': 'serial'}}
-        self._common_policy_check(self.controller.get_serial_console, self.req,
-                                  fakes.FAKE_UUID, body=body)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_rescue.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_rescue.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_rescue.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_rescue.py 2020-04-10 17:57:57.000000000 +0000
@@ -212,76 +212,3 @@
         self.assertRaises(exception.ValidationError,
                           self.controller._rescue,
                           self.fake_req, UUID, body=body)
-
-
-class RescuePolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(RescuePolicyEnforcementV21, self).setUp()
-        self.controller = rescue_v21.RescueController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_rescue_policy_failed_with_other_project(self, get_instance_mock):
-        get_instance_mock.return_value = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            project_id=self.req.environ['nova.context'].project_id)
-        rule_name = "os_compute_api:os-rescue"
-        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
-        body = {"rescue": {"adminPass": "AABBCC112233"}}
-        # Change the project_id in request context.
-        self.req.environ['nova.context'].project_id = 'other-project'
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._rescue, self.req, fakes.FAKE_UUID,
-            body=body)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_rescue_overridden_policy_failed_with_other_user_in_same_project(
-            self, get_instance_mock):
-        get_instance_mock.return_value = (
-            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
-        rule_name = "os_compute_api:os-rescue"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        # Change the user_id in request context.
-        self.req.environ['nova.context'].user_id = 'other-user'
-        body = {"rescue": {"adminPass": "AABBCC112233"}}
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller._rescue, self.req,
-                                fakes.FAKE_UUID, body=body)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    @mock.patch('nova.compute.api.API.rescue')
-    @mock.patch('nova.api.openstack.common.get_instance')
-    def test_lock_overridden_policy_pass_with_same_user(self,
-                                                        get_instance_mock,
-                                                        rescue_mock):
-        instance = fake_instance.fake_instance_obj(
-            self.req.environ['nova.context'],
-            user_id=self.req.environ['nova.context'].user_id)
-        get_instance_mock.return_value = instance
-        rule_name = "os_compute_api:os-rescue"
-        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
-        body = {"rescue": {"adminPass": "AABBCC112233"}}
-        self.controller._rescue(self.req, fakes.FAKE_UUID, body=body)
-        rescue_mock.assert_called_once_with(self.req.environ['nova.context'],
-                                            instance,
-                                            rescue_password='AABBCC112233',
-                                            rescue_image_ref=None)
-
-    def test_unrescue_policy_failed(self):
-        rule_name = "os_compute_api:os-rescue"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        body = dict(unrescue=None)
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller._unrescue, self.req, fakes.FAKE_UUID,
-            body=body)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_security_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_security_groups.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_security_groups.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_security_groups.py 2020-04-10 17:57:57.000000000 +0000
@@ -35,7 +35,6 @@
 from nova import test
 from nova.tests.unit.api.openstack import fakes
 
-
 CONF = cfg.CONF
 FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
 FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
@@ -395,13 +394,15 @@
         self.fake_id = '11111111-1111-1111-1111-111111111111'
 
         self.req = fakes.HTTPRequest.blank('')
+        project_id = self.req.environ['nova.context'].project_id
         self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
 
         self.stub_out('nova.compute.api.API.get',
                       fakes.fake_compute_get(
                           **{'power_state': 0x01,
                              'host': "localhost",
                              'uuid': UUID_SERVER,
-                             'name': 'asdf'}))
+                             'name': 'asdf',
+                             'project_id': project_id}))
 
         self.original_client = neutron_api.get_client
         neutron_api.get_client = get_client
@@ -1576,84 +1577,6 @@
         self.assertEqual(res.status_int, 404)
 
 
-class PolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(PolicyEnforcementV21, self).setUp()
-        self.req = fakes.HTTPRequest.blank('')
-        self.rule_name = "os_compute_api:os-security-groups"
-        self.rule = {self.rule_name: "project:non_fake"}
-
-    def _common_policy_check(self, func, *arg, **kwarg):
-        self.policy.set_rules(self.rule)
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized, func, *arg, **kwarg)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % self.rule_name,
-            exc.format_message())
-
-
-class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
-
-    def setUp(self):
-        super(SecurityGroupPolicyEnforcementV21, self).setUp()
-        self.controller = secgroups_v21.SecurityGroupController()
-
-    def test_create_policy_failed(self):
-        self._common_policy_check(self.controller.create, self.req, {})
-
-    def test_show_policy_failed(self):
-        self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)
-
-    def test_delete_policy_failed(self):
-        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
-
-    def test_index_policy_failed(self):
-        self._common_policy_check(self.controller.index, self.req)
-
-    def test_update_policy_failed(self):
-        self._common_policy_check(
-            self.controller.update, self.req, FAKE_UUID1, {})
-
-
-class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
-
-    def setUp(self):
-        super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
-        self.controller = secgroups_v21.ServerSecurityGroupController()
-
-    def test_index_policy_failed(self):
-        self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
-
-
-class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
-
-    def setUp(self):
-        super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
-        self.controller = secgroups_v21.SecurityGroupRulesController()
-
-    def test_create_policy_failed(self):
-        self._common_policy_check(self.controller.create, self.req, {})
-
-    def test_delete_policy_failed(self):
-        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
-
-
-class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
-
-    def setUp(self):
-        super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
-        self.controller = secgroups_v21.SecurityGroupActionController()
-
-    def test_add_security_group_policy_failed(self):
-        self._common_policy_check(
-            self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})
-
-    def test_remove_security_group_policy_failed(self):
-        self._common_policy_check(
-            self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
-
-
 class TestSecurityGroupsDeprecation(test.NoDBTestCase):
 
     def setUp(self):
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_actions.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_actions.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_actions.py 2020-04-10 17:57:57.000000000 +0000
@@ -25,7 +25,6 @@
 from nova.compute import vm_states
 import nova.conf
 from nova import exception
-from nova import image
 from nova.image import glance
 from nova import objects
 from nova import test
@@ -85,7 +84,7 @@
             project_id=fakes.FAKE_PROJECT_ID)
         self.context = self.req.environ['nova.context']
 
-        self.image_api = image.API()
+        self.image_api = glance.API()
         # Assume that anything that hits the compute API and looks for a
         # RequestSpec doesn't care about it, since testing logic that deep
         # should be done in nova.tests.unit.compute.test_compute_api.
@@ -659,6 +658,20 @@
                           self.controller._action_rebuild,
                           self.req, FAKE_UUID, body=body)
 
+    @mock.patch.object(compute_api.API, 'rebuild')
+    def test_rebuild_raise_invalid_architecture_exc(self, mock_rebuild):
+        body = {
+            "rebuild": {
+                "imageRef": self._image_href,
+            },
+        }
+
+        mock_rebuild.side_effect = exception.InvalidArchitectureName('arm64')
+
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=body)
+
     def test_resize_server(self):
         body = dict(resize=dict(flavorRef="http://localhost/3"))
 
@@ -1234,14 +1247,11 @@
                           self.controller._action_create_image, self.req,
                           FAKE_UUID, body=body)
 
-    @mock.patch('nova.api.openstack.common.'
-                'supports_port_resource_request_during_move',
-                return_value=True)
    @mock.patch('nova.objects.Service.get_by_host_and_binary')
    @mock.patch('nova.api.openstack.common.'
                'instance_has_port_with_resource_request', return_value=True)
     def test_resize_with_bandwidth_from_old_compute_not_supported(
-            self, mock_has_res_req, mock_get_service, mock_support):
+            self, mock_has_res_req, mock_get_service):
         body = dict(resize=dict(flavorRef="http://localhost/3"))
         mock_get_service.return_value = objects.Service()
         mock_get_service.return_value.version = 38
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py 2020-04-10 17:57:57.000000000 +0000
@@ -17,7 +17,6 @@
 from oslo_serialization import jsonutils
 from oslo_utils.fixture import uuidsentinel as uuids
 
-from nova.api.openstack.compute import server_diagnostics
 from nova.api.openstack import wsgi as os_wsgi
 from nova.compute import api as compute_api
 from nova import exception
@@ -33,7 +32,9 @@
                   cell_down_support=False):
     if instance_uuid != UUID:
         raise Exception("Invalid UUID")
-    return objects.Instance(uuid=instance_uuid, host='123')
+    return objects.Instance(uuid=instance_uuid,
+                            project_id=_context.project_id,
+                            host='123')
 
 
 class ServerDiagnosticsTestV21(test.NoDBTestCase):
@@ -172,26 +173,3 @@
                             'memory_details': {'maximum': 8192,
                                                'used': 3072}}
         self._test_get_diagnostics(expected, return_value)
-
-
-class ServerDiagnosticsEnforcementV21(test.NoDBTestCase):
-    api_version = '2.1'
-
-    def setUp(self):
-        super(ServerDiagnosticsEnforcementV21, self).setUp()
-        self.controller = server_diagnostics.ServerDiagnosticsController()
-        self.req = fakes.HTTPRequest.blank('', version=self.api_version)
-
-    def test_get_diagnostics_policy_failed(self):
-        rule_name = "os_compute_api:os-server-diagnostics"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-
-class ServerDiagnosticsEnforcementV248(ServerDiagnosticsEnforcementV21):
-    api_version = '2.48'
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_external_events.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_external_events.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_external_events.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_external_events.py 2020-04-10 17:57:57.000000000 +0000
@@ -262,3 +262,34 @@
                                body=body)
         self.assertIn('Invalid input for field/attribute name.',
                       six.text_type(exp))
+
+
+@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids',
+            fake_get_by_instance_uuids)
+@mock.patch('nova.objects.InstanceList.get_by_filters',
+            fake_get_by_filters)
+class ServerExternalEventsTestV282(ServerExternalEventsTestV21):
+    wsgi_api_version = '2.82'
+
+    def setUp(self):
+        super(ServerExternalEventsTestV282, self).setUp()
+        self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
+        self.stdlog = self.useFixture(fixtures.StandardLogging())
+
+    def test_accelerator_request_bound_event(self):
+        body = self.default_body
+        event_name = 'accelerator-request-bound'
+        body['events'][0]['name'] = event_name  # event 0 has a tag
+        body['events'][1]['name'] = event_name  # event 1 has no tag
+
+        result, code = self._assert_call(
+            body, [fake_instance_uuids[0]], [event_name])
+
+        self.assertEqual(200, result['events'][0]['code'])
+        self.assertEqual('completed', result['events'][0]['status'])
+
+        msg = "Event tag is missing for instance"
+        self.assertIn(msg, self.stdlog.logger.output)
+        self.assertEqual(400, result['events'][1]['code'])
+        self.assertEqual('failed', result['events'][1]['status'])
+        self.assertEqual(207, code)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_metadata.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_metadata.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_metadata.py 2020-04-10 17:57:57.000000000 +0000
@@ -712,74 +712,3 @@
         req.body = jsonutils.dump_as_bytes(expected)
         self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
                           req, self.uuid, body=expected)
-
-
-class ServerMetaPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(ServerMetaPolicyEnforcementV21, self).setUp()
-        self.controller = server_metadata_v21.ServerMetadataController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def test_create_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:create"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.create, self.req, fakes.FAKE_UUID,
-            body={'metadata': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_index_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:index"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.index, self.req, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_update_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:update"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.update, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID,
-            body={'meta': {'fake_meta': 'fake_meta'}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_update_all_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:update_all"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.update_all, self.req, fakes.FAKE_UUID,
-            body={'metadata': {}})
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_delete_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:delete"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.delete, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-    def test_show_policy_failed(self):
-        rule_name = "os_compute_api:server-metadata:show"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_migrations.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_migrations.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_migrations.py 2020-04-10 17:57:57.000000000 +0000
@@ -350,61 +350,3 @@
 
 class ServerMigrationsTestsV280(ServerMigrationsTestsV265):
     wsgi_api_version = '2.80'
-
-
-class ServerMigrationsPolicyEnforcementV21(test.NoDBTestCase):
-    wsgi_api_version = '2.22'
-
-    def setUp(self):
-        super(ServerMigrationsPolicyEnforcementV21, self).setUp()
-        self.controller = server_migrations.ServerMigrationsController()
-        self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
-
-    def test_force_complete_policy_failed(self):
-        rule_name = "os_compute_api:servers:migrations:force_complete"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        body_args = {'force_complete': None}
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller._force_complete, self.req,
-                                fakes.FAKE_UUID, fakes.FAKE_UUID,
-                                body=body_args)
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed." % rule_name,
-            exc.format_message())
-
-
-class ServerMigrationsPolicyEnforcementV223(
-        ServerMigrationsPolicyEnforcementV21):
-
-    wsgi_api_version = '2.23'
-
-    def test_migration_index_failed(self):
-        rule_name = "os_compute_api:servers:migrations:index"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller.index, self.req,
-                                fakes.FAKE_UUID)
-        self.assertEqual("Policy doesn't allow %s to be performed." %
-                         rule_name, exc.format_message())
-
-    def test_migration_show_failed(self):
-        rule_name = "os_compute_api:servers:migrations:show"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(exception.PolicyNotAuthorized,
-                                self.controller.show, self.req,
-                                fakes.FAKE_UUID, 1)
-        self.assertEqual("Policy doesn't allow %s to be performed." %
-                         rule_name, exc.format_message())
-
-
-class ServerMigrationsPolicyEnforcementV224(
-        ServerMigrationsPolicyEnforcementV223):
-
-    wsgi_api_version = '2.24'
-
-    def test_migrate_delete_failed(self):
-        rule_name = "os_compute_api:servers:migrations:delete"
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        self.assertRaises(exception.PolicyNotAuthorized,
-                          self.controller.delete, self.req,
-                          fakes.FAKE_UUID, '10')
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_password.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_password.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_password.py 2020-04-10 17:57:57.000000000 +0000
@@ -17,7 +17,6 @@
 
 from nova.api.openstack.compute import server_password \
     as server_password_v21
-from nova import exception
 from nova import test
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import fake_instance
@@ -61,29 +60,3 @@
 
         res = self.controller.index(self.fake_req, 'fake')
         self.assertEqual(res['password'], '')
-
-
-class ServerPasswordPolicyEnforcementV21(test.NoDBTestCase):
-
-    def setUp(self):
-        super(ServerPasswordPolicyEnforcementV21, self).setUp()
-        self.controller = server_password_v21.ServerPasswordController()
-        self.req = fakes.HTTPRequest.blank('')
-
-    def _test_policy_failed(self, method, rule_name):
-        self.policy.set_rules({rule_name: "project:non_fake"})
-        exc = self.assertRaises(
-            exception.PolicyNotAuthorized,
-            method, self.req, fakes.FAKE_UUID)
-
-        self.assertEqual(
-            "Policy doesn't allow %s to be performed."
% rule_name,
-            exc.format_message())
-
-    def test_get_password_policy_failed(self):
-        rule_name = "os_compute_api:os-server-password"
-        self._test_policy_failed(self.controller.index, rule_name)
-
-    def test_clear_password_policy_failed(self):
-        rule_name = "os_compute_api:os-server-password"
-        self._test_policy_failed(self.controller.clear, rule_name)
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_serversV21.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_serversV21.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_serversV21.py	2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_serversV21.py	2020-04-10 17:57:57.000000000 +0000
@@ -2739,6 +2739,70 @@
         self.assertIn('OS-EXT-IPS-MAC:mac_addr', item)
 
 
+class ServersControllerTestV283(ControllerTest):
+    filters = ['availability_zone', 'config_drive', 'key_name',
+               'created_at', 'launched_at', 'terminated_at',
+               'power_state', 'task_state', 'vm_state', 'progress',
+               'user_id']
+
+    def test_get_servers_by_new_filter_for_non_admin(self):
+        def fake_get_all(context, search_opts=None, **kwargs):
+            self.assertIsNotNone(search_opts)
+            for f in self.filters:
+                self.assertIn(f, search_opts)
+            return objects.InstanceList(
+                objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
+
+        self.mock_get_all.side_effect = fake_get_all
+
+        query_str = '&'.join('%s=test_value' % f for f in self.filters)
+        req = fakes.HTTPRequest.blank(self.path_with_query % query_str,
+                                      version='2.83')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(1, len(servers))
+        self.assertEqual(uuids.fake, servers[0]['id'])
+
+    def test_get_servers_new_filters_for_non_admin_old_version(self):
+        def fake_get_all(context, search_opts=None, **kwargs):
+            self.assertIsNotNone(search_opts)
+            for f in self.filters:
+                self.assertNotIn(f, search_opts)
+            return objects.InstanceList(
+                objects=[])
+
+        # Without this policy edit, the test would fail and the admin-only
+        # filters would take effect.
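+        # NOTE: setting 'os_compute_api:servers:index' to '' (always allow)
+        # leaves the separate 'allow_all_filters' rule as the only policy
+        # deciding which query filters are honored for this request.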
+ self.policy.set_rules({'os_compute_api:servers:index': ''}) + self.mock_get_all.side_effect = fake_get_all + + query_str = '&'.join('%s=test_value' % f for f in self.filters) + req = fakes.HTTPRequest.blank(self.path_with_query % query_str, + version='2.82') + servers = self.controller.index(req)['servers'] + + self.assertEqual(0, len(servers)) + + def test_get_servers_by_node_fail_non_admin(self): + def fake_get_all(context, search_opts=None, **kwargs): + self.assertIsNotNone(search_opts) + self.assertNotIn('node', search_opts) + return objects.InstanceList( + objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)]) + + server_filter_rule = 'os_compute_api:servers:allow_all_filters' + self.policy.set_rules({'os_compute_api:servers:index': '', + server_filter_rule: 'role:admin'}) + self.mock_get_all.side_effect = fake_get_all + + query_str = "node=node1" + req = fakes.HTTPRequest.blank(self.path_with_query % query_str, + version='2.83') + servers = self.controller.index(req)['servers'] + + self.assertEqual(1, len(servers)) + self.assertEqual(uuids.fake, servers[0]['id']) + + class ServersControllerDeleteTest(ControllerTest): def setUp(self): @@ -4119,8 +4183,6 @@ fakes.stub_out_key_pair_funcs(self) fake.stub_out_image_service(self) - self.stub_out('nova.db.api.project_get_networks', - lambda c, u: dict(id='1', host='localhost')) self.stub_out('nova.db.api.instance_create', instance_create) self.stub_out('nova.db.api.instance_system_metadata_update', lambda *a, **kw: None) @@ -4253,6 +4315,22 @@ "Flavor's disk is too small for requested image."): self.controller.create(self.req, body=self.body) + @mock.patch.object(fake._FakeImageService, 'show', + return_value=dict( + id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', + status='active', + properties=dict( + cinder_encryption_key_id=fakes.FAKE_UUID))) + def test_create_server_image_nonbootable(self, mock_show): + self.req.body = jsonutils.dump_as_bytes(self.body) + + expected_msg = ("Image {} is unacceptable: Direct booting of an image " + "uploaded from an encrypted volume is unsupported.") + with testtools.ExpectedException( + webob.exc.HTTPBadRequest, + expected_msg.format(self.image_uuid)): + self.controller.create(self.req, body=self.body) + def test_create_instance_with_image_non_uuid(self): self.body['server']['imageRef'] = 'not-uuid' self.assertRaises(exception.ValidationError, @@ -8022,37 +8100,6 @@ self._invalid_server_create(body=body) -# TODO(alex_xu): There isn't specified file for ips extension. Most of -# unittest related to ips extension is in this file. So put the ips policy -# enforcement tests at here until there is specified file for ips extension. -class IPsPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(IPsPolicyEnforcementV21, self).setUp() - self.controller = ips.IPsController() - self.req = fakes.HTTPRequest.blank("/v2/%s" % fakes.FAKE_PROJECT_ID) - - def test_index_policy_failed(self): - rule_name = "os_compute_api:ips:index" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller.index, self.req, fakes.FAKE_UUID) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - def test_show_policy_failed(self): - rule_name = "os_compute_api:ips:show" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - class ServersPolicyEnforcementV21(test.NoDBTestCase): def setUp(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_tags.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_tags.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_tags.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_tags.py 2020-04-10 17:57:57.000000000 +0000 @@ -51,6 +51,7 @@ super(ServerTagsTest, self).setUp() self.controller = server_tags.ServerTagsController() inst_map = objects.InstanceMapping( + project_id=fakes.FAKE_PROJECT_ID, cell_mapping=objects.CellMappingList.get_all( context.get_admin_context())[1]) self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_topology.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_topology.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_server_topology.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_server_topology.py 2020-04-10 17:57:57.000000000 +0000 @@ -11,6 +11,7 @@ # License for the specific language governing permissions and limitations # under the License. 
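+# NOTE: 'fixtures' supplies the MockPatch helper used further down in
+# ServerTopologyEnforcementV278.setUp() to stub out
+# nova.api.openstack.common.get_instance.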
+import fixtures import mock from oslo_utils.fixture import uuidsentinel as uuids from webob import exc @@ -22,6 +23,7 @@ from nova.objects import instance_numa as numa from nova import test from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance class ServerTopologyTestV278(test.NoDBTestCase): @@ -36,6 +38,7 @@ version=self.api_version, use_admin_context=True) self.controller = server_topology.ServerTopologyController() + self.context = self.req.environ['nova.context'] def _fake_numa(self, cpu_pinning=None): ce0 = numa.InstanceNUMACell(node=0, memory=1024, pagesize=4, id=0, @@ -57,7 +60,8 @@ @mock.patch.object(common, 'get_instance') def test_get_topology_with_no_topology(self, fake_get): expect = {'nodes': [], 'pagesize_kb': None} - inst = objects.instance.Instance(uuid=self.uuid, host='123') + inst = objects.instance.Instance(uuid=self.uuid, host='123', + project_id=self.context.project_id) inst.numa_topology = None fake_get.return_value = inst @@ -74,7 +78,8 @@ 'cpu_pinning':{}}], 'pagesize_kb': 4} - inst = objects.instance.Instance(uuid=self.uuid, host='123') + inst = objects.instance.Instance(uuid=self.uuid, host='123', + project_id=self.context.project_id) inst.numa_topology = self._fake_numa(cpu_pinning=None) fake_get.return_value = inst @@ -104,6 +109,12 @@ super(ServerTopologyEnforcementV278, self).setUp() self.controller = server_topology.ServerTopologyController() self.req = fakes.HTTPRequest.blank('', version=self.api_version) + context = self.req.environ['nova.context'] + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + self.instance = fake_instance.fake_instance_obj( + context, id=1, project_id=context.project_id) + self.mock_get.return_value = self.instance def test_get_topology_policy_failed(self): rule_name = "compute:server:topology:index" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_shelve.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_shelve.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_shelve.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_shelve.py 2020-04-10 17:57:57.000000000 +0000 @@ -13,7 +13,6 @@ # under the License. 
import mock -from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel import six @@ -24,7 +23,6 @@ from nova.compute import task_states from nova.compute import vm_states from nova import exception -from nova import policy from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_instance @@ -83,123 +81,6 @@ self.req, uuidsentinel.fake, {}) -class ShelvePolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(ShelvePolicyEnforcementV21, self).setUp() - self.controller = shelve_v21.ShelveController() - self.req = fakes.HTTPRequest.blank('') - - @mock.patch('nova.api.openstack.common.get_instance') - def test_shelve_restricted_by_role(self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rules = {'os_compute_api:os-shelve:shelve': 'role:admin'} - policy.set_rules(oslo_policy.Rules.from_dict(rules)) - - self.assertRaises(exception.Forbidden, self.controller._shelve, - self.req, uuidsentinel.fake, {}) - - @mock.patch('nova.api.openstack.common.get_instance') - def test_shelve_policy_failed_with_other_project(self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rule_name = "os_compute_api:os-shelve:shelve" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - # Change the project_id in request context. - self.req.environ['nova.context'].project_id = 'other-project' - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._shelve, self.req, fakes.FAKE_UUID, - body={'shelve': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.shelve') - @mock.patch('nova.api.openstack.common.get_instance') - def test_shelve_overridden_policy_pass_with_same_project(self, - get_instance_mock, - shelve_mock): - instance = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - project_id=self.req.environ['nova.context'].project_id) - get_instance_mock.return_value = instance - rule_name = "os_compute_api:os-shelve:shelve" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - self.controller._shelve(self.req, fakes.FAKE_UUID, body={'shelve': {}}) - shelve_mock.assert_called_once_with(self.req.environ['nova.context'], - instance) - - @mock.patch('nova.api.openstack.common.get_instance') - def test_shelve_overridden_policy_failed_with_other_user_in_same_project( - self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rule_name = "os_compute_api:os-shelve:shelve" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - # Change the user_id in request context. - self.req.environ['nova.context'].user_id = 'other-user' - exc = self.assertRaises(exception.PolicyNotAuthorized, - self.controller._shelve, self.req, - fakes.FAKE_UUID, body={'shelve': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.shelve') - @mock.patch('nova.api.openstack.common.get_instance') - def test_shelve_overridden_policy_pass_with_same_user(self, - get_instance_mock, - shelve_mock): - instance = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - user_id=self.req.environ['nova.context'].user_id) - get_instance_mock.return_value = instance - rule_name = "os_compute_api:os-shelve:shelve" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - self.controller._shelve(self.req, fakes.FAKE_UUID, body={'shelve': {}}) - shelve_mock.assert_called_once_with(self.req.environ['nova.context'], - instance) - - def test_shelve_offload_restricted_by_role(self): - rules = {'os_compute_api:os-shelve:shelve_offload': 'role:admin'} - policy.set_rules(oslo_policy.Rules.from_dict(rules)) - - self.assertRaises(exception.Forbidden, - self.controller._shelve_offload, self.req, - uuidsentinel.fake, {}) - - def test_shelve_offload_policy_failed(self): - rule_name = "os_compute_api:os-shelve:shelve_offload" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._shelve_offload, self.req, fakes.FAKE_UUID, - body={'shelve_offload': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - def test_unshelve_restricted_by_role(self): - rules = {'os_compute_api:os-shelve:unshelve': 'role:admin'} - policy.set_rules(oslo_policy.Rules.from_dict(rules)) - - self.assertRaises(exception.Forbidden, self.controller._unshelve, - self.req, uuidsentinel.fake, body={'unshelve': {}}) - - def test_unshelve_policy_failed(self): - rule_name = "os_compute_api:os-shelve:unshelve" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._unshelve, self.req, fakes.FAKE_UUID, - body={'unshelve': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - class UnshelveServerControllerTestV277(test.NoDBTestCase): """Server controller test for microversion 2.77 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_suspend_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_suspend_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_suspend_server.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_suspend_server.py 2020-04-10 17:57:57.000000000 +0000 @@ -18,12 +18,9 @@ from nova.api.openstack.compute import suspend_server as \ suspend_server_v21 -from nova import exception from nova import objects -from nova import test from nova.tests.unit.api.openstack.compute import admin_only_action_common from nova.tests.unit.api.openstack import fakes -from nova.tests.unit import fake_instance class SuspendServerTestsV21(admin_only_action_common.CommonTests): @@ -62,71 +59,3 @@ def test_actions_with_locked_instance(self): self._test_actions_with_locked_instance(['_suspend', '_resume']) - - -class SuspendServerPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(SuspendServerPolicyEnforcementV21, self).setUp() - self.controller = suspend_server_v21.SuspendServerController() - self.req = fakes.HTTPRequest.blank('') - - @mock.patch('nova.api.openstack.common.get_instance') - def test_suspend_policy_failed_with_other_project(self, get_instance_mock): - get_instance_mock.return_value = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - project_id=self.req.environ['nova.context'].project_id) - rule_name = "os_compute_api:os-suspend-server:suspend" - self.policy.set_rules({rule_name: "project_id:%(project_id)s"}) - # Change the project_id in request context. - self.req.environ['nova.context'].project_id = 'other-project' - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._suspend, self.req, fakes.FAKE_UUID, - body={'suspend': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - @mock.patch('nova.api.openstack.common.get_instance') - def test_suspend_overridden_policy_failed_with_other_user_in_same_project( - self, get_instance_mock): - get_instance_mock.return_value = ( - fake_instance.fake_instance_obj(self.req.environ['nova.context'])) - rule_name = "os_compute_api:os-suspend-server:suspend" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - # Change the user_id in request context. - self.req.environ['nova.context'].user_id = 'other-user' - exc = self.assertRaises(exception.PolicyNotAuthorized, - self.controller._suspend, self.req, - fakes.FAKE_UUID, body={'suspend': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - @mock.patch('nova.compute.api.API.suspend') - @mock.patch('nova.api.openstack.common.get_instance') - def test_suspend_overridden_policy_pass_with_same_user(self, - get_instance_mock, - suspend_mock): - instance = fake_instance.fake_instance_obj( - self.req.environ['nova.context'], - user_id=self.req.environ['nova.context'].user_id) - get_instance_mock.return_value = instance - rule_name = "os_compute_api:os-suspend-server:suspend" - self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) - self.controller._suspend(self.req, fakes.FAKE_UUID, - body={'suspend': {}}) - suspend_mock.assert_called_once_with(self.req.environ['nova.context'], - instance) - - def test_resume_policy_failed(self): - rule_name = "os_compute_api:os-suspend-server:resume" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller._resume, self.req, fakes.FAKE_UUID, - body={'resume': {}}) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_urlmap.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_urlmap.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_urlmap.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_urlmap.py 2020-04-10 17:57:57.000000000 +0000 @@ -114,3 +114,25 @@ self.assertEqual("application/json", res.content_type) body = jsonutils.loads(res.body) self.assertEqual('v2.1', body['version']['id']) + + def test_script_name_path_info(self): + """Ensure URLMap preserves SCRIPT_NAME and PATH_INFO correctly.""" + data = ( + ('', '', ''), + ('/', '', '/'), + ('/v2', '/v2', ''), + ('/v2/', '/v2', '/'), + ('/v2.1', '/v2.1', ''), + ('/v2.1/', '/v2.1', '/'), + ('/v2/foo', '/v2', '/foo'), + ('/v2.1/foo', '/v2.1', '/foo'), + ('/bar/baz', '', '/bar/baz') + ) + app = fakes.wsgi_app_v21() + for url, exp_script_name, exp_path_info in data: + req = fakes.HTTPRequest.blank(url) + req.get_response(app) + # The app uses /v2 as the base URL :( + exp_script_name = '/v2' + exp_script_name + self.assertEqual(exp_script_name, req.environ['SCRIPT_NAME']) + self.assertEqual(exp_path_info, req.environ['PATH_INFO']) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_versions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_versions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_versions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_versions.py 2020-04-10 17:57:57.000000000 +0000 @@ -141,14 +141,6 @@ ] self.assertEqual(expected, versions) - def test_get_version_list_302(self): - req = fakes.HTTPRequest.blank('/v2') - req.accept = "application/json" - res = req.get_response(self.wsgi_app) - self.assertEqual(302, res.status_int) - redirect_req = fakes.HTTPRequest.blank('/v2/') - self.assertEqual(redirect_req.url, res.location) - def _test_get_version_2_detail(self, url, accept=None): if accept is None: accept = "application/json" @@ -252,7 +244,7 @@ """Make sure multi choice responses do not have content-type application/atom+xml (should use default of json) """ - req = fakes.HTTPRequest.blank('/servers') + req = 
fakes.HTTPRequest.blank('/servers', base_url='') req.accept = "application/atom+xml" res = req.get_response(self.wsgi_app) self.assertEqual(300, res.status_int) @@ -448,14 +440,6 @@ def wsgi_app(self): return fakes.wsgi_app_v21() - def test_get_version_list_302(self): - req = fakes.HTTPRequest.blank('/v2.1') - req.accept = "application/json" - res = req.get_response(self.wsgi_app) - self.assertEqual(302, res.status_int) - redirect_req = fakes.HTTPRequest.blank('/v2.1/') - self.assertEqual(redirect_req.url, res.location) - def test_get_version_21_detail(self): req = fakes.HTTPRequest.blank('/v2.1/', base_url='') req.accept = "application/json" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_volumes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_volumes.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/compute/test_volumes.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/compute/test_volumes.py 2020-04-10 17:57:57.000000000 +0000 @@ -39,6 +39,7 @@ from nova import context from nova import exception from nova import objects +from nova.objects import block_device as block_device_obj from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_block_device @@ -62,7 +63,8 @@ def fake_get_instance(self, context, instance_id, expected_attrs=None, cell_down_support=False): - return fake_instance.fake_instance_obj(context, **{'uuid': instance_id}) + return fake_instance.fake_instance_obj( + context, id=1, uuid=instance_id, project_id=context.project_id) def fake_get_volume(self, context, id): @@ -730,8 +732,9 @@ side_effect=exception.InstanceIsLocked( instance_uuid=uuids.instance)) def test_swap_volume_for_locked_server(self, mock_swap_volume): - self.assertRaises(webob.exc.HTTPConflict, self._test_swap, - self.attachments) + with mock.patch.object(self.attachments, '_update_volume_regular'): + self.assertRaises(webob.exc.HTTPConflict, self._test_swap, + self.attachments) mock_swap_volume.assert_called_once_with( self.req.environ['nova.context'], test.MatchType(objects.Instance), {'attach_status': 'attached', @@ -770,8 +773,9 @@ mock_get.side_effect = [ None, exception.VolumeNotFound(volume_id=FAKE_UUID_C)] body = {'volumeAttachment': {'volumeId': FAKE_UUID_C}} - self.assertRaises(exc.HTTPBadRequest, self._test_swap, - self.attachments, body=body) + with mock.patch.object(self.attachments, '_update_volume_regular'): + self.assertRaises(exc.HTTPBadRequest, self._test_swap, + self.attachments, body=body) mock_get.assert_has_calls([ mock.call(self.req.environ['nova.context'], FAKE_UUID_A), mock.call(self.req.environ['nova.context'], FAKE_UUID_C)]) @@ -795,17 +799,30 @@ @mock.patch.object(compute_api.API, 'swap_volume', side_effect=exception.VolumeBDMNotFound( volume_id=FAKE_UUID_B)) - def test_swap_volume_for_bdm_not_found(self, mock_swap_volume): + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance', + side_effect=exception.VolumeBDMNotFound( + volume_id=FAKE_UUID_A)) + def test_swap_volume_for_bdm_not_found(self, mock_bdm, mock_swap_volume): self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, self.attachments) - mock_swap_volume.assert_called_once_with( - self.req.environ['nova.context'], test.MatchType(objects.Instance), - {'attach_status': 'attached', - 'status': 'in-use', - 'id': FAKE_UUID_A}, - {'attach_status': 'detached', - 'status': 'available', - 
'id': FAKE_UUID_B}) + if mock_bdm.called: + # New path includes regular PUT procedure + mock_bdm.assert_called_once_with(self.req.environ['nova.context'], + FAKE_UUID_A, uuids.instance) + mock_swap_volume.assert_not_called() + else: + # Old path is pure swap-volume + mock_bdm.assert_not_called() + mock_swap_volume.assert_called_once_with( + self.req.environ['nova.context'], + test.MatchType(objects.Instance), + {'attach_status': 'attached', + 'status': 'in-use', + 'id': FAKE_UUID_A}, + {'attach_status': 'detached', + 'status': 'available', + 'id': FAKE_UUID_B}) def _test_list_with_invalid_filter(self, url): req = self._build_request(url) @@ -1092,6 +1109,431 @@ self.assertIn("Invalid input for field/attribute " "delete_on_termination.", six.text_type(ex)) + @mock.patch('nova.compute.api.API.attach_volume', return_value=None) + def test_attach_volume_v279(self, mock_attach_volume): + body = {'volumeAttachment': {'volumeId': FAKE_UUID_A, + 'delete_on_termination': True}} + req = self._get_req(body) + result = self.attachments.create(req, FAKE_UUID, body=body) + self.assertTrue(result['volumeAttachment']['delete_on_termination']) + mock_attach_volume.assert_called_once_with( + req.environ['nova.context'], test.MatchType(objects.Instance), + FAKE_UUID_A, None, tag=None, supports_multiattach=True, + delete_on_termination=True) + + def test_show_pre_v279(self): + """Before microversion 2.79, show a detail of a volume attachment + does not contain the 'delete_on_termination' field in the response + body. + """ + req = self._get_req(body={}, microversion='2.78') + req.method = 'GET' + result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A) + + self.assertNotIn('delete_on_termination', result['volumeAttachment']) + + @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') + def test_list_pre_v279(self, mock_get_bdms): + """Before microversion 2.79, list of a volume attachment + does not contain the 'delete_on_termination' field in the response + body. + """ + req = fakes.HTTPRequest.blank( + '/v2/servers/id/os-volume_attachments', + version="2.78") + req.body = jsonutils.dump_as_bytes({}) + req.method = 'GET' + req.headers['content-type'] = 'application/json' + + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=True, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + bdms = objects.BlockDeviceMappingList(objects=[vol_bdm]) + + mock_get_bdms.return_value = bdms + result = self.attachments.index(req, FAKE_UUID) + + self.assertNotIn('delete_on_termination', result['volumeAttachments']) + + +class UpdateVolumeAttachTests(VolumeAttachTestsV279): + microversion = '2.85' + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_swap_volume(self, mock_save_bdm, mock_get_bdm): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_bdm.return_value = vol_bdm + # On the newer microversion, this test will try to look up the + # BDM to check for update of other fields. 
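+        # NOTE: with get_by_volume_and_instance and BlockDeviceMapping.save
+        # mocked out above, the parent test's PUT can resolve a matching BDM
+        # for the 2.85 field comparison without hitting the database before
+        # it reaches the swap_volume path being asserted.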
+ super(UpdateVolumeAttachTests, self).test_swap_volume() + + def test_swap_volume_with_extra_arg(self): + # NOTE(danms): Override this from parent because now device + # is checked for unchanged-ness. + body = {'volumeAttachment': {'volumeId': FAKE_UUID_A, + 'device': '/dev/fake0', + 'notathing': 'foo'}} + + self.assertRaises(self.validation_error, + self._test_swap, + self.attachments, + body=body) + + @mock.patch.object(compute_api.API, 'swap_volume') + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_update_volume(self, mock_bdm_save, + mock_get_vol_and_inst, mock_swap): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'tag': 'fake-tag', + 'delete_on_termination': True, + 'device': '/dev/fake0', + }} + self.attachments.update(self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_swap.assert_not_called() + mock_bdm_save.assert_called_once() + self.assertTrue(vol_bdm['delete_on_termination']) + + @mock.patch.object(compute_api.API, 'swap_volume') + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_update_volume_with_bool_from_string( + self, mock_bdm_save, mock_get_vol_and_inst, mock_swap): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=True, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'tag': 'fake-tag', + 'delete_on_termination': 'False', + 'device': '/dev/fake0', + }} + self.attachments.update(self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_swap.assert_not_called() + mock_bdm_save.assert_called_once() + self.assertFalse(vol_bdm['delete_on_termination']) + + # Update delete_on_termination to False + body['volumeAttachment']['delete_on_termination'] = '0' + self.attachments.update(self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_swap.assert_not_called() + mock_bdm_save.assert_called() + self.assertFalse(vol_bdm['delete_on_termination']) + + # Update delete_on_termination to True + body['volumeAttachment']['delete_on_termination'] = '1' + self.attachments.update(self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_swap.assert_not_called() + mock_bdm_save.assert_called() + self.assertTrue(vol_bdm['delete_on_termination']) + + @mock.patch.object(compute_api.API, 'swap_volume') + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_update_volume_swap(self, mock_bdm_save, + mock_get_vol_and_inst, mock_swap): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + 
attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_B, + 'tag': 'fake-tag', + 'delete_on_termination': True, + }} + self.attachments.update(self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_bdm_save.assert_called_once() + self.assertTrue(vol_bdm['delete_on_termination']) + # Swap volume is tested elsewhere, just make sure that we did + # attempt to call it in addition to updating the BDM + self.assertTrue(mock_swap.called) + + @mock.patch.object(compute_api.API, 'swap_volume') + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save') + def test_update_volume_swap_only_old_microversion( + self, mock_bdm_save, mock_get_vol_and_inst, mock_swap): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_B, + }} + req = self._get_req(body, microversion='2.84') + self.attachments.update(req, FAKE_UUID, + FAKE_UUID_A, body=body) + mock_swap.assert_called_once() + mock_bdm_save.assert_not_called() + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance', + side_effect=exception.VolumeBDMNotFound( + volume_id=FAKE_UUID_A)) + def test_update_volume_with_invalid_volume_id(self, mock_mr): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'delete_on_termination': True, + }} + self.assertRaises(exc.HTTPNotFound, + self.attachments.update, + self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_attachment_id(self, + mock_get_vol_and_inst): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'id': uuids.attachment_id2, + }} + self.assertRaises(exc.HTTPBadRequest, + self.attachments.update, + self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_attachment_id_old_microversion( + self, mock_get_vol_and_inst): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'id': uuids.attachment_id, + }} + req = self._get_req(body, microversion='2.84') + ex = self.assertRaises(exception.ValidationError, + self.attachments.update, + req, FAKE_UUID, + FAKE_UUID_A, body=body) + self.assertIn('Additional properties are not allowed', + six.text_type(ex)) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_serverId(self, + mock_get_vol_and_inst): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + 
device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'serverId': uuids.server_id, + }} + self.assertRaises(exc.HTTPBadRequest, + self.attachments.update, + self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_serverId_old_microversion( + self, mock_get_vol_and_inst): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'serverId': uuids.server_id, + }} + req = self._get_req(body, microversion='2.84') + ex = self.assertRaises(exception.ValidationError, + self.attachments.update, + req, FAKE_UUID, + FAKE_UUID_A, body=body) + self.assertIn('Additional properties are not allowed', + six.text_type(ex)) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_device(self, mock_get_vol_and_inst): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'device': '/dev/sdz', + }} + self.assertRaises(exc.HTTPBadRequest, + self.attachments.update, + self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + + def test_update_volume_with_device_name_old_microversion(self): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'device': '/dev/fake0', + }} + req = self._get_req(body, microversion='2.84') + ex = self.assertRaises(exception.ValidationError, + self.attachments.update, + req, FAKE_UUID, + FAKE_UUID_A, body=body) + self.assertIn('Additional properties are not allowed', + six.text_type(ex)) + + @mock.patch.object(objects.BlockDeviceMapping, + 'get_by_volume_and_instance') + def test_update_volume_with_changed_tag(self, mock_get_vol_and_inst): + vol_bdm = objects.BlockDeviceMapping( + self.context, + id=1, + instance_uuid=FAKE_UUID, + volume_id=FAKE_UUID_A, + source_type='volume', + destination_type='volume', + delete_on_termination=False, + connection_info=None, + tag='fake-tag', + device_name='/dev/fake0', + attachment_id=uuids.attachment_id) + mock_get_vol_and_inst.return_value = vol_bdm + + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'tag': 'icanhaznewtag', + }} + self.assertRaises(exc.HTTPBadRequest, + self.attachments.update, + self.req, FAKE_UUID, + FAKE_UUID_A, body=body) + + def test_update_volume_with_tag_old_microversion(self): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'tag': 'fake-tag', + }} + req = self._get_req(body, microversion='2.84') + ex = self.assertRaises(exception.ValidationError, + self.attachments.update, + req, FAKE_UUID, + FAKE_UUID_A, body=body) + self.assertIn('Additional properties are not allowed', + six.text_type(ex)) + + def test_update_volume_with_delete_flag_old_microversion(self): + body = {'volumeAttachment': { + 'volumeId': FAKE_UUID_A, + 'delete_on_termination': True, + }} + req = self._get_req(body, microversion='2.84') + ex = self.assertRaises(exception.ValidationError, + self.attachments.update, + req, FAKE_UUID, + FAKE_UUID_A, body=body) + self.assertIn('Additional properties are not allowed', + six.text_type(ex)) + class 
SwapVolumeMultiattachTestCase(test.NoDBTestCase): @@ -1450,90 +1892,6 @@ self.controller.delete, req, 1) -class TestAssistedVolumeSnapshotsPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(TestAssistedVolumeSnapshotsPolicyEnforcementV21, self).setUp() - self.controller = ( - assisted_snaps_v21.AssistedVolumeSnapshotsController()) - self.req = fakes.HTTPRequest.blank('') - - def test_create_assisted_volumes_snapshots_policy_failed(self): - rule_name = "os_compute_api:os-assisted-volume-snapshots:create" - self.policy.set_rules({rule_name: "project:non_fake"}) - body = {'snapshot': - {'volume_id': '1', - 'create_info': {'type': 'qcow2', - 'new_file': 'new_file', - 'snapshot_id': 'snapshot_id'}}} - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller.create, self.req, body=body) - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - def test_delete_assisted_volumes_snapshots_policy_failed(self): - rule_name = "os_compute_api:os-assisted-volume-snapshots:delete" - self.policy.set_rules({rule_name: "project:non_fake"}) - exc = self.assertRaises( - exception.PolicyNotAuthorized, - self.controller.delete, self.req, '5') - - self.assertEqual( - "Policy doesn't allow %s to be performed." % rule_name, - exc.format_message()) - - -class TestVolumeAttachPolicyEnforcementV21(test.NoDBTestCase): - - def setUp(self): - super(TestVolumeAttachPolicyEnforcementV21, self).setUp() - self.controller = volumes_v21.VolumeAttachmentController() - self.req = fakes.HTTPRequest.blank('') - - def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg): - self.policy.set_rules(rules) - exc = self.assertRaises( - exception.PolicyNotAuthorized, func, *arg, **kwarg) - self.assertEqual( - "Policy doesn't allow %s to be performed." 
% rule_name, - exc.format_message()) - - def test_index_volume_attach_policy_failed(self): - rule_name = "os_compute_api:os-volumes-attachments:index" - rules = {rule_name: "project:non_fake"} - self._common_policy_check(rules, rule_name, - self.controller.index, self.req, FAKE_UUID) - - def test_show_volume_attach_policy_failed(self): - rule_name = "os_compute_api:os-volumes-attachments:show" - rules = {rule_name: "project:non_fake"} - self._common_policy_check(rules, rule_name, self.controller.show, - self.req, FAKE_UUID, FAKE_UUID_A) - - def test_create_volume_attach_policy_failed(self): - rule_name = "os_compute_api:os-volumes-attachments:create" - rules = {rule_name: "project:non_fake"} - body = {'volumeAttachment': {'volumeId': FAKE_UUID_A, - 'device': '/dev/fake'}} - self._common_policy_check(rules, rule_name, self.controller.create, - self.req, FAKE_UUID, body=body) - - def test_update_volume_attach_policy_failed(self): - rule_name = "os_compute_api:os-volumes-attachments:update" - rules = {rule_name: "project:non_fake"} - body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}} - self._common_policy_check(rules, rule_name, self.controller.update, - self.req, FAKE_UUID, FAKE_UUID_A, body=body) - - def test_delete_volume_attach_policy_failed(self): - rule_name = "os_compute_api:os-volumes-attachments:delete" - rules = {rule_name: "project:non_fake"} - self._common_policy_check(rules, rule_name, self.controller.delete, - self.req, FAKE_UUID, FAKE_UUID_A) - - class TestVolumesAPIDeprecation(test.NoDBTestCase): def setUp(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/fakes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/fakes.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/openstack/fakes.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/openstack/fakes.py 2020-04-10 17:57:57.000000000 +0000 @@ -62,23 +62,32 @@ def wsgi_app_v21(fake_auth_context=None, v2_compatible=False, custom_routes=None): + # NOTE(efried): Keep this (roughly) in sync with api-paste.ini - inner_app_v21 = compute.APIRouterV21(custom_routes=custom_routes) + def wrap(app, use_context=False): + if v2_compatible: + app = openstack_api.LegacyV2CompatibleWrapper(app) + + if use_context: + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext( + 'fake', FAKE_PROJECT_ID, auth_token=True) + app = api_auth.InjectContext(ctxt, app) + + app = openstack_api.FaultWrapper(app) - if v2_compatible: - inner_app_v21 = openstack_api.LegacyV2CompatibleWrapper(inner_app_v21) + return app + + inner_app_v21 = compute.APIRouterV21(custom_routes=custom_routes) - if fake_auth_context is not None: - ctxt = fake_auth_context - else: - ctxt = context.RequestContext( - 'fake', FAKE_PROJECT_ID, auth_token=True) - api_v21 = openstack_api.FaultWrapper( - api_auth.InjectContext(ctxt, inner_app_v21)) mapper = urlmap.URLMap() - mapper['/v2'] = api_v21 - mapper['/v2.1'] = api_v21 - mapper['/'] = openstack_api.FaultWrapper(versions.Versions()) + mapper['/'] = wrap(versions.Versions()) + mapper['/v2'] = wrap(versions.VersionsV2()) + mapper['/v2.1'] = wrap(versions.VersionsV2()) + mapper['/v2/+'] = wrap(inner_app_v21, use_context=True) + mapper['/v2.1/+'] = wrap(inner_app_v21, use_context=True) return mapper diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/validation/extra_specs/test_validators.py 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/validation/extra_specs/test_validators.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/api/validation/extra_specs/test_validators.py	1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/api/validation/extra_specs/test_validators.py	2020-04-10 17:57:57.000000000 +0000
@@ -0,0 +1,133 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from nova.api.validation.extra_specs import validators
+from nova import exception
+from nova import test
+
+
+class TestValidators(test.NoDBTestCase):
+
+    def test_namespaces(self):
+        """Ensure we see at least the in-tree namespaces.
+
+        If we add new namespaces, they should be added to this list.
+        """
+        namespaces = {
+            'accel', 'aggregate_instance_extra_specs', 'capabilities', 'hw',
+            'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'powervm', 'quota',
+            'resources(?P<group>(_[a-zA-z0-9_]*|\\d+)?)',
+            'trait(?P<group>(_[a-zA-z0-9_]*|\\d+)?)', 'vmware',
+        }
+        self.assertTrue(
+            namespaces.issubset(validators.NAMESPACES),
+            f'{namespaces} is not a subset of {validators.NAMESPACES}',
+        )
+
+    def test_spec(self):
+        unknown_namespaces = (
+            ('hhw:cpu_realtime_mask', '^0'),
+            ('w:cpu_realtime_mask', '^0'),
+            ('hw_cpu_realtime_mask', '^0'),
+            ('foo', 'bar'),
+        )
+        for key, value in unknown_namespaces:
+            validators.validate(key, value)
+
+        known_invalid_namespaces = (
+            ('hw:cpu_realtime_maskk', '^0'),
+            ('hw:cpu_realtime_mas', '^0'),
+            ('hw:foo', 'bar'),
+        )
+        for key, value in known_invalid_namespaces:
+            with testtools.ExpectedException(exception.ValidationError):
+                validators.validate(key, value)
+
+    def test_value__str(self):
+        valid_specs = (
+            # patterns
+            ('hw:cpu_realtime_mask', '^0'),
+            ('hw:cpu_realtime_mask', '^0,2-3,1'),
+            ('hw:mem_page_size', 'large'),
+            ('hw:mem_page_size', '2kbit'),
+            ('hw:mem_page_size', '1GB'),
+            # enums
+            ('hw:cpu_thread_policy', 'prefer'),
+            ('hw:emulator_threads_policy', 'isolate'),
+            ('hw:pci_numa_affinity_policy', 'legacy'),
+        )
+        for key, value in valid_specs:
+            validators.validate(key, value)
+
+        invalid_specs = (
+            # patterns
+            ('hw:cpu_realtime_mask', '0'),
+            ('hw:cpu_realtime_mask', '^0,2-3,b'),
+            ('hw:mem_page_size', 'largest'),
+            ('hw:mem_page_size', '2kbits'),
+            ('hw:mem_page_size', '1gigabyte'),
+            # enums
+            ('hw:cpu_thread_policy', 'preferred'),
+            ('hw:emulator_threads_policy', 'iisolate'),
+            ('hw:pci_numa_affinity_policy', 'lgacy'),
+        )
+        for key, value in invalid_specs:
+            with testtools.ExpectedException(exception.ValidationError):
+                validators.validate(key, value)
+
+    def test_value__int(self):
+        valid_specs = (
+            ('hw:numa_nodes', '1'),
+            ('os:monitors', '1'),
+            ('powervm:shared_weight', '1'),
+            ('os:monitors', '8'),
+            ('powervm:shared_weight', '255'),
+        )
+        for key, value in valid_specs:
+            validators.validate(key, value)
+
+        invalid_specs = (
+            ('hw:serial_port_count', 'five'),  # NaN
+            ('hw:serial_port_count', '!'),  # NaN
+            ('hw:numa_nodes',
'0'), # has min + ('os:monitors', '0'), # has min + ('powervm:shared_weight', '-1'), # has min + ('os:monitors', '9'), # has max + ('powervm:shared_weight', '256'), # has max + ) + for key, value in invalid_specs: + with testtools.ExpectedException(exception.ValidationError): + validators.validate(key, value) + + def test_value__bool(self): + valid_specs = ( + ('hw:cpu_realtime', '1'), + ('hw:cpu_realtime', '0'), + ('hw:mem_encryption', 'true'), + ('hw:boot_menu', 'y'), + ) + for key, value in valid_specs: + validators.validate(key, value) + + invalid_specs = ( + ('hw:cpu_realtime', '2'), + ('hw:cpu_realtime', '00'), + ('hw:mem_encryption', 'tru'), + ('hw:boot_menu', 'yah'), + ) + for key, value in invalid_specs: + with testtools.ExpectedException(exception.ValidationError): + validators.validate(key, value) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_baseproxy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_baseproxy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_baseproxy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_baseproxy.py 2020-04-10 17:57:57.000000000 +0000 @@ -57,17 +57,20 @@ @mock.patch.object(logging, 'setup') @mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun') @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__', - return_value=None) + return_value=None) @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server') - def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log, - mock_exists): + @mock.patch('websockify.websocketproxy.select_ssl_version', + return_value=None) + def test_proxy(self, mock_select_ssl_version, mock_start, mock_init, + mock_gmr, mock_log, mock_exists): baseproxy.proxy('0.0.0.0', '6080') mock_log.assert_called_once_with(baseproxy.CONF, 'nova') mock_gmr.assert_called_once_with(version, conf=baseproxy.CONF) mock_init.assert_called_once_with( listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False, - cert='self.pem', key=None, ssl_only=False, - daemon=False, record=None, security_proxy=None, traffic=True, + cert='self.pem', key=None, ssl_only=False, ssl_ciphers=None, + ssl_minimum_version='default', daemon=False, record=None, + security_proxy=None, traffic=True, web='/usr/share/spice-html5', file_only=True, RequestHandlerClass=websocketproxy.NovaProxyRequestHandler) mock_start.assert_called_once_with() @@ -81,3 +84,19 @@ self.assertEqual(self.stderr.getvalue(), "SSL only and self.pem not found\n") mock_exit.assert_called_once_with(-1) + + @mock.patch('os.path.exists', return_value=True) + @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__', + return_value=None) + @mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server') + def test_proxy_ssl_settings(self, mock_start, mock_init, mock_exists): + self.flags(ssl_minimum_version='tlsv1_3', group='console') + self.flags(ssl_ciphers='ALL:!aNULL', group='console') + baseproxy.proxy('0.0.0.0', '6080') + mock_init.assert_called_once_with( + listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False, + cert='self.pem', key=None, ssl_only=False, + ssl_ciphers='ALL:!aNULL', ssl_minimum_version='tlsv1_3', + daemon=False, record=None, security_proxy=None, traffic=True, + web='/usr/share/spice-html5', file_only=True, + RequestHandlerClass=websocketproxy.NovaProxyRequestHandler) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_manage.py 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_manage.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_manage.py 2020-04-10 17:57:57.000000000 +0000 @@ -34,6 +34,7 @@ from nova.db.sqlalchemy import migration as sqla_migration from nova import exception from nova import objects +from nova.scheduler.client import report from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_requests @@ -2346,6 +2347,24 @@ self.cli = manage.PlacementCommands() self.useFixture(fixtures.MockPatch('nova.network.neutron.get_client')) + def test_heal_allocations_with_cell_instance_id(self): + """Test heal_allocations with both cell id and instance id""" + cell_uuid = uuidutils.generate_uuid() + instance_uuid = uuidutils.generate_uuid() + self.assertEqual(127, self.cli.heal_allocations( + instance_uuid=instance_uuid, + cell_uuid=cell_uuid)) + self.assertIn('The --cell and --instance options', + self.output.getvalue()) + + @mock.patch('nova.objects.CellMapping.get_by_uuid', + side_effect=exception.CellMappingNotFound(uuid='fake')) + def test_heal_allocations_with_cell_id_not_found(self, mock_get): + """Test the case where cell_id is not found""" + self.assertEqual(127, self.cli.heal_allocations(cell_uuid='fake')) + output = self.output.getvalue().strip() + self.assertEqual('Cell with uuid fake was not found.', output) + @ddt.data(-1, 0, "one") def test_heal_allocations_invalid_max_count(self, max_count): self.assertEqual(127, self.cli.heal_allocations(max_count=max_count)) @@ -2860,6 +2879,142 @@ neutron.update_port.assert_called_once_with( uuidsentinel.port_id, body=expected_update_body) + def test_audit_with_wrong_provider_uuid(self): + with mock.patch.object( + self.cli, '_get_resource_provider', + side_effect=exception.ResourceProviderNotFound( + name_or_uuid=uuidsentinel.fake_uuid)): + ret = self.cli.audit( + provider_uuid=uuidsentinel.fake_uuid) + self.assertEqual(127, ret) + output = self.output.getvalue() + self.assertIn( + 'Resource provider with UUID %s' % uuidsentinel.fake_uuid, + output) + + @mock.patch.object(manage.PlacementCommands, + '_check_orphaned_allocations_for_provider') + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get') + def _test_audit(self, get_resource_providers, check_orphaned_allocs, + verbose=False, delete=False, errors=False, found=False): + rps = [ + {"generation": 1, + "uuid": uuidsentinel.rp1, + "links": None, + "name": "rp1", + "parent_provider_uuid": None, + "root_provider_uuid": uuidsentinel.rp1}, + {"generation": 1, + "uuid": uuidsentinel.rp2, + "links": None, + "name": "rp2", + "parent_provider_uuid": None, + "root_provider_uuid": uuidsentinel.rp2}, + ] + get_resource_providers.return_value = fake_requests.FakeResponse( + 200, content=jsonutils.dumps({"resource_providers": rps})) + + if errors: + # We found one orphaned allocation per RP but RP1 got a fault + check_orphaned_allocs.side_effect = ((1, 1), (1, 0)) + elif found: + # We found one orphaned allocation per RP and had no faults + check_orphaned_allocs.side_effect = ((1, 0), (1, 0)) + else: + # No orphaned allocations are found for any of the RPs + check_orphaned_allocs.side_effect = ((0, 0), (0, 0)) + + ret = self.cli.audit(verbose=verbose, delete=delete) + if errors: + # Any fault stops the audit and provides a return code equal to 1 + expected_ret = 1 + elif found and delete: + # We found
orphaned allocations and deleted them + expected_ret = 4 + elif found and not delete: + # We found orphaned allocations but we left them + expected_ret = 3 + else: + # Nothing was found + expected_ret = 0 + self.assertEqual(expected_ret, ret) + + call1 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[0], delete) + call2 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[1], delete) + if errors: + # We stop checking other RPs once we got a fault + check_orphaned_allocs.assert_has_calls([call1]) + else: + # All the RPs are checked + check_orphaned_allocs.assert_has_calls([call1, call2]) + + if verbose and found: + output = self.output.getvalue() + self.assertIn('Processed 2 allocations', output) + if errors: + output = self.output.getvalue() + self.assertIn( + 'The Resource Provider %s had problems' % rps[0]["uuid"], + output) + + def test_audit_not_found_orphaned_allocs(self): + self._test_audit(found=False) + + def test_audit_found_orphaned_allocs_not_verbose(self): + self._test_audit(found=True) + + def test_audit_found_orphaned_allocs_verbose(self): + self._test_audit(found=True, verbose=True) + + def test_audit_found_orphaned_allocs_and_deleted_them(self): + self._test_audit(found=True, delete=True) + + def test_audit_found_orphaned_allocs_but_got_errors(self): + self._test_audit(errors=True) + + @mock.patch.object(manage.PlacementCommands, + '_delete_allocations_from_consumer') + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' + 'get_allocations_for_resource_provider') + @mock.patch.object(manage.PlacementCommands, + '_get_instances_and_current_migrations') + def test_check_orphaned_allocations_for_provider(self, + get_insts_and_migs, + get_allocs_for_rp, + delete_allocs): + provider = {"generation": 1, + "uuid": uuidsentinel.rp1, + "links": None, + "name": "rp1", + "parent_provider_uuid": None, + "root_provider_uuid": uuidsentinel.rp1} + compute_resources = {'VCPU': 1, 'MEMORY_MB': 2048, 'DISK_GB': 20} + allocations = { + # Some orphaned compute allocation + uuidsentinel.orphaned_alloc1: {'resources': compute_resources}, + # Some existing instance allocation + uuidsentinel.inst1: {'resources': compute_resources}, + # Some existing migration allocation + uuidsentinel.mig1: {'resources': compute_resources}, + # Some other allocation not related to Nova + uuidsentinel.other_alloc1: {'resources': {'CUSTOM_GOO'}}, + } + + get_insts_and_migs.return_value = ( + [uuidsentinel.inst1], + [uuidsentinel.mig1]) + get_allocs_for_rp.return_value = report.ProviderAllocInfo(allocations) + + ctxt = context.RequestContext() + placement = report.SchedulerReportClient() + ret = self.cli._check_orphaned_allocations_for_provider( + ctxt, placement, lambda x: x, provider, True) + get_allocs_for_rp.assert_called_once_with(ctxt, uuidsentinel.rp1) + delete_allocs.assert_called_once_with(ctxt, placement, provider, + uuidsentinel.orphaned_alloc1, + 'instance') + self.assertEqual((1, 0), ret) + class TestNovaManageMain(test.NoDBTestCase): """Tests the nova-manage:main() setup code.""" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_policy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_policy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_policy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_policy.py 2020-04-10 17:57:57.000000000 +0000 @@ -117,13 +117,18 @@ r.name for r in ia_policies.list_rules()] passing_rules = self.cmd._filter_rules( - context, 'os-instance-actions', 
target) + context, 'os-instance-actions:list', target) + passing_rules += self.cmd._filter_rules( + context, 'os-instance-actions:show', target) + passing_rules += self.cmd._filter_rules( + context, 'os-instance-actions:events', target) + passing_rules += self.cmd._filter_rules( + context, 'os-instance-actions:events:details', target) self.assertEqual(set(expected_rules), set(passing_rules)) def test_filter_rules_non_admin(self): context = nova_context.RequestContext() - rule_conditions = [base_policies.RULE_ANY, - base_policies.RULE_ADMIN_OR_OWNER] + rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER] expected_rules = [r.name for r in ia_policies.list_rules() if r.check_str in rule_conditions] self._check_filter_rules(context, expected_rules=expected_rules) @@ -150,8 +155,7 @@ db_context = nova_context.RequestContext(user_id='fake-user', project_id='fake-project') instance = fake_instance.fake_instance_obj(db_context) - rule_conditions = [base_policies.RULE_ANY, - base_policies.RULE_ADMIN_OR_OWNER] + rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER] expected_rules = [r.name for r in ia_policies.list_rules() if r.check_str in rule_conditions] self._check_filter_rules(db_context, instance, expected_rules) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_scheduler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_scheduler.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/cmd/test_scheduler.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/cmd/test_scheduler.py 2020-04-10 17:57:57.000000000 +0000 @@ -46,16 +46,3 @@ mock_serve.assert_called_once_with( service_create.return_value, workers=4) mock_wait.assert_called_once_with() - - @mock.patch('nova.service.Service.create') - @mock.patch('nova.service.serve') - @mock.patch('nova.service.wait') - @mock.patch('oslo_concurrency.processutils.get_worker_count') - def test_workers_fake_scheduler(self, get_worker_count, mock_wait, - mock_serve, service_create): - self.flags(driver='fake_scheduler', group='scheduler') - scheduler.main() - get_worker_count.assert_not_called() - mock_serve.assert_called_once_with( - service_create.return_value, workers=1) - mock_wait.assert_called_once_with() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_api.py 2020-04-10 17:57:57.000000000 +0000 @@ -41,7 +41,7 @@ from nova import context from nova.db import api as db from nova import exception -from nova.image import api as image_api +from nova.image import glance as image_api from nova.network import constants from nova.network import model from nova.network import neutron as neutron_api @@ -57,6 +57,7 @@ from nova.tests.unit import fake_block_device from nova.tests.unit import fake_build_request from nova.tests.unit import fake_instance +from nova.tests.unit import fake_request_spec from nova.tests.unit import fake_volume from nova.tests.unit.image import fake as fake_image from nova.tests.unit import matchers @@ -118,8 +119,14 @@ } if updates: flavor.update(updates) + + expected_attrs = None + if 'extra_specs' in updates and updates['extra_specs']: + expected_attrs = ['extra_specs'] + return 
objects.Flavor._from_db_object( - self.context, objects.Flavor(extra_specs={}), flavor) + self.context, objects.Flavor(extra_specs={}), flavor, + expected_attrs=expected_attrs) def _create_instance_obj(self, params=None, flavor=None): """Create a test instance.""" @@ -162,6 +169,7 @@ instance.info_cache = objects.InstanceInfoCache() instance.flavor = flavor instance.old_flavor = instance.new_flavor = None + instance.numa_topology = None if params: instance.update(params) @@ -1288,6 +1296,26 @@ mock_dealloc.assert_called_once_with(self.context, inst) + @mock.patch.object(compute_utils, 'notify_about_instance_action') + @mock.patch.object(objects.BlockDeviceMapping, 'destroy') + @mock.patch.object(cinder.API, 'detach') + @mock.patch.object(compute_utils, 'notify_about_instance_usage') + @mock.patch.object(neutron_api.API, 'deallocate_for_instance') + @mock.patch.object(context.RequestContext, 'elevated') + @mock.patch.object(objects.Instance, 'destroy') + @mock.patch.object(compute_utils, 'delete_arqs_if_needed') + def test_local_delete_for_arqs( + self, mock_del_arqs, mock_inst_destroy, mock_elevated, + mock_dealloc, mock_notify_legacy, mock_detach, + mock_bdm_destroy, mock_notify): + inst = self._create_instance_obj() + inst._context = self.context + mock_elevated.return_value = self.context + bdms = [] + self.compute_api._local_delete(self.context, inst, bdms, + 'delete', self._fake_do_delete) + mock_del_arqs.assert_called_once_with(self.context, inst) + @mock.patch.object(objects.BlockDeviceMapping, 'destroy') def test_local_cleanup_bdm_volumes_stashed_connector(self, mock_destroy): """Tests that we call volume_api.terminate_connection when we found @@ -1639,8 +1667,9 @@ def test_confirm_resize_with_migration_ref(self): self._test_confirm_resize(mig_ref_passed=True) + @mock.patch('nova.virt.hardware.numa_get_constraints') @mock.patch('nova.network.neutron.API.get_requested_resource_for_instance', - return_value=mock.sentinel.res_req) + return_value=[]) @mock.patch('nova.availability_zones.get_host_availability_zone', return_value='nova') @mock.patch('nova.objects.Quotas.check_deltas') @@ -1649,33 +1678,60 @@ @mock.patch('nova.objects.RequestSpec.get_by_instance_uuid') def _test_revert_resize( self, mock_get_reqspec, mock_elevated, mock_get_migration, - mock_check, mock_get_host_az, mock_get_requested_resources): + mock_check, mock_get_host_az, mock_get_requested_resources, + mock_get_numa, same_flavor): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_inst.info_cache.network_info = model.NetworkInfo([ model.VIF(id=uuids.port1, profile={'allocation': uuids.rp})]) - fake_inst.old_flavor = fake_inst.flavor fake_mig = objects.Migration._from_db_object( self.context, objects.Migration(), test_migration.fake_db_migration()) + fake_reqspec = objects.RequestSpec() + fake_reqspec.flavor = fake_inst.flavor + fake_numa_topology = objects.InstanceNUMATopology(cells=[ + objects.InstanceNUMACell( + id=0, cpuset=set([0]), memory=512, pagesize=None, + cpu_pinning_raw=None, cpuset_reserved=None, cpu_policy=None, + cpu_thread_policy=None)]) + + if same_flavor: + fake_inst.old_flavor = fake_inst.flavor + else: + fake_inst.old_flavor = self._create_flavor( + id=200, flavorid='new-flavor-id', name='new_flavor', + disabled=False, extra_specs={'hw:numa_nodes': '1'}) mock_elevated.return_value = self.context mock_get_migration.return_value = fake_mig + mock_get_reqspec.return_value = fake_reqspec + mock_get_numa.return_value = fake_numa_topology + + def 
_check_reqspec(): + if same_flavor: + assert_func = self.assertNotEqual + else: + assert_func = self.assertEqual + + assert_func(fake_numa_topology, fake_reqspec.numa_topology) + assert_func(fake_inst.old_flavor, fake_reqspec.flavor) def _check_state(expected_task_state=None): self.assertEqual(task_states.RESIZE_REVERTING, fake_inst.task_state) - def _check_mig(expected_task_state=None): + def _check_mig(): self.assertEqual('reverting', fake_mig.status) with test.nested( + mock.patch.object(fake_reqspec, 'save', + side_effect=_check_reqspec), mock.patch.object(fake_inst, 'save', side_effect=_check_state), mock.patch.object(fake_mig, 'save', side_effect=_check_mig), mock.patch.object(self.compute_api, '_record_action_start'), mock.patch.object(self.compute_api.compute_rpcapi, 'revert_resize') - ) as (mock_inst_save, mock_mig_save, mock_record_action, - mock_revert_resize): + ) as (mock_reqspec_save, mock_inst_save, mock_mig_save, + mock_record_action, mock_revert_resize): self.compute_api.revert_resize(self.context, fake_inst) mock_elevated.assert_called_once_with() @@ -1685,7 +1741,15 @@ mock_mig_save.assert_called_once_with() mock_get_reqspec.assert_called_once_with( self.context, fake_inst.uuid) - mock_get_reqspec.return_value.save.assert_called_once_with() + if same_flavor: + # if we are not changing flavors through the revert, we + # shouldn't attempt to rebuild the NUMA topology since it won't + # have changed + mock_get_numa.assert_not_called() + else: + # not so if the flavor *has* changed though + mock_get_numa.assert_called_once_with( + fake_inst.old_flavor, mock.ANY) mock_record_action.assert_called_once_with(self.context, fake_inst, 'revertResize') mock_revert_resize.assert_called_once_with( @@ -1694,11 +1758,17 @@ mock_get_requested_resources.assert_called_once_with( self.context, fake_inst.uuid) self.assertEqual( - mock.sentinel.res_req, + [], mock_get_reqspec.return_value.requested_resources) def test_revert_resize(self): - self._test_revert_resize() + self._test_revert_resize(same_flavor=False) + + def test_revert_resize_same_flavor(self): + """Test behavior when reverting a migration or a resize to the same + flavor. 
+ """ + self._test_revert_resize(same_flavor=True) @mock.patch('nova.network.neutron.API.get_requested_resource_for_instance') @mock.patch('nova.availability_zones.get_host_availability_zone', @@ -1741,6 +1811,7 @@ @mock.patch('nova.compute.api.API.get_instance_host_status', new=mock.Mock(return_value=fields_obj.HostStatus.UP)) + @mock.patch('nova.virt.hardware.numa_get_constraints') @mock.patch('nova.compute.api.API._allow_resize_to_same_host') @mock.patch('nova.compute.utils.is_volume_backed_instance', return_value=False) @@ -1759,6 +1830,7 @@ mock_inst_save, mock_count, mock_limit, mock_record, mock_migration, mock_validate, mock_is_vol_backed, mock_allow_resize_to_same_host, + mock_get_numa, flavor_id_passed=True, same_host=False, allow_same_host=False, project_id=None, @@ -1777,10 +1849,16 @@ # To test instance w/ different project id than context (admin) params['project_id'] = project_id fake_inst = self._create_instance_obj(params=params) + fake_numa_topology = objects.InstanceNUMATopology(cells=[ + objects.InstanceNUMACell( + id=0, cpuset=set([0]), memory=512, pagesize=None, + cpu_pinning_raw=None, cpuset_reserved=None, cpu_policy=None, + cpu_thread_policy=None)]) mock_resize = self.useFixture( fixtures.MockPatchObject(self.compute_api.compute_task_api, 'resize_instance')).mock + mock_get_numa.return_value = fake_numa_topology if host_name: mock_get_all_by_host.return_value = [objects.ComputeNode( @@ -1788,8 +1866,9 @@ current_flavor = fake_inst.get_flavor() if flavor_id_passed: - new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id', - name='new_flavor', disabled=False) + new_flavor = self._create_flavor( + id=200, flavorid='new-flavor-id', name='new_flavor', + disabled=False, extra_specs={'hw:numa_nodes': '1'}) if same_flavor: new_flavor.id = current_flavor.id mock_get_flavor.return_value = new_flavor @@ -1876,6 +1955,11 @@ fake_spec.requested_destination.allow_cross_cell_move) mock_allow_resize_to_same_host.assert_called_once() + if flavor_id_passed and not same_flavor: + mock_get_numa.assert_called_once_with(new_flavor, mock.ANY) + else: + mock_get_numa.assert_not_called() + if host_name: mock_get_all_by_host.assert_called_once_with( self.context, host_name, True) @@ -1973,7 +2057,7 @@ 'user': user_count} cur_flavor = objects.Flavor(id=1, name='foo', vcpus=1, memory_mb=512, - root_gb=10, disabled=False) + root_gb=10, disabled=False, extra_specs={}) fake_inst = self._create_instance_obj() fake_inst.flavor = cur_flavor new_flavor = objects.Flavor(id=2, name='bar', vcpus=1, memory_mb=2048, @@ -3037,7 +3121,7 @@ self.stub_out('nova.objects.BlockDeviceMappingList' '.get_by_instance_uuid', fake_bdm_list_get_by_instance_uuid) - self.stub_out('nova.image.api.API.create', fake_image_create) + self.stub_out('nova.image.glance.API.create', fake_image_create) self.stub_out('nova.volume.cinder.API.get', lambda self, context, volume_id: {'id': volume_id, 'display_description': ''}) @@ -3516,6 +3600,19 @@ lambda obj, context, image_id, **kwargs: self.fake_image) return self.fake_image['id'] + def _setup_fake_image_with_invalid_arch(self): + self.fake_image = { + 'id': 2, + 'name': 'fake_name', + 'status': 'active', + 'properties': {"hw_architecture": "arm64"}, + } + + fake_image.stub_out_image_service(self) + self.stub_out('nova.tests.unit.image.fake._FakeImageService.show', + lambda obj, context, image_id, **kwargs: self.fake_image) + return self.fake_image['id'] + @mock.patch('nova.compute.api.API.get_instance_host_status', new=mock.Mock(return_value=fields_obj.HostStatus.UP)) 
def test_resize_with_disabled_auto_disk_config_fails(self): @@ -3546,6 +3643,21 @@ "new password", auto_disk_config=True) + def test_rebuild_with_invalid_image_arch(self): + instance = fake_instance.fake_instance_obj( + self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell', + launched_at=timeutils.utcnow(), + system_metadata={}, image_ref='foo', + expected_attrs=['system_metadata']) + image_id = self._setup_fake_image_with_invalid_arch() + self.assertRaises(exception.InvalidArchitectureName, + self.compute_api.rebuild, + self.context, + instance, + image_id, + "new password") + self.assertIsNone(instance.task_state) + @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid') @mock.patch.object(objects.Instance, 'save') @mock.patch.object(objects.Instance, 'get_flavor') @@ -4650,6 +4762,7 @@ def test_provision_instances_with_keypair(self, mock_im, mock_instance, mock_br, mock_rs): fake_keypair = objects.KeyPair(name='test') + inst_type = self._create_flavor() @mock.patch.object(self.compute_api, '_get_volumes_for_bdms') @mock.patch.object(self.compute_api, @@ -4664,7 +4777,7 @@ def do_test(mock_bdm_v, mock_cdb, mock_sg, mock_cniq, mock_get_vols): mock_cniq.return_value = 1 self.compute_api._provision_instances(self.context, - mock.sentinel.flavor, + inst_type, 1, 1, mock.MagicMock(), {}, None, None, None, None, {}, None, @@ -4675,7 +4788,7 @@ 'test', mock_instance.return_value.keypairs.objects[0].name) self.compute_api._provision_instances(self.context, - mock.sentinel.flavor, + inst_type, 1, 1, mock.MagicMock(), {}, None, None, None, None, {}, None, @@ -4687,6 +4800,72 @@ do_test() + @mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups') + @mock.patch('nova.objects.RequestSpec.from_components') + @mock.patch('nova.objects.BuildRequest') + @mock.patch('nova.objects.Instance') + @mock.patch('nova.objects.InstanceMapping.create') + def _test_provision_instances_with_accels(self, + instance_type, dp_request_groups, prev_request_groups, + mock_im, mock_instance, mock_br, mock_rs, mock_get_dp): + + @mock.patch.object(self.compute_api, '_get_volumes_for_bdms') + @mock.patch.object(self.compute_api, + '_create_reqspec_buildreq_instmapping', + new=mock.MagicMock()) + @mock.patch('nova.compute.utils.check_num_instances_quota') + @mock.patch('nova.network.security_group_api') + @mock.patch.object(self.compute_api, + 'create_db_entry_for_new_instance') + @mock.patch.object(self.compute_api, + '_bdm_validate_set_size_and_instance') + def do_test(mock_bdm_v, mock_cdb, mock_sg, mock_cniq, mock_get_vols): + mock_cniq.return_value = 1 + self.compute_api._provision_instances(self.context, + instance_type, + 1, 1, mock.MagicMock(), + {}, None, + None, None, None, {}, None, + None, + objects.TagList(), None, + False) + + mock_get_dp.return_value = dp_request_groups + fake_rs = fake_request_spec.fake_spec_obj() + fake_rs.requested_resources = prev_request_groups + mock_rs.return_value = fake_rs + do_test() + return mock_get_dp, fake_rs + + def test_provision_instances_with_accels_ok(self): + # If extra_specs has accel spec, device profile's request_groups + # should be obtained, and added to reqspec's requested_resources. 
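A minimal standalone sketch of the merge that comment describes; the helper name and its arguments are illustrative stand-ins, with only the 'accel:device_profile' key and the device-profile lookup taken from the tests themselves:

    def merge_device_profile_groups(extra_specs, requested_resources,
                                    get_dp_request_groups):
        # No device profile in the flavor: leave the request spec alone
        # (the *_no_dp test below).
        dp_name = extra_specs.get('accel:device_profile')
        if not dp_name:
            return requested_resources
        # Otherwise append the profile's request groups after any
        # pre-existing groups (this test).
        return requested_resources + get_dp_request_groups(dp_name)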
+ dp_name = 'mydp' + extra_specs = {'extra_specs': {'accel:device_profile': dp_name}} + instance_type = self._create_flavor(**extra_specs) + + prev_groups = [objects.RequestGroup(requester_id='prev0'), + objects.RequestGroup(requester_id='prev1')] + dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'), + objects.RequestGroup(requester_id='deviceprofile3')] + + mock_get_dp, fake_rs = self._test_provision_instances_with_accels( + instance_type, dp_groups, prev_groups) + mock_get_dp.assert_called_once_with(self.context, dp_name) + self.assertEqual(prev_groups + dp_groups, fake_rs.requested_resources) + + def test_provision_instances_with_accels_no_dp(self): + # If extra specs has no accel spec, no attempt should be made to + # get device profile's request_groups, and reqspec.requested_resources + # should be left unchanged. + instance_type = self._create_flavor() + prev_groups = [objects.RequestGroup(requester_id='prev0'), + objects.RequestGroup(requester_id='prev1')] + mock_get_dp, fake_rs = self._test_provision_instances_with_accels( + instance_type, [], prev_groups) + mock_get_dp.assert_not_called() + self.assertEqual(prev_groups, fake_rs.requested_resources) + def test_provision_instances_creates_build_request(self): @mock.patch.object(self.compute_api, '_get_volumes_for_bdms') @mock.patch.object(self.compute_api, @@ -4781,10 +4960,9 @@ @mock.patch.object(objects.Instance, 'create', new=mock.MagicMock()) @mock.patch.object(self.compute_api, '_validate_bdm', new=mock.MagicMock()) - @mock.patch.object(objects.RequestSpec, 'from_components', - mock.MagicMock()) + @mock.patch.object(objects.RequestSpec, 'from_components') @mock.patch('nova.objects.InstanceMapping') - def do_test(mock_inst_mapping, mock_check_num_inst_quota, + def do_test(mock_inst_mapping, mock_rs, mock_check_num_inst_quota, mock_get_vols): inst_mapping_mock = mock.MagicMock() @@ -4890,7 +5068,7 @@ mock_inst_mapping.side_effect = inst_map_mocks ctxt = context.RequestContext('fake-user', 'fake-project') - flavor = self._create_flavor() + flavor = self._create_flavor(extra_specs={}) boot_meta = { 'id': 'fake-image-id', 'properties': {'mappings': []}, @@ -4970,7 +5148,8 @@ def test(mock_objects, mock_secgroup, mock_cniq): ctxt = context.RequestContext('fake-user', 'fake-project') mock_cniq.return_value = 1 - self.compute_api._provision_instances(ctxt, None, None, None, + inst_type = self._create_flavor() + self.compute_api._provision_instances(ctxt, inst_type, None, None, mock.MagicMock(), None, None, [], None, None, None, None, None, objects.TagList(), @@ -6549,7 +6728,7 @@ access_ip_v4 = access_ip_v6 = config_drive = \ auto_disk_config = reservation_id = None # This tests that 'default' is unchanged, but 'fake-security-group' - # will be translated to a uuid for Neutron. + # will be translated to a UUID for Neutron. 
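Sketched in isolation, the translation that comment describes looks roughly like the following; the loop is illustrative, while validate_name is the real nova.network.security_group_api function being patched (its signature here is assumed):

    def translate_security_groups(context, requested_secgroups,
                                  validate_name):
        translated = []
        for name in requested_secgroups:
            # 'default' passes through untouched; any other name is
            # resolved to its Neutron security group UUID.
            if name == 'default':
                translated.append(name)
            else:
                translated.append(validate_name(context, name))
        return translated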
requested_secgroups = ['default', 'fake-security-group'] # This will short-circuit _check_requested_networks requested_networks = objects.NetworkRequestList(objects=[ @@ -6557,8 +6736,8 @@ max_count = 1 supports_port_resource_request = False with mock.patch( - 'nova.network.security_group_api.get', - return_value={'id': uuids.secgroup_uuid}) as scget: + 'nova.network.security_group_api.validate_name', + return_value=uuids.secgroup_uuid) as scget: base_options, max_network_count, key_pair, security_groups, \ network_metadata = ( self.compute_api._validate_and_build_base_options( @@ -7035,6 +7214,41 @@ mock_get_min_ver.assert_called_once_with( self.context, ['nova-compute']) + def _test_block_accelerators(self, instance, args_info): + @compute_api.block_accelerators + def myfunc(self, context, instance, *args, **kwargs): + args_info['args'] = (context, instance, *args) + args_info['kwargs'] = dict(**kwargs) + + args = ('arg1', 'arg2') + kwargs = {'arg3': 'dummy3', 'arg4': 'dummy4'} + + myfunc(mock.ANY, self.context, instance, *args, **kwargs) + + expected_args = (self.context, instance, *args) + return expected_args, kwargs + + def test_block_accelerators_no_device_profile(self): + instance = self._create_instance_obj() + args_info = {} + + expected_args, kwargs = self._test_block_accelerators( + instance, args_info) + self.assertEqual(expected_args, args_info['args']) + self.assertEqual(kwargs, args_info['kwargs']) + + def test_block_accelerators_with_device_profile(self): + extra_specs = {'accel:device_profile': 'mydp'} + flavor = self._create_flavor(extra_specs=extra_specs) + instance = self._create_instance_obj(flavor=flavor) + args_info = {} + + self.assertRaisesRegex(exception.ForbiddenWithAccelerators, + 'Forbidden with instances that have accelerators.', + self._test_block_accelerators, instance, args_info) + # myfunc was not called + self.assertEqual({}, args_info) + class DiffDictTestCase(test.NoDBTestCase): """Unit tests for _diff_dict().""" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_mgr.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_mgr.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_mgr.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_mgr.py 2020-04-10 17:57:57.000000000 +0000 @@ -264,6 +264,37 @@ phase='end', bdms=mock_bdms)]) + @mock.patch.object(objects.Instance, 'destroy') + @mock.patch.object(objects.Instance, 'save') + @mock.patch.object(manager.ComputeManager, '_complete_deletion') + @mock.patch.object(manager.ComputeManager, '_cleanup_volumes') + @mock.patch.object(manager.ComputeManager, '_shutdown_instance') + @mock.patch.object(compute_utils, 'notify_about_instance_action') + @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') + def _test_delete_instance_with_accels(self, instance, mock_inst_usage, + mock_inst_action, mock_shutdown, mock_cleanup_vols, + mock_complete_del, mock_inst_save, mock_inst_destroy): + self.compute._delete_instance(self.context, instance, bdms=None) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'delete_arqs_for_instance') + def test_delete_instance_with_accels_ok(self, mock_del_arqs): + # _delete_instance() calls Cyborg to delete ARQs, if + # the extra specs has a device profile name. 
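A minimal sketch of that guard, assuming a simple stand-in for the Cyborg client; the real cleanup helper in this series is compute_utils.delete_arqs_if_needed, which these two tests observe through the mocked client call:

    def delete_arqs_if_needed_sketch(instance, cyborg_client):
        # Only flavors carrying a device profile warrant a Cyborg call.
        dp_name = instance.flavor.extra_specs.get('accel:device_profile')
        if dp_name is None:
            return
        cyborg_client.delete_arqs_for_instance(instance.uuid)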
+ instance = fake_instance.fake_instance_obj(self.context) + instance.flavor.extra_specs = {'accel:device_profile': 'mydp'} + self._test_delete_instance_with_accels(instance) + mock_del_arqs.assert_called_once_with(instance.uuid) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'delete_arqs_for_instance') + def test_delete_instance_with_accels_no_dp(self, mock_del_arqs): + # _delete_instance() does not call Cyborg to delete ARQs, if + # the extra specs has no device profile name. + instance = fake_instance.fake_instance_obj(self.context) + self._test_delete_instance_with_accels(instance) + mock_del_arqs.assert_not_called() + def _make_compute_node(self, hyp_hostname, cn_id): cn = mock.Mock(spec_set=['hypervisor_hostname', 'id', 'uuid', 'destroy']) @@ -3027,10 +3058,10 @@ post_claim_md, self.compute._live_migration_claim( self.context, instance, md, migration, - mock.sentinel.limits)) + mock.sentinel.limits, None)) mock_lm_claim.assert_called_once_with( self.context, instance, 'fake-dest-node', migration, - mock.sentinel.limits) + mock.sentinel.limits, None) mock_post_claim_migrate_data.assert_called_once_with( self.context, instance, md, mock_claim) @@ -3055,10 +3086,10 @@ exception.MigrationPreCheckError, self.compute._live_migration_claim, self.context, instance, objects.LibvirtLiveMigrateData(), - migration, mock.sentinel.limits) + migration, mock.sentinel.limits, None) mock_lm_claim.assert_called_once_with( self.context, instance, 'fake-dest-node', migration, - mock.sentinel.limits) + mock.sentinel.limits, None) mock_get_nodename.assert_called_once_with(instance) mock_post_claim_migrate_data.assert_not_called() @@ -3199,7 +3230,7 @@ 'Destination was ready for NUMA live migration')) else: mock_lm_claim.assert_called_once_with( - self.context, instance, mig_data, migration, limits) + self.context, instance, mig_data, migration, limits, None) self.assertEqual(post_claim_md, result) mock_check_clean.assert_called_once_with(self.context, dest_check_data) @@ -3670,7 +3701,8 @@ self.context, inst_obj, uuids.volume_id) bdm_save.assert_called_once_with() extend_volume.assert_called_once_with( - connection_info, inst_obj, new_size * pow(1024, 3)) + self.context, connection_info, inst_obj, + new_size * pow(1024, 3)) do_test() @@ -4269,6 +4301,11 @@ return_value=fake_nw_info), mock.patch.object(self.compute, '_get_rescue_image', return_value=rescue_image_meta), + mock.patch.object(objects.BlockDeviceMappingList, + 'get_by_instance_uuid', + return_value=mock.sentinel.bdms), + mock.patch.object(self.compute, '_get_instance_block_device_info', + return_value=mock.sentinel.block_device_info), mock.patch.object(self.compute, '_notify_about_instance_usage'), mock.patch.object(self.compute, '_power_off_instance'), mock.patch.object(self.compute.driver, 'rescue'), @@ -4278,8 +4315,9 @@ mock.patch.object(instance, 'save') ) as ( elevated_context, get_nw_info, get_rescue_image, - notify_instance_usage, power_off_instance, driver_rescue, - notify_usage_exists, get_power_state, instance_save + get_bdm_list, get_block_info, notify_instance_usage, + power_off_instance, driver_rescue, notify_usage_exists, + get_power_state, instance_save ): self.compute.rescue_instance( self.context, instance, rescue_password='verybadpass', @@ -4295,6 +4333,9 @@ get_nw_info.assert_called_once_with(self.context, instance) get_rescue_image.assert_called_once_with( self.context, instance, None) + get_bdm_list.assert_called_once_with(self.context, instance.uuid) + get_block_info.assert_called_once_with(self.context, 
instance, + bdms=mock.sentinel.bdms) extra_usage_info = {'rescue_image_name': uuids.image_name} notify_calls = [ @@ -4312,7 +4353,7 @@ driver_rescue.assert_called_once_with( self.context, instance, fake_nw_info, rescue_image_meta, - 'verybadpass') + 'verybadpass', mock.sentinel.block_device_info) notify_usage_exists.assert_called_once_with(self.compute.notifier, self.context, instance, 'fake-mini', current_period=True) @@ -5926,6 +5967,7 @@ self.requested_networks = [] self.security_groups = [] self.block_device_mapping = [] + self.accel_uuids = None self.filter_properties = {'retry': {'num_attempts': 1, 'hosts': [[self.compute.host, 'fake-node']]}} @@ -5992,6 +6034,308 @@ mock_hooks.setdefault().run_post.assert_called_once_with( 'build_instance', result, mock.ANY, mock.ANY, f=None) + @mock.patch.object(objects.Instance, 'save') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_default_block_device_names') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_prep_block_device') + @mock.patch.object(virt_driver.ComputeDriver, + 'prepare_for_spawn') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_build_networks_for_instance') + @mock.patch.object(virt_driver.ComputeDriver, + 'prepare_networks_before_block_device_mapping') + def _test_accel_build_resources(self, accel_uuids, + mock_prep_net, mock_build_net, mock_prep_spawn, + mock_prep_bd, mock_bdnames, mock_save): + + args = (self.context, self.instance, self.requested_networks, + self.security_groups, self.image, self.block_device_mapping, + self.resource_provider_mapping, accel_uuids) + + resources = [] + with self.compute._build_resources(*args) as resources: + pass + + return resources + + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_bound_arq_resources') + def test_accel_build_resources_no_device_profile(self, mock_get_arqs): + # If dp_name is None, accel path is a no-op. + self.instance.flavor.extra_specs = {} + self._test_accel_build_resources(None) + mock_get_arqs.assert_not_called() + + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_bound_arq_resources') + def test_accel_build_resources(self, mock_get_arqs): + # Happy path for accels in build_resources + dp_name = "mydp" + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_list = fixtures.CyborgFixture.bound_arq_list + mock_get_arqs.return_value = arq_list + arq_uuids = [arq['uuid'] for arq in arq_list] + + resources = self._test_accel_build_resources(arq_uuids) + + mock_get_arqs.assert_called_once_with(self.context, + dp_name, self.instance, arq_uuids) + self.assertEqual(sorted(resources['accel_info']), sorted(arq_list)) + + @mock.patch.object(virt_driver.ComputeDriver, + 'clean_networks_preparation') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_bound_arq_resources') + def test_accel_build_resources_exception(self, mock_get_arqs, + mock_clean_net): + dp_name = "mydp" + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + mock_get_arqs.side_effect = ( + exception.AcceleratorRequestOpFailed(op='get', msg='')) + + self.assertRaises(exception.NovaException, + self._test_accel_build_resources, None) + mock_clean_net.assert_called_once() + + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'exit_wait_early') + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'wait_for_instance_event') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' 
+ 'get_arqs_for_instance') + def test_arq_bind_wait_exit_early(self, mock_get_arqs, + mock_wait_inst_ev, mock_exit_wait_early): + # Bound ARQs available on first query, quit early. + dp_name = fixtures.CyborgFixture.dp_name + arq_list = fixtures.CyborgFixture.bound_arq_list + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_events = [('accelerator-request-bound', arq['uuid']) + for arq in arq_list] + arq_uuids = [arq['uuid'] for arq in arq_list] + + mock_get_arqs.return_value = arq_list + + ret_arqs = self.compute._get_bound_arq_resources( + self.context, dp_name, self.instance, arq_uuids) + + mock_wait_inst_ev.assert_called_once_with( + self.instance, arq_events, deadline=mock.ANY) + mock_exit_wait_early.assert_called_once_with(arq_events) + + mock_get_arqs.assert_has_calls([ + mock.call(self.instance.uuid, only_resolved=True)]) + + self.assertEqual(sorted(ret_arqs), sorted(arq_list)) + + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'exit_wait_early') + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'wait_for_instance_event') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'get_arqs_for_instance') + def test_arq_bind_wait_exit_early_no_arq_uuids(self, mock_get_arqs, + mock_wait_inst_ev, mock_exit_wait_early): + # If no ARQ UUIDs are passed in, call Cyborg to get the ARQs. + # Then, if bound ARQs available on first query, quit early. + dp_name = fixtures.CyborgFixture.dp_name + arq_list = fixtures.CyborgFixture.bound_arq_list + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_events = [('accelerator-request-bound', arq['uuid']) + for arq in arq_list] + + mock_get_arqs.side_effect = [arq_list, arq_list] + + ret_arqs = self.compute._get_bound_arq_resources( + self.context, dp_name, self.instance, arq_uuids=None) + + mock_wait_inst_ev.assert_called_once_with( + self.instance, arq_events, deadline=mock.ANY) + mock_exit_wait_early.assert_called_once_with(arq_events) + + mock_get_arqs.assert_has_calls([ + mock.call(self.instance.uuid), + mock.call(self.instance.uuid, only_resolved=True)]) + + self.assertEqual(sorted(ret_arqs), sorted(arq_list)) + + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'exit_wait_early') + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'wait_for_instance_event') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'get_arqs_for_instance') + def test_arq_bind_wait(self, mock_get_arqs, + mock_wait_inst_ev, mock_exit_wait_early): + # If binding is in progress, must wait. 
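The wait-or-exit-early flow the next few tests exercise, as a sketch; only get_arqs_for_instance and its only_resolved flag come from the mocked Cyborg client, the rest are stand-ins for the virtapi event machinery:

    def wait_for_bound_arqs(client, instance_uuid, pending_events,
                            exit_wait_early, wait):
        # First look: if everything is already bound, cancel the pending
        # 'accelerator-request-bound' events and return early.
        resolved = client.get_arqs_for_instance(instance_uuid,
                                                only_resolved=True)
        if len(resolved) == len(pending_events):
            exit_wait_early(pending_events)
            return resolved
        # Otherwise block until the events arrive, then fetch the full,
        # now-bound, list.
        wait()
        return client.get_arqs_for_instance(instance_uuid)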
+ dp_name = fixtures.CyborgFixture.dp_name + arq_list = fixtures.CyborgFixture.bound_arq_list + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_events = [('accelerator-request-bound', arq['uuid']) + for arq in arq_list] + arq_uuids = [arq['uuid'] for arq in arq_list] + # get_arqs_for_instance gets called 2 times, returning the + # resolved ARQs first, and the full list finally + mock_get_arqs.side_effect = [[], arq_list] + + ret_arqs = self.compute._get_bound_arq_resources( + self.context, dp_name, self.instance, arq_uuids) + + mock_wait_inst_ev.assert_called_once_with( + self.instance, arq_events, deadline=mock.ANY) + mock_exit_wait_early.assert_not_called() + self.assertEqual(sorted(ret_arqs), sorted(arq_list)) + mock_get_arqs.assert_has_calls([ + mock.call(self.instance.uuid, only_resolved=True), + mock.call(self.instance.uuid)]) + + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'exit_wait_early') + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'wait_for_instance_event') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'get_arqs_for_instance') + def test_arq_bind_timeout(self, mock_get_arqs, + mock_wait_inst_ev, mock_exit_wait_early): + # If binding fails even after wait, exception is thrown + dp_name = fixtures.CyborgFixture.dp_name + arq_list = fixtures.CyborgFixture.bound_arq_list + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_events = [('accelerator-request-bound', arq['uuid']) + for arq in arq_list] + arq_uuids = [arq['uuid'] for arq in arq_list] + + mock_get_arqs.return_value = arq_list + mock_wait_inst_ev.side_effect = eventlet_timeout.Timeout + + self.assertRaises(eventlet_timeout.Timeout, + self.compute._get_bound_arq_resources, + self.context, dp_name, self.instance, arq_uuids) + + mock_wait_inst_ev.assert_called_once_with( + self.instance, arq_events, deadline=mock.ANY) + mock_exit_wait_early.assert_not_called() + mock_get_arqs.assert_not_called() + + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'exit_wait_early') + @mock.patch.object(nova.compute.manager.ComputeVirtAPI, + 'wait_for_instance_event') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'get_arqs_for_instance') + def test_arq_bind_exception(self, mock_get_arqs, + mock_wait_inst_ev, mock_exit_wait_early): + # If the code inside the context manager of _get_bound_arq_resources + # raises an exception, that exception must be handled. + dp_name = fixtures.CyborgFixture.dp_name + arq_list = fixtures.CyborgFixture.bound_arq_list + self.instance.flavor.extra_specs = {"accel:device_profile": dp_name} + arq_events = [('accelerator-request-bound', arq['uuid']) + for arq in arq_list] + arq_uuids = [arq['uuid'] for arq in arq_list] + + mock_get_arqs.side_effect = ( + exception.AcceleratorRequestOpFailed(op='', msg='')) + + self.assertRaises(exception.AcceleratorRequestOpFailed, + self.compute._get_bound_arq_resources, + self.context, dp_name, self.instance, arq_uuids) + + mock_wait_inst_ev.assert_called_once_with( + self.instance, arq_events, deadline=mock.ANY) + mock_exit_wait_early.assert_not_called() + mock_get_arqs.assert_called_once_with( + self.instance.uuid, only_resolved=True) + + @mock.patch.object(fake_driver.FakeDriver, 'spawn') + @mock.patch('nova.objects.Instance.save') + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
+ 'get_allocations_for_consumer') + @mock.patch.object(manager.ComputeManager, '_get_request_group_mapping') + @mock.patch.object(manager.ComputeManager, '_check_trusted_certs') + @mock.patch.object(manager.ComputeManager, '_check_device_tagging') + @mock.patch.object(compute_utils, 'notify_about_instance_create') + @mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage') + def test_spawn_called_with_accel_info(self, mock_ins_usage, + mock_ins_create, mock_dev_tag, mock_certs, mock_req_group_map, + mock_get_allocations, mock_ins_save, mock_spawn): + + accel_info = [{'k1': 'v1', 'k2': 'v2'}] + @contextlib.contextmanager + def fake_build_resources(compute_mgr, *args, **kwargs): + yield { + 'block_device_info': None, + 'network_info': None, + 'accel_info': accel_info, + } + + self.stub_out('nova.compute.manager.ComputeManager._build_resources', + fake_build_resources) + mock_req_group_map.return_value = None + mock_get_allocations.return_value = mock.sentinel.allocation + + self.compute._build_and_run_instance(self.context, self.instance, + self.image, injected_files=self.injected_files, + admin_password=self.admin_pass, + requested_networks=self.requested_networks, + security_groups=self.security_groups, + block_device_mapping=self.block_device_mapping, + node=self.node, limits=self.limits, + filter_properties=self.filter_properties) + + mock_spawn.assert_called_once_with(self.context, self.instance, + mock.ANY, self.injected_files, self.admin_pass, mock.ANY, + network_info=None, block_device_info=None, accel_info=accel_info) + + @mock.patch.object(objects.Instance, 'save') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_build_networks_for_instance') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_default_block_device_names') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_prep_block_device') + @mock.patch.object(virt_driver.ComputeDriver, + 'prepare_for_spawn') + @mock.patch.object(virt_driver.ComputeDriver, + 'prepare_networks_before_block_device_mapping') + @mock.patch.object(virt_driver.ComputeDriver, + 'clean_networks_preparation') + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_bound_arq_resources') + def _test_delete_arqs_exception(self, mock_get_arqs, + mock_clean_net, mock_prep_net, mock_prep_spawn, mock_prep_bd, + mock_bdnames, mock_build_net, mock_save): + args = (self.context, self.instance, self.requested_networks, + self.security_groups, self.image, self.block_device_mapping, + self.resource_provider_mapping, self.accel_uuids) + mock_get_arqs.side_effect = ( + exception.AcceleratorRequestOpFailed(op='get', msg='')) + + with self.compute._build_resources(*args): + raise test.TestingException() + + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'delete_arqs_for_instance') + def test_delete_arqs_if_build_res_exception(self, mock_del_arqs): + # Cyborg is called to delete ARQs if an exception is thrown inside + # the context of _build_resources(). + self.instance.flavor.extra_specs = {'accel:device_profile': 'mydp'} + self.assertRaisesRegex(exception.BuildAbortException, + 'Failure getting accelerator requests', + self._test_delete_arqs_exception) + mock_del_arqs.assert_called_once_with(self.instance.uuid) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.'
+ 'delete_arqs_for_instance') + def test_delete_arqs_if_build_res_exception_no_dp(self, mock_del_arqs): + # Cyborg is not called to delete ARQs, even if an exception is + # thrown inside the context of _build_resources(), if there is no + # device profile name in the extra specs. + self.instance.flavor.extra_specs = {} + self.assertRaises(exception.BuildAbortException, + self._test_delete_arqs_exception) + mock_del_arqs.assert_not_called() + def test_build_and_run_instance_called_with_proper_args(self): self._test_build_and_run_instance() @@ -6028,7 +6372,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) # This test when sending an icehouse compatible rpc call to juno compute # node, NetworkRequest object can load from three items tuple. @@ -6094,7 +6438,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_clean_net.assert_called_once_with(self.context, self.instance, self.requested_networks) mock_clean_vol.assert_called_once_with(self.context, @@ -6122,7 +6466,11 @@ mock_build_run.side_effect = exception.RescheduledException(reason='', instance_uuid=self.instance.uuid) - self.compute.build_and_run_instance(self.context, self.instance, + with mock.patch.object( + self.compute.network_api, 'get_instance_nw_info', + ): + self.compute.build_and_run_instance( + self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, @@ -6141,7 +6489,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_nil.assert_called_once_with(self.instance) mock_build.assert_called_once_with(self.context, [self.instance], self.image, self.filter_properties, @@ -6167,7 +6515,8 @@ self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties) + self.limits, self.filter_properties, + self.accel_uuids) mock_save.assert_has_calls([ mock.call(), mock.call(), @@ -6186,7 +6535,7 @@ mock_spawn.assert_called_once_with(self.context, self.instance, test.MatchType(objects.ImageMeta), self.injected_files, self.admin_pass, self.allocations, network_info=self.network_info, - block_device_info=self.block_device_info) + block_device_info=self.block_device_info, accel_info=[]) @mock.patch.object(manager.ComputeManager, '_build_and_run_instance') @mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances') @@ -6206,22 +6555,26 @@ mock_build_and_run.side_effect = exception.RescheduledException( reason='', instance_uuid=self.instance.uuid) - self.compute._do_build_and_run_instance(self.context, instance, - self.image, request_spec={}, - filter_properties=self.filter_properties, - injected_files=self.injected_files, - admin_password=self.admin_pass, - requested_networks=self.requested_networks, - security_groups=self.security_groups, - block_device_mapping=self.block_device_mapping, node=self.node, - limits=self.limits, host_list=fake_host_list) + with mock.patch.object( + self.compute.network_api, 'get_instance_nw_info', + ): 
+ self.compute._do_build_and_run_instance( + self.context, instance, + self.image, request_spec={}, + filter_properties=self.filter_properties, + injected_files=self.injected_files, + admin_password=self.admin_pass, + requested_networks=self.requested_networks, + security_groups=self.security_groups, + block_device_mapping=self.block_device_mapping, node=self.node, + limits=self.limits, host_list=fake_host_list) mock_build_and_run.assert_called_once_with(self.context, instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_build_ins.assert_called_once_with(self.context, [instance], self.image, self.filter_properties, self.admin_pass, self.injected_files, self.requested_networks, @@ -6265,7 +6618,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_cleanup_network.assert_called_once_with( self.context, instance, self.requested_networks) mock_build_ins.assert_called_once_with(self.context, @@ -6319,7 +6672,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_cleanup_network.assert_called_once_with( self.context, instance, self.requested_networks) mock_build_ins.assert_called_once_with(self.context, @@ -6364,7 +6717,8 @@ mock_build_run.assert_called_once_with(self.context, self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, - self.block_device_mapping, self.node, self.limits, {}, {}) + self.block_device_mapping, self.node, self.limits, {}, {}, + self.accel_uuids) mock_clean_net.assert_called_once_with(self.context, self.instance, self.requested_networks) mock_clean_vol.assert_called_once_with(self.context, @@ -6413,7 +6767,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_nil.assert_called_once_with(self.instance) mock_build.assert_called_once_with(self.context, [self.instance], self.image, self.filter_properties, @@ -6456,7 +6810,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_clean.assert_called_once_with(self.context, self.instance, self.requested_networks) mock_nil.assert_called_once_with(self.instance) @@ -6516,7 +6870,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties, {}) + self.filter_properties, {}, self.accel_uuids) mock_clean_net.assert_called_once_with(self.context, self.instance, self.requested_networks) @@ -6652,7 +7006,8 @@ self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties) + self.limits, self.filter_properties, + self.accel_uuids) mock_save.assert_has_calls([ mock.call(), @@ 
-6673,7 +7028,7 @@ self.context, self.instance, test.MatchType(objects.ImageMeta), self.injected_files, self.admin_pass, self.allocations, network_info=self.network_info, - block_device_info=self.block_device_info) + block_device_info=self.block_device_info, accel_info=[]) def test_instance_not_found(self): got_exc = exception.InstanceNotFound(instance_id=1) @@ -6683,11 +7038,6 @@ got_exc = test.TestingException() self._test_instance_exception(got_exc, exception.RescheduledException) - def test_spawn_network_alloc_failure(self): - # Because network allocation is asynchronous, failures may not present - # themselves until the virt spawn method is called. - self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks()) - def test_spawn_network_auto_alloc_failure(self): # This isn't really a driver.spawn failure, it's a failure from # network_api.allocate_for_instance, but testing it here is convenient. @@ -6782,7 +7132,7 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties) + self.limits, self.filter_properties, self.accel_uuids) _validate_instance_group_policy.assert_called_once_with( self.context, self.instance, {}) @@ -6813,7 +7163,7 @@ test.MatchType(objects.ImageMeta), self.injected_files, self.admin_pass, self.allocations, network_info=self.network_info, - block_device_info=self.block_device_info)]) + block_device_info=self.block_device_info, accel_info=[])]) _shutdown_instance.assert_called_once_with(self.context, self.instance, self.block_device_mapping, @@ -6836,7 +7186,11 @@ mock_claim.side_effect = exc self._do_build_instance_update(mock_save, reschedule_update=True) - self.compute.build_and_run_instance(self.context, self.instance, + with mock.patch.object( + self.compute.network_api, 'get_instance_nw_info', + ): + self.compute.build_and_run_instance( + self.context, self.instance, self.image, request_spec={}, filter_properties=self.filter_properties, injected_files=self.injected_files, @@ -6877,7 +7231,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, - self.node, self.limits, self.filter_properties) + self.node, self.limits, self.filter_properties, + self.accel_uuids) mock_save.assert_called_once_with() mock_notify.assert_has_calls([ @@ -6888,7 +7243,7 @@ mock_build.assert_called_once_with(self.context, self.instance, self.requested_networks, self.security_groups, test.MatchType(objects.ImageMeta), self.block_device_mapping, - self.resource_provider_mapping) + self.resource_provider_mapping, self.accel_uuids) @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup') @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn') @@ -6910,7 +7265,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) @@ -7029,7 +7384,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): pass except Exception as e: self.assertIsInstance(e, @@ -7057,7 +7412,8 @@ try: with 
self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, - self.block_device_mapping, self.resource_provider_mapping): + self.block_device_mapping, self.resource_provider_mapping, + self.accel_uuids): pass except Exception as e: self.assertIsInstance(e, exception.BuildAbortException) @@ -7088,7 +7444,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): pass except Exception as e: self.assertIsInstance(e, exc) @@ -7119,7 +7475,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): fake_spawn() except Exception as e: self.assertEqual(test_exception, e) @@ -7153,7 +7509,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): raise test.TestingException() except Exception as e: self.assertEqual(expected_exc, e) @@ -7184,7 +7540,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): raise test.TestingException() except exception.BuildAbortException: pass @@ -7212,7 +7568,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): raise test.TestingException() except exception.BuildAbortException: pass @@ -7244,7 +7600,7 @@ with self.compute._build_resources(self.context, self.instance, self.requested_networks, self.security_groups, self.image, self.block_device_mapping, - self.resource_provider_mapping): + self.resource_provider_mapping, self.accel_uuids): fake_spawn() self.assertTrue(mock_log.warning.called) @@ -7321,18 +7677,48 @@ mock_setup.assert_called_once_with(self.context, instance, instance.host) - def test_cleanup_allocated_networks_instance_not_found(self): + def test__cleanup_allocated_networks__instance_not_found(self): with test.nested( - mock.patch.object(self.compute, '_deallocate_network'), - mock.patch.object(self.instance, 'save', - side_effect=exception.InstanceNotFound(instance_id='')) - ) as (_deallocate_network, save): + mock.patch.object(self.compute.network_api, + 'get_instance_nw_info'), + mock.patch.object(self.compute.driver, 'unplug_vifs'), + mock.patch.object(self.compute, '_deallocate_network'), + mock.patch.object(self.instance, 'save', + side_effect=exception.InstanceNotFound(instance_id='')) + ) as (mock_nwinfo, mock_unplug, mock_deallocate_network, mock_save): # Testing that this doesn't raise an exception - self.compute._cleanup_allocated_networks(self.context, - self.instance, self.requested_networks) - save.assert_called_once_with() - self.assertEqual('False', - self.instance.system_metadata['network_allocated']) + self.compute._cleanup_allocated_networks( + self.context, self.instance, self.requested_networks) + + mock_nwinfo.assert_called_once_with( + self.context, 
self.instance) + mock_unplug.assert_called_once_with( + self.instance, mock_nwinfo.return_value) + mock_deallocate_network.assert_called_once_with( + self.context, self.instance, self.requested_networks) + mock_save.assert_called_once_with() + self.assertEqual( + 'False', self.instance.system_metadata['network_allocated']) + + @mock.patch('nova.compute.manager.LOG') + def test__cleanup_allocated_networks__error(self, mock_log): + with test.nested( + mock.patch.object( + self.compute.network_api, 'get_instance_nw_info', + side_effect=Exception('some neutron error') + ), + mock.patch.object(self.compute.driver, 'unplug_vifs'), + ) as (mock_nwinfo, mock_unplug): + self.compute._cleanup_allocated_networks( + self.context, self.instance, self.requested_networks) + + mock_nwinfo.assert_called_once_with(self.context, self.instance) + self.assertEqual(1, mock_log.warning.call_count) + self.assertIn( + 'Failed to update network info cache', + mock_log.warning.call_args[0][0], + ) + mock_unplug.assert_not_called() def test_deallocate_network_none_requested(self): # Tests that we don't deallocate networks if 'none' were @@ -7426,7 +7812,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties) + self.filter_properties, self.accel_uuids) expected_call = mock.call(self.context, self.instance, 'create.end', extra_usage_info={'message': u'Success'}, network_info=[]) @@ -7458,7 +7844,7 @@ self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, self.limits, - self.filter_properties) + self.filter_properties, self.accel_uuids) updates = {'vm_state': u'active', 'access_ip_v6': netaddr.IPAddress('2001:db8:0:1:dcad:beff:feef:1'), @@ -7498,7 +7884,7 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties) + self.limits, self.filter_properties, self.accel_uuids) expected_call = mock.call(self.context, self.instance, 'create.error', fault=exc) create_error_call = mock_notify.call_args_list[ @@ -7522,7 +7908,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties, request_spec) + self.limits, self.filter_properties, request_spec, + self.accel_uuids) mock_networks.assert_called_once_with( self.context, self.instance, self.requested_networks, @@ -7568,7 +7955,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties, request_spec) + self.limits, self.filter_properties, request_spec, + self.accel_uuids) mock_networks.assert_called_once_with( self.context, self.instance, self.requested_networks, @@ -7606,7 +7994,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties, request_spec) + self.limits, self.filter_properties, request_spec, + self.accel_uuids) def test_build_with_resource_request_sriov_rp_wrongly_formatted_name(self): request_spec = objects.RequestSpec( @@ -7630,7 +8019,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, 
self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties, request_spec) + self.limits, self.filter_properties, request_spec, + self.accel_uuids) def test_build_with_resource_request_more_than_one_providers(self): request_spec = objects.RequestSpec( @@ -7649,7 +8039,8 @@ self.instance, self.image, self.injected_files, self.admin_pass, self.requested_networks, self.security_groups, self.block_device_mapping, self.node, - self.limits, self.filter_properties, request_spec) + self.limits, self.filter_properties, request_spec, + self.accel_uuids) class ComputeManagerErrorsOutMigrationTestCase(test.NoDBTestCase): @@ -8956,7 +9347,7 @@ migration.id = 0 @mock.patch('nova.compute.utils.notify_about_instance_action') - @mock.patch('nova.image.api.API.generate_image_url', + @mock.patch('nova.image.glance.API.generate_image_url', return_value='fake-url') @mock.patch.object(objects.Migration, 'get_by_id', return_value=migration) @@ -9199,13 +9590,11 @@ @mock.patch.object(self.compute, 'compute_rpcapi') @mock.patch.object(self.compute, '_notify_about_instance_usage') @mock.patch.object(self.compute, 'network_api') - @mock.patch.object(objects.Instance, 'refresh') - def _do_call(refresh, nwapi, notify, rpc, update): + def _do_call(nwapi, notify, rpc, update): bdms = objects.BlockDeviceMappingList(objects=[]) result = self.compute._post_live_migration( self.context, self.instance, 'foo', *args, source_bdms=bdms, **kwargs) - refresh.assert_called_once_with() return result mock_rt = self._mock_rt() @@ -9360,8 +9749,7 @@ @mock.patch.object(self.compute, 'update_available_resource') @mock.patch.object(self.compute, '_update_scheduler_instance_info') @mock.patch.object(self.compute, '_clean_instance_console_tokens') - @mock.patch.object(objects.Instance, 'refresh') - def _test(_refresh, _clean_instance_console_tokens, + def _test(_clean_instance_console_tokens, _update_scheduler_instance_info, update_available_resource, driver_cleanup, _live_migration_cleanup_flags, post_live_migration_at_destination, @@ -9375,7 +9763,6 @@ post_live_migration_at_source.assert_called_once_with( self.context, self.instance, test.MatchType(network_model.NetworkInfo)) - _refresh.assert_called_once_with() driver_cleanup.assert_called_once_with( self.context, self.instance, test.MatchType(network_model.NetworkInfo), destroy_disks=False, @@ -9759,6 +10146,25 @@ instance, migration.id) + def test_live_migration_cleanup_flags_shared_path_and_vpmem_libvirt(self): + migrate_data = objects.LibvirtLiveMigrateData( + is_shared_block_storage=False, + is_shared_instance_path=True) + migr_ctxt = objects.MigrationContext() + vpmem_resource = objects.Resource( + provider_uuid=uuids.rp_uuid, + resource_class="CUSTOM_PMEM_NAMESPACE_4GB", + identifier='ns_0', metadata=objects.LibvirtVPMEMDevice( + label='4GB', + name='ns_0', devpath='/dev/dax0.0', + size=4292870144, align=2097152)) + migr_ctxt.old_resources = objects.ResourceList( + objects=[vpmem_resource]) + do_cleanup, destroy_disks = self.compute._live_migration_cleanup_flags( + migrate_data, migr_ctxt) + self.assertTrue(do_cleanup) + self.assertTrue(destroy_disks) + def test_live_migration_cleanup_flags_block_migrate_libvirt(self): migrate_data = objects.LibvirtLiveMigrateData( is_shared_block_storage=False, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute.py --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute.py 2020-04-10 17:57:57.000000000 +0000 @@ -58,7 +58,7 @@ from nova import context from nova.db import api as db from nova import exception -from nova.image import api as image_api +from nova.image import glance as image_api from nova.network import model as network_model from nova import objects from nova.objects import block_device as block_device_obj @@ -724,7 +724,7 @@ else: return {} - self.stub_out('nova.image.api.API.get', image_api_get) + self.stub_out('nova.image.glance.API.get', image_api_get) block_device_mapping = [{ 'boot_index': 0, @@ -2277,7 +2277,7 @@ 'unrescued': False} def fake_rescue(self, context, instance_ref, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): called['rescued'] = True self.stub_out('nova.virt.fake.FakeDriver.rescue', fake_rescue) @@ -2309,7 +2309,7 @@ def test_rescue_notifications(self, mock_context, mock_notify): # Ensure notifications on instance rescue. def fake_rescue(self, context, instance_ref, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): pass self.stub_out('nova.virt.fake.FakeDriver.rescue', fake_rescue) @@ -2406,14 +2406,18 @@ self.compute.terminate_instance(self.context, instance, []) + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_instance_block_device_info') @mock.patch.object(fake.FakeDriver, 'power_off') @mock.patch.object(fake.FakeDriver, 'rescue') @mock.patch.object(compute_manager.ComputeManager, '_get_rescue_image') - def test_rescue_handle_err(self, mock_get, mock_rescue, mock_power_off): + def test_rescue_handle_err(self, mock_get, mock_rescue, mock_power_off, + mock_get_block_info): # If the driver fails to rescue, instance state should got to ERROR # and the exception should be converted to InstanceNotRescuable inst_obj = self._create_fake_instance_obj() mock_get.return_value = objects.ImageMeta.from_dict({}) + mock_get_block_info.return_value = mock.sentinel.block_device_info mock_rescue.side_effect = RuntimeError("Try again later") expected_message = ('Instance %s cannot be rescued: ' @@ -2429,13 +2433,16 @@ self.assertEqual(vm_states.ERROR, inst_obj.vm_state) mock_get.assert_called_once_with(mock.ANY, inst_obj, mock.ANY) mock_rescue.assert_called_once_with(mock.ANY, inst_obj, [], - mock.ANY, 'password') + mock.ANY, 'password', + mock.sentinel.block_device_info) + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_instance_block_device_info') @mock.patch.object(image_api.API, "get") @mock.patch.object(fake.FakeDriver, 'power_off') @mock.patch.object(nova.virt.fake.FakeDriver, "rescue") def test_rescue_with_image_specified(self, mock_rescue, mock_power_off, - mock_image_get): + mock_image_get, mock_get_block_info): image_ref = uuids.image_instance rescue_image_meta = {} params = {"task_state": task_states.RESCUING} @@ -2445,6 +2452,7 @@ mock_context = mock.Mock() mock_context.elevated.return_value = ctxt + mock_get_block_info.return_value = mock.sentinel.block_device_info mock_image_get.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, @@ -2454,14 +2462,17 @@ mock_image_get.assert_called_with(ctxt, image_ref) mock_rescue.assert_called_with(ctxt, instance, [], test.MatchType(objects.ImageMeta), - 'password') + 'password', + mock.sentinel.block_device_info) 
self.compute.terminate_instance(ctxt, instance, []) + @mock.patch.object(nova.compute.manager.ComputeManager, + '_get_instance_block_device_info') @mock.patch.object(image_api.API, "get") @mock.patch.object(fake.FakeDriver, 'power_off') @mock.patch.object(nova.virt.fake.FakeDriver, "rescue") def test_rescue_with_base_image_when_image_not_specified(self, - mock_rescue, mock_power_off, mock_image_get): + mock_rescue, mock_power_off, mock_image_get, mock_get_block_info): image_ref = FAKE_IMAGE_REF system_meta = {"image_base_image_ref": image_ref} rescue_image_meta = {} @@ -2473,6 +2484,7 @@ mock_context = mock.Mock() mock_context.elevated.return_value = ctxt + mock_get_block_info.return_value = mock.sentinel.block_device_info mock_image_get.return_value = rescue_image_meta self.compute.rescue_instance(mock_context, instance=instance, @@ -2484,7 +2496,8 @@ mock_rescue.assert_called_with(ctxt, instance, [], test.MatchType(objects.ImageMeta), - 'password') + 'password', + mock.sentinel.block_device_info) self.compute.terminate_instance(self.context, instance, []) def test_power_on(self): @@ -2493,7 +2506,7 @@ called = {'power_on': False} def fake_driver_power_on(self, context, instance, network_info, - block_device_info): + block_device_info, accel_device_info=None): called['power_on'] = True self.stub_out('nova.virt.fake.FakeDriver.power_on', @@ -2512,6 +2525,25 @@ self.assertTrue(called['power_on']) self.compute.terminate_instance(self.context, inst_obj, []) + @mock.patch.object(compute_manager.ComputeManager, + '_get_instance_block_device_info') + @mock.patch('nova.network.neutron.API.get_instance_nw_info') + @mock.patch.object(fake.FakeDriver, 'power_on') + @mock.patch('nova.accelerator.cyborg._CyborgClient.get_arqs_for_instance') + def test_power_on_with_accels(self, mock_get_arqs, + mock_power_on, mock_nw_info, mock_blockdev): + instance = self._create_fake_instance_obj() + instance.flavor.extra_specs = {'accel:device_profile': 'mydp'} + accel_info = [{'k1': 'v1', 'k2': 'v2'}] + mock_get_arqs.return_value = accel_info + mock_nw_info.return_value = 'nw_info' + mock_blockdev.return_value = 'blockdev_info' + + self.compute._power_on(self.context, instance) + mock_get_arqs.assert_called_once_with(instance['uuid']) + mock_power_on.assert_called_once_with(self.context, + instance, 'nw_info', 'blockdev_info', accel_info) + def test_power_off(self): # Ensure instance can be powered off. 
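The power-on hunks above thread accelerator requests (ARQs) from Cyborg into the driver call; test_power_on_with_accels pins down the expected flow. A minimal sketch of that flow, reconstructed only from what the test asserts and hedged accordingly: the Cyborg client is assumed to come from nova.accelerator.cyborg.get_client (the test only mocks _CyborgClient.get_arqs_for_instance), and the control flow here is illustrative, not the verbatim nova implementation.

    # Sketch of the manager-side flow asserted by test_power_on_with_accels.
    def _power_on(self, context, instance):
        network_info = self.network_api.get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(
            context, instance)
        accel_info = []
        # Only flavors carrying a Cyborg device profile have ARQs to fetch.
        if instance.flavor.extra_specs.get('accel:device_profile'):
            accel_info = cyborg.get_client(context).get_arqs_for_instance(
                instance.uuid)
        # accel_info rides along to the driver, matching the new
        # power_on(..., accel_device_info=None) signature stubbed above.
        self.driver.power_on(context, instance, network_info,
                             block_device_info, accel_info)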
@@ -2975,6 +3007,7 @@ launched_at=timeutils.utcnow())) instance = objects.Instance._from_db_object(econtext, objects.Instance(), db_instance) + instance.flavor = self.default_flavor updated_dbinstance1 = fake_instance.fake_db_instance( **dict(uuid=uuids.db_instance_1, @@ -3042,7 +3075,8 @@ expected_call_info = { 'args': (econtext, instance, expected_nw_info, reboot_type), - 'kwargs': {'block_device_info': fake_block_dev_info}} + 'kwargs': {'block_device_info': fake_block_dev_info, + 'accel_info': []}} fault = exception.InstanceNotFound(instance_id='instance-0000') def fake_reboot(self, *args, **kwargs): @@ -3158,6 +3192,58 @@ def test_reboot_hard_and_delete_and_rescued(self): self._test_reboot(False, test_delete=True, test_unrescue=True) + @mock.patch('nova.virt.fake.FakeDriver.reboot') + @mock.patch('nova.objects.instance.Instance.save') + @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') + @mock.patch.object(compute_manager.ComputeManager, + '_get_instance_block_device_info') + @mock.patch.object(compute_manager.ComputeManager, + '_notify_about_instance_usage') + @mock.patch.object(compute_manager.ComputeManager, '_instance_update') + @mock.patch.object(db, 'instance_update_and_get_original') + @mock.patch.object(compute_manager.ComputeManager, '_get_power_state') + @mock.patch('nova.compute.utils.notify_about_instance_action') + def _test_reboot_with_accels(self, mock_notify_action, mock_get_power, + mock_get_orig, mock_update, mock_notify_usage, + mock_get_blk, mock_get_bdms, mock_inst_save, mock_reboot, + extra_specs=None, accel_info=None): + + self.compute.network_api.get_instance_nw_info = mock.Mock() + + reboot_type = 'SOFT' + instance = self._create_fake_instance_obj() + if extra_specs: + instance.flavor.extra_specs = extra_specs + + self.compute.reboot_instance(self.context, instance=instance, + block_device_info=None, reboot_type=reboot_type) + + mock_reboot.assert_called_once_with( + mock.ANY, instance, mock.ANY, reboot_type, + block_device_info=mock.ANY, + bad_volumes_callback=mock.ANY, + accel_info=accel_info or [] + ) + + return instance['uuid'] + + @mock.patch('nova.accelerator.cyborg._CyborgClient.get_arqs_for_instance') + def test_reboot_with_accels_ok(self, mock_get_arqs): + dp_name = 'mydp' + extra_specs = {'accel:device_profile': dp_name} + _, accel_info = fixtures.get_arqs(dp_name) + mock_get_arqs.return_value = accel_info + + instance_uuid = self._test_reboot_with_accels( + extra_specs=extra_specs, accel_info=accel_info) + + mock_get_arqs.assert_called_once_with(instance_uuid) + + @mock.patch('nova.accelerator.cyborg._CyborgClient.get_arqs_for_instance') + def test_reboot_with_accels_no_dp(self, mock_get_arqs): + self._test_reboot_with_accels(extra_specs=None, accel_info=None) + mock_get_arqs.assert_not_called() + @mock.patch.object(jsonutils, 'to_primitive') def test_reboot_fail(self, mock_to_primitive): self._test_reboot(False, fail_reboot=True) @@ -3558,8 +3644,8 @@ self.assertEqual(state_dict['power_state'], instances[0]['power_state']) - @mock.patch('nova.image.api.API.get_all') - @mock.patch('nova.image.api.API.delete') + @mock.patch('nova.image.glance.API.get_all') + @mock.patch('nova.image.glance.API.delete') def test_rotate_backups(self, mock_delete, mock_get_all_images): instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] @@ -3604,7 +3690,7 @@ rotation=1) self.assertEqual(2, mock_delete.call_count) - @mock.patch('nova.image.api.API.get_all') + @mock.patch('nova.image.glance.API.get_all') def 
test_rotate_backups_with_image_delete_failed(self, mock_get_all_images): instance = self._create_fake_instance_obj() @@ -3664,7 +3750,7 @@ if image_id == uuids.image_id_4: raise exception.ImageDeleteConflict(reason='image is in use') - with mock.patch.object(nova.image.api.API, 'delete', + with mock.patch.object(nova.image.glance.API, 'delete', side_effect=_check_image_id) as mock_delete: # Fake images 4,3,2 should be rotated in sequence self.compute._rotate_backups(self.context, instance=instance, @@ -8406,6 +8492,7 @@ legacy_notify, notify, instance.uuid, self.compute_api.notifier, self.context) + @mock.patch('nova.compute.api.API._local_delete_cleanup') @mock.patch('nova.compute.utils.notify_about_instance_action') @mock.patch('nova.objects.Instance.destroy') @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid') @@ -8413,7 +8500,7 @@ @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') def test_delete_while_booting_instance_not_scheduled_cellv1( self, br_get_by_instance, legacy_notify, im_get_by_instance, - instance_destroy, notify): + instance_destroy, notify, api_del_cleanup): instance = self._create_fake_instance_obj() instance.host = None @@ -8433,6 +8520,7 @@ self.context) instance_destroy.assert_called_once_with() + api_del_cleanup.assert_called_once() @mock.patch('nova.compute.utils.notify_about_instance_action') @mock.patch('nova.objects.Instance.destroy') @@ -8702,12 +8790,11 @@ def test_create_instance_associates_security_groups(self): # Make sure create associates security groups. - group = {'id': uuids.secgroup_id, 'name': 'testgroup'} with test.nested( mock.patch.object(self.compute_api.compute_task_api, 'schedule_and_build_instances'), - mock.patch('nova.network.security_group_api.get', - return_value=group), + mock.patch('nova.network.security_group_api.validate_name', + return_value=uuids.secgroup_id), ) as (mock_sbi, mock_secgroups): self.compute_api.create( self.context, @@ -8719,14 +8806,16 @@ reqspec = build_call[1]['request_spec'][0] self.assertEqual(1, len(reqspec.security_groups)) - self.assertEqual(group['id'], reqspec.security_groups[0].uuid) + self.assertEqual(uuids.secgroup_id, reqspec.security_groups[0].uuid) mock_secgroups.assert_called_once_with(mock.ANY, 'testgroup') def test_create_instance_with_invalid_security_group_raises(self): pre_build_len = len(db.instance_get_all(self.context)) - with mock.patch('nova.network.security_group_api.get', - return_value=None) as mock_secgroups: - self.assertRaises(exception.SecurityGroupNotFoundForProject, + with mock.patch( + 'nova.network.security_group_api.validate_name', + side_effect=exception.SecurityGroupNotFound('foo'), + ) as mock_secgroups: + self.assertRaises(exception.SecurityGroupNotFound, self.compute_api.create, self.context, instance_type=self.default_flavor, @@ -9012,6 +9101,7 @@ def test_rebuild_in_error_not_launched(self): instance = self._create_fake_instance_obj(params={'image_ref': ''}) + flavor = instance.flavor self.stub_out('nova.tests.unit.image.fake._FakeImageService.show', self.fake_show) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, @@ -9022,6 +9112,7 @@ "launched_at": None}) instance = db.instance_get_by_uuid(self.context, instance['uuid']) + instance['flavor'] = flavor self.assertRaises(exception.InstanceInvalidState, self.compute_api.rebuild, @@ -11644,7 +11735,8 @@ 'foo_key2': 'foo_value2', 'availability_zone': 'fake_zone'} fake_notifier.NOTIFICATIONS = [] - availability_zones._get_cache().add('fake_key', 'fake_value') + 
availability_zones._get_cache().region.get_or_create( + 'fake_key', lambda: 'fake_value') aggr = self.api.update_aggregate_metadata(self.context, aggr.id, metadata) self.assertIsNone(availability_zones._get_cache().get('fake_key')) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_compute_utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_compute_utils.py 2020-04-10 17:57:57.000000000 +0000 @@ -27,6 +27,7 @@ from oslo_utils import uuidutils import six +from nova.accelerator.cyborg import _CyborgClient as cyborgclient from nova.compute import manager from nova.compute import power_state from nova.compute import task_states @@ -42,6 +43,7 @@ from nova.objects import block_device as block_device_obj from nova.objects import fields from nova import rpc +from nova.scheduler.client import report from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_crypto @@ -1511,3 +1513,174 @@ self.assertFalse( compute_utils.is_volume_backed_instance(ctxt, instance, None)) mock_bdms.assert_called_with(ctxt, instance.uuid) + + +class ComputeUtilsImageFunctionsTestCase(test.TestCase): + def setUp(self): + super(ComputeUtilsImageFunctionsTestCase, self).setUp() + self.context = context.RequestContext('fake', 'fake') + + def test_initialize_instance_snapshot_metadata_no_metadata(self): + # show no borkage from empty system meta + ctxt = self.context + instance = create_instance(ctxt) + image_meta = compute_utils.initialize_instance_snapshot_metadata( + ctxt, instance, 'empty properties') + self.assertEqual({}, image_meta['properties']) + + def test_initialize_instance_snapshot_metadata_removed_metadata(self): + # show non-inheritable properties are excluded + ctxt = self.context + instance = create_instance(ctxt) + instance.system_metadata = { + 'image_img_signature': 'an-image-signature', + 'image_cinder_encryption_key_id': + 'deeeeeac-d75e-11e2-8271-1234567897d6', + 'image_some_key': 'some_value', + 'image_fred': 'barney', + 'image_cache_in_nova': 'true' + } + image_meta = compute_utils.initialize_instance_snapshot_metadata( + ctxt, instance, 'removed properties') + properties = image_meta['properties'] + self.assertGreater(len(properties), 0) + self.assertIn('some_key', properties) + self.assertIn('fred', properties) + for p in compute_utils.NON_INHERITABLE_IMAGE_PROPERTIES: + self.assertNotIn(p, properties) + for p in CONF.non_inheritable_image_properties: + self.assertNotIn(p, properties) + + +class PciRequestUpdateTestCase(test.NoDBTestCase): + def setUp(self): + super().setUp() + self.context = context.RequestContext('fake', 'fake') + + def test_no_pci_request(self): + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[])) + provider_mapping = {} + + compute_utils.update_pci_request_spec_with_allocated_interface_name( + self.context, mock.sentinel.report_client, instance, + provider_mapping) + + def test_pci_request_from_flavor(self): + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[ + objects.InstancePCIRequest(requester_id=None) + ])) + provider_mapping = {} + + compute_utils.update_pci_request_spec_with_allocated_interface_name( + self.context, mock.sentinel.report_client, instance, + provider_mapping) + + def 
test_pci_request_has_no_mapping(self): + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[ + objects.InstancePCIRequest(requester_id=uuids.port_1) + ])) + provider_mapping = {} + + compute_utils.update_pci_request_spec_with_allocated_interface_name( + self.context, mock.sentinel.report_client, instance, + provider_mapping) + + def test_pci_request_ambiguous_mapping(self): + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[ + objects.InstancePCIRequest(requester_id=uuids.port_1) + ])) + provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]} + + self.assertRaises( + exception.AmbiguousResourceProviderForPCIRequest, + (compute_utils. + update_pci_request_spec_with_allocated_interface_name), + self.context, mock.sentinel.report_client, instance, + provider_mapping) + + def test_unexpected_provider_name(self): + report_client = mock.Mock(spec=report.SchedulerReportClient) + report_client.get_resource_provider_name.return_value = 'unexpected' + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[ + objects.InstancePCIRequest( + requester_id=uuids.port_1, + spec=[{}]) + ])) + provider_mapping = {uuids.port_1: [uuids.rp1]} + + self.assertRaises( + exception.UnexpectedResourceProviderNameForPCIRequest, + (compute_utils. + update_pci_request_spec_with_allocated_interface_name), + self.context, report_client, instance, + provider_mapping) + + report_client.get_resource_provider_name.assert_called_once_with( + self.context, uuids.rp1) + self.assertNotIn( + 'parent_ifname', instance.pci_requests.requests[0].spec[0]) + + def test_pci_request_updated(self): + report_client = mock.Mock(spec=report.SchedulerReportClient) + report_client.get_resource_provider_name.return_value = ( + 'host:agent:enp0s31f6') + instance = objects.Instance( + pci_requests=objects.InstancePCIRequests(requests=[ + objects.InstancePCIRequest( + requester_id=uuids.port_1, + spec=[{}], + ) + ])) + provider_mapping = {uuids.port_1: [uuids.rp1]} + + compute_utils.update_pci_request_spec_with_allocated_interface_name( + self.context, report_client, instance, provider_mapping) + + report_client.get_resource_provider_name.assert_called_once_with( + self.context, uuids.rp1) + self.assertEqual( + 'enp0s31f6', + instance.pci_requests.requests[0].spec[0]['parent_ifname']) + + +class AcceleratorRequestTestCase(test.NoDBTestCase): + def setUp(self): + super(AcceleratorRequestTestCase, self).setUp() + self.context = context.get_admin_context() + + @mock.patch.object(cyborgclient, 'delete_arqs_for_instance') + def test_delete_with_device_profile(self, mock_del_arq): + flavor = objects.Flavor(**test_flavor.fake_flavor) + flavor['extra_specs'] = {'accel:device_profile': 'mydp'} + instance = fake_instance.fake_instance_obj(self.context, flavor=flavor) + compute_utils.delete_arqs_if_needed(self.context, instance) + mock_del_arq.assert_called_once_with(instance.uuid) + + @mock.patch.object(cyborgclient, 'delete_arqs_for_instance') + def test_delete_with_no_device_profile(self, mock_del_arq): + flavor = objects.Flavor(**test_flavor.fake_flavor) + flavor['extra_specs'] = {} + instance = fake_instance.fake_instance_obj(self.context, flavor=flavor) + compute_utils.delete_arqs_if_needed(self.context, instance) + mock_del_arq.assert_not_called() + + @mock.patch('nova.compute.utils.LOG.exception') + @mock.patch.object(cyborgclient, 'delete_arqs_for_instance') + def test_delete_with_device_profile_exception(self, mock_del_arq, + mock_log_exc): + flavor = 
objects.Flavor(**test_flavor.fake_flavor) + flavor['extra_specs'] = {'accel:device_profile': 'mydp'} + instance = fake_instance.fake_instance_obj(self.context, flavor=flavor) + mock_del_arq.side_effect = exception.AcceleratorRequestOpFailed( + op='', msg='') + + compute_utils.delete_arqs_if_needed(self.context, instance) + mock_del_arq.assert_called_once_with(instance.uuid) + mock_log_exc.assert_called_once() + self.assertIn('Failed to delete accelerator requests for instance', + mock_log_exc.call_args[0][0]) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_resource_tracker.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_resource_tracker.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_resource_tracker.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_resource_tracker.py 2020-04-10 17:57:57.000000000 +0000 @@ -43,6 +43,7 @@ from nova.tests.unit import fake_notifier from nova.tests.unit.objects import test_pci_device as fake_pci_device from nova.tests.unit import utils +from nova import utils as nova_utils from nova.virt import driver _HOSTNAME = 'fake-host' @@ -3031,7 +3032,8 @@ ) as (mock_from_instance, mock_migration_save, mock_instance_save, mock_update, mock_pci_claim_instance, mock_update_usage): claim = self.rt.live_migration_claim(ctxt, instance, _NODENAME, - migration, limits=None) + migration, limits=None, + allocs=None) self.assertEqual(42, claim.migration.id) # Check that we didn't set the status to 'pre-migrating', like we # do for cold migrations, but which doesn't exist for live @@ -3896,3 +3898,21 @@ rt = resource_tracker.ResourceTracker( _HOSTNAME, mock.sentinel.driver, mock.sentinel.reportclient) self.assertIs(rt.reportclient, mock.sentinel.reportclient) + + def test_that_unfair_usage_of_compute_resource_semaphore_is_caught(self): + def _test_explict_unfair(): + class MyResourceTracker(resource_tracker.ResourceTracker): + @nova_utils.synchronized( + resource_tracker.COMPUTE_RESOURCE_SEMAPHORE, fair=False) + def foo(self): + pass + + def _test_implicit_unfair(): + class MyResourceTracker(resource_tracker.ResourceTracker): + @nova_utils.synchronized( + resource_tracker.COMPUTE_RESOURCE_SEMAPHORE) + def foo(self): + pass + + self.assertRaises(AssertionError, _test_explict_unfair) + self.assertRaises(AssertionError, _test_implicit_unfair) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_rpcapi.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_rpcapi.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_rpcapi.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_rpcapi.py 2020-04-10 17:57:57.000000000 +0000 @@ -933,13 +933,48 @@ version='5.0') def test_build_and_run_instance(self): + # With rpcapi 5.11, when a list of accel_uuids is passed as a param, + # that list must be passed to the client. That is tested in + # _test_compute_api with rpc_mock.assert, where expected_kwargs + # must have the accel_uuids. 
+ accel_uuids = ['938af7f9-f136-4e5a-bdbe-3b6feab54311'] self._test_compute_api('build_and_run_instance', 'cast', instance=self.fake_instance_obj, host='host', image='image', request_spec={'request': 'spec'}, filter_properties=[], admin_password='passwd', injected_files=None, requested_networks=['network1'], security_groups=None, block_device_mapping=None, node='node', limits=[], - host_list=None, version='5.0') + host_list=None, accel_uuids=accel_uuids, version='5.11') + + def test_build_and_run_instance_old_rpcapi(self): + # With rpcapi < 5.11, accel_uuids must be dropped in the client call. + ctxt = context.RequestContext('fake_user', 'fake_project') + compute_api = compute_rpcapi.ComputeAPI() + compute_api.router.client = mock.Mock() + mock_client = mock.MagicMock() + compute_api.router.client.return_value = mock_client + # Force can_send_version to False, so that 5.0 version is used. + mock_client.can_send_version.return_value = False + mock_cctx = mock.MagicMock() + mock_client.prepare.return_value = mock_cctx + compute_api.build_and_run_instance( + ctxt, instance=self.fake_instance_obj, + host='host', image='image', + request_spec=self.fake_request_spec_obj, + filter_properties={}, + accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311']) + + mock_client.can_send_version.assert_called_once_with('5.11') + mock_client.prepare.assert_called_with( + server='host', version='5.0') + mock_cctx.cast.assert_called_with( # No accel_uuids + ctxt, 'build_and_run_instance', + instance=self.fake_instance_obj, + image='image', request_spec=self.fake_request_spec_obj, + filter_properties={}, admin_password=None, + injected_files=None, requested_networks=None, + security_groups=None, block_device_mapping=None, + node=None, limits=None, host_list=None) def test_quiesce_instance(self): self._test_compute_api('quiesce_instance', 'call', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_shelve.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_shelve.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/compute/test_shelve.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/compute/test_shelve.py 2020-04-10 17:57:57.000000000 +0000 @@ -265,6 +265,9 @@ return instance + @mock.patch('nova.compute.utils.' 
+ 'update_pci_request_spec_with_allocated_interface_name', + new=mock.NonCallableMock()) @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch('nova.compute.utils.notify_about_instance_action') @mock.patch.object(nova.compute.manager.ComputeManager, @@ -358,8 +361,8 @@ mock_notify_instance_usage_call_list) mock_prep_block_device.assert_called_once_with(self.context, instance, mock.ANY) - mock_setup_network.assert_called_once_with(self.context, instance, - self.compute.host) + mock_setup_network.assert_called_once_with( + self.context, instance, self.compute.host, provider_mappings=None) mock_spawn.assert_called_once_with(self.context, instance, test.MatchType(objects.ImageMeta), injected_files=[], admin_password=None, allocations={}, network_info=[], @@ -458,8 +461,8 @@ mock_notify_instance_usage_call_list) mock_prep_block_device.assert_called_once_with(self.context, instance, mock.ANY) - mock_setup_network.assert_called_once_with(self.context, instance, - self.compute.host) + mock_setup_network.assert_called_once_with( + self.context, instance, self.compute.host, provider_mappings=None) mock_instance_claim.assert_called_once_with(self.context, instance, test_compute.NODENAME, {}, limits) @@ -545,8 +548,8 @@ self.context, instance, 'unshelve.start') mock_prep_block_device.assert_called_once_with( self.context, instance, mock_bdms) - mock_setup_network.assert_called_once_with(self.context, instance, - self.compute.host) + mock_setup_network.assert_called_once_with( + self.context, instance, self.compute.host, provider_mappings=None) mock_instance_claim.assert_called_once_with(self.context, instance, test_compute.NODENAME, {}, limits) @@ -557,6 +560,54 @@ mock_terminate_volume_connections.assert_called_once_with( self.context, instance, mock_bdms) + @mock.patch('nova.network.neutron.API.setup_instance_network_on_host') + @mock.patch('nova.compute.utils.' + 'update_pci_request_spec_with_allocated_interface_name') + def test_unshelve_with_resource_request( + self, mock_update_pci, mock_setup_network): + requested_res = [objects.RequestGroup( + requester_id=uuids.port_1, + provider_uuids=[uuids.rp1])] + request_spec = objects.RequestSpec(requested_resources=requested_res) + instance = self._create_fake_instance_obj() + + self.compute.unshelve_instance( + self.context, instance, image=None, + filter_properties={}, node='fake-node', request_spec=request_spec) + + mock_update_pci.assert_called_once_with( + self.context, self.compute.reportclient, instance, + {uuids.port_1: [uuids.rp1]}) + mock_setup_network.assert_called_once_with( + self.context, instance, self.compute.host, + provider_mappings={uuids.port_1: [uuids.rp1]}) + + @mock.patch('nova.network.neutron.API.setup_instance_network_on_host', + new=mock.NonCallableMock()) + @mock.patch('nova.compute.utils.' 
+ 'update_pci_request_spec_with_allocated_interface_name') + def test_unshelve_with_resource_request_update_raises( + self, mock_update_pci): + requested_res = [objects.RequestGroup( + requester_id=uuids.port_1, + provider_uuids=[uuids.rp1])] + request_spec = objects.RequestSpec(requested_resources=requested_res) + instance = self._create_fake_instance_obj() + mock_update_pci.side_effect = ( + exception.UnexpectedResourceProviderNameForPCIRequest( + provider=uuids.rp1, + requester=uuids.port1, + provider_name='unexpected')) + + self.assertRaises( + exception.UnexpectedResourceProviderNameForPCIRequest, + self.compute.unshelve_instance, self.context, instance, image=None, + filter_properties={}, node='fake-node', request_spec=request_spec) + + mock_update_pci.assert_called_once_with( + self.context, self.compute.reportclient, instance, + {uuids.port_1: [uuids.rp1]}) + @mock.patch.object(objects.InstanceList, 'get_by_filters') def test_shelved_poll_none_offloaded(self, mock_get_by_filters): # Test instances are not offloaded when shelved_offload_time is -1 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conductor/tasks/test_live_migrate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conductor/tasks/test_live_migrate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conductor/tasks/test_live_migrate.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conductor/tasks/test_live_migrate.py 2020-04-10 17:57:57.000000000 +0000 @@ -61,6 +61,7 @@ self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'} self.instance.numa_topology = None self.instance.pci_requests = None + self.instance.resources = None self.destination = "destination" self.block_migration = "bm" self.disk_over_commit = "doc" @@ -96,23 +97,32 @@ dest_node = objects.ComputeNode(hypervisor_hostname='dest_node') with test.nested( mock.patch.object(self.task, '_check_host_is_up'), - mock.patch.object(self.task, '_check_requested_destination', - return_value=(mock.sentinel.source_node, - dest_node)), + mock.patch.object(self.task, '_check_requested_destination'), mock.patch.object(scheduler_utils, 'claim_resources_on_destination'), mock.patch.object(self.migration, 'save'), mock.patch.object(self.task.compute_rpcapi, 'live_migration'), mock.patch('nova.conductor.tasks.migrate.' 
'replace_allocation_with_migration'), + mock.patch.object(self.task, '_check_destination_is_not_source'), + mock.patch.object(self.task, + '_check_destination_has_enough_memory'), + mock.patch.object(self.task, + '_check_compatible_with_source_hypervisor', + return_value=(mock.sentinel.source_node, + dest_node)), ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig, - m_alloc): + m_alloc, m_check_diff, m_check_enough_mem, m_check_compatible): mock_mig.return_value = "bob" m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs) self.assertEqual("bob", self.task.execute()) - mock_check_up.assert_called_once_with(self.instance_host) + mock_check_up.assert_has_calls([ + mock.call(self.instance_host), mock.call(self.destination)]) mock_check_dest.assert_called_once_with() + m_check_diff.assert_called_once() + m_check_enough_mem.assert_called_once() + m_check_compatible.assert_called_once() allocs = mock.sentinel.allocs mock_claim.assert_called_once_with( self.context, self.task.report_client, @@ -282,61 +292,16 @@ self.assertRaises(exception.ComputeHostNotFound, self.task._check_host_is_up, "host") - @mock.patch.object(objects.Service, 'get_by_compute_host') - @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') - @mock.patch.object(servicegroup.API, 'service_is_up') - @mock.patch.object(compute_rpcapi.ComputeAPI, - 'check_can_live_migrate_destination') - def test_check_requested_destination(self, mock_check, mock_is_up, - mock_get_info, mock_get_host): - mock_get_host.return_value = "service" - mock_is_up.return_value = True - hypervisor_details = objects.ComputeNode( - hypervisor_type="a", - hypervisor_version=6.1, - free_ram_mb=513, - memory_mb=512, - ram_allocation_ratio=1.0) - mock_get_info.return_value = hypervisor_details - mock_check.return_value = "migrate_data" - self.task.limits = fake_limits1 - - with test.nested( - mock.patch.object(self.task.network_api, - 'supports_port_binding_extension', - return_value=False), - mock.patch.object(self.task, '_check_can_migrate_pci')): - self.assertEqual((hypervisor_details, hypervisor_details), - self.task._check_requested_destination()) - self.assertEqual("migrate_data", self.task.migrate_data) - mock_get_host.assert_called_once_with(self.context, self.destination) - mock_is_up.assert_called_once_with("service") - self.assertEqual([mock.call(self.destination), - mock.call(self.instance_host), - mock.call(self.destination)], - mock_get_info.call_args_list) - mock_check.assert_called_once_with(self.context, self.instance, - self.destination, self.block_migration, self.disk_over_commit, - self.task.migration, fake_limits1) - - def test_check_requested_destination_fails_with_same_dest(self): + def test_check_destination_fails_with_same_dest(self): self.task.destination = "same" self.task.source = "same" self.assertRaises(exception.UnableToMigrateToSelf, - self.task._check_requested_destination) + self.task._check_destination_is_not_source) - @mock.patch.object(objects.Service, 'get_by_compute_host', - side_effect=exception.ComputeHostNotFound(host='host')) - def test_check_requested_destination_fails_when_destination_is_up(self, - mock): - self.assertRaises(exception.ComputeHostNotFound, - self.task._check_requested_destination) - - @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') @mock.patch.object(objects.ComputeNode, 'get_first_node_by_host_for_old_compat') - def test_check_requested_destination_fails_with_not_enough_memory( - self, mock_get_first, mock_is_up): + def 
test_check_destination_fails_with_not_enough_memory( + self, mock_get_first): mock_get_first.return_value = ( objects.ComputeNode(free_ram_mb=513, memory_mb=1024, @@ -346,47 +311,55 @@ # ratio reduces the total available RAM to 410MB # (1024 * 0.9 - (1024 - 513)) self.assertRaises(exception.MigrationPreCheckError, - self.task._check_requested_destination) - mock_is_up.assert_called_once_with(self.destination) + self.task._check_destination_has_enough_memory) mock_get_first.assert_called_once_with(self.context, self.destination) - @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') - @mock.patch.object(live_migrate.LiveMigrationTask, - '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') - def test_check_requested_destination_fails_with_hypervisor_diff( - self, mock_get_info, mock_check, mock_is_up): + def test_check_compatible_fails_with_hypervisor_diff( + self, mock_get_info): mock_get_info.side_effect = [ objects.ComputeNode(hypervisor_type='b'), objects.ComputeNode(hypervisor_type='a')] self.assertRaises(exception.InvalidHypervisorType, - self.task._check_requested_destination) - mock_is_up.assert_called_once_with(self.destination) - mock_check.assert_called_once_with() + self.task._check_compatible_with_source_hypervisor, + self.destination) self.assertEqual([mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) - @mock.patch.object(live_migrate.LiveMigrationTask, '_check_host_is_up') - @mock.patch.object(live_migrate.LiveMigrationTask, - '_check_destination_has_enough_memory') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') - def test_check_requested_destination_fails_with_hypervisor_too_old( - self, mock_get_info, mock_check, mock_is_up): + def test_check_compatible_fails_with_hypervisor_too_old( + self, mock_get_info): host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7} host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6} mock_get_info.side_effect = [objects.ComputeNode(**host1), objects.ComputeNode(**host2)] self.assertRaises(exception.DestinationHypervisorTooOld, - self.task._check_requested_destination) - mock_is_up.assert_called_once_with(self.destination) - mock_check.assert_called_once_with() + self.task._check_compatible_with_source_hypervisor, + self.destination) self.assertEqual([mock.call(self.instance_host), mock.call(self.destination)], mock_get_info.call_args_list) + @mock.patch.object(compute_rpcapi.ComputeAPI, + 'check_can_live_migrate_destination') + def test_check_requested_destination(self, mock_check): + mock_check.return_value = "migrate_data" + self.task.limits = fake_limits1 + + with test.nested( + mock.patch.object(self.task.network_api, + 'supports_port_binding_extension', + return_value=False), + mock.patch.object(self.task, '_check_can_migrate_pci')): + self.assertIsNone(self.task._check_requested_destination()) + self.assertEqual("migrate_data", self.task.migrate_data) + mock_check.assert_called_once_with(self.context, self.instance, + self.destination, self.block_migration, self.disk_over_commit, + self.task.migration, fake_limits1) + @mock.patch.object(objects.Service, 'get_by_compute_host') @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info') @mock.patch.object(servicegroup.API, 'service_is_up') @@ -858,3 +831,29 @@ _test, pci_requests, False, True) self.assertRaises(exception.MigrationPreCheckError, _test, pci_requests, True, True) + + def 
test_check_can_migrate_specific_resources(self): + """Test _check_can_migrate_specific_resources allows live migration + with vpmem. + """ + @mock.patch.object(live_migrate, 'supports_vpmem_live_migration') + def _test(resources, supp_lm_vpmem_retval, mock_support_lm_vpmem): + self.instance.resources = resources + mock_support_lm_vpmem.return_value = supp_lm_vpmem_retval + self.task._check_can_migrate_specific_resources() + + vpmem_0 = objects.LibvirtVPMEMDevice( + label='4GB', name='ns_0', devpath='/dev/dax0.0', + size=4292870144, align=2097152) + resource_0 = objects.Resource( + provider_uuid=uuids.rp, + resource_class="CUSTOM_PMEM_NAMESPACE_4GB", + identifier='ns_0', metadata=vpmem_0) + resources = objects.ResourceList( + objects=[resource_0]) + + _test(None, False) + _test(None, True) + _test(resources, True) + self.assertRaises(exception.MigrationPreCheckError, + _test, resources, False) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conductor/test_conductor.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conductor/test_conductor.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conductor/test_conductor.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conductor/test_conductor.py 2020-04-10 17:57:58.000000000 +0000 @@ -30,6 +30,7 @@ from nova.compute import flavors from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states +from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova.conductor import manager as conductor_manager @@ -42,7 +43,7 @@ from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api_models from nova import exception as exc -from nova.image import api as image_api +from nova.image import glance as image_api from nova import objects from nova.objects import base as obj_base from nova.objects import block_device as block_device_obj @@ -437,6 +438,8 @@ def test_cold_migrate_forced_shutdown(self): self._test_cold_migrate(clean_shutdown=False) + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_create_and_bind_arqs') @mock.patch.object(compute_rpcapi.ComputeAPI, 'build_and_run_instance') @mock.patch.object(db, 'block_device_mapping_get_all_by_instance', return_value=[]) @@ -448,7 +451,7 @@ @mock.patch.object(objects.RequestSpec, 'from_primitives') def test_build_instances(self, mock_fp, mock_save, mock_getaz, mock_buildreq, mock_schedule, mock_bdm, - mock_build): + mock_build, mock_create_bind_arqs): """Tests creating two instances and the scheduler returns a unique host/node combo for each instance. 
""" @@ -484,6 +487,7 @@ self.useFixture(cast_as_call.CastAsCall(self)) mock_getaz.return_value = 'myaz' + mock_create_bind_arqs.return_value = mock.sentinel self.conductor.build_instances(self.context, instances=instances, @@ -519,7 +523,8 @@ requested_networks=None, security_groups='security_groups', block_device_mapping=mock.ANY, - node='node1', limits=None, host_list=sched_return[0]), + node='node1', limits=None, host_list=sched_return[0], + accel_uuids=mock.sentinel), mock.call(self.context, instance=mock.ANY, host='host2', image={'fake_data': 'should_pass_silently'}, request_spec=fake_spec, @@ -529,7 +534,88 @@ requested_networks=None, security_groups='security_groups', block_device_mapping=mock.ANY, - node='node2', limits=None, host_list=sched_return[1])]) + node='node2', limits=None, host_list=sched_return[1], + accel_uuids=mock.sentinel)]) + mock_create_bind_arqs.assert_has_calls([ + mock.call(self.context, instances[0].uuid, + instances[0].flavor.extra_specs, 'node1', mock.ANY), + mock.call(self.context, instances[1].uuid, + instances[1].flavor.extra_specs, 'node2', mock.ANY), + ]) + + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_cleanup_when_reschedule_fails') + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_create_and_bind_arqs') + @mock.patch.object(compute_rpcapi.ComputeAPI, 'build_and_run_instance') + @mock.patch.object(db, 'block_device_mapping_get_all_by_instance', + return_value=[]) + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_schedule_instances') + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + @mock.patch('nova.availability_zones.get_host_availability_zone') + @mock.patch('nova.objects.Instance.save') + @mock.patch.object(objects.RequestSpec, 'from_primitives') + def test_build_instances_arq_failure(self, mock_fp, mock_save, mock_getaz, + mock_buildreq, mock_schedule, mock_bdm, + mock_build, mock_create_bind_arqs, mock_cleanup): + """If _create_and_bind_arqs throws an exception, + _destroy_build_request must be called for each instance. 
+ """ + fake_spec = objects.RequestSpec() + mock_fp.return_value = fake_spec + instance_type = objects.Flavor.get_by_name(self.context, 'm1.small') + # NOTE(danms): Avoid datetime timezone issues with converted flavors + instance_type.created_at = None + instances = [objects.Instance(context=self.context, + id=i, + uuid=uuids.fake, + flavor=instance_type) for i in range(2)] + instance_properties = obj_base.obj_to_primitive(instances[0]) + instance_properties['system_metadata'] = flavors.save_flavor_info( + {}, instance_type) + + sched_return = copy.deepcopy(fake_host_lists2) + mock_schedule.return_value = sched_return + + # build_instances() is a cast, we need to wait for it to complete + self.useFixture(cast_as_call.CastAsCall(self)) + + mock_getaz.return_value = 'myaz' + mock_create_bind_arqs.side_effect = ( + exc.AcceleratorRequestOpFailed(op='', msg='')) + + self.conductor.build_instances(self.context, + instances=instances, + image={'fake_data': 'should_pass_silently'}, + filter_properties={}, + admin_password='admin_password', + injected_files='injected_files', + requested_networks=None, + security_groups='security_groups', + block_device_mapping='block_device_mapping', + legacy_bdm=False, host_lists=None) + mock_create_bind_arqs.assert_has_calls([ + mock.call(self.context, instances[0].uuid, + instances[0].flavor.extra_specs, 'node1', mock.ANY), + mock.call(self.context, instances[1].uuid, + instances[1].flavor.extra_specs, 'node2', mock.ANY), + ]) + # Comparing instances fails because the instance objects have changed + # in the above flow. So, we compare the fields instead. + mock_cleanup.assert_has_calls([ + mock.call(self.context, test.MatchType(objects.Instance), + test.MatchType(exc.AcceleratorRequestOpFailed), + test.MatchType(dict), None), + mock.call(self.context, test.MatchType(objects.Instance), + test.MatchType(exc.AcceleratorRequestOpFailed), + test.MatchType(dict), None), + ]) + call_list = mock_cleanup.call_args_list + for idx, instance in enumerate(instances): + actual_inst = call_list[idx][0][1] + self.assertEqual(actual_inst['uuid'], instance['uuid']) + self.assertEqual(actual_inst['flavor']['extra_specs'], {}) @mock.patch.object(scheduler_utils, 'build_request_spec') @mock.patch.object(scheduler_utils, 'setup_instance_group') @@ -972,7 +1058,8 @@ block_device_mapping=test.MatchType( objects.BlockDeviceMappingList), node='node1', limits=None, - host_list=expected_build_run_host_list) + host_list=expected_build_run_host_list, + accel_uuids=[]) mock_pop_inst_map.assert_not_called() mock_destroy_build_req.assert_not_called() @@ -1042,7 +1129,8 @@ objects.BlockDeviceMappingList), node='node1', limits=None, - host_list=expected_build_run_host_list) + host_list=expected_build_run_host_list, + accel_uuids=[]) mock_rp_mapping.assert_called_once_with( test.MatchType(objects.RequestSpec), @@ -1124,7 +1212,8 @@ objects.BlockDeviceMappingList), node='node2', limits=None, - host_list=expected_build_run_host_list) + host_list=expected_build_run_host_list, + accel_uuids=[]) # called only once when the claim succeeded mock_rp_mapping.assert_called_once_with( @@ -1505,6 +1594,39 @@ self.context, instance, 'fake_host', fake_spec, image=None, filter_properties={'limits': {}}, node='fake_node') + @mock.patch('nova.scheduler.utils.fill_provider_mapping') + @mock.patch('nova.network.neutron.API.get_requested_resource_for_instance') + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_schedule_instances', ) + def test_unshelve_instance_resource_request( + self, mock_schedule, 
mock_get_res_req, mock_fill_provider_mapping): + instance = self._create_fake_instance_obj() + instance.vm_state = vm_states.SHELVED_OFFLOADED + instance.save() + + request_spec = objects.RequestSpec() + + selection = objects.Selection( + service_host='fake_host', + nodename='fake_node', + limits=None) + mock_schedule.return_value = [[selection]] + + res_req = [objects.RequestGroup()] + mock_get_res_req.return_value = res_req + + self.conductor_manager.unshelve_instance( + self.context, instance, request_spec) + + self.assertEqual(res_req, request_spec.requested_resources) + + mock_get_res_req.assert_called_once_with(self.context, instance.uuid) + mock_schedule.assert_called_once_with( + self.context, request_spec, [instance.uuid], + return_alternates=False) + mock_fill_provider_mapping.assert_called_once_with( + request_spec, selection) + def test_rebuild_instance(self): inst_obj = self._create_fake_instance_obj() rebuild_args, compute_args = self._prepare_rebuild_args( @@ -1847,6 +1969,45 @@ self.params = params self.flavor = objects.Flavor.get_by_name(self.ctxt, 'm1.tiny') + @mock.patch('nova.accelerator.cyborg.get_client') + def test_create_bind_arqs_no_device_profile(self, mock_get_client): + # If no device profile name, it is a no op. + hostname = 'myhost' + instance = fake_instance.fake_instance_obj(self.context) + + instance.flavor.extra_specs = {} + self.conductor._create_and_bind_arqs(self.context, + instance.uuid, instance.flavor.extra_specs, + hostname, resource_provider_mapping=mock.ANY) + mock_get_client.assert_not_called() + + @mock.patch('nova.accelerator.cyborg._CyborgClient.bind_arqs') + @mock.patch('nova.accelerator.cyborg._CyborgClient.' + 'create_arqs_and_match_resource_providers') + def test_create_bind_arqs(self, mock_create, mock_bind): + # Happy path + hostname = 'myhost' + instance = fake_instance.fake_instance_obj(self.context) + dp_name = 'mydp' + instance.flavor.extra_specs = {'accel:device_profile': dp_name} + + in_arq_list, _ = fixtures.get_arqs(dp_name) + mock_create.return_value = in_arq_list + + self.conductor._create_and_bind_arqs(self.context, + instance.uuid, instance.flavor.extra_specs, + hostname, resource_provider_mapping=mock.ANY) + + mock_create.assert_called_once_with(dp_name, mock.ANY) + + expected_bindings = { + 'b59d34d3-787b-4fb0-a6b9-019cd81172f8': + {'hostname': hostname, + 'device_rp_uuid': mock.ANY, + 'instance_uuid': instance.uuid} + } + mock_bind.assert_called_once_with(bindings=expected_bindings) + @mock.patch('nova.availability_zones.get_host_availability_zone') @mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance') @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations') @@ -2018,8 +2179,8 @@ params = self.params # The cells are created in the base TestCase setup. 
- self.start_service('compute', host='host1', cell='cell1') - self.start_service('compute', host='host2', cell='cell2') + self.start_service('compute', host='host1', cell_name='cell1') + self.start_service('compute', host='host2', cell_name='cell2') get_hostmapping.side_effect = self.host_mappings.values() @@ -2366,6 +2527,44 @@ self.params['request_specs'][0].requested_resources = [] self._do_schedule_and_build_instances_test(self.params) + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_create_and_bind_arqs') + def test_schedule_and_build_instances_with_arqs_bind_ok( + self, mock_create_bind_arqs): + extra_specs = {'accel:device_profile': 'mydp'} + instance = self.params['build_requests'][0].instance + instance.flavor.extra_specs = extra_specs + + self._do_schedule_and_build_instances_test(self.params) + + # NOTE(Sundar): At this point, the instance has not been + # associated with a host yet. The default host.nodename is + # 'node1'. + mock_create_bind_arqs.assert_called_once_with( + self.params['context'], instance.uuid, extra_specs, + 'node1', mock.ANY) + + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_cleanup_build_artifacts') + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_create_and_bind_arqs') + def test_schedule_and_build_instances_with_arqs_bind_exception( + self, mock_create_bind_arqs, mock_cleanup): + # Exceptions in _create_and_bind_arqs result in cleanup + mock_create_bind_arqs.side_effect = ( + exc.AcceleratorRequestOpFailed(op='', msg='')) + + try: + self._do_schedule_and_build_instances_test(self.params) + except exc.AcceleratorRequestOpFailed: + pass + + mock_cleanup.assert_called_once_with( + self.params['context'], mock.ANY, mock.ANY, + self.params['build_requests'], self.params['request_specs'], + self.params['block_device_mapping'], self.params['tags'], + mock.ANY) + def test_map_instance_to_cell_already_mapped(self): """Tests a scenario where an instance is already mapped to a cell during scheduling. @@ -3193,7 +3392,7 @@ security_groups='security_groups', block_device_mapping=test.MatchType( objects.BlockDeviceMappingList), - node='node2', limits=None, host_list=[]) + node='node2', limits=None, host_list=[], accel_uuids=[]) @mock.patch.object(scheduler_utils, 'setup_instance_group') @mock.patch.object(scheduler_utils, 'build_request_spec') @@ -3253,7 +3452,7 @@ requested_networks=None, security_groups='security_groups', block_device_mapping=mock.ANY, - node='node2', limits=None, host_list=[]) + node='node2', limits=None, host_list=[], accel_uuids=[]) @mock.patch('nova.compute.utils.notify_about_compute_task_error') @mock.patch('nova.objects.Instance.save') @@ -3371,6 +3570,19 @@ # handled the error. mock_save.assert_not_called() + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_cleanup_allocated_networks') + @mock.patch.object(conductor_manager.ComputeTaskManager, + '_set_vm_state_and_notify') + @mock.patch.object(compute_utils, 'delete_arqs_if_needed') + def test_cleanup_arqs_on_reschedule(self, mock_del_arqs, + mock_set_vm, mock_clean_net): + instance = fake_instance.fake_instance_obj(self.context) + self.conductor_manager._cleanup_when_reschedule_fails( + self.context, instance, exception=None, + legacy_request_spec=None, requested_networks=None) + mock_del_arqs.assert_called_once_with(self.context, instance) + def test_cleanup_allocated_networks_none_requested(self): # Tests that we don't deallocate networks if 'none' were specifically # requested. 
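The conductor hunks above introduce _create_and_bind_arqs and its failure handling. A sketch reconstructed purely from what test_create_bind_arqs, test_create_bind_arqs_no_device_profile and the ARQ-failure tests assert; the method names follow the mocked _CyborgClient interface and nova.accelerator.cyborg.get_client, so treat this as illustrative rather than the verbatim implementation.

    # Reconstructed from the conductor test assertions: a no-op without a
    # device profile (the tests check get_client is never called), otherwise
    # create ARQs matched to resource providers and bind them to the chosen
    # host/instance.
    def _create_and_bind_arqs(self, context, instance_uuid, extra_specs,
                              hostname, resource_provider_mapping):
        dp_name = extra_specs.get('accel:device_profile')
        if dp_name is None:
            return
        cyclient = cyborg.get_client(context)
        arqs = cyclient.create_arqs_and_match_resource_providers(
            dp_name, resource_provider_mapping)
        bindings = {
            arq['uuid']: {'hostname': hostname,
                          'device_rp_uuid': arq['device_rp_uuid'],
                          'instance_uuid': instance_uuid}
            for arq in arqs}
        # May raise AcceleratorRequestOpFailed; callers route that into
        # their cleanup paths (_cleanup_when_reschedule_fails /
        # _cleanup_build_artifacts), as the tests above verify.
        cyclient.bind_arqs(bindings=bindings)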
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conf/test_devices.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conf/test_devices.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conf/test_devices.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conf/test_devices.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import nova.conf +from nova import test + + +CONF = nova.conf.CONF + + +class DevicesConfTestCase(test.NoDBTestCase): + + def test_register_dynamic_opts(self): + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + + self.assertNotIn('vgpu_nvidia-11', CONF) + self.assertNotIn('vgpu_nvidia-12', CONF) + + nova.conf.devices.register_dynamic_opts(CONF) + + self.assertIn('vgpu_nvidia-11', CONF) + self.assertIn('vgpu_nvidia-12', CONF) + self.assertEqual([], getattr(CONF, 'vgpu_nvidia-11').device_addresses) + self.assertEqual([], getattr(CONF, 'vgpu_nvidia-12').device_addresses) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conf_fixture.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conf_fixture.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/conf_fixture.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/conf_fixture.py 2020-04-10 17:57:58.000000000 +0000 @@ -17,6 +17,7 @@ from oslo_config import fixture as config_fixture from oslo_policy import opts as policy_opts +from nova.conf import devices from nova.conf import neutron from nova.conf import paths from nova import config @@ -64,3 +65,4 @@ init_rpc=False) policy_opts.set_defaults(self.conf) neutron.register_dynamic_opts(self.conf) + devices.register_dynamic_opts(self.conf) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/console/test_websocketproxy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/console/test_websocketproxy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/console/test_websocketproxy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/console/test_websocketproxy.py 2020-04-10 17:57:58.000000000 +0000 @@ -125,10 +125,10 @@ self.test_new_websocket_client_db(instance_not_found=True) -class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase): +class NovaProxyRequestHandlerTestCase(test.NoDBTestCase): def setUp(self): - super(NovaProxyRequestHandlerBaseTestCase, self).setUp() + super(NovaProxyRequestHandlerTestCase, self).setUp() self.flags(allowed_origins=['allowed-origin-example-1.net', 'allowed-origin-example-2.net'], @@ -195,7 +195,7 @@ 'Host': 'example.net:6080', } - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' 
'_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client(self, validate, check_port): @@ -223,7 +223,7 @@ connection_info = self.wh.msg.mock_calls[0][1][1] self.assertEqual('***', connection_info.token) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_ipv6_url(self, validate, check_port): @@ -259,7 +259,7 @@ self.wh.new_websocket_client) validate.assert_called_with(mock.ANY, "XXX") - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_internal_access_path(self, validate, @@ -290,7 +290,7 @@ tsock.send.assert_called_with(test.MatchType(bytes)) self.wh.do_proxy.assert_called_with(tsock) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_internal_access_path_err(self, validate, @@ -320,7 +320,7 @@ self.wh.new_websocket_client) validate.assert_called_with(mock.ANY, "123-456-789") - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_internal_access_path_rfb(self, validate, @@ -357,7 +357,7 @@ self.wh.do_proxy.assert_called_with(tsock) @mock.patch.object(websocketproxy, 'sys') - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_py273_good_scheme( @@ -401,7 +401,7 @@ self.assertFalse(getfqdn.called) # no reverse dns look up self.assertEqual(handler.address_string(), '8.8.8.8') # plain address - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_bad_origin_header(self, validate, @@ -422,7 +422,7 @@ self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_allowed_origin_header(self, validate, @@ -448,7 +448,7 @@ self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_blank_origin_header(self, validate, @@ -469,7 +469,7 @@ self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' 
+ @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_no_origin_header(self, validate, @@ -494,7 +494,7 @@ self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_https_origin_proto_http( @@ -516,7 +516,7 @@ self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_https_origin_proto_ws(self, validate, @@ -538,7 +538,7 @@ self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_http_forwarded_proto_https(self, validate, @@ -570,7 +570,7 @@ self.wh.socket.assert_called_with('node1', 10000, connect=True) self.wh.do_proxy.assert_called_with('') - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' '_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_new_websocket_client_novnc_bad_console_type(self, validate, @@ -591,7 +591,7 @@ self.assertRaises(exception.ValidationError, self.wh.new_websocket_client) - @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandlerBase.' + @mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.' 
'_check_console_port') @mock.patch('nova.objects.ConsoleAuthToken.validate') def test_malformed_cookie(self, validate, check_port): @@ -626,6 +626,22 @@ self.wh.server.top_new_client(conn, address) self.assertIsNone(self.wh._compute_rpcapi) + @mock.patch('websockify.websocketproxy.select_ssl_version') + def test_ssl_min_version_is_not_set(self, mock_select_ssl): + websocketproxy.NovaWebSocketProxy() + self.assertFalse(mock_select_ssl.called) + + @mock.patch('websockify.websocketproxy.select_ssl_version') + def test_ssl_min_version_not_set_by_default(self, mock_select_ssl): + websocketproxy.NovaWebSocketProxy(ssl_minimum_version='default') + self.assertFalse(mock_select_ssl.called) + + @mock.patch('websockify.websocketproxy.select_ssl_version') + def test_non_default_ssl_min_version_is_set(self, mock_select_ssl): + minver = 'tlsv1_3' + websocketproxy.NovaWebSocketProxy(ssl_minimum_version=minver) + mock_select_ssl.assert_called_once_with(minver) + class NovaWebsocketSecurityProxyTestCase(test.NoDBTestCase): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_db_api.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_db_api.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_db_api.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_db_api.py 2020-04-10 17:57:58.000000000 +0000 @@ -62,7 +62,6 @@ from nova.db.sqlalchemy import types as col_types from nova.db.sqlalchemy import utils as db_utils from nova import exception -from nova import objects from nova.objects import fields from nova import test from nova.tests import fixtures as nova_fixtures @@ -1590,384 +1589,6 @@ {'key': 'value'}, True) -class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(SecurityGroupRuleTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'name': 'fake_sec_group', - 'description': 'fake_sec_group_descr', - 'user_id': 'fake', - 'project_id': 'fake', - 'instances': [] - } - - def _get_base_rule_values(self): - return { - 'protocol': "tcp", - 'from_port': 80, - 'to_port': 8080, - 'cidr': None, - 'deleted': 0, - 'deleted_at': None, - 'grantee_group': None, - 'updated_at': None - } - - def _create_security_group(self, values): - v = self._get_base_values() - v.update(values) - return db.security_group_create(self.ctxt, v) - - def _create_security_group_rule(self, values): - v = self._get_base_rule_values() - v.update(values) - return db.security_group_rule_create(self.ctxt, v) - - def test_security_group_rule_create(self): - security_group_rule = self._create_security_group_rule({}) - self.assertIsNotNone(security_group_rule['id']) - for key, value in self._get_base_rule_values().items(): - self.assertEqual(value, security_group_rule[key]) - - def _test_security_group_rule_get_by_security_group(self, columns=None): - instance = db.instance_create(self.ctxt, - {'system_metadata': {'foo': 'bar'}}) - security_group = self._create_security_group({ - 'instances': [instance]}) - security_group_rule = self._create_security_group_rule( - {'parent_group': security_group, 'grantee_group': security_group}) - security_group_rule1 = self._create_security_group_rule( - {'parent_group': security_group, 'grantee_group': security_group}) - found_rules = db.security_group_rule_get_by_security_group( - self.ctxt, security_group['id'], columns_to_join=columns) - self.assertEqual(len(found_rules), 2) - rules_ids = 
[security_group_rule['id'], security_group_rule1['id']] - for rule in found_rules: - if columns is None: - self.assertIn('grantee_group', dict(rule)) - self.assertIn('instances', - dict(rule.grantee_group)) - self.assertIn( - 'system_metadata', - dict(rule.grantee_group.instances[0])) - self.assertIn(rule['id'], rules_ids) - else: - self.assertNotIn('grantee_group', dict(rule)) - - def test_security_group_rule_get_by_security_group(self): - self._test_security_group_rule_get_by_security_group() - - def test_security_group_rule_get_by_security_group_no_joins(self): - self._test_security_group_rule_get_by_security_group(columns=[]) - - def test_security_group_rule_get_by_instance(self): - instance = db.instance_create(self.ctxt, {}) - security_group = self._create_security_group({ - 'instances': [instance]}) - security_group_rule = self._create_security_group_rule( - {'parent_group': security_group, 'grantee_group': security_group}) - security_group_rule1 = self._create_security_group_rule( - {'parent_group': security_group, 'grantee_group': security_group}) - security_group_rule_ids = [security_group_rule['id'], - security_group_rule1['id']] - found_rules = db.security_group_rule_get_by_instance(self.ctxt, - instance['uuid']) - self.assertEqual(len(found_rules), 2) - for rule in found_rules: - self.assertIn('grantee_group', rule) - self.assertIn(rule['id'], security_group_rule_ids) - - def test_security_group_rule_destroy(self): - self._create_security_group({'name': 'fake1'}) - self._create_security_group({'name': 'fake2'}) - security_group_rule1 = self._create_security_group_rule({}) - security_group_rule2 = self._create_security_group_rule({}) - db.security_group_rule_destroy(self.ctxt, security_group_rule1['id']) - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_rule_get, - self.ctxt, security_group_rule1['id']) - self._assertEqualObjects(db.security_group_rule_get(self.ctxt, - security_group_rule2['id']), - security_group_rule2, ['grantee_group']) - - def test_security_group_rule_destroy_not_found_exception(self): - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_rule_destroy, self.ctxt, 100500) - - def test_security_group_rule_get(self): - security_group_rule1 = ( - self._create_security_group_rule({})) - self._create_security_group_rule({}) - real_security_group_rule = db.security_group_rule_get(self.ctxt, - security_group_rule1['id']) - self._assertEqualObjects(security_group_rule1, - real_security_group_rule, ['grantee_group']) - - def test_security_group_rule_get_not_found_exception(self): - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_rule_get, self.ctxt, 100500) - - def test_security_group_rule_count_by_group(self): - sg1 = self._create_security_group({'name': 'fake1'}) - sg2 = self._create_security_group({'name': 'fake2'}) - rules_by_group = {sg1: [], sg2: []} - for group in rules_by_group: - rules = rules_by_group[group] - for i in range(0, 10): - rules.append( - self._create_security_group_rule({'parent_group_id': - group['id']})) - db.security_group_rule_destroy(self.ctxt, - rules_by_group[sg1][0]['id']) - counted_groups = [db.security_group_rule_count_by_group(self.ctxt, - group['id']) - for group in [sg1, sg2]] - expected = [9, 10] - self.assertEqual(counted_groups, expected) - - -class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(SecurityGroupTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 
'name': 'fake_sec_group', - 'description': 'fake_sec_group_descr', - 'user_id': 'fake', - 'project_id': 'fake', - 'instances': [] - } - - def _create_security_group(self, values): - v = self._get_base_values() - v.update(values) - return db.security_group_create(self.ctxt, v) - - def test_security_group_create(self): - security_group = self._create_security_group({}) - self.assertIsNotNone(security_group['id']) - for key, value in self._get_base_values().items(): - self.assertEqual(value, security_group[key]) - - def test_security_group_destroy(self): - security_group1 = self._create_security_group({}) - security_group2 = \ - self._create_security_group({'name': 'fake_sec_group2'}) - - db.security_group_destroy(self.ctxt, security_group1['id']) - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_get, - self.ctxt, security_group1['id']) - self._assertEqualObjects(db.security_group_get( - self.ctxt, security_group2['id'], - columns_to_join=['instances', - 'rules']), security_group2) - - def test_security_group_destroy_with_instance(self): - security_group1 = self._create_security_group({}) - - instance = db.instance_create(self.ctxt, {}) - db.instance_add_security_group(self.ctxt, instance.uuid, - security_group1.id) - - self.assertEqual( - 1, - len(db.security_group_get_by_instance(self.ctxt, instance.uuid))) - - db.security_group_destroy(self.ctxt, security_group1['id']) - - self.assertEqual( - 0, - len(db.security_group_get_by_instance(self.ctxt, instance.uuid))) - - def test_security_group_get(self): - security_group1 = self._create_security_group({}) - self._create_security_group({'name': 'fake_sec_group2'}) - real_security_group = db.security_group_get(self.ctxt, - security_group1['id'], - columns_to_join=['instances', - 'rules']) - self._assertEqualObjects(security_group1, - real_security_group) - - def test_security_group_get_with_instance_columns(self): - instance = db.instance_create(self.ctxt, - {'system_metadata': {'foo': 'bar'}}) - secgroup = self._create_security_group({'instances': [instance]}) - secgroup = db.security_group_get( - self.ctxt, secgroup['id'], - columns_to_join=['instances.system_metadata']) - inst = secgroup.instances[0] - self.assertIn('system_metadata', dict(inst).keys()) - - def test_security_group_get_no_instances(self): - instance = db.instance_create(self.ctxt, {}) - sid = self._create_security_group({'instances': [instance]})['id'] - - security_group = db.security_group_get(self.ctxt, sid, - columns_to_join=['instances']) - self.assertIn('instances', security_group.__dict__) - - security_group = db.security_group_get(self.ctxt, sid) - self.assertNotIn('instances', security_group.__dict__) - - def test_security_group_get_not_found_exception(self): - self.assertRaises(exception.SecurityGroupNotFound, - db.security_group_get, self.ctxt, 100500) - - def test_security_group_get_by_name(self): - security_group1 = self._create_security_group({'name': 'fake1'}) - security_group2 = self._create_security_group({'name': 'fake2'}) - - real_security_group1 = db.security_group_get_by_name( - self.ctxt, - security_group1['project_id'], - security_group1['name'], - columns_to_join=None) - real_security_group2 = db.security_group_get_by_name( - self.ctxt, - security_group2['project_id'], - security_group2['name'], - columns_to_join=None) - self._assertEqualObjects(security_group1, real_security_group1) - self._assertEqualObjects(security_group2, real_security_group2) - - def test_security_group_get_by_project(self): - security_group1 = 
self._create_security_group( - {'name': 'fake1', 'project_id': 'fake_proj1'}) - security_group2 = self._create_security_group( - {'name': 'fake2', 'project_id': 'fake_proj2'}) - - real1 = db.security_group_get_by_project( - self.ctxt, - security_group1['project_id']) - real2 = db.security_group_get_by_project( - self.ctxt, - security_group2['project_id']) - - expected1, expected2 = [security_group1], [security_group2] - self._assertEqualListsOfObjects(expected1, real1, - ignored_keys=['instances']) - self._assertEqualListsOfObjects(expected2, real2, - ignored_keys=['instances']) - - def test_security_group_get_by_instance(self): - instance = db.instance_create(self.ctxt, dict(host='foo')) - values = [ - {'name': 'fake1', 'instances': [instance]}, - {'name': 'fake2', 'instances': [instance]}, - {'name': 'fake3', 'instances': []}, - ] - security_groups = [self._create_security_group(vals) - for vals in values] - - real = db.security_group_get_by_instance(self.ctxt, - instance['uuid']) - expected = security_groups[:2] - self._assertEqualListsOfObjects(expected, real, - ignored_keys=['instances']) - - def test_security_group_get_all(self): - values = [ - {'name': 'fake1', 'project_id': 'fake_proj1'}, - {'name': 'fake2', 'project_id': 'fake_proj2'}, - ] - security_groups = [self._create_security_group(vals) - for vals in values] - - real = db.security_group_get_all(self.ctxt) - - self._assertEqualListsOfObjects(security_groups, real, - ignored_keys=['instances']) - - def test_security_group_in_use(self): - instance = db.instance_create(self.ctxt, dict(host='foo')) - values = [ - {'instances': [instance], - 'name': 'fake_in_use'}, - {'instances': []}, - ] - - security_groups = [self._create_security_group(vals) - for vals in values] - - real = [] - for security_group in security_groups: - in_use = db.security_group_in_use(self.ctxt, - security_group['id']) - real.append(in_use) - expected = [True, False] - - self.assertEqual(expected, real) - - def test_security_group_ensure_default(self): - self.ctxt.project_id = 'fake' - self.ctxt.user_id = 'fake' - self.assertEqual(0, len(db.security_group_get_by_project( - self.ctxt, - self.ctxt.project_id))) - - db.security_group_ensure_default(self.ctxt) - - security_groups = db.security_group_get_by_project( - self.ctxt, - self.ctxt.project_id) - - self.assertEqual(1, len(security_groups)) - self.assertEqual("default", security_groups[0]["name"]) - - @mock.patch.object(sqlalchemy_api, '_security_group_get_by_names') - def test_security_group_ensure_default_called_concurrently(self, sg_mock): - # make sure NotFound is always raised here to trick Nova to insert the - # duplicate security group entry - sg_mock.side_effect = exception.NotFound - - # create the first db entry - self.ctxt.project_id = 1 - db.security_group_ensure_default(self.ctxt) - security_groups = db.security_group_get_by_project( - self.ctxt, - self.ctxt.project_id) - self.assertEqual(1, len(security_groups)) - - # create the second one and ensure the exception is handled properly - default_group = db.security_group_ensure_default(self.ctxt) - self.assertEqual('default', default_group.name) - - def test_security_group_update(self): - security_group = self._create_security_group({}) - new_values = { - 'name': 'sec_group1', - 'description': 'sec_group_descr1', - 'user_id': 'fake_user1', - 'project_id': 'fake_proj1', - } - - updated_group = db.security_group_update(self.ctxt, - security_group['id'], - new_values, - columns_to_join=['rules.grantee_group']) - for key, value in 
new_values.items(): - self.assertEqual(updated_group[key], value) - self.assertEqual(updated_group['rules'], []) - - def test_security_group_update_to_duplicate(self): - self._create_security_group( - {'name': 'fake1', 'project_id': 'fake_proj1'}) - security_group2 = self._create_security_group( - {'name': 'fake1', 'project_id': 'fake_proj2'}) - - self.assertRaises(exception.SecurityGroupExists, - db.security_group_update, - self.ctxt, security_group2['id'], - {'project_id': 'fake_proj1'}) - - @mock.patch('time.sleep', new=lambda x: None) class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin): @@ -2984,38 +2605,6 @@ # Ensure that metadata is updated during instance_update self._test_instance_update_updates_metadata('metadata') - def test_instance_floating_address_get_all(self): - ctxt = context.get_admin_context() - - instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'}) - instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'}) - - fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3'] - instance_uuids = [instance1['uuid'], instance1['uuid'], - instance2['uuid']] - - for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses, - float_addresses, - instance_uuids): - db.fixed_ip_create(ctxt, {'address': fixed_addr, - 'instance_uuid': instance_uuid}) - fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id'] - db.floating_ip_create(ctxt, - {'address': float_addr, - 'fixed_ip_id': fixed_id}) - - real_float_addresses = \ - db.instance_floating_address_get_all(ctxt, instance_uuids[0]) - self.assertEqual(set(float_addresses[:2]), set(real_float_addresses)) - real_float_addresses = \ - db.instance_floating_address_get_all(ctxt, instance_uuids[2]) - self.assertEqual(set([float_addresses[2]]), set(real_float_addresses)) - - self.assertRaises(exception.InvalidUUID, - db.instance_floating_address_get_all, - ctxt, 'invalid_uuid') - def test_instance_stringified_ips(self): instance = self.create_instance_with_args() instance = db.instance_update( @@ -3554,7 +3143,8 @@ _make_compute_node('host2', 'node3', 'ironic', services[1]['id']), _make_compute_node('host3', 'host3', 'kvm', services[2]['id']), ] - [db.compute_node_create(self.ctxt, cn) for cn in compute_nodes] + for cn in compute_nodes: + db.compute_node_create(self.ctxt, cn) expected = services[:1] real = db.service_get_all_computes_by_hv_type(self.ctxt, @@ -3576,7 +3166,8 @@ _make_compute_node('host2', 'node3', 'ironic', services[1]['id']), _make_compute_node('host3', 'host3', 'kvm', services[2]['id']), ] - [db.compute_node_create(self.ctxt, cn) for cn in compute_nodes] + for cn in compute_nodes: + db.compute_node_create(self.ctxt, cn) expected = services[:2] real = db.service_get_all_computes_by_hv_type(self.ctxt, @@ -4318,1196 +3909,6 @@ self.assertFalse(mock_filter.called) -@mock.patch('time.sleep', new=lambda x: None) -class FixedIPTestCase(test.TestCase, ModelsObjectComparatorMixin): - def setUp(self): - super(FixedIPTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _timeout_test(self, ctxt, timeout, multi_host): - instance = db.instance_create(ctxt, dict(host='foo')) - net = db.network_create_safe(ctxt, dict(multi_host=multi_host, - host='bar')) - old = timeout - datetime.timedelta(seconds=5) - new = timeout + datetime.timedelta(seconds=5) - # should deallocate - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=net['id'], - updated_at=old)) - # still allocated - 
db.fixed_ip_create(ctxt, dict(allocated=True, - instance_uuid=instance['uuid'], - network_id=net['id'], - updated_at=old)) - # wrong network - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=None, - updated_at=old)) - # too new - db.fixed_ip_create(ctxt, dict(allocated=False, - instance_uuid=instance['uuid'], - network_id=None, - updated_at=new)) - - def test_fixed_ip_disassociate_all_by_timeout_single_host(self): - now = timeutils.utcnow() - self._timeout_test(self.ctxt, now, False) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now) - self.assertEqual(result, 0) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now) - self.assertEqual(result, 1) - - def test_fixed_ip_disassociate_all_by_timeout_multi_host(self): - now = timeutils.utcnow() - self._timeout_test(self.ctxt, now, True) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now) - self.assertEqual(result, 1) - result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now) - self.assertEqual(result, 0) - - def test_fixed_ip_get_by_floating_address(self): - fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'}) - values = {'address': '8.7.6.5', - 'fixed_ip_id': fixed_ip['id']} - floating = db.floating_ip_create(self.ctxt, values)['address'] - fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating) - self._assertEqualObjects(fixed_ip, fixed_ip_ref) - - def test_fixed_ip_get_by_host(self): - host_ips = { - 'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'], - 'host2': ['1.1.1.4', '1.1.1.5'], - 'host3': ['1.1.1.6'] - } - - for host, ips in host_ips.items(): - for ip in ips: - instance_uuid = self._create_instance(host=host) - db.fixed_ip_create(self.ctxt, {'address': ip}) - db.fixed_ip_associate(self.ctxt, ip, instance_uuid) - - for host, ips in host_ips.items(): - ips_on_host = [x['address'] - for x in db.fixed_ip_get_by_host(self.ctxt, host)] - self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips) - - def test_fixed_ip_get_by_network_host_not_found_exception(self): - self.assertRaises( - exception.FixedIpNotFoundForNetworkHost, - db.fixed_ip_get_by_network_host, - self.ctxt, 1, 'ignore') - - def test_fixed_ip_get_by_network_host_fixed_ip_found(self): - db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host')) - - fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host') - - self.assertEqual(1, fip['network_id']) - self.assertEqual('host', fip['host']) - - def _create_instance(self, **kwargs): - instance = db.instance_create(self.ctxt, kwargs) - return instance['uuid'] - - def test_fixed_ip_get_by_instance_fixed_ip_found(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS)) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS], - [ips_list[0].address]) - - def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS_1 = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = '192.168.1.6' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2)) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - 
[ips_list[0].address, ips_list[1].address]) - - def test_fixed_ip_get_by_instance_inappropriate_ignored(self): - instance_uuid = self._create_instance() - - FIXED_IP_ADDRESS_1 = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = '192.168.1.6' - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2)) - - another_instance = db.instance_create(self.ctxt, {}) - db.fixed_ip_create(self.ctxt, dict( - instance_uuid=another_instance['uuid'], address="192.168.1.7")) - - ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ip_get_by_instance_not_found_exception(self): - instance_uuid = self._create_instance() - - self.assertRaises(exception.FixedIpNotFoundForInstance, - db.fixed_ip_get_by_instance, - self.ctxt, instance_uuid) - - def test_fixed_ips_by_virtual_interface_fixed_ip_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS], - [ips_list[0].address]) - - def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS_1 = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = '192.168.1.6' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - FIXED_IP_ADDRESS_1 = '192.168.1.5' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1)) - FIXED_IP_ADDRESS_2 = '192.168.1.6' - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2)) - - another_vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - db.fixed_ip_create(self.ctxt, dict( - virtual_interface_id=another_vif.id, address="192.168.1.7")) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self._assertEqualListsOfPrimitivesAsSets( - [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2], - [ips_list[0].address, ips_list[1].address]) - - def test_fixed_ips_by_virtual_interface_no_ip_found(self): - instance_uuid = self._create_instance() - - vif = db.virtual_interface_create( - self.ctxt, dict(instance_uuid=instance_uuid)) - - ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id) - self.assertEqual(0, len(ips_list)) - - def create_fixed_ip(self, **params): - default_params = {'address': '192.168.0.1'} - default_params.update(params) - return db.fixed_ip_create(self.ctxt, default_params)['address'] - - def 
test_fixed_ip_associate_fails_if_ip_not_in_network(self): - instance_uuid = self._create_instance() - self.assertRaises(exception.FixedIpNotFoundForNetwork, - db.fixed_ip_associate, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_fails_if_ip_in_use(self): - instance_uuid = self._create_instance() - - address = self.create_fixed_ip(instance_uuid=instance_uuid) - self.assertRaises(exception.FixedIpAlreadyInUse, - db.fixed_ip_associate, - self.ctxt, address, instance_uuid) - - def test_fixed_ip_associate_succeeds(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip(network_id=network['id']) - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - - def test_fixed_ip_associate_succeeds_and_sets_network(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip() - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - self.assertEqual(fixed_ip['network_id'], network['id']) - - def test_fixed_ip_associate_succeeds_retry_on_deadlock(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip() - - def fake_first(): - if mock_first.call_count == 1: - raise db_exc.DBDeadlock() - else: - return objects.Instance(id=1, address=address, reserved=False, - instance_uuid=None, network_id=None) - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - self.assertEqual(2, mock_first.call_count) - - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - self.assertEqual(fixed_ip['network_id'], network['id']) - - def test_fixed_ip_associate_succeeds_retry_on_no_rows_updated(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip() - - def fake_first(): - if mock_first.call_count == 1: - return objects.Instance(id=2, address=address, reserved=False, - instance_uuid=None, network_id=None) - else: - return objects.Instance(id=1, address=address, reserved=False, - instance_uuid=None, network_id=None) - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id']) - self.assertEqual(2, mock_first.call_count) - - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - self.assertEqual(fixed_ip['network_id'], network['id']) - - def test_fixed_ip_associate_succeeds_retry_limit_exceeded(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip() - - def fake_first(): - return objects.Instance(id=2, address=address, reserved=False, - instance_uuid=None, network_id=None) - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - self.assertRaises(exception.FixedIpAssociateFailed, - db.fixed_ip_associate, 
self.ctxt, address, - instance_uuid, network_id=network['id']) - # 5 reties + initial attempt - self.assertEqual(6, mock_first.call_count) - - def test_fixed_ip_associate_ip_not_in_network_with_no_retries(self): - instance_uuid = self._create_instance() - - with mock.patch('sqlalchemy.orm.query.Query.first', - return_value=None) as mock_first: - self.assertRaises(exception.FixedIpNotFoundForNetwork, - db.fixed_ip_associate, - self.ctxt, None, instance_uuid) - self.assertEqual(1, mock_first.call_count) - - def test_fixed_ip_associate_no_network_id_with_no_retries(self): - # Tests that trying to associate an instance to a fixed IP on a network - # but without specifying the network ID during associate will fail. - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - address = self.create_fixed_ip(network_id=network['id']) - - with mock.patch('sqlalchemy.orm.query.Query.first', - return_value=None) as mock_first: - self.assertRaises(exception.FixedIpNotFoundForNetwork, - db.fixed_ip_associate, - self.ctxt, address, instance_uuid) - self.assertEqual(1, mock_first.call_count) - - def test_fixed_ip_associate_with_vif(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - vif = db.virtual_interface_create(self.ctxt, {}) - address = self.create_fixed_ip() - - fixed_ip = db.fixed_ip_associate(self.ctxt, address, instance_uuid, - network_id=network['id'], - virtual_interface_id=vif['id']) - - self.assertTrue(fixed_ip['allocated']) - self.assertEqual(vif['id'], fixed_ip['virtual_interface_id']) - - def test_fixed_ip_associate_not_allocated_without_vif(self): - instance_uuid = self._create_instance() - address = self.create_fixed_ip() - - fixed_ip = db.fixed_ip_associate(self.ctxt, address, instance_uuid) - - self.assertFalse(fixed_ip['allocated']) - self.assertIsNone(fixed_ip['virtual_interface_id']) - - def test_fixed_ip_associate_pool_invalid_uuid(self): - instance_uuid = '123' - self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_pool_no_more_fixed_ips(self): - instance_uuid = self._create_instance() - self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_pool_ignores_leased_addresses(self): - instance_uuid = self._create_instance() - params = {'address': '192.168.1.5', - 'leased': True} - db.fixed_ip_create(self.ctxt, params) - self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool, - self.ctxt, None, instance_uuid) - - def test_fixed_ip_associate_pool_succeeds(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip(network_id=network['id']) - db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - - def test_fixed_ip_associate_pool_order(self): - """Test that fixed_ip always uses oldest fixed_ip. - - We should always be using the fixed ip with the oldest - updated_at. 
- """ - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - self.addCleanup(timeutils.clear_time_override) - start = timeutils.utcnow() - for i in range(1, 4): - now = start - datetime.timedelta(hours=i) - timeutils.set_time_override(now) - address = self.create_fixed_ip( - updated_at=now, - address='10.1.0.%d' % i, - network_id=network['id']) - db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(fixed_ip['instance_uuid'], instance_uuid) - - def test_fixed_ip_associate_pool_succeeds_fip_ref_network_id_is_none(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - self.create_fixed_ip(network_id=None) - fixed_ip = db.fixed_ip_associate_pool(self.ctxt, - network['id'], instance_uuid) - self.assertEqual(instance_uuid, fixed_ip['instance_uuid']) - self.assertEqual(network['id'], fixed_ip['network_id']) - - def test_fixed_ip_associate_pool_succeeds_retry(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - address = self.create_fixed_ip(network_id=network['id']) - - def fake_first(): - if mock_first.call_count == 1: - return {'network_id': network['id'], 'address': 'invalid', - 'instance_uuid': None, 'host': None, 'id': 1} - else: - return {'network_id': network['id'], 'address': address, - 'instance_uuid': None, 'host': None, 'id': 1} - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid) - self.assertEqual(2, mock_first.call_count) - - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address) - self.assertEqual(instance_uuid, fixed_ip['instance_uuid']) - - def test_fixed_ip_associate_pool_retry_limit_exceeded(self): - instance_uuid = self._create_instance() - network = db.network_create_safe(self.ctxt, {}) - - self.create_fixed_ip(network_id=network['id']) - - def fake_first(): - return {'network_id': network['id'], 'address': 'invalid', - 'instance_uuid': None, 'host': None, 'id': 1} - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - self.assertRaises(exception.FixedIpAssociateFailed, - db.fixed_ip_associate_pool, self.ctxt, - network['id'], instance_uuid) - # 5 retries + initial attempt - self.assertEqual(6, mock_first.call_count) - - def test_fixed_ip_create_same_address(self): - address = '192.168.1.5' - params = {'address': address} - db.fixed_ip_create(self.ctxt, params) - self.assertRaises(exception.FixedIpExists, db.fixed_ip_create, - self.ctxt, params) - - def test_fixed_ip_create_success(self): - instance_uuid = self._create_instance() - network_id = db.network_create_safe(self.ctxt, {})['id'] - param = { - 'reserved': False, - 'deleted': 0, - 'leased': False, - 'host': '127.0.0.1', - 'address': '192.168.1.5', - 'allocated': False, - 'instance_uuid': instance_uuid, - 'network_id': network_id, - 'virtual_interface_id': None - } - - ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at'] - fixed_ip_data = db.fixed_ip_create(self.ctxt, param) - self._assertEqualObjects(param, fixed_ip_data, ignored_keys) - - def test_fixed_ip_bulk_create_same_address(self): - address_1 = '192.168.1.5' - address_2 = '192.168.1.6' - instance_uuid = self._create_instance() - network_id_1 = db.network_create_safe(self.ctxt, {})['id'] - network_id_2 = db.network_create_safe(self.ctxt, {})['id'] - params = [ - 
{'reserved': False, 'deleted': 0, 'leased': False, - 'host': '127.0.0.1', 'address': address_2, 'allocated': False, - 'instance_uuid': instance_uuid, 'network_id': network_id_1, - 'virtual_interface_id': None}, - {'reserved': False, 'deleted': 0, 'leased': False, - 'host': '127.0.0.1', 'address': address_1, 'allocated': False, - 'instance_uuid': instance_uuid, 'network_id': network_id_1, - 'virtual_interface_id': None}, - {'reserved': False, 'deleted': 0, 'leased': False, - 'host': 'localhost', 'address': address_2, 'allocated': True, - 'instance_uuid': instance_uuid, 'network_id': network_id_2, - 'virtual_interface_id': None}, - ] - - self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create, - self.ctxt, params) - # In this case the transaction will be rolled back and none of the ips - # will make it to the database. - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.fixed_ip_get_by_address, self.ctxt, address_1) - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.fixed_ip_get_by_address, self.ctxt, address_2) - - def test_fixed_ip_bulk_create_success(self): - address_1 = '192.168.1.5' - address_2 = '192.168.1.6' - - instance_uuid = self._create_instance() - network_id_1 = db.network_create_safe(self.ctxt, {})['id'] - network_id_2 = db.network_create_safe(self.ctxt, {})['id'] - params = [ - {'reserved': False, 'deleted': 0, 'leased': False, - 'host': '127.0.0.1', 'address': address_1, 'allocated': False, - 'instance_uuid': instance_uuid, 'network_id': network_id_1, - 'virtual_interface_id': None}, - {'reserved': False, 'deleted': 0, 'leased': False, - 'host': 'localhost', 'address': address_2, 'allocated': True, - 'instance_uuid': instance_uuid, 'network_id': network_id_2, - 'virtual_interface_id': None} - ] - - db.fixed_ip_bulk_create(self.ctxt, params) - ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at', - 'virtual_interface', 'network', 'floating_ips'] - fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid) - - # we have no `id` in incoming data so we can not use - # _assertEqualListsOfObjects to compare incoming data and received - # objects - fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id']) - params = sorted(params, key=lambda i: i['network_id']) - for param, ip in zip(params, fixed_ip_data): - self._assertEqualObjects(param, ip, ignored_keys) - - def test_fixed_ip_disassociate(self): - address = '192.168.1.5' - instance_uuid = self._create_instance() - network_id = db.network_create_safe(self.ctxt, {})['id'] - values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid} - vif = db.virtual_interface_create(self.ctxt, values) - param = { - 'reserved': False, - 'deleted': 0, - 'leased': False, - 'host': '127.0.0.1', - 'address': address, - 'allocated': False, - 'instance_uuid': instance_uuid, - 'network_id': network_id, - 'virtual_interface_id': vif['id'] - } - db.fixed_ip_create(self.ctxt, param) - - db.fixed_ip_disassociate(self.ctxt, address) - fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address) - ignored_keys = ['created_at', 'id', 'deleted_at', - 'updated_at', 'instance_uuid', - 'virtual_interface_id'] - self._assertEqualObjects(param, fixed_ip_data, ignored_keys) - self.assertIsNone(fixed_ip_data['instance_uuid']) - self.assertIsNone(fixed_ip_data['virtual_interface_id']) - - def test_fixed_ip_get_not_found_exception(self): - self.assertRaises(exception.FixedIpNotFound, - db.fixed_ip_get, self.ctxt, 0) - - def test_fixed_ip_get_success2(self): - address = '192.168.1.5' - 
instance_uuid = self._create_instance() - network_id = db.network_create_safe(self.ctxt, {})['id'] - param = { - 'reserved': False, - 'deleted': 0, - 'leased': False, - 'host': '127.0.0.1', - 'address': address, - 'allocated': False, - 'instance_uuid': instance_uuid, - 'network_id': network_id, - 'virtual_interface_id': None - } - fixed_ip_id = db.fixed_ip_create(self.ctxt, param) - - self.ctxt.is_admin = False - self.assertRaises(exception.Forbidden, db.fixed_ip_get, - self.ctxt, fixed_ip_id) - - def test_fixed_ip_get_success(self): - address = '192.168.1.5' - instance_uuid = self._create_instance() - network_id = db.network_create_safe(self.ctxt, {})['id'] - param = { - 'reserved': False, - 'deleted': 0, - 'leased': False, - 'host': '127.0.0.1', - 'address': address, - 'allocated': False, - 'instance_uuid': instance_uuid, - 'network_id': network_id, - 'virtual_interface_id': None - } - db.fixed_ip_create(self.ctxt, param) - - fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id'] - fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id) - ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at'] - self._assertEqualObjects(param, fixed_ip_data, ignored_keys) - - def test_fixed_ip_get_by_address(self): - instance_uuid = self._create_instance() - db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4', - 'instance_uuid': instance_uuid, - }) - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4', - columns_to_join=['instance']) - self.assertIn('instance', fixed_ip.__dict__) - self.assertEqual(instance_uuid, fixed_ip.instance.uuid) - - def test_fixed_ip_update_not_found_for_address(self): - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.fixed_ip_update, self.ctxt, - '192.168.1.5', {}) - - def test_fixed_ip_update(self): - instance_uuid_1 = self._create_instance() - instance_uuid_2 = self._create_instance() - network_id_1 = db.network_create_safe(self.ctxt, {})['id'] - network_id_2 = db.network_create_safe(self.ctxt, {})['id'] - param_1 = { - 'reserved': True, 'deleted': 0, 'leased': True, - 'host': '192.168.133.1', 'address': '10.0.0.2', - 'allocated': True, 'instance_uuid': instance_uuid_1, - 'network_id': network_id_1, 'virtual_interface_id': '123', - } - - param_2 = { - 'reserved': False, 'deleted': 0, 'leased': False, - 'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False, - 'instance_uuid': instance_uuid_2, 'network_id': network_id_2, - 'virtual_interface_id': None - } - - ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at'] - fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address'] - db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2) - fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt, - param_2['address']) - self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys) - - -@mock.patch('time.sleep', new=lambda x: None) -class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin): - - def setUp(self): - super(FloatingIpTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_base_values(self): - return { - 'address': '1.1.1.1', - 'fixed_ip_id': None, - 'project_id': 'fake_project', - 'host': 'fake_host', - 'auto_assigned': False, - 'pool': 'fake_pool', - 'interface': 'fake_interface', - } - - def _create_floating_ip(self, values): - if not values: - values = {} - vals = self._get_base_values() - vals.update(values) - return db.floating_ip_create(self.ctxt, vals) - - def test_floating_ip_get(self): - values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}] - floating_ips 
= [self._create_floating_ip(val) for val in values] - - for floating_ip in floating_ips: - real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id']) - self._assertEqualObjects(floating_ip, real_floating_ip, - ignored_keys=['fixed_ip']) - - def test_floating_ip_get_not_found(self): - self.assertRaises(exception.FloatingIpNotFound, - db.floating_ip_get, self.ctxt, 100500) - - @mock.patch.object(query.Query, 'first', side_effect=db_exc.DBError()) - def test_floating_ip_get_with_long_id_not_found(self, mock_query): - self.assertRaises(exception.InvalidID, - db.floating_ip_get, self.ctxt, 123456789101112) - mock_query.assert_called_once_with() - - def test_floating_ip_get_pools(self): - values = [ - {'address': '0.0.0.0', 'pool': 'abc'}, - {'address': '1.1.1.1', 'pool': 'abc'}, - {'address': '2.2.2.2', 'pool': 'def'}, - {'address': '3.3.3.3', 'pool': 'ghi'}, - ] - for val in values: - self._create_floating_ip(val) - expected_pools = [{'name': x} - for x in set(map(lambda x: x['pool'], values))] - real_pools = db.floating_ip_get_pools(self.ctxt) - self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools) - - def test_floating_ip_allocate_address(self): - pools = { - 'pool1': ['0.0.0.0', '1.1.1.1'], - 'pool2': ['2.2.2.2'], - 'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5'] - } - for pool, addresses in pools.items(): - for address in addresses: - vals = {'pool': pool, 'address': address, 'project_id': None} - self._create_floating_ip(vals) - - project_id = self._get_base_values()['project_id'] - for pool, addresses in pools.items(): - alloc_addrs = [] - for i in addresses: - float_addr = db.floating_ip_allocate_address(self.ctxt, - project_id, pool) - alloc_addrs.append(float_addr) - self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses) - - def test_floating_ip_allocate_auto_assigned(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4'] - - float_ips = [] - for i in range(0, 2): - float_ips.append(self._create_floating_ip( - {"address": addresses[i]})) - for i in range(2, 4): - float_ips.append(self._create_floating_ip({"address": addresses[i], - "auto_assigned": True})) - - for i in range(0, 2): - float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id) - self.assertFalse(float_ip.auto_assigned) - for i in range(2, 4): - float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id) - self.assertTrue(float_ip.auto_assigned) - - def test_floating_ip_allocate_address_no_more_floating_ips(self): - self.assertRaises(exception.NoMoreFloatingIps, - db.floating_ip_allocate_address, - self.ctxt, 'any_project_id', 'no_such_pool') - - def test_floating_ip_allocate_not_authorized(self): - ctxt = context.RequestContext(user_id='a', project_id='abc', - is_admin=False) - self.assertRaises(exception.Forbidden, - db.floating_ip_allocate_address, - ctxt, 'other_project_id', 'any_pool') - - def test_floating_ip_allocate_address_succeeds_retry(self): - pool = 'pool0' - address = '0.0.0.0' - vals = {'pool': pool, 'address': address, 'project_id': None} - floating_ip = self._create_floating_ip(vals) - - project_id = self._get_base_values()['project_id'] - - def fake_first(): - if mock_first.call_count == 1: - return {'pool': pool, 'project_id': None, 'fixed_ip_id': None, - 'address': address, 'id': 'invalid_id'} - else: - return {'pool': pool, 'project_id': None, 'fixed_ip_id': None, - 'address': address, 'id': 1} - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - float_addr = db.floating_ip_allocate_address(self.ctxt, - project_id, 
pool) - self.assertEqual(address, float_addr) - self.assertEqual(2, mock_first.call_count) - - float_ip = db.floating_ip_get(self.ctxt, floating_ip.id) - self.assertEqual(project_id, float_ip['project_id']) - - def test_floating_ip_allocate_address_retry_limit_exceeded(self): - pool = 'pool0' - address = '0.0.0.0' - vals = {'pool': pool, 'address': address, 'project_id': None} - self._create_floating_ip(vals) - - project_id = self._get_base_values()['project_id'] - - def fake_first(): - return {'pool': pool, 'project_id': None, 'fixed_ip_id': None, - 'address': address, 'id': 'invalid_id'} - - with mock.patch('sqlalchemy.orm.query.Query.first', - side_effect=fake_first) as mock_first: - self.assertRaises(exception.FloatingIpAllocateFailed, - db.floating_ip_allocate_address, self.ctxt, - project_id, pool) - # 5 retries + initial attempt - self.assertEqual(6, mock_first.call_count) - - def test_floating_ip_allocate_address_no_more_ips_with_no_retries(self): - with mock.patch('sqlalchemy.orm.query.Query.first', - return_value=None) as mock_first: - self.assertRaises(exception.NoMoreFloatingIps, - db.floating_ip_allocate_address, - self.ctxt, 'any_project_id', 'no_such_pool') - self.assertEqual(1, mock_first.call_count) - - def _get_existing_ips(self): - return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)] - - def test_floating_ip_bulk_create(self): - expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4'] - result = db.floating_ip_bulk_create(self.ctxt, - [{'address': x} for x in expected_ips], - want_result=False) - self.assertIsNone(result) - self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(), - expected_ips) - - def test_floating_ip_bulk_create_duplicate(self): - ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4'] - prepare_ips = lambda x: {'address': x} - - result = db.floating_ip_bulk_create(self.ctxt, - list(map(prepare_ips, ips))) - self.assertEqual(ips, [ip.address for ip in result]) - self.assertRaises(exception.FloatingIpExists, - db.floating_ip_bulk_create, - self.ctxt, - list(map(prepare_ips, ['1.1.1.5', '1.1.1.4'])), - want_result=False) - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_get_by_address, - self.ctxt, '1.1.1.5') - - def test_floating_ip_bulk_destroy(self): - ips_for_delete = [] - ips_for_non_delete = [] - - def create_ips(i, j): - return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)] - - # NOTE(boris-42): Create more than 256 ip to check that - # _ip_range_splitter works properly. 
- for i in range(1, 3): - ips_for_delete.extend(create_ips(i, 255)) - ips_for_non_delete.extend(create_ips(3, 255)) - - result = db.floating_ip_bulk_create(self.ctxt, - ips_for_delete + ips_for_non_delete, - want_result=False) - self.assertIsNone(result) - - non_bulk_ips_for_delete = create_ips(4, 3) - non_bulk_ips_for_non_delete = create_ips(5, 3) - non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete - for dct in non_bulk_ips: - self._create_floating_ip(dct) - ips_for_delete.extend(non_bulk_ips_for_delete) - ips_for_non_delete.extend(non_bulk_ips_for_non_delete) - - db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete) - - expected_addresses = [x['address'] for x in ips_for_non_delete] - self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(), - expected_addresses) - - def test_floating_ip_create(self): - floating_ip = self._create_floating_ip({}) - ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at', - 'created_at'] - - self.assertIsNotNone(floating_ip['id']) - self._assertEqualObjects(floating_ip, self._get_base_values(), - ignored_keys) - - def test_floating_ip_create_duplicate(self): - self._create_floating_ip({}) - self.assertRaises(exception.FloatingIpExists, - self._create_floating_ip, {}) - - def _create_fixed_ip(self, params): - default_params = {'address': '192.168.0.1'} - default_params.update(params) - return db.fixed_ip_create(self.ctxt, default_params)['address'] - - def test_floating_ip_fixed_ip_associate(self): - float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3'] - - project_id = self.ctxt.project_id - float_ips = [self._create_floating_ip({'address': address, - 'project_id': project_id}) - for address in float_addresses] - fixed_addrs = [self._create_fixed_ip({'address': address}) - for address in fixed_addresses] - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt, - float_ip.address, - fixed_addr, 'host') - self.assertEqual(fixed_ip.address, fixed_addr) - - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id) - self.assertEqual('host', updated_float_ip.host) - - fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt, - float_addresses[0], - fixed_addresses[0], - 'host') - self.assertEqual(fixed_ip.address, fixed_addresses[0]) - - def test_floating_ip_fixed_ip_associate_float_ip_not_found(self): - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.floating_ip_fixed_ip_associate, - self.ctxt, '10.10.10.10', 'some', 'some') - - def test_floating_ip_associate_failed(self): - fixed_ip = self._create_fixed_ip({'address': '7.7.7.7'}) - self.assertRaises(exception.FloatingIpAssociateFailed, - db.floating_ip_fixed_ip_associate, - self.ctxt, '10.10.10.10', fixed_ip, 'some') - - def test_floating_ip_deallocate(self): - values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'} - float_ip = self._create_floating_ip(values) - rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address) - self.assertEqual(1, rows_updated) - - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertIsNone(updated_float_ip.project_id) - self.assertIsNone(updated_float_ip.host) - self.assertFalse(updated_float_ip.auto_assigned) - - def test_floating_ip_deallocate_address_not_found(self): - self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2')) - - def test_floating_ip_deallocate_address_associated_ip(self): - 
float_address = '1.1.1.1' - fixed_address = '2.2.2.1' - - project_id = self.ctxt.project_id - float_ip = self._create_floating_ip({'address': float_address, - 'project_id': project_id}) - fixed_addr = self._create_fixed_ip({'address': fixed_address}) - db.floating_ip_fixed_ip_associate(self.ctxt, float_ip.address, - fixed_addr, 'host') - self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, - float_address)) - - def test_floating_ip_destroy(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - - expected_len = len(addresses) - for float_ip in float_ips: - db.floating_ip_destroy(self.ctxt, float_ip.address) - self.assertRaises(exception.FloatingIpNotFound, - db.floating_ip_get, self.ctxt, float_ip.id) - expected_len -= 1 - if expected_len > 0: - self.assertEqual(expected_len, - len(db.floating_ip_get_all(self.ctxt))) - else: - self.assertRaises(exception.NoFloatingIpsDefined, - db.floating_ip_get_all, self.ctxt) - - def test_floating_ip_disassociate(self): - float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3'] - - project_id = self.ctxt.project_id - float_ips = [self._create_floating_ip({'address': address, - 'project_id': project_id}) - for address in float_addresses] - fixed_addrs = [self._create_fixed_ip({'address': address}) - for address in fixed_addresses] - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - db.floating_ip_fixed_ip_associate(self.ctxt, - float_ip.address, - fixed_addr, 'host') - - for float_ip, fixed_addr in zip(float_ips, fixed_addrs): - fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address) - self.assertEqual(fixed.address, fixed_addr) - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id) - self.assertIsNone(updated_float_ip.fixed_ip_id) - self.assertIsNone(updated_float_ip.host) - - def test_floating_ip_disassociate_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_disassociate, self.ctxt, - '11.11.11.11') - - def test_floating_ip_get_all(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - self._assertEqualListsOfObjects(float_ips, - db.floating_ip_get_all(self.ctxt), - ignored_keys="fixed_ip") - - def test_floating_ip_get_all_associated(self): - instance = db.instance_create(self.ctxt, {'uuid': 'fake'}) - project_id = self.ctxt.project_id - float_ip = self._create_floating_ip({'address': '1.1.1.1', - 'project_id': project_id}) - fixed_ip = self._create_fixed_ip({'address': '2.2.2.2', - 'instance_uuid': instance.uuid}) - db.floating_ip_fixed_ip_associate(self.ctxt, - float_ip.address, - fixed_ip, - 'host') - float_ips = db.floating_ip_get_all(self.ctxt) - self.assertEqual(1, len(float_ips)) - self.assertEqual(float_ip.address, float_ips[0].address) - self.assertEqual(fixed_ip, float_ips[0].fixed_ip.address) - self.assertEqual(instance.uuid, float_ips[0].fixed_ip.instance_uuid) - - def test_floating_ip_get_all_not_found(self): - self.assertRaises(exception.NoFloatingIpsDefined, - db.floating_ip_get_all, self.ctxt) - - def test_floating_ip_get_all_by_host(self): - hosts = { - 'host1': ['1.1.1.1', '1.1.1.2'], - 'host2': ['2.1.1.1', '2.1.1.2'], - 'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3'] - } - - hosts_with_float_ips = {} - for host, addresses in hosts.items(): - hosts_with_float_ips[host] = [] - for address in addresses: - float_ip = self._create_floating_ip({'host': 
host, - 'address': address}) - hosts_with_float_ips[host].append(float_ip) - - for host, float_ips in hosts_with_float_ips.items(): - real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host) - self._assertEqualListsOfObjects(float_ips, real_float_ips, - ignored_keys="fixed_ip") - - def test_floating_ip_get_all_by_host_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForHost, - db.floating_ip_get_all_by_host, - self.ctxt, 'non_exists_host') - - def test_floating_ip_get_all_by_project(self): - projects = { - 'pr1': ['1.1.1.1', '1.1.1.2'], - 'pr2': ['2.1.1.1', '2.1.1.2'], - 'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3'] - } - - projects_with_float_ips = {} - for project_id, addresses in projects.items(): - projects_with_float_ips[project_id] = [] - for address in addresses: - float_ip = self._create_floating_ip({'project_id': project_id, - 'address': address}) - projects_with_float_ips[project_id].append(float_ip) - - for project_id, float_ips in projects_with_float_ips.items(): - real_float_ips = db.floating_ip_get_all_by_project(self.ctxt, - project_id) - self._assertEqualListsOfObjects(float_ips, real_float_ips, - ignored_keys='fixed_ip') - - def test_floating_ip_get_all_by_project_not_authorized(self): - ctxt = context.RequestContext(user_id='a', project_id='abc', - is_admin=False) - self.assertRaises(exception.Forbidden, - db.floating_ip_get_all_by_project, - ctxt, 'other_project') - - def test_floating_ip_get_by_address(self): - addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3'] - float_ips = [self._create_floating_ip({'address': addr}) - for addr in addresses] - - for float_ip in float_ips: - real_float_ip = db.floating_ip_get_by_address(self.ctxt, - float_ip.address) - self._assertEqualObjects(float_ip, real_float_ip, - ignored_keys='fixed_ip') - - def test_floating_ip_get_by_address_not_found(self): - self.assertRaises(exception.FloatingIpNotFoundForAddress, - db.floating_ip_get_by_address, - self.ctxt, '20.20.20.20') - - @mock.patch.object(query.Query, 'first', side_effect=db_exc.DBError()) - def test_floating_ip_get_by_invalid_address(self, mock_query): - self.assertRaises(exception.InvalidIpAddressError, - db.floating_ip_get_by_address, - self.ctxt, 'non_exists_host') - mock_query.assert_called_once_with() - - def test_floating_ip_get_by_fixed_address(self): - fixed_float = [ - ('1.1.1.1', '2.2.2.1'), - ('1.1.1.2', '2.2.2.2'), - ('1.1.1.3', '2.2.2.3') - ] - - for fixed_addr, float_addr in fixed_float: - project_id = self.ctxt.project_id - self._create_floating_ip({'address': float_addr, - 'project_id': project_id}) - self._create_fixed_ip({'address': fixed_addr}) - db.floating_ip_fixed_ip_associate(self.ctxt, float_addr, - fixed_addr, 'some_host') - - for fixed_addr, float_addr in fixed_float: - float_ip = db.floating_ip_get_by_fixed_address(self.ctxt, - fixed_addr) - self.assertEqual(float_addr, float_ip[0]['address']) - - def test_floating_ip_get_by_fixed_ip_id(self): - fixed_float = [ - ('1.1.1.1', '2.2.2.1'), - ('1.1.1.2', '2.2.2.2'), - ('1.1.1.3', '2.2.2.3') - ] - - for fixed_addr, float_addr in fixed_float: - project_id = self.ctxt.project_id - self._create_floating_ip({'address': float_addr, - 'project_id': project_id}) - self._create_fixed_ip({'address': fixed_addr}) - db.floating_ip_fixed_ip_associate(self.ctxt, float_addr, - fixed_addr, 'some_host') - - for fixed_addr, float_addr in fixed_float: - fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr) - float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt, - fixed_ip['id']) - 
self.assertEqual(float_addr, float_ip[0]['address']) - - def test_floating_ip_update(self): - float_ip = self._create_floating_ip({}) - - values = { - 'project_id': 'some_pr', - 'host': 'some_host', - 'auto_assigned': True, - 'interface': 'some_interface', - 'pool': 'some_pool' - } - floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'], - values) - self.assertIsNotNone(floating_ref) - updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id']) - self._assertEqualObjects(updated_float_ip, values, - ignored_keys=['id', 'address', 'updated_at', - 'deleted_at', 'created_at', - 'deleted', 'fixed_ip_id', - 'fixed_ip']) - - def test_floating_ip_update_to_duplicate(self): - float_ip1 = self._create_floating_ip({'address': '1.1.1.1'}) - float_ip2 = self._create_floating_ip({'address': '1.1.1.2'}) - - self.assertRaises(exception.FloatingIpExists, - db.floating_ip_update, - self.ctxt, float_ip2['address'], - {'address': float_ip1['address']}) - - class InstanceDestroyConstraints(test.TestCase): def test_destroy_with_equal_any_constraint_met_single_value(self): @@ -6444,313 +4845,6 @@ self._assertEqualObjects(updated, updated_vif, ignored_keys) -@mock.patch('time.sleep', new=lambda x: None) -class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin): - - """Tests for db.api.network_* methods.""" - - def setUp(self): - super(NetworkTestCase, self).setUp() - self.ctxt = context.get_admin_context() - - def _get_associated_fixed_ip(self, host, cidr, ip): - network = db.network_create_safe(self.ctxt, - {'project_id': 'project1', 'cidr': cidr}) - self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id, - host)) - instance = db.instance_create(self.ctxt, - {'project_id': 'project1', 'host': host}) - virtual_interface = db.virtual_interface_create(self.ctxt, - {'instance_uuid': instance.uuid, 'network_id': network.id, - 'address': ip}) - db.fixed_ip_create(self.ctxt, {'address': ip, - 'network_id': network.id, 'allocated': True, - 'virtual_interface_id': virtual_interface.id}) - db.fixed_ip_associate(self.ctxt, ip, instance.uuid, - network.id, virtual_interface_id=virtual_interface['id']) - return network, instance - - def test_network_get_associated_default_route(self): - network, instance = self._get_associated_fixed_ip('host.net', - '192.0.2.0/30', '192.0.2.1') - network2 = db.network_create_safe(self.ctxt, - {'project_id': 'project1', 'cidr': '192.0.3.0/30'}) - ip = '192.0.3.1' - virtual_interface = db.virtual_interface_create(self.ctxt, - {'instance_uuid': instance.uuid, 'network_id': network2.id, - 'address': ip}) - db.fixed_ip_create(self.ctxt, {'address': ip, - 'network_id': network2.id, 'allocated': True, - 'virtual_interface_id': virtual_interface.id}) - db.fixed_ip_associate(self.ctxt, ip, instance.uuid, - network2.id) - data = db.network_get_associated_fixed_ips(self.ctxt, network.id) - self.assertEqual(1, len(data)) - self.assertTrue(data[0]['default_route']) - data = db.network_get_associated_fixed_ips(self.ctxt, network2.id) - self.assertEqual(1, len(data)) - self.assertFalse(data[0]['default_route']) - - def test_network_get_associated_fixed_ips(self): - network, instance = self._get_associated_fixed_ip('host.net', - '192.0.2.0/30', '192.0.2.1') - data = db.network_get_associated_fixed_ips(self.ctxt, network.id) - self.assertEqual(1, len(data)) - self.assertEqual('192.0.2.1', data[0]['address']) - self.assertEqual('192.0.2.1', data[0]['vif_address']) - self.assertEqual(instance.uuid, data[0]['instance_uuid']) - 
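For orientation while reading these assertions: `network_get_associated_fixed_ips` returns one flat dict per associated fixed IP, joining fixed IP, virtual interface and instance data. A sketch of that row shape built from the fixture values in this test (key set abridged; an assumption drawn from the assertions, not the full schema):

    associated_row = {
        'address': '192.0.2.1',          # the fixed IP itself
        'vif_address': '192.0.2.1',      # address on the virtual interface
        'instance_uuid': '<instance uuid>',
        'allocated': True,               # mirrored from the fixed_ips row
        'default_route': True,           # True for at most one network per instance
    }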
self.assertTrue(data[0][fields.PciDeviceStatus.ALLOCATED]) - - def test_network_create_safe(self): - values = {'host': 'localhost', 'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - self.assertEqual(36, len(network['uuid'])) - db_network = db.network_get(self.ctxt, network['id']) - self._assertEqualObjects(network, db_network) - - def test_network_create_with_duplicate_vlan(self): - values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1} - values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1} - db.network_create_safe(self.ctxt, values1) - self.assertRaises(exception.DuplicateVlan, - db.network_create_safe, self.ctxt, values2) - - def test_network_delete_safe(self): - values = {'host': 'localhost', 'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - db.network_get(self.ctxt, network['id']) - values = {'network_id': network['id'], 'address': '192.168.1.5'} - address1 = db.fixed_ip_create(self.ctxt, values)['address'] - values = {'network_id': network['id'], - 'address': '192.168.1.6', - 'allocated': True} - address2 = db.fixed_ip_create(self.ctxt, values)['address'] - self.assertRaises(exception.NetworkInUse, - db.network_delete_safe, self.ctxt, network['id']) - db.fixed_ip_update(self.ctxt, address2, {'allocated': False}) - network = db.network_delete_safe(self.ctxt, network['id']) - self.assertRaises(exception.FixedIpNotFoundForAddress, - db.fixed_ip_get_by_address, self.ctxt, address1) - ctxt = self.ctxt.elevated(read_deleted='yes') - fixed_ip = db.fixed_ip_get_by_address(ctxt, address1) - self.assertTrue(fixed_ip['deleted']) - - def test_network_in_use_on_host(self): - values = {'host': 'foo', 'hostname': 'myname'} - instance = db.instance_create(self.ctxt, values) - values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']} - vif = db.virtual_interface_create(self.ctxt, values) - values = {'address': '192.168.1.6', - 'network_id': 1, - 'allocated': True, - 'instance_uuid': instance['uuid'], - 'virtual_interface_id': vif['id']} - db.fixed_ip_create(self.ctxt, values) - self.assertTrue(db.network_in_use_on_host(self.ctxt, 1, 'foo')) - self.assertFalse(db.network_in_use_on_host(self.ctxt, 1, 'bar')) - - def test_network_update_nonexistent(self): - self.assertRaises(exception.NetworkNotFound, - db.network_update, self.ctxt, 123456, {}) - - def test_network_update_with_duplicate_vlan(self): - values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1} - values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2} - network_ref = db.network_create_safe(self.ctxt, values1) - db.network_create_safe(self.ctxt, values2) - self.assertRaises(exception.DuplicateVlan, - db.network_update, self.ctxt, - network_ref["id"], values2) - - def test_network_update(self): - network = db.network_create_safe(self.ctxt, {'project_id': 'project1', - 'vlan': 1, 'host': 'test.com'}) - db.network_update(self.ctxt, network.id, {'vlan': 2}) - network_new = db.network_get(self.ctxt, network.id) - self.assertEqual(2, network_new.vlan) - - def test_network_set_host_nonexistent_network(self): - self.assertRaises(exception.NetworkNotFound, db.network_set_host, - self.ctxt, 123456, 'nonexistent') - - def test_network_set_host_already_set_correct(self): - values = {'host': 'example.com', 'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - self.assertIsNone(db.network_set_host(self.ctxt, network.id, - 'example.com')) - - def test_network_set_host_already_set_incorrect(self): - 
values = {'host': 'example.com', 'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - self.assertIsNone(db.network_set_host(self.ctxt, network.id, - 'new.example.com')) - - def test_network_set_host_with_initially_no_host(self): - values = {'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - db.network_set_host(self.ctxt, network.id, 'example.com') - self.assertEqual('example.com', - db.network_get(self.ctxt, network.id).host) - - def test_network_set_host_succeeds_retry_on_deadlock(self): - values = {'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - - def fake_update(params): - if mock_update.call_count == 1: - raise db_exc.DBDeadlock() - else: - return 1 - - with mock.patch('sqlalchemy.orm.query.Query.update', - side_effect=fake_update) as mock_update: - db.network_set_host(self.ctxt, network.id, 'example.com') - self.assertEqual(2, mock_update.call_count) - - def test_network_set_host_succeeds_retry_on_no_rows_updated(self): - values = {'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - - def fake_update(params): - if mock_update.call_count == 1: - return 0 - else: - return 1 - - with mock.patch('sqlalchemy.orm.query.Query.update', - side_effect=fake_update) as mock_update: - db.network_set_host(self.ctxt, network.id, 'example.com') - self.assertEqual(2, mock_update.call_count) - - def test_network_set_host_failed_with_retry_on_no_rows_updated(self): - values = {'project_id': 'project1'} - network = db.network_create_safe(self.ctxt, values) - - with mock.patch('sqlalchemy.orm.query.Query.update', - return_value=0) as mock_update: - self.assertRaises(exception.NetworkSetHostFailed, - db.network_set_host, self.ctxt, network.id, - 'example.com') - # 5 retries + initial attempt - self.assertEqual(6, mock_update.call_count) - - def test_network_get_all_by_host(self): - self.assertEqual([], - db.network_get_all_by_host(self.ctxt, 'example.com')) - host = 'h1.example.com' - # network with host set - net1 = db.network_create_safe(self.ctxt, {'host': host}) - self._assertEqualListsOfObjects([net1], - db.network_get_all_by_host(self.ctxt, host)) - # network with fixed ip with host set - net2 = db.network_create_safe(self.ctxt, {}) - db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id}) - db.network_get_all_by_host(self.ctxt, host) - self._assertEqualListsOfObjects([net1, net2], - db.network_get_all_by_host(self.ctxt, host)) - # network with instance with host set - net3 = db.network_create_safe(self.ctxt, {}) - instance = db.instance_create(self.ctxt, {'host': host}) - db.fixed_ip_create(self.ctxt, {'network_id': net3.id, - 'instance_uuid': instance.uuid}) - self._assertEqualListsOfObjects([net1, net2, net3], - db.network_get_all_by_host(self.ctxt, host)) - - def test_network_get_by_cidr(self): - cidr = '192.0.2.0/30' - cidr_v6 = '2001:db8:1::/64' - network = db.network_create_safe(self.ctxt, - {'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6}) - self._assertEqualObjects(network, - db.network_get_by_cidr(self.ctxt, cidr)) - self._assertEqualObjects(network, - db.network_get_by_cidr(self.ctxt, cidr_v6)) - - def test_network_get_by_cidr_nonexistent(self): - self.assertRaises(exception.NetworkNotFoundForCidr, - db.network_get_by_cidr, self.ctxt, '192.0.2.0/30') - - def test_network_get_by_uuid(self): - network = db.network_create_safe(self.ctxt, - {'project_id': 'project_1'}) - self._assertEqualObjects(network, - db.network_get_by_uuid(self.ctxt, 
network.uuid)) - - def test_network_get_by_uuid_nonexistent(self): - self.assertRaises(exception.NetworkNotFoundForUUID, - db.network_get_by_uuid, self.ctxt, 'non-existent-uuid') - - def test_network_get_all_by_uuids_no_networks(self): - self.assertRaises(exception.NoNetworksFound, - db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid']) - - def test_network_get_all_by_uuids(self): - net1 = db.network_create_safe(self.ctxt, {}) - net2 = db.network_create_safe(self.ctxt, {}) - self._assertEqualListsOfObjects([net1, net2], - db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid])) - - def test_network_get_all_no_networks(self): - self.assertRaises(exception.NoNetworksFound, - db.network_get_all, self.ctxt) - - def test_network_get_all(self): - network = db.network_create_safe(self.ctxt, {}) - network_db = db.network_get_all(self.ctxt) - self.assertEqual(1, len(network_db)) - self._assertEqualObjects(network, network_db[0]) - - def test_network_get_all_admin_user(self): - network1 = db.network_create_safe(self.ctxt, {}) - network2 = db.network_create_safe(self.ctxt, - {'project_id': 'project1'}) - self._assertEqualListsOfObjects([network1, network2], - db.network_get_all(self.ctxt, - project_only=True)) - - def test_network_get_all_normal_user(self): - normal_ctxt = context.RequestContext('fake', 'fake') - db.network_create_safe(self.ctxt, {}) - db.network_create_safe(self.ctxt, {'project_id': 'project1'}) - network1 = db.network_create_safe(self.ctxt, - {'project_id': 'fake'}) - network_db = db.network_get_all(normal_ctxt, project_only=True) - self.assertEqual(1, len(network_db)) - self._assertEqualObjects(network1, network_db[0]) - - def test_network_get(self): - network = db.network_create_safe(self.ctxt, {}) - self._assertEqualObjects(db.network_get(self.ctxt, network.id), - network) - db.network_delete_safe(self.ctxt, network.id) - self.assertRaises(exception.NetworkNotFound, - db.network_get, self.ctxt, network.id) - - def test_network_associate(self): - network = db.network_create_safe(self.ctxt, {}) - self.assertIsNone(network.project_id) - db.network_associate(self.ctxt, "project1", network.id) - self.assertEqual("project1", db.network_get(self.ctxt, - network.id).project_id) - - def test_network_diassociate(self): - network = db.network_create_safe(self.ctxt, - {'project_id': 'project1', 'host': 'test.net'}) - # disassociate project - db.network_disassociate(self.ctxt, network.id, False, True) - self.assertIsNone(db.network_get(self.ctxt, network.id).project_id) - # disassociate host - db.network_disassociate(self.ctxt, network.id, True, False) - self.assertIsNone(db.network_get(self.ctxt, network.id).host) - - def test_network_count_reserved_ips(self): - net = db.network_create_safe(self.ctxt, {}) - self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id)) - db.fixed_ip_create(self.ctxt, {'network_id': net.id, - 'reserved': True}) - self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id)) - - class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin): def setUp(self): super(KeyPairTestCase, self).setUp() @@ -7978,57 +6072,6 @@ super(Ec2TestCase, self).setUp() self.ctxt = context.RequestContext('fake_user', 'fake_project') - def test_ec2_ids_not_found_are_printable(self): - def check_exc_format(method, value): - try: - method(self.ctxt, value) - except exception.NotFound as exc: - self.assertIn(six.text_type(value), six.text_type(exc)) - - check_exc_format(db.get_instance_uuid_by_ec2_id, 123456) - 
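The helper above guards against a regression where a non-string id (such as 123456) fails to render in a NotFound message. A minimal self-contained sketch of the interpolation pattern being protected, using a hypothetical exception class (nova's real NotFound hierarchy builds messages the same general way):

    import six  # the surrounding tests still support Python 2

    class SnapshotNotFoundSketch(Exception):
        msg_fmt = "Snapshot %(snapshot_id)s could not be found."

        def __init__(self, **kwargs):
            super(SnapshotNotFoundSketch, self).__init__(self.msg_fmt % kwargs)

    exc = SnapshotNotFoundSketch(snapshot_id=123456)
    assert six.text_type(123456) in six.text_type(exc)  # the id stays printable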
check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456) - check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake') - - def test_ec2_volume_create(self): - vol = db.ec2_volume_create(self.ctxt, 'fake-uuid') - self.assertIsNotNone(vol['id']) - self.assertEqual(vol['uuid'], 'fake-uuid') - - def test_ec2_volume_get_by_id(self): - vol = db.ec2_volume_create(self.ctxt, 'fake-uuid') - vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id']) - self.assertEqual(vol2['uuid'], vol['uuid']) - - def test_ec2_volume_get_by_uuid(self): - vol = db.ec2_volume_create(self.ctxt, 'fake-uuid') - vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid']) - self.assertEqual(vol2['id'], vol['id']) - - def test_ec2_snapshot_create(self): - snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') - self.assertIsNotNone(snap['id']) - self.assertEqual(snap['uuid'], 'fake-uuid') - - def test_ec2_snapshot_get_by_ec2_id(self): - snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') - snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id']) - self.assertEqual(snap2['uuid'], 'fake-uuid') - - def test_ec2_snapshot_get_by_uuid(self): - snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid') - snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid') - self.assertEqual(snap['id'], snap2['id']) - - def test_ec2_snapshot_get_by_ec2_id_not_found(self): - self.assertRaises(exception.SnapshotNotFound, - db.ec2_snapshot_get_by_ec2_id, - self.ctxt, 123456) - - def test_ec2_snapshot_get_by_uuid_not_found(self): - self.assertRaises(exception.SnapshotNotFound, - db.ec2_snapshot_get_by_uuid, - self.ctxt, 'fake-uuid') - def test_ec2_instance_create(self): inst = db.ec2_instance_create(self.ctxt, 'fake-uuid') self.assertIsNotNone(inst['id']) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_migrations.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_migrations.py 2020-04-10 17:57:58.000000000 +0000 @@ -1066,6 +1066,13 @@ testtools.TestCase): FIXTURE = test_fixtures.MySQLOpportunisticFixture + def setUp(self): + super(TestNovaMigrationsMySQL, self).setUp() + # TODO(mriedem): Revert this change before Ussuri RC1 since this is a + # temporary measure to merge code post-feature-freeze in Ussuri which + # continually keeps failing gate runs due to bug 1823251. + self.skipTest('Skip due to bug 1823251 but unskip before Ussuri RC1.') + def test_innodb_tables(self): with mock.patch.object(sa_migration, 'get_engine', return_value=self.migrate_engine): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_migration_utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_migration_utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_migration_utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_migration_utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -13,11 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-from oslo_db.sqlalchemy.compat import utils as compat_utils from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_fixtures -from oslo_db.sqlalchemy import utils as oslodbutils from oslo_utils import uuidutils from sqlalchemy import Integer, String from sqlalchemy import MetaData, Table, Column @@ -31,8 +29,6 @@ from nova import test from nova.tests import fixtures as nova_fixtures -SA_VERSION = compat_utils.SQLA_VERSION - class CustomType(UserDefinedType): """Dummy column type for testing unsupported types.""" @@ -188,12 +184,6 @@ Column('a', CustomType)) table.create() - # reflection of custom types has been fixed upstream - if SA_VERSION < (0, 9, 0): - self.assertRaises(oslodbutils.ColumnError, - utils.create_shadow_table, - self.engine, table_name=table_name) - utils.create_shadow_table(self.engine, table_name=table_name, a=Column('a', CustomType())) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_sqlalchemy_migration.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_sqlalchemy_migration.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/db/test_sqlalchemy_migration.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/db/test_sqlalchemy_migration.py 2020-04-10 17:57:58.000000000 +0000 @@ -159,6 +159,51 @@ engine_calls = [mock.call(database, context=None)] * 3 self.assertEqual(engine_calls, mock_get_engine.call_args_list) + def test_db_version_init_race(self, mock_get_engine, mock_db_version, + mock_find_repo): + # This test exercises bug 1804652 by causing + # versioning_api.version_contro() to raise an unhandleable error the + # first time it is called. + database = 'api' + mock_get_engine.return_value = 'engine' + exc = versioning_exceptions.DatabaseNotControlledError() + mock_db_version.side_effect = [exc, ''] + metadata = mock.MagicMock() + metadata.tables.return_value = [] + with mock.patch.object(sqlalchemy, 'MetaData', + metadata), mock.patch.object(migration, + 'db_version_control') as mock_version_control: + # db_version_control raises an unhandleable error because we were + # racing to initialise with another process. + mock_version_control.side_effect = test.TestingException + migration.db_version(database) + mock_version_control.assert_called_once_with(0, + database, + context=None) + db_version_calls = [mock.call('engine', 'repo')] * 2 + self.assertEqual(db_version_calls, mock_db_version.call_args_list) + engine_calls = [mock.call(database, context=None)] * 3 + self.assertEqual(engine_calls, mock_get_engine.call_args_list) + + def test_db_version_raise_on_error(self, mock_get_engine, mock_db_version, + mock_find_repo): + # This test asserts that we will still raise a persistent error after + # working around bug 1804652. + database = 'api' + mock_get_engine.return_value = 'engine' + mock_db_version.side_effect = \ + versioning_exceptions.DatabaseNotControlledError + metadata = mock.MagicMock() + metadata.tables.return_value = [] + with mock.patch.object(sqlalchemy, 'MetaData', + metadata), mock.patch.object(migration, + 'db_version_control') as mock_version_control: + # db_version_control raises an unhandleable error because we were + # racing to initialise with another process. 
+ mock_version_control.side_effect = test.TestingException + self.assertRaises(test.TestingException, + migration.db_version, database) + @mock.patch.object(migration, '_find_migrate_repo', return_value='repo') @mock.patch.object(migration, 'get_engine', return_value='engine') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_instance.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_instance.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_instance.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_instance.py 2020-04-10 17:57:58.000000000 +0000 @@ -140,6 +140,7 @@ inst.old_flavor = None inst.new_flavor = None inst.resources = None + inst.migration_context = None inst.obj_reset_changes() return inst diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_policy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_policy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_policy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_policy.py 2020-04-10 17:57:58.000000000 +0000 @@ -21,23 +21,35 @@ "os_compute_api:servers:show:host_status:unknown-only": "", "os_compute_api:servers:allow_all_filters": "", "os_compute_api:servers:migrations:force_complete": "", + "os_compute_api:servers:migrations:index": "", + "os_compute_api:servers:migrations:show": "", + "os_compute_api:servers:migrations:delete": "", "os_compute_api:os-admin-actions:inject_network_info": "", "os_compute_api:os-admin-actions:reset_network": "", "os_compute_api:os-admin-actions:reset_state": "", "os_compute_api:os-admin-password": "", - "os_compute_api:os-agents": "", - "os_compute_api:os-attach-interfaces": "", + "os_compute_api:os-agents:list": "", + "os_compute_api:os-agents:create": "", + "os_compute_api:os-agents:update": "", + "os_compute_api:os-agents:delete": "", + "os_compute_api:os-attach-interfaces:list": "", + "os_compute_api:os-attach-interfaces:show": "", + "os_compute_api:os-attach-interfaces:create": "", + "os_compute_api:os-attach-interfaces:delete": "", "os_compute_api:os-baremetal-nodes": "", + "os_compute_api:os-console-auth-tokens": "", "os_compute_api:os-console-output": "", "os_compute_api:os-remote-consoles": "", "os_compute_api:os-create-backup": "", - "os_compute_api:os-deferred-delete": "", + "os_compute_api:os-deferred-delete:restore": "", + "os_compute_api:os-deferred-delete:force": "", "os_compute_api:os-extended-server-attributes": "", "os_compute_api:ips:index": "", "os_compute_api:ips:show": "", "os_compute_api:extensions": "", "os_compute_api:os-flavor-access:remove_tenant_access": "", "os_compute_api:os-flavor-access:add_tenant_access": "", + "os_compute_api:os-flavor-access": "", "os_compute_api:os-flavor-extra-specs:index": "", "os_compute_api:os-flavor-extra-specs:show": "", "os_compute_api:os-flavor-manage:create": "", @@ -45,13 +57,25 @@ "os_compute_api:os-flavor-manage:delete": "", "os_compute_api:os-floating-ip-pools": "", "os_compute_api:os-floating-ips": "", - "os_compute_api:os-instance-actions": "", - "os_compute_api:os-instance-usage-audit-log": "", + "os_compute_api:os-instance-actions:list": "", + "os_compute_api:os-instance-actions:show": "", + "os_compute_api:os-instance-actions:events": "", + "os_compute_api:os-instance-actions:events:details": "", + "os_compute_api:os-instance-usage-audit-log:list": "", + "os_compute_api:os-instance-usage-audit-log:show": "", + 
"os_compute_api:os-hypervisors:list": "", + "os_compute_api:os-hypervisors:list-detail": "", + "os_compute_api:os-hypervisors:statistics": "", + "os_compute_api:os-hypervisors:show": "", + "os_compute_api:os-hypervisors:uptime": "", + "os_compute_api:os-hypervisors:search": "", + "os_compute_api:os-hypervisors:servers": "", "os_compute_api:os-lock-server:lock": "", "os_compute_api:os-lock-server:unlock": "", "os_compute_api:os-migrate-server:migrate": "", "os_compute_api:os-migrate-server:migrate_live": "", + "os_compute_api:os-migrations:index": "", "os_compute_api:os-multinic": "", "os_compute_api:os-networks:view": "", "os_compute_api:os-tenant-networks": "", @@ -65,8 +89,13 @@ "os_compute_api:os-quota-class-sets:update": "", "os_compute_api:os-quota-class-sets:show": "", "os_compute_api:os-rescue": "", + "os_compute_api:os-unrescue": "", + "os_compute_api:os-security-groups:list": "", + "os_compute_api:os-security-groups:add": "", + "os_compute_api:os-security-groups:remove": "", "os_compute_api:os-server-diagnostics": "", - "os_compute_api:os-server-password": "", + "os_compute_api:os-server-password:show": "", + "os_compute_api:os-server-password:clear": "", "os_compute_api:os-server-tags:index": "", "os_compute_api:os-server-tags:show": "", "os_compute_api:os-server-tags:update": "", @@ -92,6 +121,7 @@ "os_compute_api:os-volumes-attachments:show": "", "os_compute_api:os-volumes-attachments:create": "", "os_compute_api:os-volumes-attachments:update": "", + "os_compute_api:os-volumes-attachments:swap":"", "os_compute_api:os-volumes-attachments:delete": "", "os_compute_api:os-availability-zone:list": "", "os_compute_api:os-availability-zone:detail": "", diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_server_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_server_actions.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/fake_server_actions.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/fake_server_actions.py 2020-04-10 17:57:58.000000000 +0000 @@ -73,7 +73,8 @@ 'deleted_at': None, 'deleted': False, 'host': 'host1', - 'hostId': FAKE_HOST_ID1 + 'hostId': FAKE_HOST_ID1, + 'details': None }, {'id': 2, 'action_id': FAKE_ACTION_ID1, @@ -89,7 +90,8 @@ 'deleted_at': None, 'deleted': False, 'host': 'host1', - 'hostId': FAKE_HOST_ID1 + 'hostId': FAKE_HOST_ID1, + 'details': None } ], FAKE_ACTION_ID2: [{'id': 3, @@ -106,7 +108,8 @@ 'deleted_at': None, 'deleted': False, 'host': 'host2', - 'hostId': FAKE_HOST_ID2 + 'hostId': FAKE_HOST_ID2, + 'details': None } ] } diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/network/test_neutron.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/network/test_neutron.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/network/test_neutron.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/network/test_neutron.py 2020-04-10 17:57:58.000000000 +0000 @@ -245,6 +245,15 @@ exception.Unauthorized, client.list_networks) + def test_neutron_http_retries(self): + retries = 42 + self.flags(http_retries=retries, group='neutron') + my_context = context.RequestContext('userid', + uuids.my_tenant, + auth_token='token') + cl = neutronapi.get_client(my_context) + self.assertEqual(retries, cl.httpclient.connect_retries) + class TestAPIBase(test.TestCase): @@ -4630,6 +4639,31 @@ {'fake-port-1': [uuids.dest_compute_rp]}) get_client_mock.return_value.update_port.assert_called_once_with( 
'fake-port-1', + {'port': {'device_owner': 'compute:None', + 'binding:profile': {'allocation': uuids.dest_compute_rp}, + 'binding:host_id': 'new-host'}}) + + @mock.patch.object(neutronapi, 'get_client') + def test_update_port_bindings_for_instance_with_resource_req_unshelve( + self, get_client_mock): + + instance = fake_instance.fake_instance_obj(self.context) + fake_ports = {'ports': [ + {'id': 'fake-port-1', + 'binding:vnic_type': 'normal', + constants.BINDING_HOST_ID: 'old-host', + constants.BINDING_PROFILE: { + 'allocation': uuids.source_compute_rp}, + 'resource_request': mock.sentinel.resource_request}]} + list_ports_mock = mock.Mock(return_value=fake_ports) + get_client_mock.return_value.list_ports = list_ports_mock + + # NOTE(gibi): during unshelve migration object is not created + self.api._update_port_binding_for_instance( + self.context, instance, 'new-host', None, + {'fake-port-1': [uuids.dest_compute_rp]}) + get_client_mock.return_value.update_port.assert_called_once_with( + 'fake-port-1', {'port': {'device_owner': 'compute:None', 'binding:profile': {'allocation': uuids.dest_compute_rp}, 'binding:host_id': 'new-host'}}) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/network/test_security_group.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/network/test_security_group.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/network/test_security_group.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/network/test_security_group.py 2020-04-10 17:57:58.000000000 +0000 @@ -78,47 +78,6 @@ mock_list_secgroup.assert_called_once_with(tenant_id=project_id) - def test_list_with_all_tenants_sec_name_and_admin_context(self): - project_id = '0af70a4d22cf4652824ddc1f2435dd85' - search_opts = {'all_tenants': 1} - security_group_names = ['secgroup_ssh'] - security_groups_list = {'security_groups': []} - admin_context = context.RequestContext('user1', project_id, True) - - with mock.patch.object( - self.mocked_client, - 'list_security_groups', - return_value=security_groups_list) as mock_list_secgroup: - sg_api.list(admin_context, project=project_id, - names=security_group_names, - search_opts=search_opts) - - mock_list_secgroup.assert_called_once_with( - name=security_group_names, - tenant_id=project_id) - - def test_list_with_all_tenants_sec_name_ids_and_admin_context(self): - project_id = '0af70a4d22cf4652824ddc1f2435dd85' - search_opts = {'all_tenants': 1} - security_group_names = ['secgroup_ssh'] - security_group_ids = ['id1'] - security_groups_list = {'security_groups': []} - admin_context = context.RequestContext('user1', project_id, True) - - with mock.patch.object( - self.mocked_client, - 'list_security_groups', - return_value=security_groups_list) as mock_list_secgroup: - sg_api.list(admin_context, project=project_id, - names=security_group_names, - ids=security_group_ids, - search_opts=search_opts) - - mock_list_secgroup.assert_called_once_with( - name=security_group_names, - id=security_group_ids, - tenant_id=project_id) - def test_list_with_all_tenants_not_admin(self): search_opts = {'all_tenants': 1} security_groups_list = {'security_groups': []} @@ -133,36 +92,6 @@ mock_list_secgroup.assert_called_once_with( tenant_id=self.context.project_id) - def test_get_with_name_duplicated(self): - sg_name = 'web_server' - expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' - expected_sg = {'security_group': {'name': sg_name, - 'id': expected_sg_id, - 'tenant_id': self.context.project_id, - 
'description': 'server', 'rules': []}} - self.mocked_client.show_security_group.return_value = expected_sg - - with mock.patch.object(neutronv20, 'find_resourceid_by_name_or_id', - return_value=expected_sg_id): - observed_sg = sg_api.get(self.context, name=sg_name) - expected_sg['security_group']['project_id'] = self.context.project_id - del expected_sg['security_group']['tenant_id'] - self.assertEqual(expected_sg['security_group'], observed_sg) - self.mocked_client.show_security_group.assert_called_once_with( - expected_sg_id) - - def test_get_with_invalid_name(self): - sg_name = 'invalid_name' - expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5' - self.mocked_client.show_security_group.side_effect = TypeError - - with mock.patch.object(neutronv20, 'find_resourceid_by_name_or_id', - return_value=expected_sg_id): - self.assertRaises(exception.SecurityGroupNotFound, - sg_api.get, self.context, name=sg_name) - self.mocked_client.show_security_group.assert_called_once_with( - expected_sg_id) - def test_create_security_group_with_bad_request(self): name = 'test-security-group' description = None @@ -228,6 +157,7 @@ body) def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self): + project_id = '0af70a4d22cf4652824ddc1f2435dd85' sg1 = {'description': 'default', 'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', @@ -247,7 +177,7 @@ self.mocked_client.list_security_groups.return_value = ( {'security_groups': [sg1]}) - result = sg_api.list(self.context) + result = sg_api.list(self.context, project=project_id) expected = [{'rules': [{'from_port': -1, 'protocol': '51', 'to_port': -1, 'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e', @@ -257,7 +187,8 @@ 'id': '07f1362f-34f6-4136-819a-2dcde112269e', 'name': 'default', 'description': 'default'}] self.assertEqual(expected, result) - self.mocked_client.list_security_groups.assert_called_once_with() + self.mocked_client.list_security_groups.assert_called_once_with( + tenant_id=project_id) def test_instances_security_group_bindings(self, detailed=False): server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/notifications/test_base.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/notifications/test_base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/notifications/test_base.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/notifications/test_base.py 2020-04-10 17:57:58.000000000 +0000 @@ -86,7 +86,7 @@ mock.sentinel.ctxt, mock.sentinel.instance, None, populate_image_ref_url=True) - @mock.patch('nova.image.api.API.generate_image_url', + @mock.patch('nova.image.glance.API.generate_image_url', side_effect=ks_exc.EndpointNotFound) def test_info_from_instance_image_api_endpoint_not_found_no_token( self, mock_gen_image_url): @@ -105,7 +105,7 @@ self.assertEqual(instance.image_ref, payload['image_ref_url']) mock_gen_image_url.assert_called_once_with(instance.image_ref, ctxt) - @mock.patch('nova.image.api.API.generate_image_url', + @mock.patch('nova.image.glance.API.generate_image_url', side_effect=ks_exc.EndpointNotFound) def test_info_from_instance_image_api_endpoint_not_found_with_token( self, mock_gen_image_url): @@ -121,7 +121,7 @@ populate_image_ref_url=True) mock_gen_image_url.assert_called_once_with(instance.image_ref, ctxt) - @mock.patch('nova.image.api.API.generate_image_url') + @mock.patch('nova.image.glance.API.generate_image_url') def 
test_info_from_instance_not_call_generate_image_url( self, mock_gen_image_url): ctxt = nova_context.get_admin_context() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_fixed_ip.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_fixed_ip.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_fixed_ip.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_fixed_ip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,382 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import iso8601 -import mock -import netaddr -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import timeutils -from oslo_versionedobjects import base as ovo_base - -from nova import exception -from nova.objects import fixed_ip -from nova.objects import instance as instance_obj -from nova.tests.unit import fake_instance -from nova.tests.unit.objects import test_network -from nova.tests.unit.objects import test_objects -from nova import utils - -fake_fixed_ip = { - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': False, - 'id': 123, - 'address': '192.168.1.100', - 'network_id': None, - 'virtual_interface_id': None, - 'instance_uuid': None, - 'allocated': False, - 'leased': False, - 'reserved': False, - 'host': None, - 'network': None, - 'virtual_interface': None, - 'floating_ips': [], - } - - -class _TestFixedIPObject(object): - def _compare(self, obj, db_obj): - for field in obj.fields: - if field in ('default_route', 'floating_ips'): - continue - if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS: - if obj.obj_attr_is_set(field) and db_obj[field] is not None: - obj_val = obj[field].uuid - db_val = db_obj[field]['uuid'] - else: - continue - else: - obj_val = obj[field] - db_val = db_obj[field] - if isinstance(obj_val, netaddr.IPAddress): - obj_val = str(obj_val) - self.assertEqual(db_val, obj_val) - - @mock.patch('nova.db.api.fixed_ip_get') - def test_get_by_id(self, get): - get.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123) - get.assert_called_once_with(self.context, 123, get_network=False) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_get') - @mock.patch('nova.db.api.network_get') - def test_get_by_id_with_extras(self, network_get, fixed_get): - db_fixed = dict(fake_fixed_ip, - network=test_network.fake_network) - fixed_get.return_value = db_fixed - fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123, - expected_attrs=['network']) - fixed_get.assert_called_once_with(self.context, 123, get_network=True) - self._compare(fixedip, db_fixed) - self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) - self.assertFalse(network_get.called) - - @mock.patch('nova.db.api.fixed_ip_get_by_address') - def test_get_by_address(self, get): - get.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.get_by_address(self.context, 
'1.2.3.4') - get.assert_called_once_with(self.context, '1.2.3.4', - columns_to_join=[]) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_get_by_address') - @mock.patch('nova.db.api.network_get') - @mock.patch('nova.db.api.instance_get') - def test_get_by_address_with_extras(self, instance_get, network_get, - fixed_get): - db_fixed = dict(fake_fixed_ip, network=test_network.fake_network, - instance=fake_instance.fake_db_instance()) - fixed_get.return_value = db_fixed - fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4', - expected_attrs=['network', - 'instance']) - fixed_get.assert_called_once_with(self.context, '1.2.3.4', - columns_to_join=['network', - 'instance']) - self._compare(fixedip, db_fixed) - self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) - self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid) - self.assertFalse(network_get.called) - self.assertFalse(instance_get.called) - - @mock.patch('nova.db.api.fixed_ip_get_by_address') - @mock.patch('nova.db.api.network_get') - @mock.patch('nova.db.api.instance_get') - def test_get_by_address_with_extras_deleted_instance(self, instance_get, - network_get, - fixed_get): - db_fixed = dict(fake_fixed_ip, network=test_network.fake_network, - instance=None) - fixed_get.return_value = db_fixed - fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4', - expected_attrs=['network', - 'instance']) - fixed_get.assert_called_once_with(self.context, '1.2.3.4', - columns_to_join=['network', - 'instance']) - self._compare(fixedip, db_fixed) - self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid) - self.assertIsNone(fixedip.instance) - self.assertFalse(network_get.called) - self.assertFalse(instance_get.called) - - @mock.patch('nova.db.api.fixed_ip_get_by_floating_address') - def test_get_by_floating_address(self, get): - get.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context, - '1.2.3.4') - get.assert_called_once_with(self.context, '1.2.3.4') - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_get_by_floating_address') - def test_get_by_floating_address_none(self, get): - get.return_value = None - fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context, - '1.2.3.4') - get.assert_called_once_with(self.context, '1.2.3.4') - self.assertIsNone(fixedip) - - @mock.patch('nova.db.api.fixed_ip_get_by_network_host') - def test_get_by_network_and_host(self, get): - get.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context, - 123, 'host') - get.assert_called_once_with(self.context, 123, 'host') - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_associate') - def test_associate(self, associate): - associate.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4', - uuids.instance) - associate.assert_called_with(self.context, '1.2.3.4', uuids.instance, - network_id=None, reserved=False, - virtual_interface_id=None) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_associate') - def test_associate_with_vif(self, associate): - associate.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4', - uuids.instance, - vif_id=0) - associate.assert_called_with(self.context, '1.2.3.4', - uuids.instance, - network_id=None, reserved=False, - virtual_interface_id=0) - self._compare(fixedip, fake_fixed_ip) - - 
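The `_compare` helper driving these assertions stringifies `netaddr` values before comparing, since object fields hold `netaddr.IPAddress` while the fake db rows hold plain strings. A minimal self-contained illustration of that normalisation (assumes only the `netaddr` package):

    import netaddr

    obj_val = netaddr.IPAddress('192.168.1.100')  # as stored on the object field
    db_val = '192.168.1.100'                      # as stored in the fake db row
    if isinstance(obj_val, netaddr.IPAddress):
        obj_val = str(obj_val)                    # normalise type before comparing
    assert obj_val == db_val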
@mock.patch('nova.db.api.fixed_ip_associate_pool') - def test_associate_pool(self, associate): - associate.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123, - uuids.instance, 'host') - associate.assert_called_with(self.context, 123, - instance_uuid=uuids.instance, - host='host', virtual_interface_id=None) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_associate_pool') - def test_associate_pool_with_vif(self, associate): - associate.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123, - uuids.instance, 'host', - vif_id=0) - associate.assert_called_with(self.context, 123, - instance_uuid=uuids.instance, - host='host', virtual_interface_id=0) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_disassociate') - def test_disassociate_by_address(self, disassociate): - fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4') - disassociate.assert_called_with(self.context, '1.2.3.4') - - @mock.patch('nova.db.api.fixed_ip_disassociate_all_by_timeout') - def test_disassociate_all_by_timeout(self, disassociate): - now = timeutils.utcnow() - now_tz = timeutils.parse_isotime( - utils.isotime(now)).replace( - tzinfo=iso8601.UTC) - disassociate.return_value = 123 - result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context, - 'host', now) - self.assertEqual(123, result) - # NOTE(danms): be pedantic about timezone stuff - args, kwargs = disassociate.call_args_list[0] - self.assertEqual(now_tz, args[2]) - self.assertEqual((self.context, 'host'), args[:2]) - self.assertEqual({}, kwargs) - - @mock.patch('nova.db.api.fixed_ip_create') - def test_create(self, create): - create.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4') - fixedip.create() - create.assert_called_once_with( - self.context, {'address': '1.2.3.4'}) - self._compare(fixedip, fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_update') - def test_save(self, update): - update.return_value = fake_fixed_ip - fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4', - instance_uuid=uuids.instance) - self.assertRaises(exception.ObjectActionError, fixedip.save) - fixedip.obj_reset_changes(['address']) - fixedip.save() - update.assert_called_once_with(self.context, '1.2.3.4', - {'instance_uuid': uuids.instance}) - - @mock.patch('nova.db.api.fixed_ip_disassociate') - def test_disassociate(self, disassociate): - fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4', - instance_uuid=uuids.instance) - fixedip.obj_reset_changes() - fixedip.disassociate() - disassociate.assert_called_once_with(self.context, '1.2.3.4') - self.assertIsNone(fixedip.instance_uuid) - - @mock.patch('nova.db.api.fixed_ip_get_all') - def test_get_all(self, get_all): - get_all.return_value = [fake_fixed_ip] - fixedips = fixed_ip.FixedIPList.get_all(self.context) - self.assertEqual(1, len(fixedips)) - get_all.assert_called_once_with(self.context) - self._compare(fixedips[0], fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_get_by_instance') - def test_get_by_instance(self, get): - get.return_value = [fake_fixed_ip] - fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context, - uuids.instance) - self.assertEqual(1, len(fixedips)) - get.assert_called_once_with(self.context, uuids.instance) - self._compare(fixedips[0], fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ip_get_by_host') - def test_get_by_host(self, get): - get.return_value = 
[fake_fixed_ip] - fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host') - self.assertEqual(1, len(fixedips)) - get.assert_called_once_with(self.context, 'host') - self._compare(fixedips[0], fake_fixed_ip) - - @mock.patch('nova.db.api.fixed_ips_by_virtual_interface') - def test_get_by_virtual_interface_id(self, get): - get.return_value = [fake_fixed_ip] - fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id( - self.context, 123) - self.assertEqual(1, len(fixedips)) - get.assert_called_once_with(self.context, 123) - self._compare(fixedips[0], fake_fixed_ip) - - def test_floating_ips_do_not_lazy_load(self): - fixedip = fixed_ip.FixedIP() - self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips) - - @mock.patch('nova.db.api.fixed_ip_bulk_create') - def test_bulk_create(self, bulk): - fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'), - fixed_ip.FixedIP(address='192.168.1.2')] - fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips) - bulk.assert_called_once_with(self.context, - [{'address': '192.168.1.1'}, - {'address': '192.168.1.2'}]) - - @mock.patch('nova.db.api.network_get_associated_fixed_ips') - def test_get_by_network(self, get): - info = {'address': '1.2.3.4', - 'instance_uuid': uuids.instance, - 'network_id': 0, - 'vif_id': 1, - 'vif_address': 'de:ad:be:ee:f0:00', - 'instance_hostname': 'fake-host', - 'instance_updated': datetime.datetime(1955, 11, 5), - 'instance_created': datetime.datetime(1955, 11, 5), - 'allocated': True, - 'leased': True, - 'default_route': True, - } - get.return_value = [info] - fixed_ips = fixed_ip.FixedIPList.get_by_network( - self.context, {'id': 0}, host='fake-host') - get.assert_called_once_with(self.context, 0, host='fake-host') - self.assertEqual(1, len(fixed_ips)) - fip = fixed_ips[0] - self.assertEqual('1.2.3.4', str(fip.address)) - self.assertEqual(uuids.instance, fip.instance_uuid) - self.assertEqual(0, fip.network_id) - self.assertEqual(1, fip.virtual_interface_id) - self.assertTrue(fip.allocated) - self.assertTrue(fip.leased) - self.assertEqual(uuids.instance, fip.instance.uuid) - self.assertEqual('fake-host', fip.instance.hostname) - self.assertIsInstance(fip.instance.created_at, datetime.datetime) - self.assertIsInstance(fip.instance.updated_at, datetime.datetime) - self.assertEqual(1, fip.virtual_interface.id) - self.assertEqual(info['vif_address'], fip.virtual_interface.address) - - @mock.patch('nova.db.api.network_get_associated_fixed_ips') - def test_backport_default_route(self, mock_get): - info = {'address': '1.2.3.4', - 'instance_uuid': uuids.instance, - 'network_id': 0, - 'vif_id': 1, - 'vif_address': 'de:ad:be:ee:f0:00', - 'instance_hostname': 'fake-host', - 'instance_updated': datetime.datetime(1955, 11, 5), - 'instance_created': datetime.datetime(1955, 11, 5), - 'allocated': True, - 'leased': True, - 'default_route': True, - } - mock_get.return_value = [info] - fixed_ips = fixed_ip.FixedIPList.get_by_network( - self.context, {'id': 0}, host='fake-host') - primitive = fixed_ips[0].obj_to_primitive() - self.assertIn('default_route', primitive['nova_object.data']) - versions = ovo_base.obj_tree_get_versions('FixedIP') - fixed_ips[0].obj_make_compatible_from_manifest( - primitive['nova_object.data'], - target_version='1.1', - version_manifest=versions) - self.assertNotIn('default_route', primitive['nova_object.data']) - - def test_get_count_by_project(self): - instance = instance_obj.Instance(context=self.context, - uuid=uuids.instance, - project_id=self.context.project_id) - instance.create() - ip = 
fixed_ip.FixedIP(context=self.context, - address='192.168.1.1', - instance_uuid=instance.uuid) - ip.create() - self.assertEqual(1, fixed_ip.FixedIPList.get_count_by_project( - self.context, self.context.project_id)) - - -class TestFixedIPObject(test_objects._LocalTest, - _TestFixedIPObject): - pass - - -class TestRemoteFixedIPObject(test_objects._RemoteTest, - _TestFixedIPObject): - pass diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_floating_ip.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_floating_ip.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_floating_ip.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_floating_ip.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,289 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import netaddr -from oslo_versionedobjects import base as ovo_base - -from nova import exception -from nova import objects -from nova.objects import floating_ip -from nova import test -from nova.tests.unit.api.openstack import fakes -from nova.tests.unit.objects import test_fixed_ip -from nova.tests.unit.objects import test_network -from nova.tests.unit.objects import test_objects - -fake_floating_ip = { - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': False, - 'id': 123, - 'address': '172.17.0.1', - 'fixed_ip_id': None, - 'project_id': None, - 'host': None, - 'auto_assigned': False, - 'pool': None, - 'interface': None, - 'fixed_ip': None, -} - - -class _TestFloatingIPObject(object): - def _compare(self, obj, db_obj): - for field in obj.fields: - if field in floating_ip.FLOATING_IP_OPTIONAL_ATTRS: - if obj.obj_attr_is_set(field): - obj_val = obj[field].id - db_val = db_obj[field]['id'] - else: - continue - else: - obj_val = obj[field] - db_val = db_obj[field] - if isinstance(obj_val, netaddr.IPAddress): - obj_val = str(obj_val) - self.assertEqual(db_val, obj_val) - - @mock.patch('nova.db.api.floating_ip_get') - def test_get_by_id(self, get): - db_floatingip = dict(fake_floating_ip, - fixed_ip=test_fixed_ip.fake_fixed_ip) - get.return_value = db_floatingip - floatingip = floating_ip.FloatingIP.get_by_id(self.context, 123) - get.assert_called_once_with(self.context, 123) - self._compare(floatingip, db_floatingip) - - @mock.patch('nova.db.api.floating_ip_get_by_address') - def test_get_by_address(self, get): - get.return_value = fake_floating_ip - floatingip = floating_ip.FloatingIP.get_by_address(self.context, - '1.2.3.4') - get.assert_called_once_with(self.context, '1.2.3.4') - self._compare(floatingip, fake_floating_ip) - - @mock.patch('nova.db.api.floating_ip_get_pools') - def test_get_pool_names(self, get): - get.return_value = [{'name': 'a'}, {'name': 'b'}] - self.assertEqual(['a', 'b'], - floating_ip.FloatingIP.get_pool_names(self.context)) - - @mock.patch('nova.db.api.floating_ip_allocate_address') - def test_allocate_address(self, allocate): - 
allocate.return_value = '1.2.3.4' - self.assertEqual('1.2.3.4', - floating_ip.FloatingIP.allocate_address(self.context, - 'project', - 'pool')) - allocate.assert_called_with(self.context, 'project', 'pool', - auto_assigned=False) - - @mock.patch('nova.db.api.floating_ip_fixed_ip_associate') - def test_associate(self, associate): - db_fixed = dict(test_fixed_ip.fake_fixed_ip, - network=test_network.fake_network) - associate.return_value = db_fixed - floatingip = floating_ip.FloatingIP.associate(self.context, - '172.17.0.1', - '192.168.1.1', - 'host') - associate.assert_called_with(self.context, '172.17.0.1', - '192.168.1.1', 'host') - self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id) - self.assertEqual('172.17.0.1', str(floatingip.address)) - self.assertEqual('host', floatingip.host) - - @mock.patch('nova.db.api.floating_ip_deallocate') - def test_deallocate(self, deallocate): - floating_ip.FloatingIP.deallocate(self.context, '1.2.3.4') - deallocate.assert_called_with(self.context, '1.2.3.4') - - @mock.patch('nova.db.api.floating_ip_destroy') - def test_destroy(self, destroy): - floating_ip.FloatingIP.destroy(self.context, '1.2.3.4') - destroy.assert_called_with(self.context, '1.2.3.4') - - @mock.patch('nova.db.api.floating_ip_disassociate') - def test_disassociate(self, disassociate): - db_fixed = dict(test_fixed_ip.fake_fixed_ip, - network=test_network.fake_network) - disassociate.return_value = db_fixed - floatingip = floating_ip.FloatingIP.disassociate(self.context, - '1.2.3.4') - disassociate.assert_called_with(self.context, '1.2.3.4') - self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id) - self.assertEqual('1.2.3.4', str(floatingip.address)) - - @mock.patch('nova.db.api.floating_ip_update') - def test_save(self, update): - update.return_value = fake_floating_ip - floatingip = floating_ip.FloatingIP(context=self.context, - id=123, address='1.2.3.4', - host='foo') - floatingip.obj_reset_changes(['address', 'id']) - floatingip.save() - self.assertEqual(set(), floatingip.obj_what_changed()) - update.assert_called_with(self.context, '1.2.3.4', - {'host': 'foo'}) - - def test_save_errors(self): - floatingip = floating_ip.FloatingIP(context=self.context, - id=123, host='foo') - floatingip.obj_reset_changes() - floating_ip.address = '1.2.3.4' - self.assertRaises(exception.ObjectActionError, floatingip.save) - - floatingip.obj_reset_changes() - floatingip.fixed_ip_id = 1 - self.assertRaises(exception.ObjectActionError, floatingip.save) - - @mock.patch('nova.db.api.floating_ip_update') - def test_save_no_fixedip(self, update): - update.return_value = fake_floating_ip - floatingip = floating_ip.FloatingIP(context=self.context, - id=123) - floatingip.fixed_ip = objects.FixedIP(context=self.context, - id=456) - self.assertNotIn('fixed_ip', update.calls[1]) - - @mock.patch('nova.db.api.floating_ip_get_all') - def test_get_all(self, get): - get.return_value = [fake_floating_ip] - floatingips = floating_ip.FloatingIPList.get_all(self.context) - self.assertEqual(1, len(floatingips)) - self._compare(floatingips[0], fake_floating_ip) - get.assert_called_with(self.context) - - @mock.patch('nova.db.api.floating_ip_get_all_by_host') - def test_get_by_host(self, get): - get.return_value = [fake_floating_ip] - floatingips = floating_ip.FloatingIPList.get_by_host(self.context, - 'host') - self.assertEqual(1, len(floatingips)) - self._compare(floatingips[0], fake_floating_ip) - get.assert_called_with(self.context, 'host') - - @mock.patch('nova.db.api.floating_ip_get_all_by_project') - def 
test_get_by_project(self, get): - get.return_value = [fake_floating_ip] - floatingips = floating_ip.FloatingIPList.get_by_project(self.context, - 'project') - self.assertEqual(1, len(floatingips)) - self._compare(floatingips[0], fake_floating_ip) - get.assert_called_with(self.context, 'project') - - @mock.patch('nova.db.api.floating_ip_get_by_fixed_address') - def test_get_by_fixed_address(self, get): - get.return_value = [fake_floating_ip] - floatingips = floating_ip.FloatingIPList.get_by_fixed_address( - self.context, '1.2.3.4') - self.assertEqual(1, len(floatingips)) - self._compare(floatingips[0], fake_floating_ip) - get.assert_called_with(self.context, '1.2.3.4') - - @mock.patch('nova.db.api.floating_ip_get_by_fixed_ip_id') - def test_get_by_fixed_ip_id(self, get): - get.return_value = [fake_floating_ip] - floatingips = floating_ip.FloatingIPList.get_by_fixed_ip_id( - self.context, 123) - self.assertEqual(1, len(floatingips)) - self._compare(floatingips[0], fake_floating_ip) - get.assert_called_with(self.context, 123) - - @mock.patch('nova.db.api.instance_floating_address_get_all') - def test_get_addresses_by_instance(self, get_all): - expected = ['1.2.3.4', '4.5.6.7'] - get_all.return_value = list(expected) - ips = floating_ip.FloatingIP.get_addresses_by_instance( - self.context, {'uuid': '1234'}) - self.assertEqual(expected, ips) - get_all.assert_called_once_with(self.context, '1234') - - def test_make_ip_info(self): - result = objects.FloatingIPList.make_ip_info('1.2.3.4', 'pool', 'eth0') - self.assertEqual({'address': '1.2.3.4', 'pool': 'pool', - 'interface': 'eth0'}, - result) - - @mock.patch('nova.db.api.floating_ip_bulk_create') - def test_bulk_create(self, create_mock): - def fake_create(ctxt, ip_info, want_result=False): - return [{'id': 1, 'address': ip['address'], 'fixed_ip_id': 1, - 'project_id': fakes.FAKE_PROJECT_ID, 'host': 'host', - 'auto_assigned': False, 'pool': ip['pool'], - 'interface': ip['interface'], 'fixed_ip': None, - 'created_at': None, 'updated_at': None, - 'deleted_at': None, 'deleted': False} - for ip in ip_info] - - create_mock.side_effect = fake_create - ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0'), - objects.FloatingIPList.make_ip_info('1.1.1.2', 'loop', 'eth1')] - result = objects.FloatingIPList.create(None, ips) - self.assertIsNone(result) - result = objects.FloatingIPList.create(None, ips, want_result=True) - self.assertEqual('1.1.1.1', str(result[0].address)) - self.assertEqual('1.1.1.2', str(result[1].address)) - - @mock.patch('nova.db.api.floating_ip_bulk_destroy') - def test_bulk_destroy(self, destroy_mock): - ips = [{'address': '1.2.3.4'}, {'address': '4.5.6.7'}] - objects.FloatingIPList.destroy(None, ips) - destroy_mock.assert_called_once_with(None, ips) - - def test_backport_fixedip_1_1(self): - floating = objects.FloatingIP() - fixed = objects.FixedIP() - floating.fixed_ip = fixed - versions = ovo_base.obj_tree_get_versions('FloatingIP') - versions['FixedIP'] = '1.1' - primitive = floating.obj_to_primitive(target_version='1.1', - version_manifest=versions) - self.assertEqual('1.1', - primitive['nova_object.data']['fixed_ip']['nova_object.version']) - - def test_get_count_by_project(self): - ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0')] - objects.FloatingIPList.create(self.context, ips) - floating_ip.FloatingIP.allocate_address(self.context, - self.context.project_id, - 'pool') - self.assertEqual(1, floating_ip.FloatingIPList.get_count_by_project( - self.context, self.context.project_id)) - - 
-class TestFloatingIPObject(test_objects._LocalTest, - _TestFloatingIPObject): - pass - - -class TestRemoteFloatingIPObject(test_objects._RemoteTest, - _TestFloatingIPObject): - pass - - -class TestNeutronFloatingIPObject(test.NoDBTestCase): - def test_create_with_uuid_id(self): - uuid = 'fc9b4956-fd97-11e5-86aa-5e5517507c66' - fip = objects.floating_ip.NeutronFloatingIP(id=uuid) - - self.assertEqual(uuid, fip.id) - - def test_create_with_uuid_fixed_id(self): - uuid = 'fc9b4c3a-fd97-11e5-86aa-5e5517507c66' - fip = objects.floating_ip.NeutronFloatingIP(fixed_ip_id=uuid) - - self.assertEqual(uuid, fip.fixed_ip_id) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_instance_action.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_instance_action.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_instance_action.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_instance_action.py 2020-04-10 17:57:58.000000000 +0000 @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import traceback import mock @@ -22,6 +23,7 @@ from nova.db import api as db from nova import exception +from nova import objects from nova.objects import instance_action from nova import test from nova.tests.unit.objects import test_objects @@ -56,6 +58,7 @@ 'result': 'fake-result', 'traceback': 'fake-tb', 'host': 'fake-host', + 'details': None } @@ -250,6 +253,7 @@ expected_packed_values = test_class.pack_action_event_finish( self.context, 'fake-uuid', 'fake-event') expected_packed_values['finish_time'] = NOW + self.assertNotIn('details', expected_packed_values) mock_finish.return_value = fake_event event = instance_action.InstanceActionEvent.event_finish( self.context, 'fake-uuid', 'fake-event', want_result=True) @@ -276,17 +280,24 @@ def test_event_finish_with_failure(self, mock_finish, mock_tb): self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent + # The NovaException message will get formatted for the 'details' field. + exc_val = exception.NoValidHost(reason='some error') expected_packed_values = test_class.pack_action_event_finish( - self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb') + self.context, 'fake-uuid', 'fake-event', exc_val, 'fake-tb') expected_packed_values['finish_time'] = NOW + self.assertEqual(exc_val.format_message(), + expected_packed_values['details']) - mock_finish.return_value = fake_event + fake_event_with_details = copy.deepcopy(fake_event) + fake_event_with_details['details'] = expected_packed_values['details'] + mock_finish.return_value = fake_event_with_details event = test_class.event_finish_with_failure( - self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb', + self.context, 'fake-uuid', 'fake-event', exc_val, 'fake-tb', want_result=True) mock_finish.assert_called_once_with(self.context, expected_packed_values) - self.compare_obj(event, fake_event) + self.compare_obj(event, fake_event_with_details) + mock_tb.assert_not_called() @mock.patch.object(traceback, 'format_tb') @mock.patch.object(db, 'action_event_finish') @@ -295,18 +306,27 @@ mock_tb.return_value = 'fake-tb' self.useFixture(utils_fixture.TimeFixture(NOW)) test_class = instance_action.InstanceActionEvent + # A non-NovaException will use the exception class name for + # the 'details' field. 
+ exc_val = test.TestingException('non-nova-error') expected_packed_values = test_class.pack_action_event_finish( - self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb') + self.context, 'fake-uuid', 'fake-event', exc_val, 'fake-tb') expected_packed_values['finish_time'] = NOW + self.assertEqual('TestingException', expected_packed_values['details']) - mock_finish.return_value = fake_event + fake_event_with_details = copy.deepcopy(fake_event) + fake_event_with_details['details'] = expected_packed_values['details'] + mock_finish.return_value = fake_event_with_details fake_tb = mock.sentinel.fake_tb event = test_class.event_finish_with_failure( - self.context, 'fake-uuid', 'fake-event', exc_val='val', + self.context, 'fake-uuid', 'fake-event', exc_val=exc_val, exc_tb=fake_tb, want_result=True) + # When calling event_finish_with_failure and using exc_val as a kwarg + # serialize_args will convert exc_val to non-nova exception class name + # form before it reaches event_finish_with_failure. mock_finish.assert_called_once_with(self.context, expected_packed_values) - self.compare_obj(event, fake_event) + self.compare_obj(event, fake_event_with_details) mock_tb.assert_called_once_with(fake_tb) @mock.patch.object(db, 'action_event_finish') @@ -366,15 +386,16 @@ mock_pack): mock_format.return_value = 'traceback' mock_pack.side_effect = test.TestingException + exc = exception.NotFound() self.assertRaises( test.TestingException, instance_action.InstanceActionEvent.event_finish_with_failure, self.context, 'fake-uuid', 'fake-event', - exc_val=mock.sentinel.exc_val, + exc_val=exc, exc_tb=mock.sentinel.exc_tb) mock_pack.assert_called_once_with(self.context, 'fake-uuid', 'fake-event', - exc_val=str(mock.sentinel.exc_val), + exc_val=exc.format_message(), exc_tb='traceback') mock_format.assert_called_once_with(mock.sentinel.exc_tb) @@ -397,6 +418,17 @@ self.context, expected_updates) self.compare_obj(event, fake_event) + def test_obj_make_compatible(self): + action_event_obj = objects.InstanceActionEvent( + details=None, # added in 1.4 + host='fake-host' # added in 1.2 + ) + data = lambda x: x['nova_object.data'] + primitive = data(action_event_obj.obj_to_primitive( + target_version='1.3')) + self.assertIn('host', primitive) + self.assertNotIn('details', primitive) + class TestInstanceActionEventObject(test_objects._LocalTest, _TestInstanceActionEventObject): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_network.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_network.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_network.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_network.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,239 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import netaddr -from oslo_utils.fixture import uuidsentinel as uuids - -from nova.objects import network as network_obj -from nova.tests.unit.objects import test_objects - - -fake_network = { - 'deleted': False, - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'id': 1, - 'label': 'Fake Network', - 'injected': False, - 'cidr': '192.168.1.0/24', - 'cidr_v6': '1234::/64', - 'multi_host': False, - 'netmask': '255.255.255.0', - 'gateway': '192.168.1.1', - 'broadcast': '192.168.1.255', - 'netmask_v6': 64, - 'gateway_v6': '1234::1', - 'bridge': 'br100', - 'bridge_interface': 'eth0', - 'dns1': '8.8.8.8', - 'dns2': '8.8.4.4', - 'vlan': None, - 'vpn_public_address': None, - 'vpn_public_port': None, - 'vpn_private_address': None, - 'dhcp_start': '192.168.1.10', - 'rxtx_base': None, - 'project_id': None, - 'priority': None, - 'host': None, - 'uuid': uuids.network_instance, - 'mtu': None, - 'dhcp_server': '192.168.1.1', - 'enable_dhcp': True, - 'share_address': False, -} - - -class _TestNetworkObject(object): - def _compare(self, obj, db_obj): - for field in obj.fields: - db_val = db_obj[field] - obj_val = obj[field] - if isinstance(obj_val, netaddr.IPAddress): - obj_val = str(obj_val) - if isinstance(obj_val, netaddr.IPNetwork): - obj_val = str(obj_val) - if field == 'netmask_v6': - db_val = str(netaddr.IPNetwork('1::/%i' % db_val).netmask) - self.assertEqual(db_val, obj_val) - - @mock.patch('nova.db.api.network_get') - def test_get_by_id(self, get): - get.return_value = fake_network - network = network_obj.Network.get_by_id(self.context, 'foo') - self._compare(network, fake_network) - get.assert_called_once_with(self.context, 'foo', - project_only='allow_none') - - @mock.patch('nova.db.api.network_get_by_uuid') - def test_get_by_uuid(self, get): - get.return_value = fake_network - network = network_obj.Network.get_by_uuid(self.context, 'foo') - self._compare(network, fake_network) - get.assert_called_once_with(self.context, 'foo') - - @mock.patch('nova.db.api.network_get_by_cidr') - def test_get_by_cidr(self, get): - get.return_value = fake_network - network = network_obj.Network.get_by_cidr(self.context, - '192.168.1.0/24') - self._compare(network, fake_network) - get.assert_called_once_with(self.context, '192.168.1.0/24') - - @mock.patch('nova.db.api.network_update') - @mock.patch('nova.db.api.network_set_host') - def test_save(self, set_host, update): - result = dict(fake_network, injected=True) - network = network_obj.Network._from_db_object(self.context, - network_obj.Network(), - fake_network) - network.obj_reset_changes() - network.save() - network.label = 'bar' - update.return_value = result - network.save() - update.assert_called_once_with(self.context, network.id, - {'label': 'bar'}) - self.assertFalse(set_host.called) - self._compare(network, result) - - @mock.patch('nova.db.api.network_update') - @mock.patch('nova.db.api.network_set_host') - @mock.patch('nova.db.api.network_get') - def test_save_with_host(self, get, set_host, update): - result = dict(fake_network, injected=True) - network = network_obj.Network._from_db_object(self.context, - network_obj.Network(), - fake_network) - network.obj_reset_changes() - network.host = 'foo' - get.return_value = result - network.save() - set_host.assert_called_once_with(self.context, network.id, 'foo') - self.assertFalse(update.called) - self._compare(network, result) - - @mock.patch('nova.db.api.network_update') - @mock.patch('nova.db.api.network_set_host') - def test_save_with_host_and_other(self, set_host, 
update): - result = dict(fake_network, injected=True) - network = network_obj.Network._from_db_object(self.context, - network_obj.Network(), - fake_network) - network.obj_reset_changes() - network.host = 'foo' - network.label = 'bar' - update.return_value = result - network.save() - set_host.assert_called_once_with(self.context, network.id, 'foo') - update.assert_called_once_with(self.context, network.id, - {'label': 'bar'}) - self._compare(network, result) - - @mock.patch('nova.db.api.network_associate') - def test_associate(self, associate): - network_obj.Network.associate(self.context, 'project', - network_id=123) - associate.assert_called_once_with(self.context, 'project', - network_id=123, force=False) - - @mock.patch('nova.db.api.network_disassociate') - def test_disassociate(self, disassociate): - network_obj.Network.disassociate(self.context, 123, - host=True, project=True) - disassociate.assert_called_once_with(self.context, 123, True, True) - - @mock.patch('nova.db.api.network_create_safe') - def test_create(self, create): - create.return_value = fake_network - network = network_obj.Network(context=self.context, label='foo') - network.create() - create.assert_called_once_with(self.context, {'label': 'foo'}) - self._compare(network, fake_network) - - @mock.patch('nova.db.api.network_delete_safe') - def test_destroy(self, delete): - network = network_obj.Network(context=self.context, id=123) - network.destroy() - delete.assert_called_once_with(self.context, 123) - self.assertTrue(network.deleted) - self.assertNotIn('deleted', network.obj_what_changed()) - - @mock.patch('nova.db.api.network_get_all') - def test_get_all(self, get_all): - get_all.return_value = [fake_network] - networks = network_obj.NetworkList.get_all(self.context) - self.assertEqual(1, len(networks)) - get_all.assert_called_once_with(self.context, 'allow_none') - self._compare(networks[0], fake_network) - - @mock.patch('nova.db.api.network_get_all_by_uuids') - def test_get_all_by_uuids(self, get_all): - get_all.return_value = [fake_network] - networks = network_obj.NetworkList.get_by_uuids(self.context, - ['foo']) - self.assertEqual(1, len(networks)) - get_all.assert_called_once_with(self.context, ['foo'], 'allow_none') - self._compare(networks[0], fake_network) - - @mock.patch('nova.db.api.network_get_all_by_host') - def test_get_all_by_host(self, get_all): - get_all.return_value = [fake_network] - networks = network_obj.NetworkList.get_by_host(self.context, 'host') - self.assertEqual(1, len(networks)) - get_all.assert_called_once_with(self.context, 'host') - self._compare(networks[0], fake_network) - - @mock.patch('nova.db.api.network_in_use_on_host') - def test_in_use_on_host(self, in_use): - in_use.return_value = True - self.assertTrue(network_obj.Network.in_use_on_host(self.context, - 123, 'foo')) - in_use.assert_called_once_with(self.context, 123, 'foo') - - @mock.patch('nova.db.api.project_get_networks') - def test_get_all_by_project(self, get_nets): - get_nets.return_value = [fake_network] - networks = network_obj.NetworkList.get_by_project(self.context, 123) - self.assertEqual(1, len(networks)) - get_nets.assert_called_once_with(self.context, 123, associate=True) - self._compare(networks[0], fake_network) - - def test_compat_version_1_1(self): - network = network_obj.Network._from_db_object(self.context, - network_obj.Network(), - fake_network) - primitive = network.obj_to_primitive(target_version='1.1') - self.assertNotIn('mtu', primitive['nova_object.data']) - self.assertNotIn('enable_dhcp', 
primitive['nova_object.data']) - self.assertNotIn('dhcp_server', primitive['nova_object.data']) - self.assertNotIn('share_address', primitive['nova_object.data']) - - primitive = network.obj_to_primitive(target_version='1.2') - self.assertIn('mtu', primitive['nova_object.data']) - self.assertIn('enable_dhcp', primitive['nova_object.data']) - self.assertIn('dhcp_server', primitive['nova_object.data']) - self.assertIn('share_address', primitive['nova_object.data']) - - -class TestNetworkObject(test_objects._LocalTest, - _TestNetworkObject): - pass - - -class TestRemoteNetworkObject(test_objects._RemoteTest, - _TestNetworkObject): - pass diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_objects.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_objects.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_objects.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_objects.py 2020-04-10 17:57:58.000000000 +0000 @@ -28,6 +28,7 @@ import six from nova import context +from nova import exception from nova import objects from nova.objects import base from nova.objects import fields @@ -975,7 +976,7 @@ super(TestArgsSerializer, self).setUp() self.now = timeutils.utcnow() self.str_now = utils.strtime(self.now) - self.unicode_str = u'\xF0\x9F\x92\xA9' + self.exc = exception.NotFound() @base.serialize_args def _test_serialize_args(self, *args, **kwargs): @@ -984,14 +985,26 @@ self.assertEqual(expected_args[index], val) expected_kwargs = {'a': 'untouched', 'b': self.str_now, - 'c': self.str_now, 'exc_val': self.unicode_str} + 'c': self.str_now} + + nonnova = kwargs.pop('nonnova', None) + if nonnova: + expected_kwargs['exc_val'] = 'TestingException' + else: + expected_kwargs['exc_val'] = self.exc.format_message() for key, val in kwargs.items(): self.assertEqual(expected_kwargs[key], val) def test_serialize_args(self): self._test_serialize_args('untouched', self.now, self.now, a='untouched', b=self.now, c=self.now, - exc_val=self.unicode_str) + exc_val=self.exc) + + def test_serialize_args_non_nova_exception(self): + self._test_serialize_args('untouched', self.now, self.now, + a='untouched', b=self.now, c=self.now, + exc_val=test.TestingException('foo'), + nonnova=True) class TestRegistry(test.NoDBTestCase): @@ -1056,12 +1069,8 @@ 'DiskMetadata': '1.0-e7a0f1ccccf10d26a76b28e7492f3788', 'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9', 'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f', - 'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae', - 'FixedIPList': '1.15-07b6261cef836cb09d2d8673f68ece15', 'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3', 'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c', - 'FloatingIP': '1.10-52a67d52d85eb8b3f324a5b7935a335b', - 'FloatingIPList': '1.12-e4debd21fddb12cf40d36f737225fa9d', 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41', 'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac', 'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc', @@ -1071,11 +1080,11 @@ 'ImageMetaProps': '1.25-66fc973af215eb5701ed4034bb6f0685', 'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce', 'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5', - 'InstanceActionEvent': '1.3-c749e1b3589e7117c81cb2aa6ac438d5', + 'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4', 'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be', 'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759', 'InstanceDeviceMetadata': 
'1.0-74d78dd36aa32d26d2769a1b57caf186', - 'InstanceExternalEvent': '1.3-e47782874cca95bb96e566286e9d1e23', + 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a', 'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38', 'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451', 'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a', @@ -1103,9 +1112,7 @@ 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542', 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220', 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922', - 'Network': '1.2-a977ab383aa462a479b2fae8211a5dde', 'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716', - 'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59', 'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218', 'NetworkRequest': '1.2-af1ff2d986999fbb79377712794d82aa', 'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', @@ -1130,8 +1137,6 @@ 'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0', 'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8', 'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71', - 'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29', - 'SecurityGroupRuleList': '1.2-0005c47fcd0fb78dd6d7fd32a1409f5b', 'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264', 'Service': '1.22-8a740459ab9bf258a19c8fcb875c2d9a', 'ServiceList': '1.19-5325bce13eebcbf22edc9678285270cc', @@ -1221,8 +1226,7 @@ init_args = {} init_kwargs = {} - checker = fixture.ObjectVersionChecker( - base.NovaObjectRegistry.obj_classes()) + checker = fixture.ObjectVersionChecker(get_nova_objects()) checker.test_compatibility_routines(use_manifest=True, init_args=init_args, init_kwargs=init_kwargs) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_security_group_rule.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_security_group_rule.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/objects/test_security_group_rule.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/objects/test_security_group_rule.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_versionedobjects import exception as ovo_exc - -from nova.db import api as db -from nova import objects -from nova.tests.unit.objects import test_objects -from nova.tests.unit.objects import test_security_group - - -fake_rule = { - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': False, - 'id': 1, - 'protocol': 'tcp', - 'from_port': 22, - 'to_port': 22, - 'cidr': '0.0.0.0/0', - } - - -class _TestSecurityGroupRuleObject(object): - def test_get_by_id(self): - with mock.patch.object(db, 'security_group_rule_get') as sgrg: - sgrg.return_value = fake_rule - rule = objects.SecurityGroupRule.get_by_id( - self.context, 1) - for field in fake_rule: - if field == 'cidr': - self.assertEqual(fake_rule[field], str(getattr(rule, - field))) - else: - self.assertEqual(fake_rule[field], getattr(rule, field)) - sgrg.assert_called_with(self.context, 1) - - def test_get_by_security_group(self): - secgroup = objects.SecurityGroup() - secgroup.id = 123 - rule = dict(fake_rule) - rule['grantee_group'] = dict(test_security_group.fake_secgroup, id=123) - stupid_method = 'security_group_rule_get_by_security_group' - with mock.patch.object(db, stupid_method) as sgrgbsg: - sgrgbsg.return_value = [rule] - rules = (objects.SecurityGroupRuleList. - get_by_security_group(self.context, secgroup)) - self.assertEqual(1, len(rules)) - self.assertEqual(123, rules[0].grantee_group.id) - - @mock.patch.object(db, 'security_group_rule_create', - return_value=fake_rule) - def test_create(self, db_mock): - rule = objects.SecurityGroupRule(context=self.context) - rule.protocol = 'tcp' - secgroup = objects.SecurityGroup() - secgroup.id = 123 - parentgroup = objects.SecurityGroup() - parentgroup.id = 223 - rule.grantee_group = secgroup - rule.parent_group = parentgroup - rule.create() - updates = db_mock.call_args[0][1] - self.assertEqual(fake_rule['id'], rule.id) - self.assertEqual(updates['group_id'], rule.grantee_group.id) - self.assertEqual(updates['parent_group_id'], rule.parent_group.id) - - @mock.patch.object(db, 'security_group_rule_create', - return_value=fake_rule) - def test_set_id_failure(self, db_mock): - rule = objects.SecurityGroupRule(context=self.context) - rule.create() - self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr, - rule, 'id', 124) - - -class TestSecurityGroupRuleObject(test_objects._LocalTest, - _TestSecurityGroupRuleObject): - pass - - -class TestSecurityGroupRuleObjectRemote(test_objects._RemoteTest, - _TestSecurityGroupRuleObject): - pass - - -fake_rules = [ - dict(fake_rule, id=1, grantee_group=test_security_group.fake_secgroup), - dict(fake_rule, id=2, grantee_group=test_security_group.fake_secgroup), -] - - -class _TestSecurityGroupRuleListObject(object): - @mock.patch('nova.db.api.security_group_rule_get_by_instance') - def test_get_by_instance(self, mock_get): - mock_get.return_value = fake_rules - instance = objects.Instance(uuid=uuids.instance) - rules = objects.SecurityGroupRuleList.get_by_instance(self.context, - instance) - mock_get.assert_called_once_with(self.context, instance.uuid) - self.assertEqual(2, len(rules)) - self.assertEqual([1, 2], [x.id for x in rules]) - - -class TestSecurityGroupRuleListObject(test_objects._LocalTest, - _TestSecurityGroupRuleListObject): - pass - - -class TestSecurityGroupRuleListObjectRemote(test_objects._RemoteTest, - _TestSecurityGroupRuleListObject): - pass diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/base.py 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/base.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/base.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/base.py 2020-04-10 17:57:58.000000000 +0000 @@ -9,6 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import copy from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel as uuids @@ -23,6 +24,19 @@ class BasePolicyTest(test.TestCase): + # NOTE(gmann): Set this flag to True if you would like to tests the + # new behaviour of policy without deprecated rules. + # This means you can simulate the phase when policies completely + # switch to new behaviour by removing the support of old rules. + without_deprecated_rules = False + + # Add rules here other than base rules which need to override + # to remove the deprecated rules. + # For Example: + # rules_without_deprecation{ + # "os_compute_api:os-deferred-delete:restore": + # "rule:system_admin_or_owner"} + rules_without_deprecation = {} def setUp(self): super(BasePolicyTest, self).setUp() @@ -74,26 +88,63 @@ project_id=self.project_id_other, roles=['member', 'reader']) + self.other_project_reader_context = nova_context.RequestContext( + user_id="other_project_member", + project_id=self.project_id_other, + roles=['reader']) + self.all_contexts = [ self.legacy_admin_context, self.system_admin_context, self.system_member_context, self.system_reader_context, self.system_foo_context, self.project_admin_context, self.project_member_context, self.project_reader_context, self.other_project_member_context, - self.project_foo_context, + self.project_foo_context, self.other_project_reader_context ] + if self.without_deprecated_rules: + # To simulate the new world, remove deprecations by overriding + # rules which has the deprecated rules. + self.rules_without_deprecation.update({ + "system_admin_or_owner": + "rule:system_admin_api or rule:project_member_api", + "system_or_project_reader": + "rule:system_reader_api or rule:project_reader_api", + "system_admin_api": + "role:admin and system_scope:all", + "system_reader_api": + "role:reader and system_scope:all", + }) + self.policy.set_rules(self.rules_without_deprecation, + overwrite=False) + def common_policy_check(self, authorized_contexts, unauthorized_contexts, rule_name, func, req, *arg, **kwarg): - self.assertEqual(len(self.all_contexts), - len(authorized_contexts) + len( - unauthorized_contexts), - "Few context are missing. check all contexts " - "mentioned in self.all_contexts are tested") + # NOTE(brinzhang): When fatal=False is passed as a parameter + # in context.can(), we cannot get the desired ensure_raises(). + # At this time, we can call ensure_return() to assert the func's + # response to ensure that changes are right. + fatal = kwarg.pop('fatal', True) + authorized_response = [] + unauthorize_response = [] + + # TODO(gmann): we need to add the new context + # self.other_project_reader_context in all tests and then remove + # this conditional adjusment. + test_context = authorized_contexts + unauthorized_contexts + test_context_len = len(test_context) + if self.other_project_reader_context not in test_context: + test_context_len += 1 + self.assertEqual(len(self.all_contexts), test_context_len, + "Expected testing context are mismatch. 
check all " + "contexts mentioned in self.all_contexts are tested") + + def ensure_return(req, *args, **kwargs): + return func(req, *arg, **kwargs) - def ensure_raises(req): + def ensure_raises(req, *args, **kwargs): exc = self.assertRaises( exception.PolicyNotAuthorized, func, req, *arg, **kwarg) self.assertEqual( @@ -104,10 +155,25 @@ for context in authorized_contexts: LOG.info("Testing authorized context: %s", context) req.environ['nova.context'] = context - func(req, *arg, **kwarg) + args1 = copy.deepcopy(arg) + kwargs1 = copy.deepcopy(kwarg) + if not fatal: + authorized_response.append( + ensure_return(req, *args1, **kwargs1)) + else: + func(req, *args1, **kwargs1) + # Verify all the context not having allowed scope or roles fail # the policy check. for context in unauthorized_contexts: LOG.info("Testing unauthorized context: %s", context) req.environ['nova.context'] = context - ensure_raises(req) + args1 = copy.deepcopy(arg) + kwargs1 = copy.deepcopy(kwarg) + if not fatal: + unauthorize_response.append( + ensure_return(req, *args1, **kwargs1)) + else: + ensure_raises(req, *args1, **kwargs1) + + return authorized_response, unauthorize_response diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_admin_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_admin_password.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_admin_password.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_admin_password.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,133 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import admin_password +from nova.compute import vm_states +from nova import exception +from nova.policies import admin_password as ap_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class AdminPasswordPolicyTest(base.BasePolicyTest): + """Test Admin Password APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(AdminPasswordPolicyTest, self).setUp() + self.controller = admin_password.AdminPasswordController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.rule_name = ap_policies.BASE_POLICY_NAME + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + # Check that admin or and server owner is able to change the password + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin is not able to change the password + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.set_admin_password') + def test_change_paassword_policy(self, mock_password): + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + self.rule_name, + self.controller.change_password, + self.req, self.instance.uuid, + body={'changePassword': { + 'adminPass': '1234pass'}}) + + def test_change_password_overridden_policy_failed_with_other_user(self): + # Change the user_id in request context. + req = fakes.HTTPRequest.blank('') + req.environ['nova.context'].user_id = 'other-user' + body = {'changePassword': {'adminPass': '1234pass'}} + self.policy.set_rules({self.rule_name: "user_id:%(user_id)s"}) + exc = self.assertRaises( + exception.PolicyNotAuthorized, self.controller.change_password, + req, fakes.FAKE_UUID, body=body) + self.assertEqual( + "Policy doesn't allow %s to be performed." % self.rule_name, + exc.format_message()) + + @mock.patch('nova.compute.api.API.set_admin_password') + def test_change_password_overridden_policy_pass_with_same_user( + self, password_mock): + self.policy.set_rules({self.rule_name: "user_id:%(user_id)s"}) + body = {'changePassword': {'adminPass': '1234pass'}} + self.controller.change_password(self.req, fakes.FAKE_UUID, body=body) + password_mock.assert_called_once_with(self.req.environ['nova.context'], + mock.ANY, '1234pass') + + +class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest): + """Test Admin Password APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AdminPasswordScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class AdminPasswordNoLegacyPolicyTest(AdminPasswordPolicyTest): + """Test Admin Password APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system_admin_or_owner APIs. 
+ """ + without_deprecated_rules = True + + def setUp(self): + super(AdminPasswordNoLegacyPolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system or projct admin or owner is able to change + # the password. + self.admin_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system and non-admin/owner is not able to change the + # password. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.project_reader_context, + self.project_foo_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_agents.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_agents.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_agents.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_agents.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,208 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.api.openstack.compute import agents +from nova.db.sqlalchemy import models +from nova import exception +from nova.policies import base as base_policy +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base +from nova.tests.unit import policy_fixture + + +class AgentsPolicyTest(base.BasePolicyTest): + """Test os-agents APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AgentsPolicyTest, self).setUp() + self.controller = agents.AgentController() + self.req = fakes.HTTPRequest.blank('') + # Check that admin is able to perform the CRUD operation + # on agents. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to perform the CRUD operation + # on agents. + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + # Check that system scoped admin, member and reader are able to + # read the agent data. + # NOTE(gmann): Until old default rule which is admin_api is + # deprecated and not removed, project admin and legacy admin + # will be able to read the agent data. This make sure that existing + # tokens will keep working even we have changed this policy defaults + # to reader role. 
+ self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context, self.legacy_admin_context, + self.project_admin_context] + # Check that non-system-reader are not able to read the agent + # data + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.other_project_member_context, + self.project_foo_context, self.project_member_context, + self.project_reader_context] + + @mock.patch('nova.db.api.agent_build_destroy') + def test_delete_agent_policy(self, mock_delete): + rule_name = "os_compute_api:os-agents:delete" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.delete, + self.req, 1) + + @mock.patch('nova.db.api.agent_build_get_all') + def test_index_agents_policy(self, mock_get): + rule_name = "os_compute_api:os-agents:list" + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.index, + self.req) + + @mock.patch('nova.db.api.agent_build_update') + def test_update_agent_policy(self, mock_update): + rule_name = "os_compute_api:os-agents:update" + body = {'para': {'version': '7.0', + 'url': 'http://example.com/path/to/resource', + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.update, + self.req, 1, body=body) + + def test_create_agent_policy(self): + rule_name = "os_compute_api:os-agents:create" + body = {'agent': {'hypervisor': 'kvm', + 'os': 'win', + 'architecture': 'x86', + 'version': '7.0', + 'url': 'http://example.com/path/to/resource', + 'md5hash': 'add6bb58e139be103324d04d82d8f545'}} + + def fake_agent_build_create(context, values): + values['id'] = 1 + agent_build_ref = models.AgentBuild() + agent_build_ref.update(values) + return agent_build_ref + + self.stub_out("nova.db.api.agent_build_create", + fake_agent_build_create) + + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.create, + self.req, body=body) + + +class AgentsScopeTypePolicyTest(AgentsPolicyTest): + """Test os-agents APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AgentsScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system admin is able to perform the CRUD operation + # on agents. + self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that non-system or non-admin is not able to perform the CRUD + # operation on agents. 
+ self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.project_admin_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + # Check that system admin, member and reader are able to read the + # agent data + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context] + # Check that non-system or non-reader are not able to read the agent + # data + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.legacy_admin_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + +class AgentsDeprecatedPolicyTest(base.BasePolicyTest): + """Test os-agents APIs Deprecated policies. + This class checks if deprecated policy rules are + overridden by user on policy.json file then they + still work because oslo.policy add deprecated rules + in logical OR condition and enforce them for policy + checks if overridden. + """ + + def setUp(self): + super(AgentsDeprecatedPolicyTest, self).setUp() + self.controller = agents.AgentController() + self.admin_req = fakes.HTTPRequest.blank('') + self.admin_req.environ['nova.context'] = self.project_admin_context + self.reader_req = fakes.HTTPRequest.blank('') + self.reader_req.environ['nova.context'] = self.project_reader_context + self.deprecated_policy = "os_compute_api:os-agents" + # Overridde rule with different checks than defaults so that we can + # verify the rule overridden case. + override_rules = {self.deprecated_policy: base_policy.RULE_ADMIN_API} + # NOTE(gmann): Only override the deprecated rule in policy file so + # that we can verify if overridden checks are considered by + # oslo.policy. Oslo.policy will consider the overridden rules if: + # 1. overridden deprecated rule's checks are different than defaults + # 2. new rules are not present in policy file + self.policy = self.useFixture(policy_fixture.OverridePolicyFixture( + rules_in_file=override_rules)) + + def test_deprecated_policy_overridden_rule_is_checked(self): + # Test to verify if deprecatd overridden policy is working. + + # check for success as admin role. Deprecated rule + # has been overridden with admin checks in policy.json + # If admin role pass it means overridden rule is enforced by + # olso.policy because new default is system reader and the old + # default is admin. + with mock.patch('nova.db.api.agent_build_get_all'): + self.controller.index(self.admin_req) + + # check for failure with reader context. + exc = self.assertRaises(exception.PolicyNotAuthorized, + self.controller.index, self.reader_req) + self.assertEqual( + "Policy doesn't allow os_compute_api:os-agents:list to be" + " performed.", + exc.format_message()) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_aggregates.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_aggregates.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_aggregates.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_aggregates.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,164 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_utils.fixture import uuidsentinel as uuids + +from nova.api.openstack.compute import aggregates +from nova import objects +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class AggregatesPolicyTest(base.BasePolicyTest): + """Test Aggregates APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AggregatesPolicyTest, self).setUp() + self.controller = aggregates.AggregateController() + self.req = fakes.HTTPRequest.blank('') + # Check that admin is able to perform Aggregate Operations + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to perform Aggregate Operations + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + @mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list') + def test_list_aggregate_policy(self, mock_list): + rule_name = "os_compute_api:os-aggregates:index" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.index, + self.req) + + @mock.patch('nova.compute.api.AggregateAPI.create_aggregate') + def test_create_aggregate_policy(self, mock_create): + rule_name = "os_compute_api:os-aggregates:create" + mock_create.return_value = objects.Aggregate(**{"name": "aggregate1", + "id": "1", + "metadata": {'availability_zone': 'nova1'}, + "hosts": ["host1", "host2"]}) + body = {"aggregate": {"name": "test", + "availability_zone": "nova1"}} + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller.create, + self.req, body=body) + + @mock.patch('nova.compute.api.AggregateAPI.update_aggregate') + def test_update_aggregate_policy(self, mock_update): + rule_name = "os_compute_api:os-aggregates:update" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.update, + self.req, 1, + body={"aggregate": {"name": "new_name"}}) + + @mock.patch('nova.compute.api.AggregateAPI.delete_aggregate') + def test_delete_aggregate_policy(self, mock_delete): + rule_name = "os_compute_api:os-aggregates:delete" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller.delete, + self.req, 1) + + @mock.patch('nova.compute.api.AggregateAPI.get_aggregate') + def test_show_aggregate_policy(self, mock_show): + rule_name = "os_compute_api:os-aggregates:show" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.show, + self.req, 1) + + 
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate_metadata') + def test_set_metadata_aggregate_policy(self, mock_metadata): + rule_name = "os_compute_api:os-aggregates:set_metadata" + body = {"set_metadata": {"metadata": {"foo": "bar"}}} + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller._set_metadata, + self.req, 1, body=body) + + @mock.patch('nova.compute.api.AggregateAPI.add_host_to_aggregate') + def test_add_host_aggregate_policy(self, mock_add): + rule_name = "os_compute_api:os-aggregates:add_host" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._add_host, + self.req, 1, + body={"add_host": {"host": "host1"}}) + + @mock.patch('nova.compute.api.AggregateAPI.remove_host_from_aggregate') + def test_remove_host_aggregate_policy(self, mock_remove): + rule_name = "os_compute_api:os-aggregates:remove_host" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller._remove_host, + self.req, 1, + body={"remove_host": {"host": "host1"}}) + + @mock.patch('nova.compute.api.AggregateAPI.get_aggregate') + def test_images_aggregate_policy(self, mock_get): + rule_name = "compute:aggregates:images" + mock_get.return_value = {"name": "aggregate1", + "id": "1", + "hosts": ["host1", "host2"]} + body = {'cache': [{'id': uuids.fake_id}]} + req = fakes.HTTPRequest.blank('', version='2.81') + with mock.patch('nova.conductor.api.ComputeTaskAPI.cache_images'): + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.images, + req, 1, body=body) + + +class AggregatesScopeTypePolicyTest(AggregatesPolicyTest): + """Test Aggregates APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AggregatesScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system admin is able to perform Aggregate Operations. + self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that non-system or non-admin is not able to perform + # Aggregate Operations. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_assisted_volume_snapshots.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_assisted_volume_snapshots.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_assisted_volume_snapshots.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_assisted_volume_snapshots.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,98 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
+import urllib
+
+from nova.api.openstack.compute import assisted_volume_snapshots as snapshots
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
+    """Test Assisted Volume Snapshots APIs policies with all possible context.
+    This class defines the set of context with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(AssistedVolumeSnapshotPolicyTest, self).setUp()
+        self.controller = snapshots.AssistedVolumeSnapshotsController()
+        self.req = fakes.HTTPRequest.blank('')
+        # Check that admin is able to take volume snapshot.
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context]
+        # Check that non-admin is not able to take volume snapshot.
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+    @mock.patch('nova.compute.api.API.volume_snapshot_create')
+    def test_assisted_create_policy(self, mock_create):
+        rule_name = "os_compute_api:os-assisted-volume-snapshots:create"
+        body = {'snapshot': {'volume_id': uuids.fake_id,
+                             'create_info': {'type': 'qcow2',
+                                             'new_file': 'new_file',
+                                             'snapshot_id': 'snapshot_id'}}}
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller.create,
+                                 self.req, body=body)
+
+    @mock.patch('nova.compute.api.API.volume_snapshot_delete')
+    def test_assisted_delete_policy(self, mock_delete):
+        rule_name = "os_compute_api:os-assisted-volume-snapshots:delete"
+        params = {
+            'delete_info': jsonutils.dumps({'volume_id': '1'}),
+        }
+        req = fakes.HTTPRequest.blank('?%s' % urllib.parse.urlencode(params))
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.delete,
+                                 req, 1)
+
+
+class AssistedSnapshotScopeTypePolicyTest(AssistedVolumeSnapshotPolicyTest):
+    """Test Assisted Volume Snapshots APIs policies with system scope enabled.
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(AssistedSnapshotScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system admin is able to take volume snapshot.
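+        # (The flags() call above is the test-side equivalent of setting
+        # the following in nova.conf, after which oslo.policy rejects
+        # tokens whose scope does not match the rule's intended scope:
+        #
+        #     [oslo_policy]
+        #     enforce_scope = True
+        # )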
+ self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that non-system or non-admin is not able to take volume + # snapshot. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_attach_interfaces.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_attach_interfaces.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_attach_interfaces.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_attach_interfaces.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,241 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import attach_interfaces +from nova.compute import vm_states +from nova import exception +from nova.policies import attach_interfaces as ai_policies +from nova.policies import base as base_policy +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base +from nova.tests.unit import policy_fixture + + +class AttachInterfacesPolicyTest(base.BasePolicyTest): + """Test Attach Interfaces APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(AttachInterfacesPolicyTest, self).setUp() + self.controller = attach_interfaces.InterfaceAttachmentController() + self.req = fakes.HTTPRequest.blank('') + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_foo_context, + self.project_reader_context, self.project_member_context + ] + + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + + self.reader_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context, self.project_reader_context, + self.project_member_context, self.project_foo_context + ] + + self.reader_unauthorized_contexts = [ + self.system_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.get') + @mock.patch('nova.network.neutron.API.list_ports') + def test_index_interfaces_policy(self, mock_port, mock_get): + rule_name = "os_compute_api:os-attach-interfaces:list" + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.index, + self.req, uuids.fake_id) + + @mock.patch('nova.compute.api.API.get') + @mock.patch('nova.network.neutron.API.show_port') + def test_show_interface_policy(self, mock_port, mock_get): + rule_name = "os_compute_api:os-attach-interfaces:show" + server_id = uuids.fake_id + port_id = uuids.fake_id + mock_port.return_value = {'port': { + "id": port_id, + "network_id": uuids.fake_id, + "admin_state_up": True, + "status": "ACTIVE", + "mac_address": "bb:bb:bb:bb:bb:bb", + "fixed_ips": ["10.0.2.2"], + "device_id": server_id, + }} + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, + self.controller.show, + self.req, server_id, port_id) + + @mock.patch('nova.compute.api.API.get') + @mock.patch('nova.api.openstack.compute.attach_interfaces' + '.InterfaceAttachmentController.show') + @mock.patch('nova.compute.api.API.attach_interface') + def test_attach_interface(self, mock_interface, mock_port, mock_get): + rule_name = "os_compute_api:os-attach-interfaces:create" + body = {'interfaceAttachment': {'net_id': uuids.fake_id}} + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.create, + self.req, uuids.fake_id, body=body) + + @mock.patch('nova.compute.api.API.get') + @mock.patch('nova.compute.api.API.detach_interface') + def test_delete_interface(self, mock_detach, mock_get): + rule_name = "os_compute_api:os-attach-interfaces:delete" + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.delete, + self.req, uuids.fake_id, uuids.fake_id) + + +class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest): + """Test Attach Interfaces APIs policies with system scope enabled. 
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(AttachInterfacesScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
+    """Test Attach Interfaces APIs Deprecated policies.
+    This class checks that, if a deprecated policy rule is overridden
+    by the user in the policy.json file, it still works, because
+    oslo.policy adds deprecated rules in a logical OR condition and
+    enforces them for policy checks when they are overridden.
+    """
+
+    def setUp(self):
+        super(AttachInterfacesDeprecatedPolicyTest, self).setUp()
+        self.controller = attach_interfaces.InterfaceAttachmentController()
+        self.admin_req = fakes.HTTPRequest.blank('')
+        self.admin_req.environ['nova.context'] = self.project_admin_context
+        self.reader_req = fakes.HTTPRequest.blank('')
+        self.reader_req.environ['nova.context'] = self.project_reader_context
+        self.deprecated_policy = "os_compute_api:os-attach-interfaces"
+        # Override the rule with different checks than the defaults so that
+        # we can verify the rule overridden case.
+        override_rules = {self.deprecated_policy: base_policy.RULE_ADMIN_API}
+        # NOTE(gmann): Only override the deprecated rule in the policy file
+        # so that we can verify if overridden checks are considered by
+        # oslo.policy. Oslo.policy will consider the overridden rules if:
+        # 1. overridden deprecated rule's checks are different than defaults
+        # 2. new rules are not present in policy file
+        self.policy = self.useFixture(policy_fixture.OverridePolicyFixture(
+            rules_in_file=override_rules))
+
+    @mock.patch('nova.compute.api.API.get')
+    @mock.patch('nova.network.neutron.API.list_ports')
+    def test_deprecated_policy_overridden_rule_is_checked(self, mock_port,
+                                                          mock_get):
+        # Test to verify that the deprecated overridden policy is working.
+
+        # Check for success as admin role. The deprecated rule has been
+        # overridden with admin checks in policy.json. If the admin role
+        # passes, it means the overridden rule is enforced by oslo.policy,
+        # because the new default is system or project reader and the old
+        # default is admin.
+        self.controller.index(self.admin_req, uuids.fake_id)
+
+        # Check for failure with reader context.
+        exc = self.assertRaises(exception.PolicyNotAuthorized,
+                                self.controller.index, self.reader_req,
+                                uuids.fake_id)
+        self.assertEqual(
+            "Policy doesn't allow os_compute_api:os-attach-interfaces:list"
+            " to be performed.",
+            exc.format_message())
+
+
+class AttachInterfacesNoLegacyPolicyTest(AttachInterfacesPolicyTest):
+    """Test Attach Interfaces APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
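+    With the deprecated rules gone, only the new project- or system-scoped
+    defaults (overridden below via rules_without_deprecation) are enforced.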
+ """ + without_deprecated_rules = True + rules_without_deprecation = { + ai_policies.POLICY_ROOT % 'list': + base_policy.PROJECT_READER_OR_SYSTEM_READER, + ai_policies.POLICY_ROOT % 'show': + base_policy.PROJECT_READER_OR_SYSTEM_READER, + ai_policies.POLICY_ROOT % 'create': + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + ai_policies.POLICY_ROOT % 'delete': + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN} + + def setUp(self): + super(AttachInterfacesNoLegacyPolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system or projct admin or owner is able to + # create or delete interfaces. + self.admin_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system and non-admin/owner is not able to + # create or delete interfaces. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.project_reader_context, + self.project_foo_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context] + + # Check that system reader or projct is able to + # create or delete interfaces. + self.reader_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context, self.project_reader_context, + self.project_member_context, + ] + + # Check that non-system reader nd non-admin/owner is not able to + # create or delete interfaces. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.project_foo_context, + self.system_foo_context, self.other_project_member_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_availability_zone.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_availability_zone.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_availability_zone.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_availability_zone.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,100 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.api.openstack.compute import availability_zone +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class AvailabilityZonePolicyTest(base.BasePolicyTest): + """Test Availability Zone APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(AvailabilityZonePolicyTest, self).setUp() + self.controller = availability_zone.AvailabilityZoneController() + self.req = fakes.HTTPRequest.blank('') + + # Check that everyone is able to list the AZ + self.everyone_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.project_member_context, self.other_project_member_context, + self.project_foo_context, self.project_reader_context] + self.everyone_unauthorized_contexts = [] + + # Check that system reader is able to list the AZ Detail + # NOTE(gmann): Until old default rule which is admin_api is + # deprecated and not removed, project admin and legacy admin + # will be able to list the AZ. This make sure that existing + # tokens will keep working even we have changed this policy defaults + # to reader role. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context, self.legacy_admin_context, + self.project_admin_context] + # Check that non-system-reader are not able to list the AZ. + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.other_project_member_context, + self.project_foo_context, self.project_member_context, + self.project_reader_context] + + @mock.patch('nova.objects.Instance.save') + def test_availability_zone_list_policy(self, mock_save): + rule_name = "os_compute_api:os-availability-zone:list" + self.common_policy_check(self.everyone_authorized_contexts, + self.everyone_unauthorized_contexts, + rule_name, self.controller.index, + self.req) + + def test_availability_zone_detail_policy(self): + rule_name = "os_compute_api:os-availability-zone:detail" + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.detail, + self.req) + + +class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest): + """Test Availability Zone APIs policies with system scope enabled. + + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(AvailabilityZoneScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system reader is able to list the AZ. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context] + # Check that non-system-reader is not able to list AZ. 
+ self.reader_unauthorized_contexts = [ + self.system_foo_context, self.legacy_admin_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_console_auth_tokens.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_console_auth_tokens.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_console_auth_tokens.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_console_auth_tokens.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.api.openstack.compute import console_auth_tokens +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class ConsoleAuthTokensPolicyTest(base.BasePolicyTest): + """Test Console Auth Tokens APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(ConsoleAuthTokensPolicyTest, self).setUp() + self.controller = console_auth_tokens.ConsoleAuthTokensController() + self.req = fakes.HTTPRequest.blank('', version='2.31') + + # Check that system reader is able to get console connection + # information. + # NOTE(gmann): Until old default rule which is admin_api is + # deprecated and not removed, project admin and legacy admin + # will be able to get console. This make sure that existing + # tokens will keep working even we have changed this policy defaults + # to reader role. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context, self.legacy_admin_context, + self.project_admin_context] + # Check that non-admin is not able to get console connection + # information. + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.other_project_member_context, + self.project_foo_context, self.project_member_context, + self.project_reader_context] + + @mock.patch('nova.objects.ConsoleAuthToken.validate') + def test_console_connect_info_token_policy(self, mock_validate): + rule_name = "os_compute_api:os-console-auth-tokens" + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.show, + self.req, fakes.FAKE_UUID) + + +class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest): + """Test Console Auth Tokens APIs policies with system scope enabled. + + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. 
+ It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system reader is able to get console connection + # information. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context] + # Check that non-system-reader is not able to get console connection + # information. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.system_foo_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_console_output.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_console_output.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_console_output.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_console_output.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,110 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import console_output +from nova.compute import vm_states +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class ConsoleOutputPolicyTest(base.BasePolicyTest): + """Test Console Output APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(ConsoleOutputPolicyTest, self).setUp() + self.controller = console_output.ConsoleOutputController() + self.req = fakes.HTTPRequest.blank('') + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, project_id=self.project_id, + id=1, uuid=uuid, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + # Check that admin or owner is able to get the server console. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context + ] + # Check that non-admin and non-owner is not able to get the server + # console. 
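+        # (The console is treated as a project-owned resource here, so
+        # system-scoped contexts that are neither admin nor owner of the
+        # project are expected to fail the legacy admin_or_owner check.)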
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    @mock.patch('nova.compute.api.API.get_console_output')
+    def test_console_output_policy(self, mock_console):
+        mock_console.return_value = '\n'.join([str(i) for i in range(2)])
+        rule_name = "os_compute_api:os-console-output"
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller.get_console_output,
+                                 self.req, self.instance.uuid,
+                                 body={'os-getConsoleOutput': {}})
+
+
+class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
+    """Test Console Output APIs policies with system scope enabled.
+
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ConsoleOutputScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ConsoleOutputNoLegacyPolicyTest(ConsoleOutputPolicyTest):
+    """Test Console Output APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(ConsoleOutputNoLegacyPolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system or project admin or owner is able to
+        # get the server console.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system and non-admin/owner is not able to
+        # get the server console.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_reader_context,
+            self.project_foo_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_create_backup.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_create_backup.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_create_backup.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_create_backup.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,116 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
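+
+# NOTE: illustrative sketch, not part of the original change. The
+# common_policy_check() helper inherited from base.BasePolicyTest and used
+# by the tests below is assumed to work roughly like this: run the handler
+# once per context and assert that only authorized contexts pass the rule.
+#
+#     def common_policy_check(self, authorized_contexts,
+#                             unauthorized_contexts, rule_name,
+#                             func, req, *args, **kwargs):
+#         for context in authorized_contexts:
+#             req.environ['nova.context'] = context
+#             func(req, *args, **kwargs)          # must succeed
+#         for context in unauthorized_contexts:
+#             req.environ['nova.context'] = context
+#             exc = self.assertRaises(exception.PolicyNotAuthorized,
+#                                     func, req, *args, **kwargs)
+#             self.assertIn(rule_name, exc.format_message())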
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import create_backup
+from nova.compute import vm_states
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class CreateBackupPolicyTest(base.BasePolicyTest):
+    """Test Create Backup APIs policies with all possible context.
+
+    This class defines the set of context with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(CreateBackupPolicyTest, self).setUp()
+        self.controller = create_backup.CreateBackupController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        uuid = uuids.fake_id
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context, project_id=self.project_id,
+            id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+        # Check that admin or owner is able to create server backup.
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context
+        ]
+        # Check that non-admin and non-owner is not able to create server
+        # backup.
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    @mock.patch('nova.compute.api.API.backup')
+    def test_create_backup_policy(self, mock_backup):
+        rule_name = "os_compute_api:os-create-backup"
+        body = {
+            'createBackup': {
+                'name': 'Backup 1',
+                'backup_type': 'daily',
+                'rotation': 1,
+            },
+        }
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller._create_backup,
+                                 self.req, self.instance.uuid,
+                                 body=body)
+
+
+class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
+    """Test Create Backup APIs policies with system scope enabled.
+
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(CreateBackupScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class CreateBackupNoLegacyPolicyTest(CreateBackupPolicyTest):
+    """Test Create Backup APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(CreateBackupNoLegacyPolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system or project admin or owner is able to create
+        # server backup.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system and non-admin/owner is not able to
+        # create server backup.
+ self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.project_reader_context, + self.project_foo_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_deferred_delete.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_deferred_delete.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_deferred_delete.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_deferred_delete.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,149 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import deferred_delete +from nova.compute import vm_states +from nova import exception +from nova.policies import base as base_policy +from nova.policies import deferred_delete as dd_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class DeferredDeletePolicyTest(base.BasePolicyTest): + """Test Deferred Delete APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(DeferredDeletePolicyTest, self).setUp() + self.controller = deferred_delete.DeferredDeleteController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, project_id=self.project_id, + id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + # Check that admin or owner is able to force delete or restore server. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context + ] + # Check that non-admin and non-owner is not able to force delete or + # restore server. 
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    @mock.patch('nova.compute.api.API.restore')
+    def test_restore_server_policy(self, mock_restore):
+        rule_name = dd_policies.BASE_POLICY_NAME % 'restore'
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller._restore,
+                                 self.req, self.instance.uuid,
+                                 body={'restore': {}})
+
+    def test_force_delete_server_policy(self):
+        rule_name = dd_policies.BASE_POLICY_NAME % 'force'
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller._force_delete,
+                                 self.req, self.instance.uuid,
+                                 body={'forceDelete': {}})
+
+    def test_force_delete_server_policy_failed_with_other_user(self):
+        rule_name = dd_policies.BASE_POLICY_NAME % 'force'
+        # Change the user_id in request context.
+        req = fakes.HTTPRequest.blank('')
+        req.environ['nova.context'].user_id = 'other-user'
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        exc = self.assertRaises(
+            exception.PolicyNotAuthorized, self.controller._force_delete,
+            req, self.instance.uuid, body={'forceDelete': {}})
+        self.assertEqual(
+            "Policy doesn't allow %s to be performed." % rule_name,
+            exc.format_message())
+
+    @mock.patch('nova.compute.api.API.force_delete')
+    def test_force_delete_server_policy_pass_with_same_user(
+            self, force_delete_mock):
+        rule_name = dd_policies.BASE_POLICY_NAME % 'force'
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        self.controller._force_delete(self.req, self.instance.uuid,
+                                      body={'forceDelete': {}})
+        force_delete_mock.assert_called_once_with(
+            self.req.environ['nova.context'], self.instance)
+
+
+class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
+    """Test Deferred Delete APIs policies with system scope enabled.
+
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(DeferredDeleteScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class DeferredDeleteNoLegacyPolicyTest(DeferredDeletePolicyTest):
+    """Test Deferred Delete APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
+    """
+    without_deprecated_rules = True
+    rules_without_deprecation = {
+        dd_policies.BASE_POLICY_NAME % 'restore':
+            base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+        dd_policies.BASE_POLICY_NAME % 'force':
+            base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+
+    def setUp(self):
+        super(DeferredDeleteNoLegacyPolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system or project admin or owner is able to
+        # force delete or restore server.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system and non-admin/owner is not able to
+        # force delete or restore server.
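+        # (Both rules are overridden via rules_without_deprecation above to
+        # PROJECT_MEMBER_OR_SYSTEM_ADMIN, so reader- and foo-role contexts
+        # fall into the unauthorized set below.)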
+ self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.project_reader_context, + self.project_foo_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_evacuate.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_evacuate.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_evacuate.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_evacuate.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import evacuate +from nova.compute import vm_states +from nova import exception +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +def fake_service_get_by_compute_host(self, context, host): + return {'host_name': host, + 'service': 'compute', + 'zone': 'nova' + } + + +class EvacuatePolicyTest(base.BasePolicyTest): + """Test Evacuate APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(EvacuatePolicyTest, self).setUp() + self.controller = evacuate.EvacuateController() + self.req = fakes.HTTPRequest.blank('') + self.user_req = fakes.HTTPRequest.blank('') + user_id = self.user_req.environ['nova.context'].user_id + self.stub_out('nova.compute.api.HostAPI.service_get_by_compute_host', + fake_service_get_by_compute_host) + self.stub_out( + 'nova.api.openstack.common.' 
+            'instance_has_port_with_resource_request',
+            lambda *args, **kwargs: False)
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.compute.api.API.get')).mock
+        uuid = uuids.fake_id
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context, project_id=self.project_id,
+            id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+        # Check that admin is able to evacuate the server
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context]
+        # Check that non-admin is not able to evacuate the server
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+    @mock.patch('nova.compute.api.API.evacuate')
+    def test_evacuate_policy(self, mock_evacuate):
+        rule_name = "os_compute_api:os-evacuate"
+        body = {'evacuate': {'host': 'my-host',
+                             'onSharedStorage': 'False',
+                             'adminPass': 'admin_pass'}
+                }
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller._evacuate,
+                                 self.req, uuids.fake_id,
+                                 body=body)
+
+    def test_evacuate_policy_failed_with_other_user(self):
+        rule_name = "os_compute_api:os-evacuate"
+        # Change the user_id in request context.
+        self.user_req.environ['nova.context'].user_id = 'other-user'
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        body = {'evacuate': {'host': 'my-host',
+                             'onSharedStorage': 'False',
+                             'adminPass': 'MyNewPass'
+                             }}
+        exc = self.assertRaises(exception.PolicyNotAuthorized,
+                                self.controller._evacuate, self.user_req,
+                                fakes.FAKE_UUID, body=body)
+        self.assertEqual(
+            "Policy doesn't allow %s to be performed." % rule_name,
+            exc.format_message())
+
+    @mock.patch('nova.compute.api.API.evacuate')
+    def test_evacuate_policy_pass_with_same_user(self, evacuate_mock):
+        rule_name = "os_compute_api:os-evacuate"
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        body = {'evacuate': {'host': 'my-host',
+                             'onSharedStorage': 'False',
+                             'adminPass': 'MyNewPass'
+                             }}
+        self.controller._evacuate(self.user_req, fakes.FAKE_UUID, body=body)
+        evacuate_mock.assert_called_once_with(
+            self.user_req.environ['nova.context'],
+            mock.ANY, 'my-host', False,
+            'MyNewPass', None)
+
+
+class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
+    """Test Evacuate APIs policies with system scope enabled.
+
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(EvacuateScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        self.user_req.environ['nova.context'].system_scope = 'all'
+
+        # Check that system admin is able to evacuate server.
+        self.admin_authorized_contexts = [
+            self.system_admin_context]
+        # Check that non-system or non-admin is not able to evacuate
+        # server.
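+        # (Setting system_scope = 'all' above presumably turns the
+        # pre-built user request into a system-scoped one, so the
+        # user_id-based tests inherited from EvacuatePolicyTest still get
+        # past the scope check.)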
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.project_admin_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_flavor_access.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_flavor_access.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_flavor_access.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_flavor_access.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,193 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import flavor_access
+from nova.policies import base as base_policy
+from nova.policies import flavor_access as fa_policy
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_flavor
+from nova.tests.unit.policies import base
+
+
+class FlavorAccessPolicyTest(base.BasePolicyTest):
+    """Test Flavor Access APIs policies with all possible context.
+
+    This class defines the set of context with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(FlavorAccessPolicyTest, self).setUp()
+        self.controller = flavor_access.FlavorActionController()
+        self.controller_index = flavor_access.FlavorAccessController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_flavor')).mock
+        uuid = uuids.fake_id
+        self.flavor = fake_flavor.fake_flavor_obj(
+            self.project_member_context,
+            id=1, uuid=uuid, project_id=self.project_id,
+            is_public=False)
+        self.mock_get.return_value = self.flavor
+        self.stub_out('nova.api.openstack.identity.verify_project_id',
+                      lambda ctx, project_id: True)
+        self.stub_out('nova.objects.flavor._get_projects_from_db',
+                      lambda context, flavorid: [])
+
+        # Check that admin is able to add/remove flavor access
+        # to a tenant.
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context]
+        # Check that non-admin is not able to add/remove flavor access
+        # to a tenant.
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+        # Check that everyone is able to list flavor access
+        # information, which is exactly the problem reported as bug #1867840.
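+        # (https://bugs.launchpad.net/nova/+bug/1867840)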
+        self.reader_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context]
+
+        self.reader_unauthorized_contexts = [
+        ]
+
+    def test_list_flavor_access_policy(self):
+        rule_name = fa_policy.BASE_POLICY_NAME
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller_index.index,
+                                 self.req, '1')
+
+    @mock.patch('nova.objects.Flavor.add_access')
+    def test_add_tenant_access_policy(self, mock_add):
+        rule_name = fa_policy.POLICY_ROOT % "add_tenant_access"
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller._add_tenant_access,
+                                 self.req, '1',
+                                 body={'addTenantAccess': {'tenant': 't1'}})
+
+    @mock.patch('nova.objects.Flavor.remove_access')
+    def test_remove_tenant_access_policy(self, mock_remove):
+        rule_name = fa_policy.POLICY_ROOT % "remove_tenant_access"
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller._remove_tenant_access,
+                                 self.req, '1',
+                                 body={'removeTenantAccess': {'tenant': 't1'}})
+
+
+class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
+    """Test Flavor Access APIs policies with system scope enabled.
+
+    This class set the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on oslo policy side.
+    It defines the set of context with scoped token
+    which are allowed and not allowed to pass the policy checks.
+    With those set of context, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(FlavorAccessScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system admin is able to add/remove flavor access
+        # to a tenant.
+        self.admin_authorized_contexts = [
+            self.system_admin_context]
+        # Check that non-system-admin is not able to add/remove flavor access
+        # to a tenant.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.project_admin_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+        # Check that system user is able to list flavor access
+        # information.
+        self.reader_authorized_contexts = [
+            self.system_admin_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context]
+        # Check that non-system is not able to list flavor access
+        # information.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.other_project_member_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+
+
+class FlavorAccessNoLegacyPolicyTest(FlavorAccessPolicyTest):
+    """Test FlavorAccess APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_reader APIs.
+ """ + without_deprecated_rules = True + rules_without_deprecation = { + fa_policy.POLICY_ROOT % "add_tenant_access": + base_policy.SYSTEM_ADMIN, + fa_policy.POLICY_ROOT % "remove_tenant_access": + base_policy.SYSTEM_ADMIN, + fa_policy.BASE_POLICY_NAME: + base_policy.SYSTEM_READER} + + def setUp(self): + super(FlavorAccessNoLegacyPolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system admin is able to add/remove flavor access + # to a tenant. + self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that non-system-admin is not able to add/remove flavor access + # to a tenant. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.project_admin_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + # Check that system reader is able to list flavor access + # information. + self.reader_authorized_contexts = [ + self.system_admin_context, + self.system_member_context, self.system_reader_context] + # Check that non-system-reader is not able to list flavor access + # information. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.other_project_member_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.system_foo_context] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_flavor_manage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_flavor_manage.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_flavor_manage.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_flavor_manage.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,126 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_utils.fixture import uuidsentinel as uuids + +from nova.api.openstack.compute import flavor_manage +from nova.policies import flavor_manage as fm_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class FlavorManagePolicyTest(base.BasePolicyTest): + """Test os-flavor-manage APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(FlavorManagePolicyTest, self).setUp() + self.controller = flavor_manage.FlavorManageController() + self.req = fakes.HTTPRequest.blank('') + # Check that admin is able to manage the flavors. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to manage the flavors. 
+ self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + def test_create_flavor_policy(self): + rule_name = fm_policies.POLICY_ROOT % 'create' + + def fake_create(newflavor): + newflavor['flavorid'] = uuids.fake_id + newflavor["name"] = 'test' + newflavor["memory_mb"] = 512 + newflavor["vcpus"] = 2 + newflavor["root_gb"] = 1 + newflavor["ephemeral_gb"] = 1 + newflavor["swap"] = 512 + newflavor["rxtx_factor"] = 1.0 + newflavor["is_public"] = True + newflavor["disabled"] = False + self.stub_out("nova.objects.Flavor.create", fake_create) + body = { + "flavor": { + "name": "test", + "ram": 512, + "vcpus": 2, + "disk": 1, + } + } + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._create, + self.req, body=body) + + @mock.patch('nova.objects.Flavor.get_by_flavor_id') + @mock.patch('nova.objects.Flavor.save') + def test_update_flavor_policy(self, mock_save, mock_get): + rule_name = fm_policies.POLICY_ROOT % 'update' + req = fakes.HTTPRequest.blank('', version='2.55') + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._update, + req, uuids.fake_id, + body={'flavor': {'description': None}}) + + @mock.patch('nova.objects.Flavor.destroy') + def test_delete_flavor_policy(self, mock_delete): + rule_name = fm_policies.POLICY_ROOT % 'delete' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._delete, + self.req, uuids.fake_id) + + +class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest): + """Test os-flavor-manage APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(FlavorManageScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system admin is able to manage the flavors. + self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that non-system-admin is not able to manage the flavors. + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.project_admin_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + +class FlavorManageNoLegacyPolicyTest(FlavorManageScopeTypePolicyTest): + """Test Flavor Manage APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system_admin_or_owner APIs. 
+ """ + without_deprecated_rules = True diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_hypervisors.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_hypervisors.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_hypervisors.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_hypervisors.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,158 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.api.openstack.compute import hypervisors +from nova.policies import base as base_policy +from nova.policies import hypervisors as hv_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class HypervisorsPolicyTest(base.BasePolicyTest): + """Test os-hypervisors APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(HypervisorsPolicyTest, self).setUp() + self.controller = hypervisors.HypervisorsController() + self.req = fakes.HTTPRequest.blank('') + self.controller._get_compute_nodes_by_name_pattern = mock.MagicMock() + self.controller.host_api.compute_node_get_all = mock.MagicMock() + self.controller.host_api.service_get_by_compute_host = mock.MagicMock() + self.controller.host_api.compute_node_get = mock.MagicMock() + + # Check that system scoped admin, member and reader are able to + # perform operations on hypervisors. + # NOTE(gmann): Until old default rule which is admin_api is + # deprecated and not removed, project admin and legacy admin + # will be able to read the agent data. This make sure that existing + # tokens will keep working even we have changed this policy defaults + # to reader role. 
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context, self.legacy_admin_context,
+            self.project_admin_context]
+        # Check that non-system-reader is not able to perform operations
+        # on hypervisors
+        self.reader_unauthorized_contexts = [
+            self.system_foo_context, self.other_project_member_context,
+            self.project_foo_context, self.project_member_context,
+            self.project_reader_context]
+
+    def test_list_hypervisors_policy(self):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'list'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.index,
+                                 self.req)
+
+    def test_list_details_hypervisors_policy(self):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'list-detail'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.detail,
+                                 self.req)
+
+    def test_show_hypervisors_policy(self):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'show'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.show,
+                                 self.req, 11111)
+
+    @mock.patch('nova.compute.api.HostAPI.get_host_uptime')
+    def test_uptime_hypervisors_policy(self, mock_uptime):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'uptime'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.uptime,
+                                 self.req, 11111)
+
+    def test_search_hypervisors_policy(self):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'search'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.search,
+                                 self.req, 11111)
+
+    def test_servers_hypervisors_policy(self):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'servers'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.servers,
+                                 self.req, 11111)
+
+    @mock.patch('nova.compute.api.HostAPI.compute_node_statistics')
+    def test_statistics_hypervisors_policy(self, mock_statistics):
+        rule_name = hv_policies.BASE_POLICY_NAME % 'statistics'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.statistics,
+                                 self.req)
+
+
+class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
+    """Test os-hypervisors APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on the oslo policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(HypervisorsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system reader is able to perform operations
+        # on hypervisors.
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-system-reader is not able to perform operations
+        # on hypervisors.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+
+class HypervisorsNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
+    """Test Hypervisors APIs policies with system scope enabled,
+    and no more deprecated rules.
+    """
+    without_deprecated_rules = True
+    rules_without_deprecation = {
+        hv_policies.BASE_POLICY_NAME % 'list':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'list-detail':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'show':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'statistics':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'uptime':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'search':
+            base_policy.SYSTEM_READER,
+        hv_policies.BASE_POLICY_NAME % 'servers':
+            base_policy.SYSTEM_READER,
+    }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_instance_actions.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_instance_actions.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_instance_actions.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_instance_actions.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,335 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import fixtures
+import mock
+
+from oslo_policy import policy as oslo_policy
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack import api_version_request
+from nova.api.openstack.compute import instance_actions as instance_actions_v21
+from nova.compute import vm_states
+from nova import exception
+from nova.policies import base as base_policy
+from nova.policies import instance_actions as ia_policies
+from nova import policy
+from nova.tests.unit.api.openstack.compute import test_instance_actions
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.policies import base
+from nova.tests.unit import policy_fixture
+
+FAKE_UUID = fake_server_actions.FAKE_UUID
+FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
+
+
+class InstanceActionsPolicyTest(base.BasePolicyTest):
+    """Test os-instance-actions APIs policies with all possible contexts.
+
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(InstanceActionsPolicyTest, self).setUp()
+        self.controller = instance_actions_v21.InstanceActionsController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
+        self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
+
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        uuid = uuids.fake_id
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context,
+            id=1, uuid=uuid, project_id=self.project_id,
+            vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+
+        # Check that system reader is able to show the instance
+        # actions events.
+        self.system_reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context, self.legacy_admin_context,
+            self.project_admin_context]
+        # Check that non-system-reader is not able to show the instance
+        # actions events.
+        self.system_reader_unauthorized_contexts = [
+            self.system_foo_context, self.other_project_member_context,
+            self.project_foo_context, self.project_member_context,
+            self.project_reader_context]
+
+        self.project_or_system_reader_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.system_member_context,
+            self.system_reader_context, self.project_reader_context,
+            self.project_member_context, self.project_foo_context
+        ]
+
+        self.project_or_system_reader_unauthorized_contexts = [
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    def _set_policy_rules(self, overwrite=True):
+        rules = {ia_policies.BASE_POLICY_NAME % 'show': '@'}
+        policy.set_rules(oslo_policy.Rules.from_dict(rules),
+                         overwrite=overwrite)
+
+    def test_index_instance_action_policy(self):
+        rule_name = ia_policies.BASE_POLICY_NAME % "list"
+        self.common_policy_check(
+            self.project_or_system_reader_authorized_contexts,
+            self.project_or_system_reader_unauthorized_contexts,
+            rule_name, self.controller.index,
+            self.req, self.instance['uuid'])
+
+    @mock.patch('nova.compute.api.InstanceActionAPI.action_get_by_request_id')
+    def test_show_instance_action_policy(self, mock_action_get):
+        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+        mock_action_get.return_value = fake_action
+        rule_name = ia_policies.BASE_POLICY_NAME % "show"
+        self.common_policy_check(
+            self.project_or_system_reader_authorized_contexts,
+            self.project_or_system_reader_unauthorized_contexts,
+            rule_name, self.controller.show,
+            self.req, self.instance['uuid'], fake_action['request_id'])
+
+    @mock.patch('nova.objects.InstanceActionEventList.get_by_action')
+    @mock.patch('nova.objects.InstanceAction.get_by_request_id')
+    def test_show_instance_action_policy_with_events(
+        self, mock_get_action, mock_get_events):
+        """Test to ensure the 'os_compute_api:os-instance-actions:show'
+        policy rule check is skipped.
+        """
+        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+        mock_get_action.return_value = fake_action
+        fake_events = self.fake_events[fake_action['id']]
+        fake_action['events'] = fake_events
+        mock_get_events.return_value = fake_events
+        fake_action_fmt = test_instance_actions.format_action(
+            copy.deepcopy(fake_action))
+
+        self._set_policy_rules(overwrite=False)
+        rule_name = ia_policies.BASE_POLICY_NAME % "events"
+        authorize_res, unauthorize_res = self.common_policy_check(
+            self.system_reader_authorized_contexts,
+            self.system_reader_unauthorized_contexts,
+            rule_name, self.controller.show,
+            self.req, self.instance['uuid'],
+            fake_action['request_id'], fatal=False)
+
+        for action in authorize_res:
+            # In order to unify the display forms of 'start_time' and
+            # 'finish_time', format the results returned by the show api.
+            res_fmt = test_instance_actions.format_action(
+                action['instanceAction'])
+            self.assertEqual(fake_action_fmt['events'], res_fmt['events'])
+
+        for action in unauthorize_res:
+            self.assertNotIn('events', action['instanceAction'])
+
+
+class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
+    """Test os-instance-actions APIs deprecated policies.
+
+    This class checks that if deprecated policy rules are overridden
+    by the user in the policy.json file then they still work, because
+    oslo.policy adds deprecated rules in a logical OR condition
+    and enforces them for policy checks if overridden.
+    """
+
+    def setUp(self):
+        super(InstanceActionsDeprecatedPolicyTest, self).setUp()
+        self.controller = instance_actions_v21.InstanceActionsController()
+        self.admin_or_owner_req = fakes.HTTPRequest.blank('')
+        self.admin_or_owner_req.environ[
+            'nova.context'] = self.project_admin_context
+        self.reader_req = fakes.HTTPRequest.blank('')
+        self.reader_req.environ['nova.context'] = self.project_reader_context
+        self.deprecated_policy = ia_policies.ROOT_POLICY
+        # Override the rule with different checks than the defaults so that
+        # we can verify the rule-overridden case.
+        override_rules = {
+            self.deprecated_policy: base_policy.RULE_ADMIN_OR_OWNER,
+        }
+        # NOTE(brinzhang): Only override the deprecated rule in the policy
+        # file so that we can verify if overridden checks are considered by
+        # oslo.policy.
+        # oslo.policy will consider the overridden rules if:
+        # 1. the overridden deprecated rule's checks differ from the defaults
+        # 2. the new rules are not present in the policy file
+        self.policy = self.useFixture(policy_fixture.OverridePolicyFixture(
+            rules_in_file=override_rules))
+
+    @mock.patch('nova.compute.api.InstanceActionAPI.actions_get')
+    @mock.patch('nova.api.openstack.common.get_instance')
+    def test_deprecated_policy_overridden_rule_is_checked(
+        self, mock_instance_get, mock_actions_get):
+        # Test to verify that the deprecated, overridden policy is working.
+
+        instance = fake_instance.fake_instance_obj(
+            self.admin_or_owner_req.environ['nova.context'])
+
+        # Check for success as the admin_or_owner role. The deprecated rule
+        # has been overridden with admin checks in policy.json.
+        # If the admin role passes, it means the overridden rule is enforced
+        # by oslo.policy, because the new default is system reader and the
+        # old default is admin.
+        self.controller.index(self.admin_or_owner_req, instance['uuid'])
+
+        # Check for failure with the reader context.
+        exc = self.assertRaises(exception.PolicyNotAuthorized,
+                                self.controller.index,
+                                self.reader_req,
+                                instance['uuid'])
+        self.assertEqual(
+            "Policy doesn't allow os_compute_api:os-instance-actions:list "
+            "to be performed.", exc.format_message())
+
+
+class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
+    """Test os-instance-actions APIs policies with system scope enabled.
+
+    This class sets the nova.conf [oslo_policy] enforce_scope to True,
+    so that we can switch on the scope checking on the oslo policy side.
+    It defines the set of contexts with scoped tokens which are allowed
+    and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(InstanceActionsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system reader is able to get the
+        # instance action events.
+        self.system_reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-system-reader is not able to
+        # get the instance action events.
+        self.system_reader_unauthorized_contexts = [
+            self.system_foo_context, self.legacy_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+    @mock.patch('nova.objects.InstanceActionEventList.get_by_action')
+    @mock.patch('nova.objects.InstanceAction.get_by_request_id')
+    def test_show_instance_action_policy_with_show_details(
+        self, mock_get_action, mock_get_events):
+        """Test to ensure the 'os_compute_api:os-instance-actions:show'
+        policy rule check is skipped.
+        """
+        self.req.api_version_request = api_version_request.APIVersionRequest(
+            '2.84')
+        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+        mock_get_action.return_value = fake_action
+        fake_events = self.fake_events[fake_action['id']]
+        fake_action['events'] = fake_events
+        mock_get_events.return_value = fake_events
+        fake_action_fmt = test_instance_actions.format_action(
+            copy.deepcopy(fake_action))
+
+        self._set_policy_rules(overwrite=False)
+        rule_name = ia_policies.BASE_POLICY_NAME % "events:details"
+        authorize_res, unauthorize_res = self.common_policy_check(
+            self.system_reader_authorized_contexts,
+            self.system_reader_unauthorized_contexts,
+            rule_name, self.controller.show,
+            self.req, self.instance['uuid'],
+            fake_action['request_id'], fatal=False)
+
+        for action in authorize_res:
+            # Ensure the 'details' field is in the action events.
+            for event in action['instanceAction']['events']:
+                self.assertIn('details', event)
+            # In order to unify the display forms of 'start_time' and
+            # 'finish_time', format the results returned by the show api.
+            res_fmt = test_instance_actions.format_action(
+                action['instanceAction'])
+            self.assertEqual(fake_action_fmt['events'], res_fmt['events'])
+
+        # Because the microversion is > '2.51', 'events' will be contained
+        # in the os-instance-actions show API response, but 'details'
+        # should not be included in the action events.
+        for action in unauthorize_res:
+            # Ensure the 'details' field is not in the action events.
+            for event in action['instanceAction']['events']:
+                self.assertNotIn('details', event)
+
+
+class InstanceActionsNoLegacyPolicyTest(InstanceActionsPolicyTest):
+    """Test os-instance-actions APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
+    """
+    without_deprecated_rules = True
+    rules_without_deprecation = {
+        ia_policies.BASE_POLICY_NAME % 'list':
+            base_policy.PROJECT_READER_OR_SYSTEM_READER,
+        ia_policies.BASE_POLICY_NAME % 'show':
+            base_policy.PROJECT_READER_OR_SYSTEM_READER,
+        ia_policies.BASE_POLICY_NAME % 'events':
+            base_policy.SYSTEM_READER,
+    }
+
+    def setUp(self):
+        super(InstanceActionsNoLegacyPolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system reader is able to get the
+        # instance action events.
+        self.system_reader_authorized_contexts = [
+            self.system_admin_context, self.system_reader_context,
+            self.system_member_context]
+        # Check that non-system-reader is not able to
+        # get the instance action events.
+        self.system_reader_unauthorized_contexts = [
+            self.project_admin_context,
+            self.system_foo_context, self.legacy_admin_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_member_context,
+            self.project_reader_context]
+
+        # Check that system or project reader is able to
+        # show the instance actions events.
+        self.project_or_system_reader_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.system_member_context,
+            self.system_reader_context, self.project_reader_context,
+            self.project_member_context,
+        ]
+
+        # Check that non-system or non-project reader is not able to
+        # show the instance actions events.
+        self.project_or_system_reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_foo_context,
+            self.system_foo_context, self.other_project_member_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_instance_usage_audit_log.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_instance_usage_audit_log.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_instance_usage_audit_log.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_instance_usage_audit_log.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,109 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.api.openstack.compute import instance_usage_audit_log as iual
+from nova.policies import base as base_policy
+from nova.policies import instance_usage_audit_log as iual_policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class InstanceUsageAuditLogPolicyTest(base.BasePolicyTest):
+    """Test os-instance-usage-audit-log APIs policies with all possible
+    contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(InstanceUsageAuditLogPolicyTest, self).setUp()
+        self.controller = iual.InstanceUsageAuditLogController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.controller.host_api.task_log_get_all = mock.MagicMock()
+        self.controller.host_api.service_get_all = mock.MagicMock()
+
+        # Check that admin is able to get the instance usage audit log.
+        # NOTE(gmann): Until the old default rule, which is admin_api, is
+        # deprecated and not removed, project admin and legacy admin
+        # will be able to read the audit log data. This makes sure that
+        # existing tokens will keep working even though we have changed this
+        # policy's defaults to the reader role.
+        self.reader_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-admin is not able to get the instance usage audit log.
+        self.reader_unauthorized_contexts = [
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+    def test_show_policy(self):
+        rule_name = iual_policies.BASE_POLICY_NAME % 'show'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.show,
+                                 self.req, '2020-03-25 14:40:00')
+
+    def test_index_policy(self):
+        rule_name = iual_policies.BASE_POLICY_NAME % 'list'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.index,
+                                 self.req)
+
+
+class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
+    """Test os-instance-usage-audit-log APIs policies with system scope
+    enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on the oslo policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(InstanceUsageScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system reader is able to get the instance usage audit
+        # log.
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-system-reader is not able to get the instance
+        # usage audit log.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+
+class InstanceUsageNoLegacyPolicyTest(InstanceUsageScopeTypePolicyTest):
+    """Test Instance Usage Audit Log APIs policies with system scope enabled,
+    and no more deprecated rules.
+    """
+    without_deprecated_rules = True
+    rules_without_deprecation = {
+        iual_policies.BASE_POLICY_NAME % 'list':
+            base_policy.SYSTEM_READER,
+        iual_policies.BASE_POLICY_NAME % 'show':
+            base_policy.SYSTEM_READER,
+    }
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_limits.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_limits.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_limits.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_limits.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,136 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.api.openstack.compute import limits
+from nova.policies import base as base_policy
+from nova.policies import limits as limits_policies
+from nova import quota
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class LimitsPolicyTest(base.BasePolicyTest):
+    """Test Limits APIs policies with all possible contexts.
+
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(LimitsPolicyTest, self).setUp()
+        self.controller = limits.LimitsController()
+        self.req = fakes.HTTPRequest.blank('')
+
+        self.absolute_limits = {
+            'ram': 512,
+            'instances': 5,
+            'cores': 21,
+            'key_pairs': 10,
+            'floating_ips': 10,
+            'security_groups': 10,
+            'security_group_rules': 20,
+        }
+
+        def stub_get_project_quotas(context, project_id, usages=True):
+            return {k: dict(limit=v, in_use=v // 2)
+                    for k, v in self.absolute_limits.items()}
+
+        mock_get_project_quotas = mock.patch.object(
+            quota.QUOTAS,
+            "get_project_quotas",
+            side_effect=stub_get_project_quotas)
+        mock_get_project_quotas.start()
+
+        # Check that everyone is able to get their own limits.
+        self.everyone_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.project_member_context, self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context]
+        self.everyone_unauthorized_contexts = []
+
+        # Check that system reader is able to get other projects' limits.
+        # NOTE(gmann): Until the old default rule, which is admin_api, is
+        # deprecated and not removed, project admin and legacy admin
+        # will be able to read other projects' limits. This makes sure
+        # that existing tokens will keep working even though we have changed
+        # this policy's defaults to the reader role.
+        self.reader_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-admin is not able to get other projects' limits.
+        self.reader_unauthorized_contexts = [
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+    def test_get_limits_policy(self):
+        rule_name = limits_policies.BASE_POLICY_NAME
+        self.common_policy_check(self.everyone_authorized_contexts,
+                                 self.everyone_unauthorized_contexts,
+                                 rule_name, self.controller.index,
+                                 self.req)
+
+    def test_get_other_limits_policy(self):
+        req = fakes.HTTPRequest.blank('/?tenant_id=faketenant')
+        rule_name = limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name, self.controller.index,
+                                 req)
+
+
+class LimitsScopeTypePolicyTest(LimitsPolicyTest):
+    """Test Limits APIs policies with system scope enabled.
+
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on the scope checking on the oslo policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(LimitsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that system reader is able to get other projects' limits.
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-system-reader is not able to get other
+        # projects' limits.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_foo_context,
+            self.project_admin_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+
+class LimitsNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
+    """Test Limits APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system APIs.
+    """
+    without_deprecated_rules = True
+    rules_without_deprecation = {
+        limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
+            base_policy.SYSTEM_READER}
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_lock_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_lock_server.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_lock_server.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_lock_server.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,232 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
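+
+# NOTE: illustrative sketch of the rule exercised below (the rule name is
+# real; the check string is assumed from the defaults asserted in these
+# tests):
+#
+#     policy.DocumentedRuleDefault(
+#         name='os_compute_api:os-lock-server:unlock:unlock_override',
+#         check_str=base.SYSTEM_ADMIN,
+#         ...)
+#
+# i.e. breaking a lock held by another user requires an admin-level token
+# on top of the plain 'unlock' rule.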
+ +import fixtures +import mock +from nova.policies import base as base_policy +from nova.policies import lock_server as ls_policies +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import lock_server +from nova.compute import vm_states +from nova import exception +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class LockServerPolicyTest(base.BasePolicyTest): + """Test Lock server APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(LockServerPolicyTest, self).setUp() + self.controller = lock_server.LockServerController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + + # Check that admin or and server owner is able to lock/unlock + # the sevrer + self.admin_or_owner_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin/owner is not able to lock/unlock + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + # Check that admin is able to unlock the server which is + # locked by other + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to unlock the server + # which is locked by other + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.lock') + def test_lock_server_policy(self, mock_lock): + rule_name = ls_policies.POLICY_ROOT % 'lock' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._lock, + self.req, self.instance.uuid, + body={'lock': {}}) + + @mock.patch('nova.compute.api.API.unlock') + def test_unlock_server_policy(self, mock_unlock): + rule_name = ls_policies.POLICY_ROOT % 'unlock' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._unlock, + self.req, self.instance.uuid, + body={'unlock': {}}) + + @mock.patch('nova.compute.api.API.unlock') + @mock.patch('nova.compute.api.API.is_expected_locked_by') + def test_unlock_override_server_policy(self, mock_expected, mock_unlock): + mock_expected.return_value = False + rule = ls_policies.POLICY_ROOT % 'unlock' + self.policy.set_rules({rule: "@"}, overwrite=False) + rule_name = 
ls_policies.POLICY_ROOT % 'unlock:unlock_override' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller._unlock, + self.req, self.instance.uuid, + body={'unlock': {}}) + + def test_lock_server_policy_failed_with_other_user(self): + # Change the user_id in request context. + req = fakes.HTTPRequest.blank('') + req.environ['nova.context'].user_id = 'other-user' + rule_name = ls_policies.POLICY_ROOT % 'lock' + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + exc = self.assertRaises( + exception.PolicyNotAuthorized, self.controller._lock, + req, fakes.FAKE_UUID, body={'lock': {}}) + self.assertEqual( + "Policy doesn't allow %s to be performed." % rule_name, + exc.format_message()) + + @mock.patch('nova.compute.api.API.lock') + def test_lock_sevrer_overridden_policy_pass_with_same_user( + self, mock_lock): + rule_name = ls_policies.POLICY_ROOT % 'lock' + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + self.controller._lock(self.req, + fakes.FAKE_UUID, + body={'lock': {}}) + + +class LockServerScopeTypePolicyTest(LockServerPolicyTest): + """Test Lock Server APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(LockServerScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class LockServerNoLegacyPolicyTest(LockServerScopeTypePolicyTest): + """Test Lock Server APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system APIs. + """ + without_deprecated_rules = True + + def setUp(self): + super(LockServerNoLegacyPolicyTest, self).setUp() + # Check that system admin or and server owner is able to lock/unlock + # the sevrer + self.admin_or_owner_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system/admin/owner is not able to lock/unlock + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.other_project_member_context, self.project_reader_context, + self.project_foo_context + ] + + # Check that system admin is able to unlock the server which is + # locked by other + self.admin_authorized_contexts = [ + self.system_admin_context] + # Check that system non-admin is not able to unlock the server + # which is locked by other + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.project_admin_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + +class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest): + """Test Lock Server APIs policies with system and project scoped + but default to system roles only are allowed for project roles + if override by operators. This test is with system scope enable + and no more deprecated rules. 
+ """ + + def setUp(self): + super(LockServerOverridePolicyTest, self).setUp() + + # Check that system admin or project scoped role as override above + # is able to unlock the server which is locked by other + self.admin_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system admin or project role is not able to + # unlock the server which is locked by other + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + + def test_unlock_override_server_policy(self): + rule = ls_policies.POLICY_ROOT % 'unlock:unlock_override' + self.policy.set_rules({ + # make unlock allowed for everyone so that we can check unlock + # override policy. + ls_policies.POLICY_ROOT % 'unlock': "@", + rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}, overwrite=False) + super(LockServerOverridePolicyTest, + self).test_unlock_override_server_policy() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_migrate_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_migrate_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_migrate_server.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_migrate_server.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,159 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import migrate_server +from nova.compute import vm_states +from nova.policies import base as base_policy +from nova.policies import migrate_server as ms_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class MigrateServerPolicyTest(base.BasePolicyTest): + """Test Migrate Server APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(MigrateServerPolicyTest, self).setUp() + self.controller = migrate_server.MigrateServerController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, project_id=self.project_id, + id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + + # Check that admin is able to migrate the server. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context + ] + # Check that non-admin is not able to migrate the server + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.resize') + @mock.patch('nova.api.openstack.common.' + 'instance_has_port_with_resource_request', return_value=False) + def test_migrate_server_policy(self, mock_port, mock_resize): + rule_name = ms_policies.POLICY_ROOT % 'migrate' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._migrate, + self.req, self.instance.uuid, + body={'migrate': None}) + + @mock.patch('nova.compute.api.API.live_migrate') + @mock.patch('nova.api.openstack.common.' + 'instance_has_port_with_resource_request', return_value=False) + def test_migrate_live_server_policy(self, mock_port, mock_live_migrate): + rule_name = ms_policies.POLICY_ROOT % 'migrate_live' + body = {'os-migrateLive': { + 'host': 'hostname', + 'block_migration': "False", + 'disk_over_commit': "False"} + } + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._migrate_live, + self.req, self.instance.uuid, + body=body) + + +class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest): + """Test Migrate Server APIs policies with system scope enabled. + + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(MigrateServerScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class MigrateServerNoLegacyPolicyTest(MigrateServerScopeTypePolicyTest): + """Test Migrate Server APIs policies with system scope enabled, + and no more deprecated rules. + """ + without_deprecated_rules = True + + def setUp(self): + super(MigrateServerNoLegacyPolicyTest, self).setUp() + # Check that system admin is able to migrate the server. 
+ self.admin_authorized_contexts = [ + self.system_admin_context + ] + # Check that non system admin is not able to migrate the server + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.project_admin_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] + + +class MigrateServerOverridePolicyTest(MigrateServerNoLegacyPolicyTest): + """Test Migrate Server APIs policies with system and project scoped + but default to system roles only are allowed for project roles + if override by operators. This test is with system scope enable + and no more deprecated rules. + """ + + def setUp(self): + super(MigrateServerOverridePolicyTest, self).setUp() + rule_migrate = ms_policies.POLICY_ROOT % 'migrate' + rule_live_migrate = ms_policies.POLICY_ROOT % 'migrate_live' + # NOTE(gmann): override the rule to project member and verify it + # work as policy is system and projct scoped. + self.policy.set_rules({ + rule_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + rule_live_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}, + overwrite=False) + + # Check that system admin or project scoped role as override above + # is able to migrate the server + self.admin_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system admin or project role is not able to + # migrate the server + self.admin_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_migrations.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_migrations.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from nova.api.openstack.compute import migrations +from nova.policies import migrations as migrations_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit.policies import base + + +class MigrationsPolicyTest(base.BasePolicyTest): + """Test Migrations APIs policies with all possible context. + + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(MigrationsPolicyTest, self).setUp() + self.controller = migrations.MigrationsController() + self.req = fakes.HTTPRequest.blank('') + + # Check that admin is able to list migrations. + self.reader_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context + ] + # Check that non-admin is not able to list migrations. + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.get_migrations') + def test_list_migrations_policy(self, mock_migration): + rule_name = migrations_policies.POLICY_ROOT % 'index' + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.index, + self.req) + + +class MigrationsScopeTypePolicyTest(MigrationsPolicyTest): + """Test Migrations APIs policies with system scope enabled. + + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(MigrationsScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + # Check that system reader is able to list migrations. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context] + # Check that non system reader is not able to list migrations. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.project_admin_context, + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_pause_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_pause_server.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_pause_server.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_pause_server.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,143 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import fixtures
+import mock
+from nova.policies import pause_server as ps_policies
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import pause_server
+from nova.compute import vm_states
+from nova import exception
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class PauseServerPolicyTest(base.BasePolicyTest):
+    """Test Pause server APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(PauseServerPolicyTest, self).setUp()
+        self.controller = pause_server.PauseServerController()
+        self.req = fakes.HTTPRequest.blank('')
+        user_id = self.req.environ['nova.context'].user_id
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        uuid = uuids.fake_id
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context,
+            id=1, uuid=uuid, project_id=self.project_id,
+            user_id=user_id, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+
+        # Check that admin or server owner is able to pause/unpause
+        # the server.
+        self.admin_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+        # Check that non-admin/owner is not able to pause/unpause
+        # the server.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    @mock.patch('nova.compute.api.API.pause')
+    def test_pause_server_policy(self, mock_pause):
+        rule_name = ps_policies.POLICY_ROOT % 'pause'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller._pause,
+                                 self.req, self.instance.uuid,
+                                 body={'pause': {}})
+
+    @mock.patch('nova.compute.api.API.unpause')
+    def test_unpause_server_policy(self, mock_unpause):
+        rule_name = ps_policies.POLICY_ROOT % 'unpause'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller._unpause,
+                                 self.req, self.instance.uuid,
+                                 body={'unpause': {}})
+
+    def test_pause_server_policy_failed_with_other_user(self):
+        # Change the user_id in request context.
+        req = fakes.HTTPRequest.blank('')
+        req.environ['nova.context'].user_id = 'other-user'
+        rule_name = ps_policies.POLICY_ROOT % 'pause'
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        exc = self.assertRaises(
+            exception.PolicyNotAuthorized, self.controller._pause,
+            req, fakes.FAKE_UUID, body={'pause': {}})
+        self.assertEqual(
+            "Policy doesn't allow %s to be performed." % rule_name,
+            exc.format_message())
+
+    @mock.patch('nova.compute.api.API.pause')
+    def test_pause_server_overridden_policy_pass_with_same_user(
+            self, mock_pause):
+        rule_name = ps_policies.POLICY_ROOT % 'pause'
+        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+        self.controller._pause(self.req,
+                               fakes.FAKE_UUID,
+                               body={'pause': {}})
+
+
+class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
+    """Test Pause Server APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(PauseServerScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class PauseServerNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
+    """Test Pause Server APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system APIs.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(PauseServerNoLegacyPolicyTest, self).setUp()
+        # Check that system admin or server owner is able to pause/unpause
+        # the server.
+        self.admin_or_owner_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system/admin/owner is not able to pause/unpause
+        # the server.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.other_project_member_context, self.project_reader_context,
+            self.project_foo_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_remote_consoles.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_remote_consoles.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_remote_consoles.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_remote_consoles.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,113 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from nova.policies import remote_consoles as rc_policies
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import remote_consoles
+from nova.compute import vm_states
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class RemoteConsolesPolicyTest(base.BasePolicyTest):
+    """Test Remote Consoles APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(RemoteConsolesPolicyTest, self).setUp()
+        self.controller = remote_consoles.RemoteConsolesController()
+        mock_handler = mock.MagicMock()
+        mock_handler.return_value = {'url': "http://fake"}
+        self.controller.handlers['vnc'] = mock_handler
+        self.req = fakes.HTTPRequest.blank('', version='2.6')
+        user_id = self.req.environ['nova.context'].user_id
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        uuid = uuids.fake_id
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context,
+            id=1, uuid=uuid, project_id=self.project_id,
+            user_id=user_id, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+
+        # Check that admin or server owner is able to get server
+        # remote consoles.
+        self.admin_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+        # Check that non-admin/owner is not able to get server
+        # remote consoles.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    def test_create_console_policy(self):
+        rule_name = rc_policies.BASE_POLICY_NAME
+        body = {'remote_console': {'protocol': 'vnc', 'type': 'novnc'}}
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.create,
+                                 self.req, self.instance.uuid,
+                                 body=body)
+
+
+class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
+    """Test Remote Consoles APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(RemoteConsolesScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class RemoteConsolesNoLegacyPolicyTest(RemoteConsolesScopeTypePolicyTest):
+    """Test Remote Consoles APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system APIs.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(RemoteConsolesNoLegacyPolicyTest, self).setUp()
+        # Check that system admin or server owner is able to get server
+        # remote consoles.
+        self.admin_or_owner_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system/admin/owner is not able to get server
+        # remote consoles.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.other_project_member_context, self.project_reader_context,
+            self.project_foo_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_rescue.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_rescue.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_rescue.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_rescue.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,149 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from nova.policies import base as base_policy
+from nova.policies import rescue as rs_policies
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import rescue
+from nova.compute import vm_states
+from nova import exception
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class RescueServerPolicyTest(base.BasePolicyTest):
+    """Test Rescue Server APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+ """ + + def setUp(self): + super(RescueServerPolicyTest, self).setUp() + self.controller = rescue.RescueController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + + # Check that admin or and server owner is able to rescue/unrescue + # the sevrer + self.admin_or_owner_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin/owner is not able to rescue/unrescue + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.rescue') + def test_rescue_server_policy(self, mock_rescue): + rule_name = rs_policies.BASE_POLICY_NAME + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._rescue, + self.req, self.instance.uuid, + body={'rescue': {}}) + + @mock.patch('nova.compute.api.API.unrescue') + def test_unrescue_server_policy(self, mock_unrescue): + rule_name = rs_policies.UNRESCUE_POLICY_NAME + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._unrescue, + self.req, self.instance.uuid, + body={'unrescue': {}}) + + def test_rescue_server_policy_failed_with_other_user(self): + # Change the user_id in request context. + req = fakes.HTTPRequest.blank('') + req.environ['nova.context'].user_id = 'other-user' + rule_name = rs_policies.BASE_POLICY_NAME + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + exc = self.assertRaises( + exception.PolicyNotAuthorized, self.controller._rescue, + req, fakes.FAKE_UUID, body={'rescue': {}}) + self.assertEqual( + "Policy doesn't allow %s to be performed." % rule_name, + exc.format_message()) + + @mock.patch('nova.compute.api.API.rescue') + def test_rescue_sevrer_overridden_policy_pass_with_same_user( + self, mock_rescue): + rule_name = rs_policies.BASE_POLICY_NAME + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + self.controller._rescue(self.req, + fakes.FAKE_UUID, + body={'rescue': {}}) + + +class RescueServerScopeTypePolicyTest(RescueServerPolicyTest): + """Test Rescue Server APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(RescueServerScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class RescueServerNoLegacyPolicyTest(RescueServerScopeTypePolicyTest): + """Test Rescue Server APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system APIs. + """ + without_deprecated_rules = True + rules_without_deprecation = { + rs_policies.UNRESCUE_POLICY_NAME: + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + rs_policies.BASE_POLICY_NAME: + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN} + + def setUp(self): + super(RescueServerNoLegacyPolicyTest, self).setUp() + # Check that system admin or and server owner is able to + # rescue/unrescue the sevrer + self.admin_or_owner_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system/admin/owner is not able to rescue/unrescue + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.other_project_member_context, self.project_reader_context, + self.project_foo_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_security_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_security_groups.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_security_groups.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_security_groups.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,170 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import security_groups +from nova.compute import vm_states +from nova.policies import base as base_policy +from nova.policies import security_groups as policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class SecurityGroupsPolicyTest(base.BasePolicyTest): + """Test Security Groups APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(SecurityGroupsPolicyTest, self).setUp() + self.controller = security_groups.ServerSecurityGroupController() + self.action_ctr = security_groups.SecurityGroupActionController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + + # Check that admin or and server owner is able to operate + # server security groups. + self.admin_or_owner_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin/owner is not able to operate + # server security groups. + self.admin_or_owner_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.other_project_member_context, + self.other_project_reader_context + ] + + self.reader_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context, self.project_reader_context, + self.project_member_context, self.project_foo_context + ] + + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.other_project_member_context, + self.other_project_reader_context + ] + + @mock.patch('nova.network.security_group_api.get_instance_security_groups') + def test_get_security_groups_policy(self, mock_get): + rule_name = policies.POLICY_NAME % 'list' + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, + self.controller.index, + self.req, self.instance.uuid) + + @mock.patch('nova.network.security_group_api.add_to_instance') + def test_add_security_groups_policy(self, mock_add): + rule_name = policies.POLICY_NAME % 'add' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.action_ctr._addSecurityGroup, + self.req, self.instance.uuid, + body={'addSecurityGroup': + {'name': 'fake'}}) + + @mock.patch('nova.network.security_group_api.remove_from_instance') + def test_remove_security_groups_policy(self, mock_remove): + rule_name = policies.POLICY_NAME % 'remove' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.action_ctr._removeSecurityGroup, + self.req, self.instance.uuid, + body={'removeSecurityGroup': + {'name': 'fake'}}) + + +class SecurityGroupsScopeTypePolicyTest(SecurityGroupsPolicyTest): + """Test Security Groups APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(SecurityGroupsScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest): + """Test Security Groups APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system_admin_or_owner APIs. + """ + without_deprecated_rules = True + rules_without_deprecation = { + policies.POLICY_NAME % 'list': + base_policy.PROJECT_READER_OR_SYSTEM_READER, + policies.POLICY_NAME % 'add': + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN, + policies.POLICY_NAME % 'remove': + base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN} + + def setUp(self): + super(SecurityGroupsNoLegacyPolicyTest, self).setUp() + + # Check that system or projct admin or owner is able to operate + # server security groups. + self.admin_or_owner_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system and non-admin/owner is not able to operate + # server security groups. + self.admin_or_owner_unauthorized_contexts = [ + self.legacy_admin_context, self.project_reader_context, + self.project_foo_context, + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.other_project_member_context, + self.other_project_reader_context] + + # Check that system reader or projct is able to get + # server security groups. + self.reader_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.system_member_context, + self.system_reader_context, self.project_reader_context, + self.project_member_context, + ] + + # Check that non-system reader nd non-admin/owner is not able to get + # server security groups. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.project_foo_context, + self.system_foo_context, self.other_project_member_context, + self.other_project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_diagnostics.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_diagnostics.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_diagnostics.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_diagnostics.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,137 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+from oslo_utils import timeutils
+
+from nova.api.openstack.compute import server_diagnostics
+from nova.compute import vm_states
+from nova.policies import base as base_policy
+from nova.policies import server_diagnostics as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServerDiagnosticsPolicyTest(base.BasePolicyTest):
+    """Test Server Diagnostics APIs policies with all possible contexts.
+
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerDiagnosticsPolicyTest, self).setUp()
+        self.controller = server_diagnostics.ServerDiagnosticsController()
+        self.req = fakes.HTTPRequest.blank('', version='2.48')
+        self.controller.compute_api.get_instance_diagnostics = mock.MagicMock()
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context, project_id=self.project_id,
+            id=1, uuid=uuids.fake_id, vm_state=vm_states.ACTIVE,
+            task_state=None, launched_at=timeutils.utcnow())
+        self.mock_get.return_value = self.instance
+
+        # Check that admin is able to get server diagnostics.
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context
+        ]
+        # Check that non-admin is not able to get server diagnostics.
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.other_project_member_context
+        ]
+
+    def test_server_diagnostics_policy(self):
+        rule_name = policies.BASE_POLICY_NAME
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller.index,
+                                 self.req, self.instance.uuid)
+
+
+class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
+    """Test Server Diagnostics APIs policies with system scope enabled.
+
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerDiagnosticsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerDiagnosticsNoLegacyPolicyTest(
+        ServerDiagnosticsScopeTypePolicyTest):
+    """Test Server Diagnostics APIs policies with system scope enabled,
+    and no more deprecated rules.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(ServerDiagnosticsNoLegacyPolicyTest, self).setUp()
+        # Check that system admin is able to get server diagnostics.
+        self.admin_authorized_contexts = [
+            self.system_admin_context
+        ]
+        # Check that non-system admin is not able to get server diagnostics.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.other_project_member_context
+        ]
+
+
+class ServerDiagnosticsOverridePolicyTest(ServerDiagnosticsNoLegacyPolicyTest):
+    """Test Server Diagnostics APIs policies with system and project scope,
+    where by default only system roles are allowed but project roles can
+    be allowed if operators override the policy. This test is with system
+    scope enabled and no more deprecated rules.
+    """
+
+    def setUp(self):
+        super(ServerDiagnosticsOverridePolicyTest, self).setUp()
+        rule = policies.BASE_POLICY_NAME
+        # NOTE(gmann): override the rule to project member and verify it
+        # works as the policy is system and project scoped.
+        self.policy.set_rules({
+            rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+            overwrite=False)
+
+        # Check that system admin or the project roles overridden above
+        # are able to get server diagnostics.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system admin or project role is not able to
+        # get server diagnostics.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_external_events.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_external_events.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_external_events.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_external_events.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,93 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_external_events as ev
+from nova.policies import server_external_events as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class ServerExternalEventsPolicyTest(base.BasePolicyTest):
+    """Test Server External Events APIs policies with all possible contexts.
+
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerExternalEventsPolicyTest, self).setUp()
+        self.controller = ev.ServerExternalEventsController()
+        self.req = fakes.HTTPRequest.blank('')
+
+        # Check that admin is able to create the server external events.
+        self.admin_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context
+        ]
+        # Check that non-admin is not able to create the server
+        # external events.
+        self.admin_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.other_project_member_context
+        ]
+
+    # NOTE: stacked mock.patch decorators pass mocks in bottom-up order.
+    @mock.patch('nova.compute.api.API.external_instance_event')
+    @mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids')
+    @mock.patch('nova.objects.InstanceList.get_by_filters')
+    def test_server_external_events_policy(self, mock_filter, mock_get,
+                                           mock_event):
+        rule_name = policies.POLICY_ROOT % 'create'
+        body = {'events': [{'name': 'network-vif-plugged',
+                            'server_uuid': uuids.fake_id,
+                            'status': 'completed'}]
+                }
+        self.common_policy_check(self.admin_authorized_contexts,
+                                 self.admin_unauthorized_contexts,
+                                 rule_name, self.controller.create,
+                                 self.req, body=body)
+
+
+class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
+    """Test Server External Events APIs policies with system scope enabled.
+
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerExternalEventsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+        # Check that admin is able to create the server external events.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+        ]
+        # Check that non-admin is not able to create the server
+        # external events.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.other_project_member_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_groups.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_groups.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_groups.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_groups.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
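
The ScopeType test variants all flip ``[oslo_policy] enforce_scope`` via
``self.flags``, as in the class just above. A sketch (not part of the patch)
of what that flag changes, assuming oslo.policy's documented behaviour of
raising ``InvalidScope`` when the token scope does not match a rule's
``scope_types``; the ``role:admin`` check string and ``scope_types`` value
here are illustrative:

.. code:: python

    from oslo_config import cfg
    from oslo_policy import policy

    conf = cfg.ConfigOpts()
    conf([])
    enforcer = policy.Enforcer(conf)  # registers the [oslo_policy] options
    conf.set_override('enforce_scope', True, group='oslo_policy')
    enforcer.register_default(policy.RuleDefault(
        'os_compute_api:os-server-external-events:create',
        'role:admin', scope_types=['system']))

    system_admin = {'roles': ['admin'], 'system_scope': 'all'}
    project_admin = {'roles': ['admin'], 'project_id': 'p1'}

    # A system-scoped admin token passes the scope check and the rule.
    assert enforcer.enforce(
        'os_compute_api:os-server-external-events:create', {}, system_admin)
    # A project-scoped token is rejected before the rule is evaluated.
    try:
        enforcer.enforce(
            'os_compute_api:os-server-external-events:create',
            {}, project_admin)
    except policy.InvalidScope:
        print('project-scoped token rejected once enforce_scope is on')
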
+
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_groups
+from nova.policies import server_groups as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class ServerGroupPolicyTest(base.BasePolicyTest):
+    """Test Server Groups APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerGroupPolicyTest, self).setUp()
+        self.controller = server_groups.ServerGroupController()
+        self.req = fakes.HTTPRequest.blank('')
+
+        # Policy is admin_or_owner, but we do not pass the project id in
+        # policy enforcement to check the ownership; the project id is
+        # simply that of the server group the request is made for. So
+        # let's keep it as it is; with new defaults and scope enabled,
+        # these can be authorized to meaningful roles.
+        self.admin_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context,
+            self.other_project_member_context]
+        self.admin_or_owner_unauthorized_contexts = [
+        ]
+
+    @mock.patch('nova.objects.InstanceGroupList.get_all')
+    def test_index_server_groups_policy(self, mock_get):
+        rule_name = policies.POLICY_ROOT % 'index'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.index,
+                                 self.req)
+
+    @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
+    def test_show_server_groups_policy(self, mock_get):
+        rule_name = policies.POLICY_ROOT % 'show'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.show,
+                                 self.req, uuids.fake_id)
+
+    @mock.patch('nova.objects.Quotas.check_deltas')
+    def test_create_server_groups_policy(self, mock_quota):
+        rule_name = policies.POLICY_ROOT % 'create'
+        body = {'server_group': {'name': 'fake',
+                                 'policies': ['affinity']}}
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.create,
+                                 self.req, body=body)
+
+    @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
+    def test_delete_server_groups_policy(self, mock_get):
+        rule_name = policies.POLICY_ROOT % 'delete'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.delete,
+                                 self.req, uuids.fake_id)
+
+
+class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
+    """Test Server Groups APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+ """ + + def setUp(self): + super(ServerGroupScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_ips.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_ips.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_ips.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_ips.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,119 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import ips +from nova.compute import vm_states +from nova.policies import ips as ips_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class ServerIpsPolicyTest(base.BasePolicyTest): + """Test Server IPs APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(ServerIpsPolicyTest, self).setUp() + self.controller = ips.IPsController() + self.req = fakes.HTTPRequest.blank('') + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + uuid = uuids.fake_id + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuid, project_id=self.project_id, + vm_state=vm_states.ACTIVE, + task_state=None, launched_at=timeutils.utcnow()) + self.mock_get.return_value = self.instance + self.mock_get_network = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common' + '.get_networks_for_instance')).mock + self.mock_get_network.return_value = {'net1': + {'ips': '', 'floating_ips': ''}} + + # Check that admin or and server owner is able to get server + # IP addresses. 
+        self.reader_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.system_member_context, self.system_reader_context]
+        # Check that non-admin/owner is not able to get the server IP
+        # addresses.
+        self.reader_or_owner_unauthorized_contexts = [
+            self.system_foo_context,
+            self.other_project_member_context
+        ]
+
+    def test_index_ips_policy(self):
+        rule_name = ips_policies.POLICY_ROOT % 'index'
+        self.common_policy_check(self.reader_or_owner_authorized_contexts,
+                                 self.reader_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.index,
+                                 self.req, self.instance.uuid)
+
+    def test_show_ips_policy(self):
+        rule_name = ips_policies.POLICY_ROOT % 'show'
+        self.common_policy_check(self.reader_or_owner_authorized_contexts,
+                                 self.reader_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.show,
+                                 self.req, self.instance.uuid,
+                                 'net1')
+
+
+class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
+    """Test Server IPs APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerIpsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerIpsNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
+    """Test Server IPs APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system_admin_or_owner APIs.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(ServerIpsNoLegacyPolicyTest, self).setUp()
+
+        # Check that system reader or owner is able to
+        # get the server IP addresses.
+        self.reader_or_owner_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context, self.project_admin_context,
+            self.project_member_context, self.project_reader_context]
+        # Check that non-system and non-owner is not able to
+        # get the server IP addresses.
+        self.reader_or_owner_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_foo_context,
+            self.system_foo_context, self.other_project_member_context]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_metadata.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_metadata.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_metadata.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,182 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
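
Several tests above (pause, rescue) override a rule to ``user_id:%(user_id)s``
through ``self.policy.set_rules``. That check string grants access only when
the ``user_id`` in the request credentials matches the ``user_id`` of the
target server. A standalone sketch (not part of the patch); the rule name
follows nova's convention and the credential values are made up:

.. code:: python

    from oslo_config import cfg
    from oslo_policy import policy

    conf = cfg.ConfigOpts()
    conf([])
    enforcer = policy.Enforcer(conf)
    enforcer.register_default(policy.RuleDefault(
        'os_compute_api:os-pause-server:pause', 'user_id:%(user_id)s'))

    target = {'project_id': 'p1', 'user_id': 'alice'}
    owner = {'roles': ['member'], 'project_id': 'p1', 'user_id': 'alice'}
    other_user = {'roles': ['member'], 'project_id': 'p1', 'user_id': 'bob'}

    # The instance owner passes the overridden rule ...
    assert enforcer.enforce('os_compute_api:os-pause-server:pause',
                            target, owner)
    # ... while any other user in the same project fails it.
    assert not enforcer.enforce('os_compute_api:os-pause-server:pause',
                                target, other_user)
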
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_metadata
+from nova.policies import server_metadata as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServerMetadataPolicyTest(base.BasePolicyTest):
+    """Test Server Metadata APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerMetadataPolicyTest, self).setUp()
+        self.controller = server_metadata.ServerMetadataController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context,
+            id=1, uuid=uuids.fake_id, project_id=self.project_id)
+        self.mock_get.return_value = self.instance
+
+        # Check that admin or server owner is able to CRUD
+        # the server metadata.
+        self.admin_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+        # Check that non-admin/owner is not able to CRUD
+        # the server metadata.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.other_project_member_context,
+            self.other_project_reader_context
+        ]
+        # Check that admin or server owner is able to get
+        # the server metadata.
+        self.reader_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.system_member_context, self.system_reader_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+        # Check that non-admin/owner is not able to get
+        # the server metadata.
+        self.reader_unauthorized_contexts = [
+            self.system_foo_context, self.other_project_member_context,
+            self.other_project_reader_context
+        ]
+
+    @mock.patch('nova.compute.api.API.get_instance_metadata')
+    def test_index_server_metadata_policy(self, mock_get):
+        rule_name = policies.POLICY_ROOT % 'index'
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.index,
+                                 self.req, self.instance.uuid)
+
+    @mock.patch('nova.compute.api.API.get_instance_metadata')
+    def test_show_server_metadata_policy(self, mock_get):
+        rule_name = policies.POLICY_ROOT % 'show'
+        mock_get.return_value = {'key9': 'value'}
+        self.common_policy_check(self.reader_authorized_contexts,
+                                 self.reader_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.show,
+                                 self.req, self.instance.uuid, 'key9')
+
+    @mock.patch('nova.compute.api.API.update_instance_metadata')
+    def test_create_server_metadata_policy(self, mock_quota):
+        rule_name = policies.POLICY_ROOT % 'create'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.create,
+                                 self.req, self.instance.uuid,
+                                 body={"metadata": {"key9": "value9"}})
+
+    @mock.patch('nova.compute.api.API.update_instance_metadata')
+    def test_update_server_metadata_policy(self, mock_quota):
+        rule_name = policies.POLICY_ROOT % 'update'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.update,
+                                 self.req, self.instance.uuid, 'key9',
+                                 body={"meta": {"key9": "value9"}})
+
+    @mock.patch('nova.compute.api.API.update_instance_metadata')
+    def test_update_all_server_metadata_policy(self, mock_quota):
+        rule_name = policies.POLICY_ROOT % 'update_all'
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.update_all,
+                                 self.req, self.instance.uuid,
+                                 body={"metadata": {"key9": "value9"}})
+
+    @mock.patch('nova.compute.api.API.get_instance_metadata')
+    @mock.patch('nova.compute.api.API.delete_instance_metadata')
+    def test_delete_server_metadata_policy(self, mock_delete, mock_get):
+        rule_name = policies.POLICY_ROOT % 'delete'
+        mock_get.return_value = {'key9': 'value'}
+        self.common_policy_check(self.admin_or_owner_authorized_contexts,
+                                 self.admin_or_owner_unauthorized_contexts,
+                                 rule_name,
+                                 self.controller.delete,
+                                 self.req, self.instance.uuid, 'key9')
+
+
+class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
+    """Test Server Metadata APIs policies with system scope enabled.
+    This class sets the nova.conf [oslo_policy] enforce_scope to True
+    so that we can switch on scope checking on the oslo.policy side.
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerMetadataScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerMetadataNoLegacyPolicyTest(ServerMetadataScopeTypePolicyTest):
+    """Test Server Metadata APIs policies with system scope enabled,
+    and no more deprecated rules that allow the legacy admin API to
+    access system APIs.
+ """ + without_deprecated_rules = True + + def setUp(self): + super(ServerMetadataNoLegacyPolicyTest, self).setUp() + # Check that system admin or project member is able to create, update + # and delete the server metadata. + self.admin_or_owner_authorized_contexts = [ + self.system_admin_context, self.project_admin_context, + self.project_member_context] + # Check that non-system/admin/member is not able to create, update + # and delete the server metadata. + self.admin_or_owner_unauthorized_contexts = [ + self.legacy_admin_context, self.system_reader_context, + self.system_foo_context, self.system_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context, + self.other_project_reader_context + ] + # Check that system admin or project member is able to + # get the server metadata. + self.reader_authorized_contexts = [ + self.system_admin_context, + self.system_member_context, self.system_reader_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context] + # Check that non-system/admin/member is not able to + # get the server metadata. + self.reader_unauthorized_contexts = [ + self.legacy_admin_context, self.system_foo_context, + self.project_foo_context, self.other_project_member_context, + self.other_project_reader_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_migrations.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_migrations.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_migrations.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_migrations.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,208 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock + +from oslo_utils.fixture import uuidsentinel as uuids + +from nova.api.openstack.compute import server_migrations +from nova.compute import vm_states +from nova.policies import base as base_policy +from nova.policies import servers_migrations as policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + + +class ServerMigrationsPolicyTest(base.BasePolicyTest): + """Test Server Migrations APIs policies with all possible context. + This class defines the set of context with different roles + which are allowed and not allowed to pass the policy checks. + With those set of context, it will call the API operation and + verify the expected behaviour. 
+ """ + + def setUp(self): + super(ServerMigrationsPolicyTest, self).setUp() + self.controller = server_migrations.ServerMigrationsController() + self.req = fakes.HTTPRequest.blank('', version='2.24') + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuids.fake_id, project_id=self.project_id, + vm_state=vm_states.ACTIVE) + self.mock_get.return_value = self.instance + + # Check that admin is able to perform operations + # for server migrations. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to perform operations + # for server migrations. + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.other_project_member_context, + self.project_foo_context, self.project_reader_context + ] + # Check that system-reader are able to perform operations + # for server migrations. + self.reader_authorized_contexts = [ + self.system_admin_context, self.system_member_context, + self.system_reader_context, self.legacy_admin_context, + self.project_admin_context] + # Check that non-system-reader are not able to perform operations + # for server migrations. + self.reader_unauthorized_contexts = [ + self.system_foo_context, self.other_project_member_context, + self.project_foo_context, self.project_member_context, + self.project_reader_context] + + @mock.patch('nova.compute.api.API.get_migrations_in_progress_by_instance') + def test_list_server_migrations_policy(self, mock_get): + rule_name = policies.POLICY_ROOT % 'index' + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.index, + self.req, self.instance.uuid) + + @mock.patch('nova.api.openstack.compute.server_migrations.output') + @mock.patch('nova.compute.api.API.get_migration_by_id_and_instance') + def test_show_server_migrations_policy(self, mock_show, mock_output): + rule_name = policies.POLICY_ROOT % 'show' + mock_show.return_value = {"migration_type": "live-migration", + "status": "running"} + self.common_policy_check(self.reader_authorized_contexts, + self.reader_unauthorized_contexts, + rule_name, self.controller.show, + self.req, self.instance.uuid, 11111) + + @mock.patch('nova.compute.api.API.live_migrate_abort') + def test_delete_server_migrations_policy(self, mock_delete): + rule_name = policies.POLICY_ROOT % 'delete' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller.delete, + self.req, self.instance.uuid, 11111) + + @mock.patch('nova.compute.api.API.live_migrate_force_complete') + def test_force_delete_server_migrations_policy(self, mock_force): + rule_name = policies.POLICY_ROOT % 'force_complete' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, self.controller._force_complete, + self.req, self.instance.uuid, 11111, + body={"force_complete": None}) + + +class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest): + """Test Server Migrations APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. 
+    It defines the set of contexts with scoped tokens
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will run the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerMigrationsScopeTypePolicyTest, self).setUp()
+        self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerMigrationsNoLegacyPolicyTest(ServerMigrationsScopeTypePolicyTest):
+    """Test Server Migrations APIs policies with system scope enabled,
+    and no more deprecated rules.
+    """
+    without_deprecated_rules = True
+
+    def setUp(self):
+        super(ServerMigrationsNoLegacyPolicyTest, self).setUp()
+        # Check that admin is able to perform operations
+        # for server migrations.
+        self.admin_authorized_contexts = [
+            self.system_admin_context
+        ]
+        # Check that non-admin is not able to perform operations
+        # for server migrations.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context,
+            self.other_project_member_context
+        ]
+        # Check that system reader is able to perform operations
+        # for server migrations.
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context]
+        # Check that non-system reader is not able to perform operations
+        # for server migrations.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.project_admin_context,
+            self.system_foo_context, self.project_member_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+
+
+class ServerMigrationsOverridePolicyTest(ServerMigrationsNoLegacyPolicyTest):
+    """Test Server Migrations APIs policies with system and project scope,
+    where by default only system roles are allowed but project roles can
+    be allowed if operators override the policy. This test is with system
+    scope enabled and no more deprecated rules.
+    """
+
+    def setUp(self):
+        super(ServerMigrationsOverridePolicyTest, self).setUp()
+        rule_show = policies.POLICY_ROOT % 'show'
+        rule_list = policies.POLICY_ROOT % 'index'
+        rule_force = policies.POLICY_ROOT % 'force_complete'
+        rule_delete = policies.POLICY_ROOT % 'delete'
+        # NOTE(gmann): override the rule to project member and verify it
+        # works as the policy is system and project scoped.
+        self.policy.set_rules({
+            rule_show: base_policy.PROJECT_READER_OR_SYSTEM_READER,
+            rule_list: base_policy.PROJECT_READER_OR_SYSTEM_READER,
+            rule_force: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+            rule_delete: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+            overwrite=False)
+
+        # Check that system admin or the project roles overridden above
+        # are able to migrate the server.
+        self.admin_authorized_contexts = [
+            self.system_admin_context,
+            self.project_admin_context, self.project_member_context]
+        # Check that non-system admin or project role is not able to
+        # migrate the server.
+        self.admin_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_member_context,
+            self.system_reader_context, self.system_foo_context,
+            self.other_project_member_context,
+            self.project_foo_context, self.project_reader_context
+        ]
+        # Check that system reader is able to perform operations
+        # for server migrations.
+        self.reader_authorized_contexts = [
+            self.system_admin_context, self.system_member_context,
+            self.system_reader_context, self.project_admin_context,
+            self.project_member_context, self.project_reader_context]
+        # Check that non-system reader is not able to perform operations
+        # for server migrations.
+        self.reader_unauthorized_contexts = [
+            self.legacy_admin_context, self.system_foo_context,
+            self.other_project_member_context, self.project_foo_context
+        ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_password.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_password.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_password.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_password.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,150 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_password
+from nova.policies import base as base_policy
+from nova.policies import server_password as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServerPasswordPolicyTest(base.BasePolicyTest):
+    """Test Server Password APIs policies with all possible contexts.
+    This class defines the set of contexts with different roles
+    which are allowed and not allowed to pass the policy checks.
+    With those contexts, it will call the API operation and
+    verify the expected behaviour.
+    """
+
+    def setUp(self):
+        super(ServerPasswordPolicyTest, self).setUp()
+        self.controller = server_password.ServerPasswordController()
+        self.req = fakes.HTTPRequest.blank('')
+        self.mock_get = self.useFixture(
+            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+        self.instance = fake_instance.fake_instance_obj(
+            self.project_member_context,
+            id=1, uuid=uuids.fake_id, project_id=self.project_id,
+            system_metadata={}, expected_attrs=['system_metadata'])
+        self.mock_get.return_value = self.instance
+
+        # Check that admin or server owner is able to
+        # delete the server password.
+        self.admin_or_owner_authorized_contexts = [
+            self.legacy_admin_context, self.system_admin_context,
+            self.project_admin_context, self.project_member_context,
+            self.project_reader_context, self.project_foo_context]
+        # Check that non-admin/owner is not able to delete
+        # the server password.
+        self.admin_or_owner_unauthorized_contexts = [
+            self.system_member_context, self.system_reader_context,
+            self.system_foo_context, self.other_project_member_context,
+            self.other_project_reader_context
+        ]
+        # Check that admin or server owner is able to get
+        # the server password.
+ self.reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # Check that non-admin/owner is not able to get
+ # the server password.
+ self.reader_unauthorized_contexts = [
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ @mock.patch('nova.api.metadata.password.extract_password')
+ def test_index_server_password_policy(self, mock_pass):
+ rule_name = policies.BASE_POLICY_NAME % 'show'
+ self.common_policy_check(self.reader_authorized_contexts,
+ self.reader_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
+
+ @mock.patch('nova.api.metadata.password.convert_password')
+ def test_clear_server_password_policy(self, mock_pass):
+ rule_name = policies.BASE_POLICY_NAME % 'clear'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.clear,
+ self.req, self.instance.uuid)
+
+
+class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
+ """Test Server Password APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope to True
+ so that we can switch on the scope checking on the oslo policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServerPasswordScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerPasswordNoLegacyPolicyTest(ServerPasswordScopeTypePolicyTest):
+ """Test Server Password APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system_admin_or_owner APIs.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ policies.BASE_POLICY_NAME % 'clear':
+ base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+
+ def setUp(self):
+ super(ServerPasswordNoLegacyPolicyTest, self).setUp()
+
+ # Check that a system or project admin or the owner is able to
+ # clear the server password.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context]
+ # Check that non-system and non-admin/owner is not able to clear
+ # the server password.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_reader_context,
+ self.project_foo_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context]
+
+ # Check that a system reader or the project owner is able to get
+ # the server password.
+ self.reader_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_reader_context,
+ self.project_member_context,
+ ]
+
+ # Check that a non-system reader and non-admin/owner is not able to
+ # get the server password.
+ self.reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_foo_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_tags.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_tags.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_server_tags.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_server_tags.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,197 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import server_tags
+from nova.compute import vm_states
+from nova import context
+from nova import objects
+from nova.policies import server_tags as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ServerTagsPolicyTest(base.BasePolicyTest):
+ """Test Server Tags APIs policies with all possible contexts.
+ This class defines the set of contexts with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServerTagsPolicyTest, self).setUp()
+ self.controller = server_tags.ServerTagsController()
+ self.req = fakes.HTTPRequest.blank('', version='2.26')
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+ self.instance = fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuids.fake_id, vm_state=vm_states.ACTIVE,
+ project_id=self.project_id)
+ self.mock_get.return_value = self.instance
+ inst_map = objects.InstanceMapping(
+ project_id=self.project_id,
+ cell_mapping=objects.CellMappingList.get_all(
+ context.get_admin_context())[1])
+ self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
+ lambda s, c, u: inst_map)
+
+ # Check that an admin or the server owner is able to perform
+ # operations on server tags.
+ self.admin_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context
+ ]
+ # Check that non-admin/owner is not able to perform operations
+ # on server tags.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
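+ # NOTE: as a reference point, these policies front the server tags
+ # API introduced in microversion 2.26; assuming the usual routes,
+ # the operations involved look roughly like:
+ #
+ #   GET    /servers/{server_id}/tags        -> 'index' policy
+ #   PUT    /servers/{server_id}/tags/{tag}  -> 'update' policy
+ #   DELETE /servers/{server_id}/tags        -> 'delete_all' policy
+ #
+ # Check that a reader or the server owner is able to perform operations
+ # on server tags.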
+ self.reader_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # Check that non-reader/owner is not able to perform operations
+ # on server tags.
+ self.reader_or_owner_unauthorized_contexts = [
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+
+ @mock.patch('nova.objects.TagList.get_by_resource_id')
+ def test_index_server_tags_policy(self, mock_tag):
+ rule_name = policies.POLICY_ROOT % 'index'
+ self.common_policy_check(self.reader_or_owner_authorized_contexts,
+ self.reader_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
+
+ @mock.patch('nova.objects.Tag.exists')
+ def test_show_server_tags_policy(self, mock_exists):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.reader_or_owner_authorized_contexts,
+ self.reader_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid, uuids.fake_id)
+
+ @mock.patch('nova.notifications.base.send_instance_update_notification')
+ @mock.patch('nova.db.api.instance_tag_get_by_instance_uuid')
+ @mock.patch('nova.objects.Tag.create')
+ def test_update_server_tags_policy(self, mock_create, mock_tag,
+ mock_notf):
+ rule_name = policies.POLICY_ROOT % 'update'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, uuids.fake_id,
+ body=None)
+
+ @mock.patch('nova.notifications.base.send_instance_update_notification')
+ @mock.patch('nova.db.api.instance_tag_set')
+ def test_update_all_server_tags_policy(self, mock_set, mock_notf):
+ rule_name = policies.POLICY_ROOT % 'update_all'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.update_all,
+ self.req, self.instance.uuid,
+ body={'tags': ['tag1', 'tag2']})
+
+ @mock.patch('nova.notifications.base.send_instance_update_notification')
+ @mock.patch('nova.objects.TagList.destroy')
+ def test_delete_all_server_tags_policy(self, mock_destroy, mock_notf):
+ rule_name = policies.POLICY_ROOT % 'delete_all'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.delete_all,
+ self.req, self.instance.uuid)
+
+ @mock.patch('nova.notifications.base.send_instance_update_notification')
+ @mock.patch('nova.db.api.instance_tag_get_by_instance_uuid')
+ @mock.patch('nova.objects.Tag.destroy')
+ def test_delete_server_tags_policy(self, mock_destroy, mock_get,
+ mock_notf):
+ rule_name = policies.POLICY_ROOT % 'delete'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid, uuids.fake_id)
+
+
+class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
+ """Test Server Tags APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope to True
+ so that we can switch on the scope checking on the oslo policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ServerTagsScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ServerTagsNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
+ """Test Server Tags APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to
+ access system APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTagsNoLegacyPolicyTest, self).setUp()
+ # Check that a system admin or project member is able to
+ # perform operations on server tags.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context, self.project_admin_context,
+ self.project_member_context]
+ # Check that non-system/admin/member is not able to
+ # perform operations on server tags.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_reader_context,
+ self.system_foo_context, self.system_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context
+ ]
+ # Check that a system reader or the owner is able to
+ # perform operations on server tags.
+ self.reader_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context]
+ # Check that non-system/reader/owner is not able to
+ # perform operations on server tags.
+ self.reader_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.project_foo_context, self.other_project_member_context,
+ self.other_project_reader_context
+ ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_shelve.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_shelve.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_shelve.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_shelve.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,173 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import shelve
+from nova.compute import vm_states
+from nova import exception
+from nova.policies import shelve as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class ShelveServerPolicyTest(base.BasePolicyTest):
+ """Test Shelve server APIs policies with all possible contexts.
+ This class defines the set of contexts with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will call the API operation and
+ verify the expected behaviour.
+ """ + + def setUp(self): + super(ShelveServerPolicyTest, self).setUp() + self.controller = shelve.ShelveController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuids.fake_id, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE) + self.mock_get.return_value = self.instance + + # Check that admin or and server owner is able to shelve/unshelve + # the server + self.admin_or_owner_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin/owner is not able to shelve/unshelve + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + # Check that admin is able to shelve offload the server. + self.admin_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context] + # Check that non-admin is not able to shelve offload the server. + self.admin_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, self.project_member_context, + self.project_reader_context, self.project_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.shelve') + def test_shelve_server_policy(self, mock_shelve): + rule_name = policies.POLICY_ROOT % 'shelve' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._shelve, + self.req, self.instance.uuid, + body={'shelve': {}}) + + @mock.patch('nova.compute.api.API.unshelve') + def test_unshelve_server_policy(self, mock_unshelve): + rule_name = policies.POLICY_ROOT % 'unshelve' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._unshelve, + self.req, self.instance.uuid, + body={'unshelve': {}}) + + @mock.patch('nova.compute.api.API.shelve_offload') + def test_shelve_offload_server_policy(self, mock_offload): + rule_name = policies.POLICY_ROOT % 'shelve_offload' + self.common_policy_check(self.admin_authorized_contexts, + self.admin_unauthorized_contexts, + rule_name, + self.controller._shelve_offload, + self.req, self.instance.uuid, + body={'shelveOffload': {}}) + + def test_shelve_server_policy_failed_with_other_user(self): + # Change the user_id in request context. + req = fakes.HTTPRequest.blank('') + req.environ['nova.context'].user_id = 'other-user' + rule_name = policies.POLICY_ROOT % 'shelve' + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + exc = self.assertRaises( + exception.PolicyNotAuthorized, self.controller._shelve, + req, fakes.FAKE_UUID, body={'shelve': {}}) + self.assertEqual( + "Policy doesn't allow %s to be performed." 
+ exc.format_message())
+
+ @mock.patch('nova.compute.api.API.shelve')
+ def test_shelve_server_overridden_policy_pass_with_same_user(
+ self, mock_shelve):
+ rule_name = policies.POLICY_ROOT % 'shelve'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+ self.controller._shelve(self.req,
+ fakes.FAKE_UUID,
+ body={'shelve': {}})
+
+
+class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
+ """Test Shelve Server APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope to True
+ so that we can switch on the scope checking on the oslo policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(ShelveServerScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+
+class ShelveServerNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
+ """Test Shelve Server APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ShelveServerNoLegacyPolicyTest, self).setUp()
+
+ # Check that the system admin or the server owner is able to
+ # shelve/unshelve the server.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context, self.project_member_context]
+ # Check that non-system/admin/owner is not able to shelve/unshelve
+ # the server.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.other_project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ]
+ # Check that the system admin is able to shelve offload the server.
+ self.admin_authorized_contexts = [
+ self.system_admin_context
+ ]
+ # Check that a non-system admin is not able to shelve offload the
+ # server.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context
+ ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_simple_tenant_usage.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_simple_tenant_usage.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_simple_tenant_usage.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_simple_tenant_usage.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute import simple_tenant_usage
+from nova.policies import simple_tenant_usage as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.policies import base
+
+
+class SimpleTenantUsagePolicyTest(base.BasePolicyTest):
+ """Test Simple Tenant Usage APIs policies with all possible contexts.
+ This class defines the set of contexts with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(SimpleTenantUsagePolicyTest, self).setUp()
+ self.controller = simple_tenant_usage.SimpleTenantUsageController()
+ self.req = fakes.HTTPRequest.blank('')
+
+ # Check that an admin or the owner is able to get
+ # the tenant usage statistics for a specific tenant.
+ self.admin_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # Check that non-admin/owner is not able to get
+ # the tenant usage statistics for a specific tenant.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.other_project_member_context,
+ self.other_project_reader_context,
+ ]
+ # Check that admin is able to get the tenant usage statistics.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # Check that non-admin is not able to get the tenant usage statistics.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context,
+ self.other_project_reader_context,
+ ]
+
+ def test_index_simple_tenant_usage_policy(self):
+ rule_name = policies.POLICY_ROOT % 'list'
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
+
+ def test_show_simple_tenant_usage_policy(self):
+ rule_name = policies.POLICY_ROOT % 'show'
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.project_id)
+
+
+class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
+ """Test Simple Tenant Usage APIs policies with system scope enabled.
+ This class sets the nova.conf [oslo_policy] enforce_scope to True
+ so that we can switch on the scope checking on the oslo policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(SimpleTenantUsageScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that system admin is able to get the tenant usage statistics.
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
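+ # NOTE: self.flags(enforce_scope=True, group="oslo_policy") above is
+ # the in-test equivalent of this deployment-side configuration in
+ # nova.conf (shown for illustration):
+ #
+ #   [oslo_policy]
+ #   enforce_scope = True
+ #
+ # Check that non-system/admin is not able to get the tenant usage
+ # statistics.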
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_suspend_server.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_suspend_server.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_suspend_server.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_suspend_server.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,140 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.api.openstack.compute import suspend_server
+from nova.compute import vm_states
+from nova import exception
+from nova.policies import suspend_server as policies
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.policies import base
+
+
+class SuspendServerPolicyTest(base.BasePolicyTest):
+ """Test Suspend Server APIs policies with all possible contexts.
+ This class defines the set of contexts with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will call the API operation and
+ verify the expected behaviour.
+ """ + + def setUp(self): + super(SuspendServerPolicyTest, self).setUp() + self.controller = suspend_server.SuspendServerController() + self.req = fakes.HTTPRequest.blank('') + user_id = self.req.environ['nova.context'].user_id + self.mock_get = self.useFixture( + fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock + self.instance = fake_instance.fake_instance_obj( + self.project_member_context, + id=1, uuid=uuids.fake_id, project_id=self.project_id, + user_id=user_id, vm_state=vm_states.ACTIVE) + self.mock_get.return_value = self.instance + + # Check that admin or and server owner is able to suspend/resume + # the sevrer + self.admin_or_owner_authorized_contexts = [ + self.legacy_admin_context, self.system_admin_context, + self.project_admin_context, self.project_member_context, + self.project_reader_context, self.project_foo_context] + # Check that non-admin/owner is not able to suspend/resume + # the server + self.admin_or_owner_unauthorized_contexts = [ + self.system_member_context, self.system_reader_context, + self.system_foo_context, + self.other_project_member_context + ] + + @mock.patch('nova.compute.api.API.suspend') + def test_suspend_server_policy(self, mock_suspend): + rule_name = policies.POLICY_ROOT % 'suspend' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._suspend, + self.req, self.instance.uuid, + body={'suspend': {}}) + + @mock.patch('nova.compute.api.API.resume') + def test_resume_server_policy(self, mock_resume): + rule_name = policies.POLICY_ROOT % 'resume' + self.common_policy_check(self.admin_or_owner_authorized_contexts, + self.admin_or_owner_unauthorized_contexts, + rule_name, + self.controller._resume, + self.req, self.instance.uuid, + body={'resume': {}}) + + def test_suspend_server_policy_failed_with_other_user(self): + # Change the user_id in request context. + req = fakes.HTTPRequest.blank('') + req.environ['nova.context'].user_id = 'other-user' + rule_name = policies.POLICY_ROOT % 'suspend' + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + exc = self.assertRaises( + exception.PolicyNotAuthorized, self.controller._suspend, + req, self.instance.uuid, body={'suspend': {}}) + self.assertEqual( + "Policy doesn't allow %s to be performed." % rule_name, + exc.format_message()) + + @mock.patch('nova.compute.api.API.suspend') + def test_suspend_sevrer_overridden_policy_pass_with_same_user( + self, mock_suspend): + rule_name = policies.POLICY_ROOT % 'suspend' + self.policy.set_rules({rule_name: "user_id:%(user_id)s"}) + self.controller._suspend(self.req, + self.instance.uuid, + body={'suspend': {}}) + + +class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest): + """Test Suspend Server APIs policies with system scope enabled. + This class set the nova.conf [oslo_policy] enforce_scope to True + so that we can switch on the scope checking on oslo policy side. + It defines the set of context with scoped token + which are allowed and not allowed to pass the policy checks. + With those set of context, it will run the API operation and + verify the expected behaviour. + """ + + def setUp(self): + super(SuspendServerScopeTypePolicyTest, self).setUp() + self.flags(enforce_scope=True, group="oslo_policy") + + +class SuspendServerNoLegacyPolicyTest(SuspendServerScopeTypePolicyTest): + """Test Suspend Server APIs policies with system scope enabled, + and no more deprecated rules that allow the legacy admin API to + access system APIs. 
+ """ + without_deprecated_rules = True + + def setUp(self): + super(SuspendServerNoLegacyPolicyTest, self).setUp() + # Check that system admin or and server owner is able to + # suspend/resume the server. + self.admin_or_owner_authorized_contexts = [ + self.system_admin_context, + self.project_admin_context, self.project_member_context] + # Check that non-system/admin/owner is not able to suspend/resume + # the server. + self.admin_or_owner_unauthorized_contexts = [ + self.legacy_admin_context, self.system_member_context, + self.system_reader_context, self.system_foo_context, + self.other_project_member_context, self.project_reader_context, + self.project_foo_context + ] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_volumes.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_volumes.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/policies/test_volumes.py 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/policies/test_volumes.py 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,314 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +import mock +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import timeutils + +from nova.api.openstack.compute import volumes as volumes_v21 +from nova.compute import vm_states +from nova import exception +from nova import objects +from nova.objects import block_device as block_device_obj +from nova.policies import volumes_attachments as va_policies +from nova.tests.unit.api.openstack import fakes +from nova.tests.unit import fake_block_device +from nova.tests.unit import fake_instance +from nova.tests.unit.policies import base + +# This is the server ID. +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +# This is the old volume ID (to swap from). +FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000' +# This is the new volume ID (to swap to). +FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' + + +def fake_bdm_get_by_volume_and_instance(cls, ctxt, volume_id, instance_uuid): + if volume_id != FAKE_UUID_A: + raise exception.VolumeBDMNotFound(volume_id=volume_id) + db_bdm = fake_block_device.FakeDbBlockDeviceDict( + {'id': 1, + 'instance_uuid': instance_uuid, + 'device_name': '/dev/fake0', + 'delete_on_termination': 'False', + 'source_type': 'volume', + 'destination_type': 'volume', + 'snapshot_id': None, + 'volume_id': volume_id, + 'volume_size': 1}) + return objects.BlockDeviceMapping._from_db_object( + ctxt, objects.BlockDeviceMapping(), db_bdm) + + +def fake_get_volume(self, context, id): + if id == FAKE_UUID_A: + status = 'in-use' + attach_status = 'attached' + elif id == FAKE_UUID_B: + status = 'available' + attach_status = 'detached' + else: + raise exception.VolumeNotFound(volume_id=id) + return {'id': id, 'status': status, 'attach_status': attach_status} + + +class VolumeAttachPolicyTest(base.BasePolicyTest): + """Test os-volumes-attachments APIs policies with all possible context. 
+
+ This class defines the set of contexts with different roles
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will call the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(VolumeAttachPolicyTest, self).setUp()
+ self.controller = volumes_v21.VolumeAttachmentController()
+ self.req = fakes.HTTPRequest.blank('')
+ self.policy_root = va_policies.POLICY_ROOT
+ self.stub_out('nova.objects.BlockDeviceMapping'
+ '.get_by_volume_and_instance',
+ fake_bdm_get_by_volume_and_instance)
+ self.stub_out('nova.volume.cinder.API.get', fake_get_volume)
+
+ self.mock_get = self.useFixture(
+ fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
+ uuid = uuids.fake_id
+ self.instance = fake_instance.fake_instance_obj(
+ self.project_member_context,
+ id=1, uuid=uuid, project_id=self.project_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None, launched_at=timeutils.utcnow())
+ self.mock_get.return_value = self.instance
+
+ # Check that admin or owner is able to list/create/show/delete
+ # the attached volume.
+ self.admin_or_owner_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_foo_context,
+ self.project_reader_context, self.project_member_context
+ ]
+
+ self.admin_or_owner_unauthorized_contexts = [
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+
+ # Check that admin is able to update the attached volume.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.system_admin_context,
+ self.project_admin_context
+ ]
+ # Check that non-admin is not able to update the attached
+ # volume.
+ self.admin_unauthorized_contexts = [
+ self.system_member_context,
+ self.system_reader_context,
+ self.system_foo_context,
+ self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context,
+ self.project_reader_context
+ ]
+
+ self.reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.system_reader_context, self.system_member_context,
+ self.project_admin_context, self.project_reader_context,
+ self.project_member_context, self.project_foo_context
+ ]
+
+ self.reader_unauthorized_contexts = [
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ def test_index_volume_attach_policy(self, mock_get_instance):
+ rule_name = self.policy_root % "index"
+ self.common_policy_check(self.reader_authorized_contexts,
+ self.reader_unauthorized_contexts,
+ rule_name, self.controller.index,
+ self.req, FAKE_UUID)
+
+ def test_show_volume_attach_policy(self):
+ rule_name = self.policy_root % "show"
+ self.common_policy_check(self.reader_authorized_contexts,
+ self.reader_unauthorized_contexts,
+ rule_name, self.controller.show,
+ self.req, FAKE_UUID, FAKE_UUID_A)
+
+ @mock.patch('nova.compute.api.API.attach_volume')
+ def test_create_volume_attach_policy(self, mock_attach_volume):
+ rule_name = self.policy_root % "create"
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
+ 'device': '/dev/fake'}}
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name, self.controller.create,
+ self.req, FAKE_UUID, body=body)
+
+ @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
+ def test_update_volume_attach_policy(self, mock_bdm_save):
+ rule_name = self.policy_root % "update"
+ req = fakes.HTTPRequest.blank('', version='2.85')
+ body = {'volumeAttachment': {
+ 'volumeId': FAKE_UUID_A,
+ 'delete_on_termination': True}}
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID,
+ FAKE_UUID_A, body=body)
+
+ @mock.patch('nova.compute.api.API.detach_volume')
+ def test_delete_volume_attach_policy(self, mock_detach_volume):
+ rule_name = self.policy_root % "delete"
+ self.common_policy_check(self.admin_or_owner_authorized_contexts,
+ self.admin_or_owner_unauthorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, FAKE_UUID, FAKE_UUID_A)
+
+ @mock.patch('nova.compute.api.API.swap_volume')
+ def test_swap_volume_attach_policy(self, mock_swap_volume):
+ rule_name = self.policy_root % "swap"
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name, self.controller.update,
+ self.req, FAKE_UUID, FAKE_UUID_A, body=body)
+
+ @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
+ @mock.patch('nova.compute.api.API.swap_volume')
+ def test_swap_volume_attach_policy_failed(self,
+ mock_swap_volume,
+ mock_bdm_save):
+ """Policy check fails for swap + update due to swap policy failure.
+ """
+ rule_name = self.policy_root % "swap"
+ req = fakes.HTTPRequest.blank('', version='2.85')
+ req.environ['nova.context'].user_id = 'other-user'
+ self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
+ 'delete_on_termination': True}}
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.update,
+ req, FAKE_UUID, FAKE_UUID_A, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
+ mock_swap_volume.assert_not_called()
+ mock_bdm_save.assert_not_called()
+
+ @mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
+ @mock.patch('nova.compute.api.API.swap_volume')
+ def test_pass_swap_and_update_volume_attach_policy(self,
+ mock_swap_volume,
+ mock_bdm_save):
+ rule_name = self.policy_root % "swap"
+ req = fakes.HTTPRequest.blank('', version='2.85')
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
+ 'delete_on_termination': True}}
+ self.common_policy_check(self.admin_authorized_contexts,
+ self.admin_unauthorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID, FAKE_UUID_A, body=body)
+ mock_swap_volume.assert_called()
+ mock_bdm_save.assert_called()
+
+
+class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
+ """Test os-volume-attachments APIs policies with system scope enabled.
+
+ This class sets the nova.conf [oslo_policy] enforce_scope to True
+ so that we can switch on the scope checking on the oslo policy side.
+ It defines the set of contexts with scoped tokens
+ which are allowed and not allowed to pass the policy checks.
+ With those sets of contexts, it will run the API operation and
+ verify the expected behaviour.
+ """
+
+ def setUp(self):
+ super(VolumeAttachScopeTypePolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that system admin is able to update the attached volume.
+ self.admin_authorized_contexts = [
+ self.system_admin_context]
+ # Check that non-system or non-admin is not able to update
+ # the attached volume.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
+
+
+class VolumeAttachNoLegacyPolicyTest(VolumeAttachPolicyTest):
+ """Test os-volume-attachments APIs policies with system scope enabled,
+ and no more deprecated rules that allow the legacy admin API to access
+ system_admin_or_owner APIs.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(VolumeAttachNoLegacyPolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+
+ # Check that a system or project admin or the owner is able to
+ # list/create/show/delete the attached volume.
+ self.admin_or_owner_authorized_contexts = [
+ self.system_admin_context,
+ self.project_admin_context,
+ self.project_member_context
+ ]
+
+ # Check that non-system and non-admin/owner is not able to
+ # list/create/show/delete the attached volume.
+ self.admin_or_owner_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.project_reader_context,
+ self.project_foo_context, self.system_foo_context,
+ self.other_project_member_context
+ ]
+
+ # Check that admin is able to update the attached volume.
+ self.admin_authorized_contexts = [
+ self.system_admin_context
+ ]
+ # Check that non-admin is not able to update the attached
+ # volume.
+ self.admin_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context,
+ self.project_admin_context, self.project_member_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.project_reader_context
+ ]
+
+ self.reader_authorized_contexts = [
+ self.system_admin_context, self.system_reader_context,
+ self.system_member_context, self.project_admin_context,
+ self.project_reader_context, self.project_member_context
+ ]
+
+ self.reader_unauthorized_contexts = [
+ self.legacy_admin_context, self.system_foo_context,
+ self.project_foo_context,
+ self.other_project_member_context
+ ]
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_filter_scheduler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_filter_scheduler.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_filter_scheduler.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_filter_scheduler.py 2020-04-10 17:57:58.000000000 +0000
@@ -20,14 +20,15 @@
 from oslo_serialization import jsonutils
 from oslo_utils.fixture import uuidsentinel as uuids
 
+from nova import context
 from nova import exception
 from nova import objects
 from nova.scheduler import filter_scheduler
 from nova.scheduler import host_manager
 from nova.scheduler import utils as scheduler_utils
 from nova.scheduler import weights
+from nova import servicegroup
 from nova import test # noqa
-from nova.tests.unit.scheduler import test_scheduler
 
 
 fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0,
@@ -50,18 +51,41 @@
 allocation_request_version=fake_alloc_version)
 
 
-class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
+class FilterSchedulerTestCase(test.NoDBTestCase):
 """Test case for Filter Scheduler."""
 
- driver_cls = filter_scheduler.FilterScheduler
-
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info',
+ new=mock.Mock())
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates',
+ new=mock.Mock())
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient',
+ autospec=True, new=mock.Mock())
+ @mock.patch('nova.scheduler.client.query.SchedulerQueryClient',
+ autospec=True, new=mock.Mock())
 def setUp(self):
- with mock.patch(
- 'nova.scheduler.client.report.SchedulerReportClient',
- autospec=True), mock.patch(
- 'nova.scheduler.client.query.SchedulerQueryClient',
- autospec=True):
- super(FilterSchedulerTestCase, self).setUp()
+ super(FilterSchedulerTestCase, self).setUp()
+
+ self.driver = filter_scheduler.FilterScheduler()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.topic = 'fake_topic'
+ self.servicegroup_api = servicegroup.API()
+
+ @mock.patch('nova.objects.ServiceList.get_by_topic')
+ @mock.patch('nova.servicegroup.API.service_is_up')
+ def test_hosts_up(self, mock_service_is_up, mock_get_by_topic):
+ service1 = objects.Service(host='host1')
+ service2 = objects.Service(host='host2')
+ services = objects.ServiceList(objects=[service1, service2])
+
+ mock_get_by_topic.return_value = services
+ mock_service_is_up.side_effect = [False, True]
+
+ result = self.driver.hosts_up(self.context, self.topic)
+ self.assertEqual(result, ['host2'])
+
+ mock_get_by_topic.assert_called_once_with(self.context, self.topic)
+ calls = [mock.call(service1), mock.call(service2)]
+ self.assertEqual(calls, mock_service_is_up.call_args_list)
 
 @mock.patch('nova.scheduler.utils.claim_resources')
 @mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_manager.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_manager.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_manager.py 1970-01-01 00:00:00.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_manager.py 2020-04-10 17:57:58.000000000 +0000
@@ -0,0 +1,441 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
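+
+# NOTE: SchedulerManager resolves its driver through a stevedore entry
+# point; assuming the default wiring is unchanged, the pieces involved
+# look roughly like this (illustrative nova.conf and setup.cfg extracts):
+#
+#   [scheduler]
+#   driver = filter_scheduler
+#
+#   [entry_points]
+#   nova.scheduler.driver =
+#       filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler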
+""" +Tests For Scheduler +""" + +import mock +import oslo_messaging as messaging +from oslo_utils.fixture import uuidsentinel as uuids + +from nova import context +from nova import exception +from nova import objects +from nova.scheduler import filter_scheduler +from nova.scheduler import host_manager +from nova.scheduler import manager +from nova import test +from nova.tests.unit import fake_server_actions +from nova.tests.unit.scheduler import fakes + + +class SchedulerManagerInitTestCase(test.NoDBTestCase): + """Test case for scheduler manager initiation.""" + manager_cls = manager.SchedulerManager + + @mock.patch.object(host_manager.HostManager, '_init_instance_info') + @mock.patch.object(host_manager.HostManager, '_init_aggregates') + def test_init_using_default_schedulerdriver(self, + mock_init_agg, + mock_init_inst): + driver = self.manager_cls().driver + self.assertIsInstance(driver, filter_scheduler.FilterScheduler) + + @mock.patch.object(host_manager.HostManager, '_init_instance_info') + @mock.patch.object(host_manager.HostManager, '_init_aggregates') + def test_init_nonexist_schedulerdriver(self, + mock_init_agg, + mock_init_inst): + self.flags(driver='nonexist_scheduler', group='scheduler') + # The entry point has to be defined in setup.cfg and nova-scheduler has + # to be deployed again before using a custom value. + self.assertRaises(RuntimeError, self.manager_cls) + + +class SchedulerManagerTestCase(test.NoDBTestCase): + """Test case for scheduler manager.""" + + manager_cls = manager.SchedulerManager + + @mock.patch.object(host_manager.HostManager, '_init_instance_info') + @mock.patch.object(host_manager.HostManager, '_init_aggregates') + def setUp(self, mock_init_agg, mock_init_inst): + super(SchedulerManagerTestCase, self).setUp() + self.manager = self.manager_cls() + self.context = context.RequestContext('fake_user', 'fake_project') + self.topic = 'fake_topic' + self.fake_args = (1, 2, 3) + self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} + fake_server_actions.stub_out_action_events(self) + + @mock.patch('nova.scheduler.request_filter.process_reqspec') + @mock.patch('nova.scheduler.utils.resources_from_request_spec') + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
+ 'get_allocation_candidates')
+ def test_select_destination(self, mock_get_ac, mock_rfrs, mock_process):
+ fake_spec = objects.RequestSpec()
+ fake_spec.instance_uuid = uuids.instance
+ fake_version = "9.42"
+ mock_p_sums = mock.Mock()
+ fake_alloc_reqs = fakes.get_fake_alloc_reqs()
+ place_res = (fake_alloc_reqs, mock_p_sums, fake_version)
+ mock_get_ac.return_value = place_res
+ mock_rfrs.return_value.cpu_pinning_requested = False
+ expected_alloc_reqs_by_rp_uuid = {
+ cn.uuid: [fake_alloc_reqs[x]]
+ for x, cn in enumerate(fakes.COMPUTE_NODES)
+ }
+ with mock.patch.object(self.manager.driver, 'select_destinations'
+ ) as select_destinations:
+ self.manager.select_destinations(self.context, spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid])
+ mock_process.assert_called_once_with(self.context, fake_spec)
+ select_destinations.assert_called_once_with(
+ self.context, fake_spec,
+ [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+ mock_p_sums, fake_version, False)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
+
+ # Now call select_destinations() with True values for the params
+ # introduced in RPC version 4.5
+ select_destinations.reset_mock()
+ self.manager.select_destinations(None, spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid],
+ return_objects=True, return_alternates=True)
+ select_destinations.assert_called_once_with(None, fake_spec,
+ [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+ mock_p_sums, fake_version, True)
+
+ @mock.patch('nova.scheduler.request_filter.process_reqspec')
+ @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocation_candidates')
+ def test_select_destination_return_objects(self, mock_get_ac,
+ mock_rfrs, mock_process):
+ fake_spec = objects.RequestSpec()
+ fake_spec.instance_uuid = uuids.instance
+ fake_version = "9.42"
+ mock_p_sums = mock.Mock()
+ fake_alloc_reqs = fakes.get_fake_alloc_reqs()
+ place_res = (fake_alloc_reqs, mock_p_sums, fake_version)
+ mock_get_ac.return_value = place_res
+ mock_rfrs.return_value.cpu_pinning_requested = False
+ expected_alloc_reqs_by_rp_uuid = {
+ cn.uuid: [fake_alloc_reqs[x]]
+ for x, cn in enumerate(fakes.COMPUTE_NODES)
+ }
+ with mock.patch.object(self.manager.driver, 'select_destinations'
+ ) as select_destinations:
+ sel_obj = objects.Selection(service_host="fake_host",
+ nodename="fake_node", compute_node_uuid=uuids.compute_node,
+ cell_uuid=uuids.cell, limits=None)
+ select_destinations.return_value = [[sel_obj]]
+ # Pass True; should get the Selection object back.
+ dests = self.manager.select_destinations(None, spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid],
+ return_objects=True, return_alternates=True)
+ sel_host = dests[0][0]
+ self.assertIsInstance(sel_host, objects.Selection)
+ mock_process.assert_called_once_with(None, fake_spec)
+ # Since both return_objects and return_alternates are True, the
+ # driver should have been called with True for return_alternates.
+ select_destinations.assert_called_once_with(None, fake_spec,
+ [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+ mock_p_sums, fake_version, True)
+
+ # Now pass False for return objects, but keep return_alternates as
+ # True. Verify that the manager converted the Selection object back
+ # to a dict.
+ select_destinations.reset_mock()
+ dests = self.manager.select_destinations(None, spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid],
+ return_objects=False, return_alternates=True)
+ sel_host = dests[0]
+ self.assertIsInstance(sel_host, dict)
+ # Even though return_alternates was passed as True, since
+ # return_objects was False, the driver should have been called with
+ # return_alternates as False.
+ select_destinations.assert_called_once_with(None, fake_spec,
+ [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
+ mock_p_sums, fake_version, False)
+
+ @mock.patch('nova.scheduler.request_filter.process_reqspec')
+ @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocation_candidates')
+ def _test_select_destination(self, get_allocation_candidates_response,
+ mock_get_ac, mock_rfrs, mock_process):
+ fake_spec = objects.RequestSpec()
+ fake_spec.instance_uuid = uuids.instance
+ place_res = get_allocation_candidates_response
+ mock_get_ac.return_value = place_res
+ mock_rfrs.return_value.cpu_pinning_requested = False
+ with mock.patch.object(self.manager.driver, 'select_destinations'
+ ) as select_destinations:
+ self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
+ self.manager.select_destinations, self.context,
+ spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid])
+ select_destinations.assert_not_called()
+ mock_process.assert_called_once_with(self.context, fake_spec)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
+
+ def test_select_destination_old_placement(self):
+ """Tests that we will raise NoValidHost when the scheduler
+ report client's get_allocation_candidates() returns None, None as it
+ would if placement service hasn't been upgraded before scheduler.
+ """
+ place_res = (None, None, None)
+ self._test_select_destination(place_res)
+
+ def test_select_destination_placement_connect_fails(self):
+ """Tests that we will raise NoValidHost when the scheduler
+ report client's get_allocation_candidates() returns None, which it
+ would if the connection to Placement failed and the safe_connect
+ decorator returns None.
+ """
+ place_res = None
+ self._test_select_destination(place_res)
+
+ def test_select_destination_no_candidates(self):
+ """Tests that we will raise NoValidHost when the scheduler
+ report client's get_allocation_candidates() returns [], {} which it
+ would if placement service hasn't yet had compute nodes populate
+ inventory.
+ """
+ place_res = ([], {}, None)
+ self._test_select_destination(place_res)
+
+ @mock.patch('nova.scheduler.request_filter.process_reqspec')
+ @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocation_candidates')
+ def test_select_destination_is_rebuild(self, mock_get_ac, mock_rfrs,
+ mock_process):
+ fake_spec = objects.RequestSpec(
+ scheduler_hints={'_nova_check_type': ['rebuild']})
+ fake_spec.instance_uuid = uuids.instance
+ with mock.patch.object(self.manager.driver, 'select_destinations'
+ ) as select_destinations:
+ self.manager.select_destinations(self.context, spec_obj=fake_spec,
+ instance_uuids=[fake_spec.instance_uuid])
+ select_destinations.assert_called_once_with(
+ self.context, fake_spec,
+ [fake_spec.instance_uuid], None, None, None, False)
+ mock_get_ac.assert_not_called()
+ mock_process.assert_not_called()
+
+ @mock.patch('nova.scheduler.request_filter.process_reqspec')
+ @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocation_candidates')
+ def test_select_destination_with_4_3_client(
+ self, mock_get_ac, mock_rfrs, mock_process,
+ cpu_pinning_requested=False):
+ fake_spec = objects.RequestSpec()
+ mock_p_sums = mock.Mock()
+ fake_alloc_reqs = fakes.get_fake_alloc_reqs()
+ place_res = (fake_alloc_reqs, mock_p_sums, "42.0")
+ mock_get_ac.return_value = place_res
+ mock_rfrs.return_value.cpu_pinning_requested = cpu_pinning_requested
+ expected_alloc_reqs_by_rp_uuid = {
+ cn.uuid: [fake_alloc_reqs[x]]
+ for x, cn in enumerate(fakes.COMPUTE_NODES)
+ }
+ with mock.patch.object(self.manager.driver, 'select_destinations'
+ ) as select_destinations:
+ self.manager.select_destinations(self.context, spec_obj=fake_spec)
+ mock_process.assert_called_once_with(self.context, fake_spec)
+ select_destinations.assert_called_once_with(self.context,
+ fake_spec, None, expected_alloc_reqs_by_rp_uuid,
+ mock_p_sums, "42.0", False)
+ mock_rfrs.assert_called_once_with(
+ self.context, fake_spec, mock.ANY,
+ enable_pinning_translate=True)
+ mock_get_ac.assert_called_once_with(
+ self.context, mock_rfrs.return_value)
+
+ @mock.patch('nova.scheduler.manager.LOG.debug')
+ @mock.patch('nova.scheduler.request_filter.process_reqspec')
+ @mock.patch('nova.scheduler.utils.resources_from_request_spec')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'get_allocation_candidates')
+ def test_select_destination_with_pcpu_fallback(
+ self, mock_get_ac, mock_rfrs, mock_process, mock_log):
+ """Check that we make a second request to placement if we've got a PCPU
+ request.
+ """
+ self.flags(disable_fallback_pcpu_query=False, group='workarounds')
+
+ # mock the result from placement. In reality, the two calls we expect
+ # would return two different results, but we don't care about that. All
All + # we want to check is that it _was_ called twice + fake_spec = objects.RequestSpec() + mock_p_sums = mock.Mock() + fake_alloc_reqs = fakes.get_fake_alloc_reqs() + place_res = (fake_alloc_reqs, mock_p_sums, "42.0") + mock_get_ac.return_value = place_res + + pcpu_rreq = mock.Mock() + pcpu_rreq.cpu_pinning_requested = True + vcpu_rreq = mock.Mock() + mock_rfrs.side_effect = [pcpu_rreq, vcpu_rreq] + + # as above, the two allocation requests against each compute node would + # be different in reality, and not all compute nodes might have two + # allocation requests, but that doesn't matter for this simple test + expected_alloc_reqs_by_rp_uuid = { + cn.uuid: [fake_alloc_reqs[x], fake_alloc_reqs[x]] + for x, cn in enumerate(fakes.COMPUTE_NODES) + } + + with mock.patch.object(self.manager.driver, 'select_destinations' + ) as select_destinations: + self.manager.select_destinations(self.context, spec_obj=fake_spec) + select_destinations.assert_called_once_with(self.context, + fake_spec, None, expected_alloc_reqs_by_rp_uuid, + mock_p_sums, "42.0", False) + + mock_process.assert_called_once_with(self.context, fake_spec) + mock_log.assert_called_with( + 'Requesting fallback allocation candidates with VCPU instead of ' + 'PCPU') + mock_rfrs.assert_has_calls([ + mock.call(self.context, fake_spec, mock.ANY, + enable_pinning_translate=True), + mock.call(self.context, fake_spec, mock.ANY, + enable_pinning_translate=False), + ]) + mock_get_ac.assert_has_calls([ + mock.call(self.context, pcpu_rreq), + mock.call(self.context, vcpu_rreq), + ]) + + def test_select_destination_with_pcpu_fallback_disabled(self): + """Check that we do not make a second request to placement if we've + been told not to, even though we've got a PCPU instance. + """ + self.flags(disable_fallback_pcpu_query=True, group='workarounds') + + self.test_select_destination_with_4_3_client( + cpu_pinning_requested=True) + + # TODO(sbauza): Remove that test once the API v4 is removed + @mock.patch('nova.scheduler.request_filter.process_reqspec') + @mock.patch('nova.scheduler.utils.resources_from_request_spec') + @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
+ 'get_allocation_candidates') + @mock.patch.object(objects.RequestSpec, 'from_primitives') + def test_select_destination_with_old_client(self, from_primitives, + mock_get_ac, mock_rfrs, mock_process): + fake_spec = objects.RequestSpec() + fake_spec.instance_uuid = uuids.instance + from_primitives.return_value = fake_spec + mock_p_sums = mock.Mock() + fake_alloc_reqs = fakes.get_fake_alloc_reqs() + place_res = (fake_alloc_reqs, mock_p_sums, "42.0") + mock_get_ac.return_value = place_res + mock_rfrs.return_value.cpu_pinning_requested = False + expected_alloc_reqs_by_rp_uuid = { + cn.uuid: [fake_alloc_reqs[x]] + for x, cn in enumerate(fakes.COMPUTE_NODES) + } + with mock.patch.object(self.manager.driver, 'select_destinations' + ) as select_destinations: + self.manager.select_destinations( + self.context, request_spec='fake_spec', + filter_properties='fake_props', + instance_uuids=[fake_spec.instance_uuid]) + mock_process.assert_called_once_with(self.context, fake_spec) + select_destinations.assert_called_once_with( + self.context, fake_spec, + [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, + mock_p_sums, "42.0", False) + mock_get_ac.assert_called_once_with( + self.context, mock_rfrs.return_value) + + def test_update_aggregates(self): + with mock.patch.object(self.manager.driver.host_manager, + 'update_aggregates' + ) as update_aggregates: + self.manager.update_aggregates(None, aggregates='agg') + update_aggregates.assert_called_once_with('agg') + + def test_delete_aggregate(self): + with mock.patch.object(self.manager.driver.host_manager, + 'delete_aggregate' + ) as delete_aggregate: + self.manager.delete_aggregate(None, aggregate='agg') + delete_aggregate.assert_called_once_with('agg') + + def test_update_instance_info(self): + with mock.patch.object(self.manager.driver.host_manager, + 'update_instance_info') as mock_update: + self.manager.update_instance_info(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_info) + mock_update.assert_called_once_with(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_info) + + def test_delete_instance_info(self): + with mock.patch.object(self.manager.driver.host_manager, + 'delete_instance_info') as mock_delete: + self.manager.delete_instance_info(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_uuid) + mock_delete.assert_called_once_with(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_uuid) + + def test_sync_instance_info(self): + with mock.patch.object(self.manager.driver.host_manager, + 'sync_instance_info') as mock_sync: + self.manager.sync_instance_info(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_uuids) + mock_sync.assert_called_once_with(mock.sentinel.context, + mock.sentinel.host_name, + mock.sentinel.instance_uuids) + + def test_reset(self): + with mock.patch.object(self.manager.driver.host_manager, + 'refresh_cells_caches') as mock_refresh: + self.manager.reset() + mock_refresh.assert_called_once_with() + + @mock.patch('nova.objects.host_mapping.discover_hosts') + def test_discover_hosts(self, mock_discover): + cm1 = objects.CellMapping(name='cell1') + cm2 = objects.CellMapping(name='cell2') + mock_discover.return_value = [objects.HostMapping(host='a', + cell_mapping=cm1), + objects.HostMapping(host='b', + cell_mapping=cm2)] + self.manager._discover_hosts_in_cells(mock.sentinel.context) + + @mock.patch('nova.scheduler.manager.LOG.debug') + @mock.patch('nova.scheduler.manager.LOG.warning') + 
@mock.patch('nova.objects.host_mapping.discover_hosts') + def test_discover_hosts_duplicate_host_mapping(self, mock_discover, + mock_log_warning, + mock_log_debug): + # This tests the scenario of multiple schedulers running discover_hosts + # at the same time. + mock_discover.side_effect = exception.HostMappingExists(name='a') + self.manager._discover_hosts_in_cells(mock.sentinel.context) + msg = ("This periodic task should only be enabled on a single " + "scheduler to prevent collisions between multiple " + "schedulers: Host 'a' mapping already exists") + mock_log_warning.assert_called_once_with(msg) + mock_log_debug.assert_not_called() + # Second collision should log at debug, not warning. + mock_log_warning.reset_mock() + self.manager._discover_hosts_in_cells(mock.sentinel.context) + mock_log_warning.assert_not_called() + mock_log_debug.assert_called_once_with(msg) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_request_filter.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_request_filter.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_request_filter.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_request_filter.py 2020-04-10 17:57:58.000000000 +0000 @@ -12,11 +12,13 @@ import mock import os_traits as ot + from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import timeutils from nova import context as nova_context from nova import exception +from nova.network import model as network_model from nova import objects from nova.scheduler import request_filter from nova import test @@ -400,3 +402,75 @@ log_lines = [c[0][0] for c in mock_log.debug.call_args_list] self.assertIn('added forbidden trait', log_lines[0]) self.assertIn('took %.1f seconds', log_lines[1]) + + @mock.patch.object(request_filter, 'LOG', new=mock.Mock()) + def test_transform_image_metadata(self): + self.flags(image_metadata_prefilter=True, group='scheduler') + properties = objects.ImageMetaProps( + hw_disk_bus=objects.fields.DiskBus.SATA, + hw_cdrom_bus=objects.fields.DiskBus.IDE, + hw_video_model=objects.fields.VideoModel.QXL, + hw_vif_model=network_model.VIF_MODEL_VIRTIO + ) + reqspec = objects.RequestSpec( + image=objects.ImageMeta(properties=properties), + flavor=objects.Flavor(extra_specs={}), + ) + self.assertTrue( + request_filter.transform_image_metadata(None, reqspec) + ) + expected = { + 'COMPUTE_GRAPHICS_MODEL_QXL', + 'COMPUTE_NET_VIF_MODEL_VIRTIO', + 'COMPUTE_STORAGE_BUS_IDE', + 'COMPUTE_STORAGE_BUS_SATA', + } + self.assertEqual(expected, reqspec.root_required) + + def test_transform_image_metadata__disabled(self): + self.flags(image_metadata_prefilter=False, group='scheduler') + reqspec = objects.RequestSpec(flavor=objects.Flavor(extra_specs={})) + # Assert that we completely skip the filter if disabled + self.assertFalse( + request_filter.transform_image_metadata(self.context, reqspec) + ) + self.assertEqual(set(), reqspec.root_required) + + @mock.patch.object(request_filter, 'LOG') + def test_accelerators_filter_with_device_profile(self, mock_log): + # First ensure that accelerators_filter is included + self.assertIn(request_filter.accelerators_filter, + request_filter.ALL_REQUEST_FILTERS) + + es = {'accel:device_profile': 'mydp'} + reqspec = objects.RequestSpec(flavor=objects.Flavor(extra_specs=es)) + self.assertEqual(set(), reqspec.root_required) + self.assertEqual(set(), reqspec.root_forbidden) + + # Request filter puts the trait into 
the request spec
+        request_filter.accelerators_filter(self.context, reqspec)
+        self.assertEqual({ot.COMPUTE_ACCELERATORS}, reqspec.root_required)
+        self.assertEqual(set(), reqspec.root_forbidden)
+
+        # Assert both the in-method logging and trace decorator.
+        log_lines = [c[0][0] for c in mock_log.debug.call_args_list]
+        self.assertIn('added required trait', log_lines[0])
+        self.assertIn('took %.1f seconds', log_lines[1])
+
+    @mock.patch.object(request_filter, 'LOG')
+    def test_accelerators_filter_no_device_profile(self, mock_log):
+        # First ensure that accelerators_filter is included
+        self.assertIn(request_filter.accelerators_filter,
+                      request_filter.ALL_REQUEST_FILTERS)
+
+        reqspec = objects.RequestSpec(flavor=objects.Flavor(extra_specs={}))
+        self.assertEqual(set(), reqspec.root_required)
+        self.assertEqual(set(), reqspec.root_forbidden)
+
+        # Request filter does not add any traits to the request spec
+        request_filter.accelerators_filter(self.context, reqspec)
+        self.assertEqual(set(), reqspec.root_required)
+        self.assertEqual(set(), reqspec.root_forbidden)
+
+        # Assert that the logger was never invoked
+        mock_log.assert_not_called()
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_scheduler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_scheduler.py
--- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/scheduler/test_scheduler.py 2020-02-10 08:50:32.000000000 +0000
+++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/scheduler/test_scheduler.py 1970-01-01 00:00:00.000000000 +0000
@@ -1,483 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-""" -Tests For Scheduler -""" - -import mock -import oslo_messaging as messaging -from oslo_utils.fixture import uuidsentinel as uuids - -from nova import context -from nova import exception -from nova import objects -from nova.scheduler import filter_scheduler -from nova.scheduler import host_manager -from nova.scheduler import manager -from nova import servicegroup -from nova import test -from nova.tests.unit import fake_server_actions -from nova.tests.unit.scheduler import fakes - - -class SchedulerManagerInitTestCase(test.NoDBTestCase): - """Test case for scheduler manager initiation.""" - manager_cls = manager.SchedulerManager - - @mock.patch.object(host_manager.HostManager, '_init_instance_info') - @mock.patch.object(host_manager.HostManager, '_init_aggregates') - def test_init_using_default_schedulerdriver(self, - mock_init_agg, - mock_init_inst): - driver = self.manager_cls().driver - self.assertIsInstance(driver, filter_scheduler.FilterScheduler) - - @mock.patch.object(host_manager.HostManager, '_init_instance_info') - @mock.patch.object(host_manager.HostManager, '_init_aggregates') - def test_init_nonexist_schedulerdriver(self, - mock_init_agg, - mock_init_inst): - self.flags(driver='nonexist_scheduler', group='scheduler') - # The entry point has to be defined in setup.cfg and nova-scheduler has - # to be deployed again before using a custom value. - self.assertRaises(RuntimeError, self.manager_cls) - - -class SchedulerManagerTestCase(test.NoDBTestCase): - """Test case for scheduler manager.""" - - manager_cls = manager.SchedulerManager - driver_cls = fakes.FakeScheduler - driver_plugin_name = 'fake_scheduler' - - @mock.patch.object(host_manager.HostManager, '_init_instance_info') - @mock.patch.object(host_manager.HostManager, '_init_aggregates') - def setUp(self, mock_init_agg, mock_init_inst): - super(SchedulerManagerTestCase, self).setUp() - self.flags(driver=self.driver_plugin_name, group='scheduler') - self.manager = self.manager_cls() - self.context = context.RequestContext('fake_user', 'fake_project') - self.topic = 'fake_topic' - self.fake_args = (1, 2, 3) - self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} - fake_server_actions.stub_out_action_events(self) - - def test_1_correct_init(self): - # Correct scheduler driver - manager = self.manager - self.assertIsInstance(manager.driver, self.driver_cls) - - @mock.patch('nova.scheduler.request_filter.process_reqspec') - @mock.patch('nova.scheduler.utils.resources_from_request_spec') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
- 'get_allocation_candidates') - def test_select_destination(self, mock_get_ac, mock_rfrs, mock_process): - fake_spec = objects.RequestSpec() - fake_spec.instance_uuid = uuids.instance - fake_version = "9.42" - mock_p_sums = mock.Mock() - fake_alloc_reqs = fakes.get_fake_alloc_reqs() - place_res = (fake_alloc_reqs, mock_p_sums, fake_version) - mock_get_ac.return_value = place_res - mock_rfrs.return_value.cpu_pinning_requested = False - expected_alloc_reqs_by_rp_uuid = { - cn.uuid: [fake_alloc_reqs[x]] - for x, cn in enumerate(fakes.COMPUTE_NODES) - } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - self.manager.select_destinations(self.context, spec_obj=fake_spec, - instance_uuids=[fake_spec.instance_uuid]) - mock_process.assert_called_once_with(self.context, fake_spec) - select_destinations.assert_called_once_with( - self.context, fake_spec, - [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, - mock_p_sums, fake_version, False) - mock_get_ac.assert_called_once_with( - self.context, mock_rfrs.return_value) - - # Now call select_destinations() with True values for the params - # introduced in RPC version 4.5 - select_destinations.reset_mock() - self.manager.select_destinations(None, spec_obj=fake_spec, - instance_uuids=[fake_spec.instance_uuid], - return_objects=True, return_alternates=True) - select_destinations.assert_called_once_with(None, fake_spec, - [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, - mock_p_sums, fake_version, True) - - @mock.patch('nova.scheduler.request_filter.process_reqspec') - @mock.patch('nova.scheduler.utils.resources_from_request_spec') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' - 'get_allocation_candidates') - def test_select_destination_return_objects(self, mock_get_ac, - mock_rfrs, mock_process): - fake_spec = objects.RequestSpec() - fake_spec.instance_uuid = uuids.instance - fake_version = "9.42" - mock_p_sums = mock.Mock() - fake_alloc_reqs = fakes.get_fake_alloc_reqs() - place_res = (fake_alloc_reqs, mock_p_sums, fake_version) - mock_get_ac.return_value = place_res - mock_rfrs.return_value.cpu_pinning_requested = False - expected_alloc_reqs_by_rp_uuid = { - cn.uuid: [fake_alloc_reqs[x]] - for x, cn in enumerate(fakes.COMPUTE_NODES) - } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - sel_obj = objects.Selection(service_host="fake_host", - nodename="fake_node", compute_node_uuid=uuids.compute_node, - cell_uuid=uuids.cell, limits=None) - select_destinations.return_value = [[sel_obj]] - # Pass True; should get the Selection object back. - dests = self.manager.select_destinations(None, spec_obj=fake_spec, - instance_uuids=[fake_spec.instance_uuid], - return_objects=True, return_alternates=True) - sel_host = dests[0][0] - self.assertIsInstance(sel_host, objects.Selection) - mock_process.assert_called_once_with(None, fake_spec) - # Since both return_objects and return_alternates are True, the - # driver should have been called with True for return_alternates. - select_destinations.assert_called_once_with(None, fake_spec, - [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, - mock_p_sums, fake_version, True) - - # Now pass False for return objects, but keep return_alternates as - # True. Verify that the manager converted the Selection object back - # to a dict. 
-            select_destinations.reset_mock()
-            dests = self.manager.select_destinations(None, spec_obj=fake_spec,
-                instance_uuids=[fake_spec.instance_uuid],
-                return_objects=False, return_alternates=True)
-            sel_host = dests[0]
-            self.assertIsInstance(sel_host, dict)
-            # Even though return_alternates was passed as True, since
-            # return_objects was False, the driver should have been called with
-            # return_alternates as False.
-            select_destinations.assert_called_once_with(None, fake_spec,
-                [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid,
-                mock_p_sums, fake_version, False)
-
-    @mock.patch('nova.scheduler.request_filter.process_reqspec')
-    @mock.patch('nova.scheduler.utils.resources_from_request_spec')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
-                'get_allocation_candidates')
-    def _test_select_destination(self, get_allocation_candidates_response,
-                                 mock_get_ac, mock_rfrs, mock_process):
-        fake_spec = objects.RequestSpec()
-        fake_spec.instance_uuid = uuids.instance
-        place_res = get_allocation_candidates_response
-        mock_get_ac.return_value = place_res
-        mock_rfrs.return_value.cpu_pinning_requested = False
-        with mock.patch.object(self.manager.driver, 'select_destinations'
-                               ) as select_destinations:
-            self.assertRaises(messaging.rpc.dispatcher.ExpectedException,
-                self.manager.select_destinations, self.context,
-                spec_obj=fake_spec,
-                instance_uuids=[fake_spec.instance_uuid])
-            select_destinations.assert_not_called()
-            mock_process.assert_called_once_with(self.context, fake_spec)
-            mock_get_ac.assert_called_once_with(
-                self.context, mock_rfrs.return_value)
-
-    def test_select_destination_old_placement(self):
-        """Tests that we will raise NoValidHost when the scheduler
-        report client's get_allocation_candidates() returns
-        (None, None, None), as it would if the placement service hasn't
-        been upgraded before the scheduler.
-        """
-        place_res = (None, None, None)
-        self._test_select_destination(place_res)
-
-    def test_select_destination_placement_connect_fails(self):
-        """Tests that we will raise NoValidHost when the scheduler
-        report client's get_allocation_candidates() returns None, which it
-        would if the connection to Placement failed and the safe_connect
-        decorator returns None.
-        """
-        place_res = None
-        self._test_select_destination(place_res)
-
-    def test_select_destination_no_candidates(self):
-        """Tests that we will raise NoValidHost when the scheduler
-        report client's get_allocation_candidates() returns ([], {}, None),
-        which it would if the placement service hasn't yet had compute
-        nodes populate inventory.
-        """
-        place_res = ([], {}, None)
-        self._test_select_destination(place_res)
-
-    @mock.patch('nova.scheduler.request_filter.process_reqspec')
-    @mock.patch('nova.scheduler.utils.resources_from_request_spec')
-    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
- 'get_allocation_candidates') - def test_select_destination_is_rebuild(self, mock_get_ac, mock_rfrs, - mock_process): - fake_spec = objects.RequestSpec( - scheduler_hints={'_nova_check_type': ['rebuild']}) - fake_spec.instance_uuid = uuids.instance - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - self.manager.select_destinations(self.context, spec_obj=fake_spec, - instance_uuids=[fake_spec.instance_uuid]) - select_destinations.assert_called_once_with( - self.context, fake_spec, - [fake_spec.instance_uuid], None, None, None, False) - mock_get_ac.assert_not_called() - mock_process.assert_not_called() - - @mock.patch('nova.scheduler.request_filter.process_reqspec') - @mock.patch('nova.scheduler.utils.resources_from_request_spec') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' - 'get_allocation_candidates') - def test_select_destination_with_4_3_client( - self, mock_get_ac, mock_rfrs, mock_process, - cpu_pinning_requested=False): - fake_spec = objects.RequestSpec() - mock_p_sums = mock.Mock() - fake_alloc_reqs = fakes.get_fake_alloc_reqs() - place_res = (fake_alloc_reqs, mock_p_sums, "42.0") - mock_get_ac.return_value = place_res - mock_rfrs.return_value.cpu_pinning_requested = cpu_pinning_requested - expected_alloc_reqs_by_rp_uuid = { - cn.uuid: [fake_alloc_reqs[x]] - for x, cn in enumerate(fakes.COMPUTE_NODES) - } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - self.manager.select_destinations(self.context, spec_obj=fake_spec) - mock_process.assert_called_once_with(self.context, fake_spec) - select_destinations.assert_called_once_with(self.context, - fake_spec, None, expected_alloc_reqs_by_rp_uuid, - mock_p_sums, "42.0", False) - mock_rfrs.assert_called_once_with( - self.context, fake_spec, mock.ANY, - enable_pinning_translate=True) - mock_get_ac.assert_called_once_with( - self.context, mock_rfrs.return_value) - - @mock.patch('nova.scheduler.manager.LOG.debug') - @mock.patch('nova.scheduler.request_filter.process_reqspec') - @mock.patch('nova.scheduler.utils.resources_from_request_spec') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' - 'get_allocation_candidates') - def test_select_destination_with_pcpu_fallback( - self, mock_get_ac, mock_rfrs, mock_process, mock_log): - """Check that we make a second request to placement if we've got a PCPU - request. - """ - self.flags(disable_fallback_pcpu_query=False, group='workarounds') - - # mock the result from placement. In reality, the two calls we expect - # would return two different results, but we don't care about that. 
All - # we want to check is that it _was_ called twice - fake_spec = objects.RequestSpec() - mock_p_sums = mock.Mock() - fake_alloc_reqs = fakes.get_fake_alloc_reqs() - place_res = (fake_alloc_reqs, mock_p_sums, "42.0") - mock_get_ac.return_value = place_res - - pcpu_rreq = mock.Mock() - pcpu_rreq.cpu_pinning_requested = True - vcpu_rreq = mock.Mock() - mock_rfrs.side_effect = [pcpu_rreq, vcpu_rreq] - - # as above, the two allocation requests against each compute node would - # be different in reality, and not all compute nodes might have two - # allocation requests, but that doesn't matter for this simple test - expected_alloc_reqs_by_rp_uuid = { - cn.uuid: [fake_alloc_reqs[x], fake_alloc_reqs[x]] - for x, cn in enumerate(fakes.COMPUTE_NODES) - } - - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - self.manager.select_destinations(self.context, spec_obj=fake_spec) - select_destinations.assert_called_once_with(self.context, - fake_spec, None, expected_alloc_reqs_by_rp_uuid, - mock_p_sums, "42.0", False) - - mock_process.assert_called_once_with(self.context, fake_spec) - mock_log.assert_called_with( - 'Requesting fallback allocation candidates with VCPU instead of ' - 'PCPU') - mock_rfrs.assert_has_calls([ - mock.call(self.context, fake_spec, mock.ANY, - enable_pinning_translate=True), - mock.call(self.context, fake_spec, mock.ANY, - enable_pinning_translate=False), - ]) - mock_get_ac.assert_has_calls([ - mock.call(self.context, pcpu_rreq), - mock.call(self.context, vcpu_rreq), - ]) - - def test_select_destination_with_pcpu_fallback_disabled(self): - """Check that we do not make a second request to placement if we've - been told not to, even though we've got a PCPU instance. - """ - self.flags(disable_fallback_pcpu_query=True, group='workarounds') - - self.test_select_destination_with_4_3_client( - cpu_pinning_requested=True) - - # TODO(sbauza): Remove that test once the API v4 is removed - @mock.patch('nova.scheduler.request_filter.process_reqspec') - @mock.patch('nova.scheduler.utils.resources_from_request_spec') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
- 'get_allocation_candidates') - @mock.patch.object(objects.RequestSpec, 'from_primitives') - def test_select_destination_with_old_client(self, from_primitives, - mock_get_ac, mock_rfrs, mock_process): - fake_spec = objects.RequestSpec() - fake_spec.instance_uuid = uuids.instance - from_primitives.return_value = fake_spec - mock_p_sums = mock.Mock() - fake_alloc_reqs = fakes.get_fake_alloc_reqs() - place_res = (fake_alloc_reqs, mock_p_sums, "42.0") - mock_get_ac.return_value = place_res - mock_rfrs.return_value.cpu_pinning_requested = False - expected_alloc_reqs_by_rp_uuid = { - cn.uuid: [fake_alloc_reqs[x]] - for x, cn in enumerate(fakes.COMPUTE_NODES) - } - with mock.patch.object(self.manager.driver, 'select_destinations' - ) as select_destinations: - self.manager.select_destinations( - self.context, request_spec='fake_spec', - filter_properties='fake_props', - instance_uuids=[fake_spec.instance_uuid]) - mock_process.assert_called_once_with(self.context, fake_spec) - select_destinations.assert_called_once_with( - self.context, fake_spec, - [fake_spec.instance_uuid], expected_alloc_reqs_by_rp_uuid, - mock_p_sums, "42.0", False) - mock_get_ac.assert_called_once_with( - self.context, mock_rfrs.return_value) - - def test_update_aggregates(self): - with mock.patch.object(self.manager.driver.host_manager, - 'update_aggregates' - ) as update_aggregates: - self.manager.update_aggregates(None, aggregates='agg') - update_aggregates.assert_called_once_with('agg') - - def test_delete_aggregate(self): - with mock.patch.object(self.manager.driver.host_manager, - 'delete_aggregate' - ) as delete_aggregate: - self.manager.delete_aggregate(None, aggregate='agg') - delete_aggregate.assert_called_once_with('agg') - - def test_update_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'update_instance_info') as mock_update: - self.manager.update_instance_info(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_info) - mock_update.assert_called_once_with(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_info) - - def test_delete_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'delete_instance_info') as mock_delete: - self.manager.delete_instance_info(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_uuid) - mock_delete.assert_called_once_with(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_uuid) - - def test_sync_instance_info(self): - with mock.patch.object(self.manager.driver.host_manager, - 'sync_instance_info') as mock_sync: - self.manager.sync_instance_info(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_uuids) - mock_sync.assert_called_once_with(mock.sentinel.context, - mock.sentinel.host_name, - mock.sentinel.instance_uuids) - - def test_reset(self): - with mock.patch.object(self.manager.driver.host_manager, - 'refresh_cells_caches') as mock_refresh: - self.manager.reset() - mock_refresh.assert_called_once_with() - - @mock.patch('nova.objects.host_mapping.discover_hosts') - def test_discover_hosts(self, mock_discover): - cm1 = objects.CellMapping(name='cell1') - cm2 = objects.CellMapping(name='cell2') - mock_discover.return_value = [objects.HostMapping(host='a', - cell_mapping=cm1), - objects.HostMapping(host='b', - cell_mapping=cm2)] - self.manager._discover_hosts_in_cells(mock.sentinel.context) - - @mock.patch('nova.scheduler.manager.LOG.debug') - @mock.patch('nova.scheduler.manager.LOG.warning') - 
@mock.patch('nova.objects.host_mapping.discover_hosts') - def test_discover_hosts_duplicate_host_mapping(self, mock_discover, - mock_log_warning, - mock_log_debug): - # This tests the scenario of multiple schedulers running discover_hosts - # at the same time. - mock_discover.side_effect = exception.HostMappingExists(name='a') - self.manager._discover_hosts_in_cells(mock.sentinel.context) - msg = ("This periodic task should only be enabled on a single " - "scheduler to prevent collisions between multiple " - "schedulers: Host 'a' mapping already exists") - mock_log_warning.assert_called_once_with(msg) - mock_log_debug.assert_not_called() - # Second collision should log at debug, not warning. - mock_log_warning.reset_mock() - self.manager._discover_hosts_in_cells(mock.sentinel.context) - mock_log_warning.assert_not_called() - mock_log_debug.assert_called_once_with(msg) - - -class SchedulerTestCase(test.NoDBTestCase): - """Test case for base scheduler driver class.""" - - # So we can subclass this test and re-use tests if we need. - driver_cls = fakes.FakeScheduler - - @mock.patch.object(host_manager.HostManager, '_init_instance_info') - @mock.patch.object(host_manager.HostManager, '_init_aggregates') - def setUp(self, mock_init_agg, mock_init_inst): - super(SchedulerTestCase, self).setUp() - self.driver = self.driver_cls() - self.context = context.RequestContext('fake_user', 'fake_project') - self.topic = 'fake_topic' - self.servicegroup_api = servicegroup.API() - - @mock.patch('nova.objects.ServiceList.get_by_topic') - @mock.patch('nova.servicegroup.API.service_is_up') - def test_hosts_up(self, mock_service_is_up, mock_get_by_topic): - service1 = objects.Service(host='host1') - service2 = objects.Service(host='host2') - services = objects.ServiceList(objects=[service1, service2]) - - mock_get_by_topic.return_value = services - mock_service_is_up.side_effect = [False, True] - - result = self.driver.hosts_up(self.context, self.topic) - self.assertEqual(result, ['host2']) - - mock_get_by_topic.assert_called_once_with(self.context, self.topic) - calls = [mock.call(service1), mock.call(service2)] - self.assertEqual(calls, mock_service_is_up.call_args_list) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_availability_zones.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_availability_zones.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_availability_zones.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_availability_zones.py 2020-04-10 17:57:58.000000000 +0000 @@ -78,7 +78,7 @@ aggregate.delete_host(service['host']) def test_rest_availability_zone_reset_cache(self): - az._get_cache().add('cache', 'fake_value') + az._get_cache().region.get_or_create('cache', lambda: 'fake_value') az.reset_cache() self.assertIsNone(az._get_cache().get('cache')) @@ -215,7 +215,7 @@ zones, not_zones = az.get_availability_zones(self.context, host_api) self.assertEqual(['nova-test', 'nova-test2'], zones) - self.assertEqual(['nova-test3', 'nova'], not_zones) + self.assertEqual(['nova', 'nova-test3'], not_zones) zones = az.get_availability_zones(self.context, host_api, get_only_available=True) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_metadata.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_metadata.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_metadata.py 2020-02-10 08:50:32.000000000 +0000 +++ 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_metadata.py 2020-04-10 17:57:58.000000000 +0000 @@ -1401,6 +1401,45 @@ self.assertEqual(200, response.status_int) @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) + def test_metadata_lb_proxy_many_networks(self, mock_get_client): + + def fake_list_ports(context, fixed_ips, network_id, fields): + if 'f-f-f-f' in network_id: + return {'ports': + [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]} + return {'ports': []} + + self.flags(service_metadata_proxy=True, group='neutron') + handler.MAX_QUERY_NETWORKS = 10 + + self.expected_instance_id = b'a-b-c-d' + + # with X-Metadata-Provider + proxy_lb_id = 'edge-x' + + mock_client = mock_get_client.return_value + subnet_list = [{'network_id': 'f-f-f-' + chr(c)} + for c in range(ord('a'), ord('z'))] + mock_client.list_subnets.return_value = { + 'subnets': subnet_list} + + with mock.patch.object( + mock_client, 'list_ports', + side_effect=fake_list_ports) as mock_list_ports: + + response = fake_request( + self, self.mdinst, + relpath="/2009-04-04/user-data", + address="192.192.192.2", + fake_get_metadata_by_instance_id=self._fake_x_get_metadata, + headers={'X-Forwarded-For': '192.192.192.2', + 'X-Metadata-Provider': proxy_lb_id}) + + self.assertEqual(3, mock_list_ports.call_count) + + self.assertEqual(200, response.status_int) + + @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock()) def _metadata_handler_with_provider_id(self, hnd, mock_get_client): # with X-Metadata-Provider proxy_lb_id = 'edge-x' diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_policy.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_policy.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_policy.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_policy.py 2020-04-10 17:57:58.000000000 +0000 @@ -328,7 +328,9 @@ "os_compute_api:os-aggregates:add_host", "os_compute_api:os-aggregates:remove_host", "os_compute_api:os-aggregates:set_metadata", -"os_compute_api:os-agents", +"os_compute_api:os-agents:create", +"os_compute_api:os-agents:update", +"os_compute_api:os-agents:delete", "os_compute_api:os-baremetal-nodes", "os_compute_api:os-evacuate", "os_compute_api:os-extended-server-attributes", @@ -341,9 +343,7 @@ "os_compute_api:os-flavor-manage:update", "os_compute_api:os-flavor-manage:delete", "os_compute_api:os-hosts", -"os_compute_api:os-hypervisors", "os_compute_api:os-instance-actions:events", -"os_compute_api:os-instance-usage-audit-log", "os_compute_api:os-lock-server:unlock:unlock_override", "os_compute_api:os-migrate-server:migrate", "os_compute_api:os-migrate-server:migrate_live", @@ -355,17 +355,13 @@ "os_compute_api:os-shelve:shelve_offload", "os_compute_api:os-simple-tenant-usage:list", "os_compute_api:os-availability-zone:detail", -"os_compute_api:os-used-limits", -"os_compute_api:os-migrations:index", "os_compute_api:os-assisted-volume-snapshots:create", "os_compute_api:os-assisted-volume-snapshots:delete", "os_compute_api:os-console-auth-tokens", "os_compute_api:os-quota-class-sets:update", "os_compute_api:os-server-external-events:create", -"os_compute_api:os-volumes-attachments:update", +"os_compute_api:os-volumes-attachments:swap", "os_compute_api:servers:create:zero_disk_flavor", -"os_compute_api:servers:migrations:index", -"os_compute_api:servers:migrations:show", ) self.admin_or_owner_rules = ( @@ -415,28 +411,27 @@ "os_compute_api:servers:update", 
"os_compute_api:servers:create_image:allow_volume_backed", "os_compute_api:os-admin-password", -"os_compute_api:os-attach-interfaces", "os_compute_api:os-attach-interfaces:create", "os_compute_api:os-attach-interfaces:delete", "os_compute_api:os-console-output", "os_compute_api:os-remote-consoles", -"os_compute_api:os-deferred-delete", +"os_compute_api:os-deferred-delete:restore", +"os_compute_api:os-deferred-delete:force", "os_compute_api:os-flavor-access", "os_compute_api:os-flavor-extra-specs:index", "os_compute_api:os-flavor-extra-specs:show", "os_compute_api:os-floating-ip-pools", "os_compute_api:os-floating-ips", -"os_compute_api:os-instance-actions", -"os_compute_api:limits", "os_compute_api:os-multinic", "os_compute_api:os-networks:view", "os_compute_api:os-rescue", +"os_compute_api:os-unrescue", "os_compute_api:os-security-groups", -"os_compute_api:os-server-password", +"os_compute_api:os-security-groups:add", +"os_compute_api:os-security-groups:remove", +"os_compute_api:os-server-password:clear", "os_compute_api:os-server-tags:delete", "os_compute_api:os-server-tags:delete_all", -"os_compute_api:os-server-tags:index", -"os_compute_api:os-server-tags:show", "os_compute_api:os-server-tags:update", "os_compute_api:os-server-tags:update_all", "os_compute_api:os-server-groups:index", @@ -446,19 +441,47 @@ "os_compute_api:os-shelve:shelve", "os_compute_api:os-shelve:unshelve", "os_compute_api:os-volumes", -"os_compute_api:os-volumes-attachments:index", -"os_compute_api:os-volumes-attachments:show", "os_compute_api:os-volumes-attachments:create", "os_compute_api:os-volumes-attachments:delete", -"os_compute_api:os-availability-zone:list", +"os_compute_api:os-volumes-attachments:update", ) self.allow_all_rules = ( "os_compute_api:os-quota-sets:defaults", +"os_compute_api:os-availability-zone:list", +"os_compute_api:limits", ) self.system_reader_rules = ( +"os_compute_api:servers:migrations:index", +"os_compute_api:servers:migrations:show", +"os_compute_api:os-migrations:index", "os_compute_api:os-services:list", +"os_compute_api:os-instance-actions:events:details", +"os_compute_api:os-instance-usage-audit-log:list", +"os_compute_api:os-instance-usage-audit-log:show", +"os_compute_api:os-agents:list", +"os_compute_api:os-hypervisors:list", +"os_compute_api:os-hypervisors:list-detail", +"os_compute_api:os-hypervisors:show", +"os_compute_api:os-hypervisors:statistics", +"os_compute_api:os-hypervisors:uptime", +"os_compute_api:os-hypervisors:search", +"os_compute_api:os-hypervisors:servers", +"os_compute_api:limits:other_project", +) + + self.system_reader_or_owner_rules = ( +"os_compute_api:os-security-groups:list", +"os_compute_api:os-volumes-attachments:index", +"os_compute_api:os-volumes-attachments:show", +"os_compute_api:os-attach-interfaces:list", +"os_compute_api:os-attach-interfaces:show", +"os_compute_api:os-instance-actions:list", +"os_compute_api:os-instance-actions:show", +"os_compute_api:os-server-password:show", +"os_compute_api:os-server-tags:index", +"os_compute_api:os-server-tags:show", ) self.allow_nobody_rules = ( @@ -509,5 +532,6 @@ result = set(rules.keys()) - set(self.admin_only_rules + self.admin_or_owner_rules + self.allow_all_rules + self.system_reader_rules + + self.system_reader_or_owner_rules + self.allow_nobody_rules + special_rules) self.assertEqual(set([]), result) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_profiler.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_profiler.py --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_profiler.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_profiler.py 2020-04-10 17:57:58.000000000 +0000 @@ -54,7 +54,7 @@ 'nova.conductor.manager.ConductorManager', 'nova.conductor.rpcapi.ComputeTaskAPI', 'nova.conductor.rpcapi.ConductorAPI', - 'nova.image.api.API', + 'nova.image.glance.API', 'nova.network.neutron.ClientWrapper', 'nova.scheduler.manager.SchedulerManager', 'nova.scheduler.rpcapi.SchedulerAPI', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/test_utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/test_utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -1045,104 +1045,6 @@ self.adap.get_endpoint.assert_called_once_with() -class RunOnceTests(test.NoDBTestCase): - - fake_logger = mock.MagicMock() - - @utils.run_once("already ran once", fake_logger) - def dummy_test_func(self, fail=False): - if fail: - raise ValueError() - return True - - def setUp(self): - super(RunOnceTests, self).setUp() - self.dummy_test_func.reset() - RunOnceTests.fake_logger.reset_mock() - - def test_wrapped_funtions_called_once(self): - self.assertFalse(self.dummy_test_func.called) - result = self.dummy_test_func() - self.assertTrue(result) - self.assertTrue(self.dummy_test_func.called) - - # assert that on second invocation no result - # is returned and that the logger is invoked. - result = self.dummy_test_func() - RunOnceTests.fake_logger.assert_called_once() - self.assertIsNone(result) - - def test_wrapped_funtions_called_once_raises(self): - self.assertFalse(self.dummy_test_func.called) - self.assertRaises(ValueError, self.dummy_test_func, fail=True) - self.assertTrue(self.dummy_test_func.called) - - # assert that on second invocation no result - # is returned and that the logger is invoked. 
- result = self.dummy_test_func() - RunOnceTests.fake_logger.assert_called_once() - self.assertIsNone(result) - - def test_wrapped_funtions_can_be_reset(self): - # assert we start with a clean state - self.assertFalse(self.dummy_test_func.called) - result = self.dummy_test_func() - self.assertTrue(result) - - self.dummy_test_func.reset() - # assert we restored a clean state - self.assertFalse(self.dummy_test_func.called) - result = self.dummy_test_func() - self.assertTrue(result) - - # assert that we never called the logger - RunOnceTests.fake_logger.assert_not_called() - - def test_reset_calls_cleanup(self): - mock_clean = mock.Mock() - - @utils.run_once("already ran once", self.fake_logger, - cleanup=mock_clean) - def f(): - pass - - f() - self.assertTrue(f.called) - - f.reset() - self.assertFalse(f.called) - mock_clean.assert_called_once_with() - - def test_clean_is_not_called_at_reset_if_wrapped_not_called(self): - mock_clean = mock.Mock() - - @utils.run_once("already ran once", self.fake_logger, - cleanup=mock_clean) - def f(): - pass - - self.assertFalse(f.called) - - f.reset() - self.assertFalse(f.called) - self.assertFalse(mock_clean.called) - - def test_reset_works_even_if_cleanup_raises(self): - mock_clean = mock.Mock(side_effect=ValueError()) - - @utils.run_once("already ran once", self.fake_logger, - cleanup=mock_clean) - def f(): - pass - - f() - self.assertTrue(f.called) - - self.assertRaises(ValueError, f.reset) - self.assertFalse(f.called) - mock_clean.assert_called_once_with() - - class TestResourceClassNormalize(test.NoDBTestCase): def test_normalize_name(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/ironic/test_driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/ironic/test_driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/ironic/test_driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/ironic/test_driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -3004,7 +3004,7 @@ instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid) - self.driver.rescue(self.ctx, instance, None, None, 'xyz') + self.driver.rescue(self.ctx, instance, None, None, 'xyz', None) mock_sps.assert_called_once_with(node.uuid, 'rescue', rescue_password='xyz') @@ -3021,7 +3021,7 @@ self.assertRaises(exception.InstanceRescueFailure, self.driver.rescue, - self.ctx, instance, None, None, 'xyz') + self.ctx, instance, None, None, 'xyz', None) @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') @@ -3035,7 +3035,7 @@ self.assertRaises(exception.InstanceRescueFailure, self.driver.rescue, - self.ctx, instance, None, None, 'xyz') + self.ctx, instance, None, None, 'xyz', None) @mock.patch.object(ironic_driver.IronicDriver, '_validate_instance_and_node') @@ -3051,7 +3051,7 @@ self.assertRaises(exception.InstanceRescueFailure, self.driver.rescue, - self.ctx, instance, None, None, 'xyz') + self.ctx, instance, None, None, 'xyz', None) @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall') @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state') @@ -3316,17 +3316,28 @@ self.assertEqual(SENTINEL, self.driver.hash_ring) self.mock_is_up.assert_has_calls(is_up_calls) + def test__refresh_hash_ring_same_host_different_case(self): + # Test that we treat Host1 and host1 as the same host + # CONF.host is set to 'host1' in __test_refresh_hash_ring + services = ['Host1'] + expected_hosts = {'host1'} + self.mock_is_up.return_value = True + 
self._test__refresh_hash_ring(services, expected_hosts) + def test__refresh_hash_ring_one_compute(self): services = ['host1'] expected_hosts = {'host1'} self.mock_is_up.return_value = True self._test__refresh_hash_ring(services, expected_hosts) - def test__refresh_hash_ring_many_computes(self): + @mock.patch('nova.virt.ironic.driver.LOG.debug') + def test__refresh_hash_ring_many_computes(self, mock_log_debug): services = ['host1', 'host2', 'host3'] expected_hosts = {'host1', 'host2', 'host3'} self.mock_is_up.return_value = True self._test__refresh_hash_ring(services, expected_hosts) + expected_msg = 'Hash ring members are %s' + mock_log_debug.assert_called_once_with(expected_msg, set(services)) def test__refresh_hash_ring_one_compute_new_compute(self): services = [] @@ -3451,6 +3462,26 @@ **kwargs) self.assertIsNotNone(self.driver.node_cache_time) + def test__refresh_cache_same_host_different_case(self): + # Test that we treat Host1 and host1 as the same host + self.host = 'Host1' + self.flags(host=self.host) + instances = [] + nodes = [ + _get_cached_node( + uuid=uuidutils.generate_uuid(), instance_uuid=None), + _get_cached_node( + uuid=uuidutils.generate_uuid(), instance_uuid=None), + _get_cached_node( + uuid=uuidutils.generate_uuid(), instance_uuid=None), + ] + hosts = ['host1', 'host1', 'host1'] + + self._test__refresh_cache(instances, nodes, hosts) + + expected_cache = {n.uuid: n for n in nodes} + self.assertEqual(expected_cache, self.driver.node_cache) + def test__refresh_cache(self): # normal operation, one compute service instances = [] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/fake_imagebackend.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/fake_imagebackend.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/fake_imagebackend.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/fake_imagebackend.py 2020-04-10 17:57:58.000000000 +0000 @@ -184,6 +184,12 @@ # class. 
image_init.SUPPORTS_CLONE = False + # Ditto for the 'is_shared_block_storage' function + def is_shared_block_storage(): + return False + + setattr(image_init, 'is_shared_block_storage', is_shared_block_storage) + return image_init def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs): @@ -201,7 +207,8 @@ self.imported_files.append((local_filename, remote_filename)) def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode, - extra_specs, hypervisor_version, disk_unit=None): + extra_specs, hypervisor_version, disk_unit=None, + boot_order=None): # For tests in test_virt_drivers which expect libvirt_info to be # functional info = config.LibvirtConfigGuestDisk() @@ -212,4 +219,6 @@ info.driver_cache = cache_mode info.driver_format = 'raw' info.source_path = mock_disk.path + if boot_order: + info.boot_order = boot_order return info diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/fakelibvirt.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/fakelibvirt.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/fakelibvirt.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/fakelibvirt.py 2020-04-10 17:57:58.000000000 +0000 @@ -21,6 +21,7 @@ from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel as uuids +from nova import conf from nova.objects import fields as obj_fields from nova.tests.unit.virt.libvirt import fake_libvirt_data from nova.virt.libvirt import config as vconfig @@ -40,6 +41,7 @@ LOG = logging.getLogger(__name__) +CONF = conf.CONF # virDomainState VIR_DOMAIN_NOSTATE = 0 @@ -61,6 +63,9 @@ VIR_DOMAIN_BLOCK_REBASE_COPY = 8 VIR_DOMAIN_BLOCK_REBASE_COPY_DEV = 32 +# virDomainBlockResize +VIR_DOMAIN_BLOCK_RESIZE_BYTES = 1 + VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1 VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 @@ -190,10 +195,18 @@ VF_DRIVER_NAME = 'ixgbevf' VF_CAP_TYPE = 'phys_function' +MDEV_CAPABLE_VEND_ID = '10DE' +MDEV_CAPABLE_VEND_NAME = 'Nvidia' +MDEV_CAPABLE_PROD_ID = '0FFE' +MDEV_CAPABLE_PROD_NAME = 'GRID M60-0B' +MDEV_CAPABLE_DRIVER_NAME = 'nvidia' +MDEV_CAPABLE_CAP_TYPE = 'mdev_types' + NVIDIA_11_VGPU_TYPE = 'nvidia-11' -PGPU1_PCI_ADDR = 'pci_0000_06_00_0' -PGPU2_PCI_ADDR = 'pci_0000_07_00_0' -PGPU3_PCI_ADDR = 'pci_0000_08_00_0' +NVIDIA_12_VGPU_TYPE = 'nvidia-12' +PGPU1_PCI_ADDR = 'pci_0000_81_00_0' +PGPU2_PCI_ADDR = 'pci_0000_81_01_0' +PGPU3_PCI_ADDR = 'pci_0000_81_02_0' class FakePCIDevice(object): @@ -235,9 +248,16 @@ """.strip()) # noqa cap_templ = "%(addresses)s" addr_templ = "
" # noqa + mdevtypes_templ = textwrap.dedent(""" + + GRID M60-0Bvfio-pci + %(instances)s + """.strip()) # noqa + + is_capable_of_mdevs = False def __init__(self, dev_type, slot, function, iommu_group, numa_node, - vf_ratio=None): + vf_ratio=None, multiple_gpu_types=False): """Populate pci devices :param dev_type: (string) Indicates the type of the device (PCI, PF, @@ -248,8 +268,11 @@ :param numa_node: (int) NUMA node of the device. :param vf_ratio: (int) Ratio of Virtual Functions on Physical. Only applicable if ``dev_type`` is one of: ``PF``, ``VF``. + :param multiple_gpu_types: (bool) Supports different vGPU types """ + vend_id = PCI_VEND_ID + vend_name = PCI_VEND_NAME if dev_type == 'PCI': if vf_ratio: raise ValueError('vf_ratio does not apply for PCI devices') @@ -290,14 +313,34 @@ 'function': 0, } } + elif dev_type == 'MDEV_TYPES': + prod_id = MDEV_CAPABLE_PROD_ID + prod_name = MDEV_CAPABLE_PROD_NAME + driver = MDEV_CAPABLE_DRIVER_NAME + vend_id = MDEV_CAPABLE_VEND_ID + vend_name = MDEV_CAPABLE_VEND_NAME + types = [self.mdevtypes_templ % { + 'type_id': NVIDIA_11_VGPU_TYPE, + 'instances': 16, + }] + if multiple_gpu_types: + types.append(self.mdevtypes_templ % { + 'type_id': NVIDIA_12_VGPU_TYPE, + 'instances': 8, + }) + capability = self.cap_templ % { + 'cap_type': MDEV_CAPABLE_CAP_TYPE, + 'addresses': '\n'.join(types) + } + self.is_capable_of_mdevs = True else: raise ValueError('Expected one of: PCI, VF, PCI') self.pci_device = self.pci_device_template % { 'slot': slot, 'function': function, - 'vend_id': PCI_VEND_ID, - 'vend_name': PCI_VEND_NAME, + 'vend_id': vend_id, + 'vend_name': vend_name, 'prod_id': prod_id, 'prod_name': prod_name, 'driver': driver, @@ -321,26 +364,31 @@ TOTAL_NUMA_NODES = 2 pci_devname_template = 'pci_0000_81_%(slot)02x_%(function)d' - def __init__(self, num_pci=0, num_pfs=2, num_vfs=8, numa_node=None): + def __init__(self, num_pci=0, num_pfs=2, num_vfs=8, num_mdevcap=0, + numa_node=None, multiple_gpu_types=False): """Create a new HostPCIDevicesInfo object. - :param num_pci: (int) The number of (non-SR-IOV) PCI devices. + :param num_pci: (int) The number of (non-SR-IOV) and (non-MDEV capable) + PCI devices. :param num_pfs: (int) The number of PCI SR-IOV Physical Functions. :param num_vfs: (int) The number of PCI SR-IOV Virtual Functions. + :param num_mdevcap: (int) The number of PCI devices capable of creating + mediated devices. :param iommu_group: (int) Initial IOMMU group ID. :param numa_node: (int) NUMA node of the device; if set all of the devices will be assigned to the specified node else they will be split between ``$TOTAL_NUMA_NODES`` nodes. 
+ :param multiple_gpu_types: (bool) Supports different vGPU types """ self.devices = {} - if not (num_vfs or num_pfs): + if not (num_vfs or num_pfs) and not num_mdevcap: return if num_vfs and not num_pfs: raise ValueError('Cannot create VFs without PFs') - if num_vfs % num_pfs: + if num_pfs and num_vfs % num_pfs: raise ValueError('num_vfs must be a factor of num_pfs') slot = 0 @@ -364,6 +412,24 @@ slot += 1 iommu_group += 1 + # Generate MDEV capable devs + for dev in range(num_mdevcap): + pci_dev_name = self.pci_devname_template % { + 'slot': slot, 'function': function} + + LOG.info('Generating MDEV capable device %r', pci_dev_name) + + self.devices[pci_dev_name] = FakePCIDevice( + dev_type='MDEV_TYPES', + slot=slot, + function=function, + iommu_group=iommu_group, + numa_node=self._calc_numa_node(dev, numa_node), + multiple_gpu_types=multiple_gpu_types) + + slot += 1 + iommu_group += 1 + vf_ratio = num_vfs // num_pfs if num_pfs else 0 # Generate PFs @@ -420,6 +486,10 @@ pci_dev = self.devices.get(device_name) return pci_dev + def get_all_mdev_capable_devices(self): + return [dev for dev in self.devices + if self.devices[dev].is_capable_of_mdevs] + class FakeMdevDevice(object): template = """ @@ -448,21 +518,11 @@ class HostMdevDevicesInfo(object): - def __init__(self): - self.devices = { - 'mdev_4b20d080_1b54_4048_85b3_a6a62d165c01': - FakeMdevDevice( - dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c01', - type_id=NVIDIA_11_VGPU_TYPE, parent=PGPU1_PCI_ADDR), - 'mdev_4b20d080_1b54_4048_85b3_a6a62d165c02': - FakeMdevDevice( - dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c02', - type_id=NVIDIA_11_VGPU_TYPE, parent=PGPU2_PCI_ADDR), - 'mdev_4b20d080_1b54_4048_85b3_a6a62d165c03': - FakeMdevDevice( - dev_name='mdev_4b20d080_1b54_4048_85b3_a6a62d165c03', - type_id=NVIDIA_11_VGPU_TYPE, parent=PGPU3_PCI_ADDR), - } + def __init__(self, devices=None): + if devices is not None: + self.devices = devices + else: + self.devices = {} def get_all_devices(self): return self.devices.keys() @@ -1066,6 +1126,12 @@ ''' % vpmem + serial_console = '' + if CONF.serial_console.enabled: + serial_console = """ + + """ + return ''' %(name)s %(uuid)s @@ -1093,17 +1159,7 @@ function='0x1'/> %(nics)s - - - - - - - - - - - + %(serial_console)s @@ -1132,7 +1188,8 @@ 'disks': disks, 'nics': nics, 'hostdevs': hostdevs, - 'vpmems': vpmems} + 'vpmems': vpmems, + 'serial_console': serial_console} def managedSave(self, flags): self._connection._mark_not_running(self) @@ -1173,7 +1230,7 @@ def blockJobAbort(self, disk, flags): pass - def blockResize(self, disk, size): + def blockResize(self, disk, size, flags): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): @@ -1266,7 +1323,7 @@ self.pci_info = pci_info or HostPCIDevicesInfo(num_pci=0, num_pfs=0, num_vfs=0) - self.mdev_info = mdev_info or [] + self.mdev_info = mdev_info or HostMdevDevicesInfo(devices={}) self.hostname = hostname or 'compute1' def _add_filter(self, nwfilter): @@ -1571,10 +1628,7 @@ if cap == 'mdev': return self.mdev_info.get_all_devices() if cap == 'mdev_types': - # TODO(gibi): We should return something like - # https://libvirt.org/drvnodedev.html#MDEVCap but I tried and it - # did not work for me. 
- return None + return self.pci_info.get_all_mdev_capable_devices() else: raise ValueError('Capability "%s" is not supported' % cap) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_blockinfo.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_blockinfo.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_blockinfo.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_blockinfo.py 2020-04-10 17:57:58.000000000 +0000 @@ -76,22 +76,25 @@ self.test_instance['old_flavor'] = None self.test_instance['new_flavor'] = None - def test_volume_in_mapping(self): - swap = {'device_name': '/dev/sdb', - 'swap_size': 1} + def _test_block_device_info(self, with_eph=True, with_swap=True, + with_bdms=True): + swap = {'device_name': '/dev/vdb', 'swap_size': 1} ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4', - 'device_name': '/dev/sdc1', 'size': 10}, + 'device_name': '/dev/vdc1', 'size': 10}, {'disk_bus': 'ide', 'guest_format': None, - 'device_name': '/dev/sdd', 'size': 10}] + 'device_name': '/dev/vdd', 'size': 10}] block_device_mapping = [{'mount_device': '/dev/sde', 'device_path': 'fake_device'}, {'mount_device': '/dev/sdf', 'device_path': 'fake_device'}] - block_device_info = { - 'root_device_name': '/dev/sda', - 'swap': swap, - 'ephemerals': ephemerals, - 'block_device_mapping': block_device_mapping} + return {'root_device_name': '/dev/vda', + 'swap': swap if with_swap else {}, + 'ephemerals': ephemerals if with_eph else [], + 'block_device_mapping': + block_device_mapping if with_bdms else []} + + def test_volume_in_mapping(self): + block_device_info = self._test_block_device_info() def _assert_volume_in_mapping(device_name, true_or_false): self.assertEqual( @@ -99,10 +102,10 @@ block_device.volume_in_mapping(device_name, block_device_info)) - _assert_volume_in_mapping('sda', False) - _assert_volume_in_mapping('sdb', True) - _assert_volume_in_mapping('sdc1', True) - _assert_volume_in_mapping('sdd', True) + _assert_volume_in_mapping('vda', False) + _assert_volume_in_mapping('vdb', True) + _assert_volume_in_mapping('vdc1', True) + _assert_volume_in_mapping('vdd', True) _assert_volume_in_mapping('sde', True) _assert_volume_in_mapping('sdf', True) _assert_volume_in_mapping('sdg', False) @@ -268,6 +271,206 @@ } self.assertEqual(expect, mapping) + def _test_get_disk_mapping_stable_rescue( + self, rescue_props, expected, block_device_info, with_local=False): + instance = objects.Instance(**self.test_instance) + + # Make disk.local disks optional per test as found in + # nova.virt.libvirt.BlockInfo.get_default_ephemeral_info + instance.ephemeral_gb = '20' if with_local else None + + image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + + rescue_image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + rescue_props = objects.ImageMetaProps.from_dict(rescue_props) + rescue_image_meta.properties = rescue_props + + mapping = blockinfo.get_disk_mapping("kvm", instance, "virtio", "ide", + image_meta, rescue=True, block_device_info=block_device_info, + rescue_image_meta=rescue_image_meta) + + # Assert that the expected mapping is returned from get_disk_mapping + self.assertEqual(expected, mapping) + + def test_get_disk_mapping_stable_rescue_virtio_disk(self): + """Assert the disk mapping when rescuing using a virtio disk""" + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info( + with_eph=False, 
with_swap=False, with_bdms=False) + expected = { + 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_ide_disk(self): + """Assert the disk mapping when rescuing using an IDE disk""" + rescue_props = {'hw_rescue_bus': 'ide'} + block_info = self._test_block_device_info( + with_eph=False, with_swap=False, with_bdms=False) + expected = { + 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'disk'}, + 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_usb_disk(self): + """Assert the disk mapping when rescuing using a USB disk""" + rescue_props = {'hw_rescue_bus': 'usb'} + block_info = self._test_block_device_info( + with_eph=False, with_swap=False, with_bdms=False) + expected = { + 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': {'bus': 'usb', 'dev': 'sda', 'type': 'disk'}, + 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_ide_cdrom(self): + """Assert the disk mapping when rescuing using an IDE cd-rom""" + rescue_props = {'hw_rescue_device': 'cdrom'} + block_info = self._test_block_device_info( + with_eph=False, with_swap=False, with_bdms=False) + expected = { + 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'}, + 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_virtio_disk_with_local(self): + """Assert the disk mapping when rescuing using a virtio disk with + default ephemeral (local) disks also attached to the instance. + """ + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info( + with_eph=False, with_swap=False, with_bdms=False) + expected = { + 'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'disk.rescue': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'}, + 'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info, with_local=True) + + def test_get_disk_mapping_stable_rescue_virtio_disk_with_eph(self): + """Assert the disk mapping when rescuing using a virtio disk with + ephemeral disks also attached to the instance. 
+ """ + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info( + with_swap=False, with_bdms=False) + expected = { + 'disk': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.eph0': { + 'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4', + 'type': 'disk'}, + 'disk.eph1': { + 'bus': 'ide', 'dev': 'vdd', 'type': 'disk'}, + 'disk.rescue': { + 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'root': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info, with_local=True) + + def test_get_disk_mapping_stable_rescue_virtio_disk_with_swap(self): + """Assert the disk mapping when rescuing using a virtio disk with + swap attached to the instance. + """ + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info( + with_eph=False, with_bdms=False) + expected = { + 'disk': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': { + 'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'}, + 'disk.swap': { + 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'root': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_virtio_disk_with_bdm(self): + """Assert the disk mapping when rescuing using a virtio disk with + volumes also attached to the instance. + """ + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info( + with_eph=False, with_swap=False) + expected = { + '/dev/sde': { + 'bus': 'scsi', 'dev': 'sde', 'type': 'disk'}, + '/dev/sdf': { + 'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'}, + 'disk': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.rescue': { + 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'root': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info) + + def test_get_disk_mapping_stable_rescue_virtio_disk_with_everything(self): + """Assert the disk mapping when rescuing using a virtio disk with + volumes, ephemerals and swap also attached to the instance. 
+ """ + rescue_props = {'hw_rescue_bus': 'virtio'} + block_info = self._test_block_device_info() + expected = { + '/dev/sde': { + 'bus': 'scsi', 'dev': 'sde', 'type': 'disk'}, + '/dev/sdf': { + 'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'}, + 'disk': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'}, + 'disk.eph0': { + 'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4', + 'type': 'disk'}, + 'disk.eph1': { + 'bus': 'ide', 'dev': 'vdd', 'type': 'disk'}, + 'disk.rescue': { + 'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'}, + 'disk.swap': { + 'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}, + 'root': { + 'boot_index': '1', 'bus': 'virtio', 'dev': 'vda', + 'type': 'disk'} + } + self._test_get_disk_mapping_stable_rescue( + rescue_props, expected, block_info, with_local=True) + def test_get_disk_mapping_lxc(self): # A simple disk mapping setup, but for lxc @@ -831,7 +1034,9 @@ def test_get_disk_bus_for_device_type_cdrom_with_q35_image_meta(self): instance = objects.Instance(**self.test_instance) - image_meta = {'properties': {'hw_machine_type': 'pc-q35-rhel8.0.0'}} + image_meta = {'properties': { + 'hw_machine_type': 'pc-q35-rhel8.0.0', + 'hw_architecture': obj_fields.Architecture.X86_64}} image_meta = objects.ImageMeta.from_dict(image_meta) bus = blockinfo.get_disk_bus_for_device_type(instance, 'kvm', image_meta, @@ -1075,6 +1280,40 @@ expected_order = ['hd', 'cdrom'] self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info)) + def _get_rescue_image_meta(self, props_dict): + meta_dict = dict(self.test_image_meta) + meta_dict['properties'] = props_dict + return objects.ImageMeta.from_dict(meta_dict) + + def test_get_rescue_device(self): + # Assert that all supported device types are returned correctly + for device in blockinfo.SUPPORTED_DEVICE_TYPES: + meta = self._get_rescue_image_meta({'hw_rescue_device': device}) + self.assertEqual(device, blockinfo.get_rescue_device(meta)) + + # Assert that disk is returned if hw_rescue_device isn't set + meta = self._get_rescue_image_meta({'hw_rescue_bus': 'virtio'}) + self.assertEqual('disk', blockinfo.get_rescue_device(meta)) + + # Assert that UnsupportedHardware is raised for unsupported devices + meta = self._get_rescue_image_meta({'hw_rescue_device': 'fs'}) + self.assertRaises(exception.UnsupportedRescueDevice, + blockinfo.get_rescue_device, meta) + + def test_get_rescue_bus(self): + # Assert that all supported device bus types are returned. Stable + # device rescue is not supported by xen or lxc so ignore these. 
+ for virt_type in ['qemu', 'kvm', 'uml', 'parallels']: + for bus in blockinfo.SUPPORTED_DEVICE_BUS[virt_type]: + meta = self._get_rescue_image_meta({'hw_rescue_bus': bus}) + self.assertEqual(bus, blockinfo.get_rescue_bus(None, virt_type, + meta, None)) + + # Assert that UnsupportedHardware is raised for unsupported devices + meta = self._get_rescue_image_meta({'hw_rescue_bus': 'xen'}) + self.assertRaises(exception.UnsupportedRescueBus, + blockinfo.get_rescue_bus, None, 'kvm', meta, 'disk') + class DefaultDeviceNamesTestCase(test.NoDBTestCase): def setUp(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_config.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_config.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_config.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_config.py 2020-04-10 17:57:58.000000000 +0000 @@ -3808,3 +3808,132 @@ """) + + +class LibvirtConfigVideoModelsTests(LibvirtConfigBaseTest): + + def test_parse_video_model(self): + + xml = """ + + """ + obj = config.LibvirtConfigDomainCapsVideoModels() + obj.parse_str(xml) + expected_models = ('vga', 'cirrus', 'vmvga', 'qxl', 'virtio') + self.assertTrue(obj.supported) + for model in expected_models: + self.assertIn(model, obj.models) + self.assertNotIn('gop', obj.models) + + +class LibvirtConfigDiskBusesTests(LibvirtConfigBaseTest): + + def test_parse_disk_buses(self): + + xml = """ + + + disk + cdrom + floppy + lun + + + ide + scsi + virtio + usb + sata + + + """ + obj = config.LibvirtConfigDomainCapsDiskBuses() + obj.parse_str(xml) + expected_buses = ('ide', 'scsi', 'virtio', 'usb', 'sata') + self.assertTrue(obj.supported) + for bus in expected_buses: + self.assertIn(bus, obj.buses) + self.assertNotIn('fdc', obj.buses) + + +class LibvirtConfigDomainCapsDevicesTests(LibvirtConfigBaseTest): + + def test_parse_domain_caps_devices(self): + + xml = """ + + + + disk + cdrom + floppy + lun + + + ide + fdc + scsi + virtio + usb + sata + + + + + sdl + vnc + spice + + + + + + subsystem + + + default + mandatory + requisite + optional + + + usb + pci + scsi + + + + + + """ + obj = config.LibvirtConfigDomainCapsDevices() + obj.parse_str(xml) + # we only use the video and disk devices today. + device_types = [config.LibvirtConfigDomainCapsDiskBuses, + config.LibvirtConfigDomainCapsVideoModels] + # so we assert there are only two device types parsed + self.assertEqual(2, len(obj.devices)) + # we then assert that the parsed devices are of the correct type + for dev in obj.devices: + self.assertIn(type(dev), device_types) + # and that the sub-devices are accessible directly via properties. 
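Put differently, callers are expected to reach the parsed sections through those properties rather than by walking obj.devices. A short sketch of such hypothetical consumer code, assuming a parse of the domainCapabilities XML shown above::

    caps = config.LibvirtConfigDomainCapsDevices()
    caps.parse_str(xml)  # the domainCapabilities snippet above

    # The properties surface the parsed device sections directly.
    assert 'virtio' in caps.disk.buses
    print(sorted(caps.video.models))  # the parsed video model names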
+ self.assertIsInstance( + obj.disk, config.LibvirtConfigDomainCapsDiskBuses) + self.assertIsInstance( + obj.video, config.LibvirtConfigDomainCapsVideoModels) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -105,7 +105,6 @@ from nova.virt import fake from nova.virt import hardware from nova.virt.image import model as imgmodel -from nova.virt import images from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import designer @@ -120,6 +119,7 @@ from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils +from nova.virt.libvirt import vif as libvirt_vif from nova.virt.libvirt.volume import volume as volume_drivers @@ -1107,6 +1107,82 @@ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr.need_legacy_block_device_info) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_cpu_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits') + def test_static_traits( + self, mock_vif_traits, mock_video_traits, mock_storage_traits, + mock_cpu_traits, + ): + """Ensure driver capabilities are correctly retrieved and cached.""" + + # we don't mock out calls to os_traits intentionally, so we need to + # return valid traits here + mock_cpu_traits.return_value = {'HW_CPU_HYPERTHREADING': True} + mock_storage_traits.return_value = {'COMPUTE_STORAGE_BUS_VIRTIO': True} + mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True} + mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True} + + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + expected = { + 'HW_CPU_HYPERTHREADING': True, + 'COMPUTE_STORAGE_BUS_VIRTIO': True, + 'COMPUTE_GRAPHICS_MODEL_VGA': True, + 'COMPUTE_NET_VIF_MODEL_VIRTIO': True, + } + + static_traits = drvr.static_traits + + # check that results are as expected and the individual helper + # functions were called once each + self.assertEqual(expected, static_traits) + for mock_traits in ( + mock_vif_traits, mock_video_traits, mock_storage_traits, + mock_cpu_traits, + ): + mock_traits.assert_called_once_with() + mock_traits.reset_mock() + + static_traits = drvr.static_traits + + # now check that the results are still as expected but the helpers + # weren't called since the value was cached + self.assertEqual(expected, static_traits) + for mock_traits in ( + mock_vif_traits, mock_video_traits, mock_storage_traits, + mock_cpu_traits, + ): + mock_traits.assert_not_called() + + @mock.patch.object(libvirt_driver.LOG, 'debug') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_cpu_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits') + def test_static_traits__invalid_trait( + self, mock_vif_traits, mock_video_traits, mock_storage_traits, + 
mock_cpu_traits, mock_log,
+    ):
+        """Ensure driver capabilities are correctly retrieved and cached."""
+        mock_cpu_traits.return_value = {'foo': True}
+        mock_storage_traits.return_value = {'bar': True}
+        mock_video_traits.return_value = {'baz': True}
+        mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        expected = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+
+        static_traits = drvr.static_traits
+
+        self.assertEqual(expected, static_traits)
+        mock_log.assert_has_calls([
+            mock.call("Trait '%s' is not valid; ignoring.", "foo"),
+            mock.call("Trait '%s' is not valid; ignoring.", "bar"),
+            mock.call("Trait '%s' is not valid; ignoring.", "baz"),
+        ],
+            any_order=True)
+
     @mock.patch.object(host.Host, "has_min_version")
     def test_min_version_start_ok(self, mock_version):
         mock_version.return_value = True
@@ -1233,23 +1309,6 @@
             break
         self.assertFalse(version_arg_found)
 
-    # NOTE(sdague): python2.7 and python3.5 have different behaviors
-    # when it comes to comparing against the sentinel, so
-    # has_min_version is needed to pass python3.5.
-    @mock.patch.object(nova.virt.libvirt.host.Host, "has_min_version",
-                       return_value=True)
-    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
-                       return_value=mock.sentinel.qemu_version)
-    def test_qemu_image_version(self, mock_get_libversion, min_ver):
-        """Test that init_host sets qemu image version
-
-        A sentinel is used here so that we aren't chasing this value
-        against minimums that get raised over time.
-        """
-        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-        drvr.init_host("dummyhost")
-        self.assertEqual(images.QEMU_VERSION, mock.sentinel.qemu_version)
-
     @mock.patch.object(fields.Architecture, "from_host",
                        return_value=fields.Architecture.PPC64)
     def test_min_version_ppc_ok(self, mock_arch):
@@ -1354,6 +1413,23 @@
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
 
+    def test__check_cpu_compatibility_aarch64_qemu_custom_start_OK(self):
+        """Test getting CPU traits when using a virt_type that doesn't
+        support the feature; only kvm and qemu support reporting CPU traits.
+        """
+        self.flags(cpu_mode='custom',
+                   cpu_models=['max'],
+                   virt_type='qemu',
+                   group='libvirt')
+        caps = vconfig.LibvirtConfigCaps()
+        caps.host = vconfig.LibvirtConfigCapsHost()
+        caps.host.cpu = vconfig.LibvirtConfigCPU()
+        caps.host.cpu.arch = fields.Architecture.AARCH64
+        with mock.patch.object(host.Host, "get_capabilities",
+                               return_value=caps):
+            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+            drvr.init_host("dummyhost")
+
     @mock.patch.object(libvirt_driver.LOG, 'warning')
     def test_check_cpu_set_configuration__no_configuration(self, mock_log):
         """Test that configuring no CPU option results no errors or logs.
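The new AArch64 test above encodes the operator-facing setup in its self.flags() call. As a rough nova.conf equivalent, kept in Python comments since this file's code is Python (the mapping is an inference from the flags, not something stated in the diff)::

    # [libvirt]
    # virt_type = qemu
    # cpu_mode = custom
    # cpu_models = max
    #
    # which the test expresses as:
    self.flags(cpu_mode='custom', cpu_models=['max'],
               virt_type='qemu', group='libvirt')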
@@ -3612,7 +3688,7 @@ caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = fields.Architecture.X86_64 caps.host.topology = fakelibvirt.NUMATopology( - sockets_per_cell=4, cores_per_socket=3, threads_per_core=2) + cpu_nodes=4, cpu_sockets=1, cpu_cores=4, cpu_threads=2) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, @@ -3628,7 +3704,7 @@ mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', - return_value=set(range(8))), + return_value=set(range(32))), ): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) @@ -7457,6 +7533,36 @@ self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) + def test_get_guest_cpu_config_qemu_custom_aarch64(self): + self.flags(cpu_mode="custom", group='libvirt', + cpu_models=["max"]) + expected = { + fields.Architecture.AARCH64: "custom", + } + + for guestarch, expect_mode in expected.items(): + caps = vconfig.LibvirtConfigCaps() + caps.host = vconfig.LibvirtConfigCapsHost() + caps.host.cpu = vconfig.LibvirtConfigCPU() + caps.host.cpu.arch = guestarch + with mock.patch.object(host.Host, "get_capabilities", + return_value=caps): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + if caps.host.cpu.arch == fields.Architecture.AARCH64: + drvr._has_uefi_support = mock.Mock(return_value=True) + instance_ref = objects.Instance(**self.test_instance) + image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + + disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, + instance_ref, + image_meta) + conf = drvr._get_guest_config(instance_ref, + _fake_network_info(self), + image_meta, disk_info) + self.assertIsInstance(conf.cpu, + vconfig.LibvirtConfigGuestCPU) + self.assertEqual(conf.cpu.mode, expect_mode) + @mock.patch.object(libvirt_driver.LOG, 'warning') def test_get_guest_cpu_config_custom_with_extra_flags(self, mock_warn): @@ -8448,7 +8554,7 @@ mock_volume_driver.connect_volume.assert_called_once_with( connection_info, instance) mock_attach_encryptor.assert_called_once_with( - self.context, connection_info, encryption, True) + self.context, connection_info, encryption) mock_volume_driver.disconnect_volume.assert_not_called() @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_driver') @@ -8479,18 +8585,18 @@ mock_volume_driver.connect_volume.assert_called_once_with( connection_info, instance) mock_attach_encryptor.assert_called_once_with( - self.context, connection_info, encryption, True) + self.context, connection_info, encryption) mock_volume_driver.disconnect_volume.assert_called_once_with( connection_info, instance) @mock.patch.object(key_manager, 'API') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption') - @mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks') + @mock.patch.object(libvirt_driver.LibvirtDriver, '_is_luks_v1') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor') @mock.patch('nova.virt.libvirt.host.Host') @mock.patch('os_brick.encryptors.luks.is_luks') - def test_connect_volume_native_luks(self, mock_is_luks, mock_host, - mock_get_volume_encryptor, mock_use_native_luks, + def test_connect_volume_luks(self, mock_is_volume_luks, mock_host, + mock_get_volume_encryptor, mock_is_luks_v1, mock_get_volume_encryption, mock_get_key_mgr): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -8505,7 +8611,7 @@ # Mock out the encryptors mock_encryptor = mock.Mock() 
mock_get_volume_encryptor.return_value = mock_encryptor - mock_is_luks.return_value = True + mock_is_volume_luks.return_value = True # Mock out the key manager key = u'3734363537333734' @@ -8517,9 +8623,9 @@ mock_key.get_encoded.return_value = key_encoded # assert that the secret is created for the encrypted volume during - # _connect_volume when use_native_luks is True + # _connect_volume when _is_luks_v1 is True mock_get_volume_encryption.return_value = encryption - mock_use_native_luks.return_value = True + mock_is_luks_v1.return_value = True drvr._connect_volume(self.context, connection_info, instance, encryption=encryption) @@ -8527,10 +8633,10 @@ uuids.volume_id, password=key) mock_encryptor.attach_volume.assert_not_called() - # assert that the encryptor is used if use_native_luks is False + # assert that the encryptor is used if is_luks is False drvr._host.create_secret.reset_mock() mock_get_volume_encryption.reset_mock() - mock_use_native_luks.return_value = False + mock_is_luks_v1.return_value = False drvr._connect_volume(self.context, connection_info, instance, encryption=encryption) @@ -8538,26 +8644,17 @@ mock_encryptor.attach_volume.assert_called_once_with(self.context, **encryption) - # assert that we format the volume if is_luks is False - mock_use_native_luks.return_value = True - mock_is_luks.return_value = False + # assert that we format the volume if it is not already formatted + mock_is_luks_v1.return_value = True + mock_is_volume_luks.return_value = False drvr._connect_volume(self.context, connection_info, instance, encryption=encryption) mock_encryptor._format_volume.assert_called_once_with(key, **encryption) - # assert that os-brick is used when allow_native_luks is False - mock_encryptor.attach_volume.reset_mock() - mock_is_luks.return_value = True - - drvr._connect_volume(self.context, connection_info, instance, - encryption=encryption, allow_native_luks=False) - mock_encryptor.attach_volume.assert_called_once_with(self.context, - **encryption) - @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryptor') - def test_disconnect_volume_native_luks(self, mock_get_volume_encryptor): + def test_disconnect_volume_luks(self, mock_get_volume_encryptor): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._host = mock.Mock() drvr._host.find_secret.return_value = mock.Mock() @@ -9026,25 +9123,27 @@ 'access_mode': 'rw'} } - new_size_in_kb = 20 * 1024 * 1024 + new_size = 20 * units.Gi guest = mock.Mock(spec=libvirt_guest.Guest) # block_device - block_device = mock.Mock(spec=libvirt_guest.BlockDevice) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) + block_device.resize = mock.Mock() guest.get_block_device = mock.Mock(return_value=block_device) drvr._host.get_guest = mock.Mock(return_value=guest) - drvr._extend_volume = mock.Mock(return_value=new_size_in_kb) + drvr._extend_volume = mock.Mock(return_value=new_size) for state in (power_state.RUNNING, power_state.PAUSED): guest.get_power_state = mock.Mock(return_value=state) - drvr.extend_volume(connection_info, - instance, new_size_in_kb * 1024) + drvr.extend_volume( + self.context, connection_info, instance, new_size) drvr._extend_volume.assert_called_with(connection_info, instance, - new_size_in_kb * 1024) + new_size) guest.get_block_device.assert_called_with('/fake') - block_device.resize.assert_called_with(20480) + block_device.resize.assert_called_with(new_size) def test_extend_volume_with_volume_driver_without_support(self): drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -9054,7 +9153,7 @@ side_effect=NotImplementedError()): connection_info = {'driver_volume_type': 'fake'} self.assertRaises(exception.ExtendVolumeNotSupported, - drvr.extend_volume, + drvr.extend_volume, self.context, connection_info, instance, 0) def test_extend_volume_disk_not_found(self): @@ -9065,16 +9164,15 @@ 'data': {'device_path': '/fake', 'access_mode': 'rw'} } - new_size_in_kb = 20 * 1024 * 1024 + new_size = 20 * units.Gi xml_no_disk = "" dom = fakelibvirt.Domain(drvr._get_connection(), xml_no_disk, False) guest = libvirt_guest.Guest(dom) guest.get_power_state = mock.Mock(return_value=power_state.RUNNING) drvr._host.get_guest = mock.Mock(return_value=guest) - drvr._extend_volume = mock.Mock(return_value=new_size_in_kb) - - drvr.extend_volume(connection_info, instance, new_size_in_kb * 1024) + drvr._extend_volume = mock.Mock(return_value=new_size) + drvr.extend_volume(self.context, connection_info, instance, new_size) def test_extend_volume_with_instance_not_found(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -9088,7 +9186,7 @@ ) as (_get_domain, _extend_volume): connection_info = {'driver_volume_type': 'fake'} self.assertRaises(exception.InstanceNotFound, - drvr.extend_volume, + drvr.extend_volume, self.context, connection_info, instance, 0) def test_extend_volume_with_libvirt_error(self): @@ -9099,21 +9197,22 @@ 'data': {'device_path': '/fake', 'access_mode': 'rw'} } - new_size_in_kb = 20 * 1024 * 1024 + new_size = 20 * units.Gi guest = mock.Mock(spec=libvirt_guest.Guest) guest.get_power_state = mock.Mock(return_value=power_state.RUNNING) # block_device - block_device = mock.Mock(spec=libvirt_guest.BlockDevice) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) block_device.resize = mock.Mock( side_effect=fakelibvirt.libvirtError('ERR')) guest.get_block_device = mock.Mock(return_value=block_device) drvr._host.get_guest = mock.Mock(return_value=guest) - drvr._extend_volume = mock.Mock(return_value=new_size_in_kb) + drvr._extend_volume = mock.Mock(return_value=new_size) self.assertRaises(fakelibvirt.libvirtError, - drvr.extend_volume, - connection_info, instance, new_size_in_kb * 1024) + drvr.extend_volume, self.context, + connection_info, instance, new_size) def test_extend_volume_with_no_device_path_attribute(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -9126,11 +9225,13 @@ 'volume_id': '58a84f6d-3f0c-4e19-a0af-eb657b790657', 'access_mode': 'rw'} } - new_size_in_kb = 20 * 1024 * 1024 + new_size = 20 * units.Gi guest = mock.Mock(spec=libvirt_guest.Guest) # block_device - block_device = mock.Mock(spec=libvirt_guest.BlockDevice) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) + block_device.resize = mock.Mock() disk = mock.Mock( spec=vconfig.LibvirtConfigGuestDisk, @@ -9139,17 +9240,16 @@ guest.get_block_device = mock.Mock(return_value=block_device) guest.get_all_disks = mock.Mock(return_value=[disk]) drvr._host.get_guest = mock.Mock(return_value=guest) - drvr._extend_volume = mock.Mock(return_value=new_size_in_kb) + drvr._extend_volume = mock.Mock(return_value=new_size) for state in (power_state.RUNNING, power_state.PAUSED): guest.get_power_state = mock.Mock(return_value=state) - drvr.extend_volume(connection_info, instance, - new_size_in_kb * 1024) - drvr._extend_volume.assert_called_with(connection_info, - instance, - new_size_in_kb * 1024) + drvr.extend_volume(self.context, connection_info, 
instance, + new_size) + drvr._extend_volume.assert_called_with(connection_info, instance, + new_size) guest.get_block_device.assert_called_with('vdb') - block_device.resize.assert_called_with(20480) + block_device.resize.assert_called_with(new_size) def test_extend_volume_no_disk_found_by_serial(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @@ -9162,11 +9262,12 @@ 'volume_id': '58a84f6d-3f0c-4e19-a0af-eb657b790657', 'access_mode': 'rw'} } - new_size_in_kb = 20 * 1024 * 1024 + new_size = 20 * units.Gi guest = mock.Mock(spec=libvirt_guest.Guest) # block_device - block_device = mock.Mock(spec=libvirt_guest.BlockDevice) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) block_device.resize = mock.Mock() disk = mock.Mock( spec=vconfig.LibvirtConfigGuestDisk, @@ -9175,18 +9276,205 @@ guest.get_block_device = mock.Mock(return_value=block_device) guest.get_all_disks = mock.Mock(return_value=[disk]) drvr._host.get_guest = mock.Mock(return_value=guest) - drvr._extend_volume = mock.Mock(return_value=new_size_in_kb) + drvr._extend_volume = mock.Mock(return_value=new_size) guest.get_power_state = mock.Mock(return_value=power_state.RUNNING) self.assertRaises( exception.VolumeNotFound, drvr.extend_volume, + self.context, connection_info, instance, - new_size_in_kb * 1024 + new_size ) @mock.patch('os_brick.encryptors.get_encryption_metadata') + def test_extend_volume_luksv1_unknown_path(self, + mock_get_encryption_metadata): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + connection_info = { + 'serial': uuids.volume_id, + 'driver_volume_type': 'fake', + 'data': {'access_mode': 'rw'} + } + + disk_1 = mock.Mock(spec=vconfig.LibvirtConfigGuestDisk, + serial=uuids.volume_id, + target_dev=mock.sentinel.disk_1_target_dev) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) + guest = mock.Mock(spec=libvirt_guest.Guest) + guest.get_block_device.return_value = block_device + guest.get_power_state.return_value = power_state.RUNNING + guest.get_all_disks.return_value = [disk_1] + + # The requested_size is provided to extend_volume in bytes. + new_size = 20 * units.Gi + + drvr._host.get_guest = mock.Mock(return_value=guest) + drvr._extend_volume = mock.Mock(return_value=new_size) + + mock_get_encryption_metadata.return_value = { + 'provider': 'luks', + 'control_location': 'front-end'} + + # Assert that DiskNotFound is raised + self.assertRaises(exception.DiskNotFound, drvr.extend_volume, + self.context, connection_info, instance, new_size) + + # Assert that resize is not called + block_device.resize.assert_not_called() + + @mock.patch('os_brick.encryptors.get_encryption_metadata') + @mock.patch('nova.virt.images.privileged_qemu_img_info') + def test_extend_volume_luksv1_DiskNotFound(self, mock_qemu_img_info, + mock_get_encryption_metadata): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + connection_info = { + 'serial': uuids.volume_id, + 'driver_volume_type': 'fake', + 'data': {'device_path': mock.sentinel.device_path, + 'access_mode': 'rw'} + } + + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) + guest = mock.Mock(spec=libvirt_guest.Guest) + guest.get_block_device.return_value = block_device + guest.get_power_state.return_value = power_state.RUNNING + + # The requested_size is provided to extend_volume in bytes. 
+ new_size = 20 * units.Gi + + drvr._host.get_guest = mock.Mock(return_value=guest) + drvr._extend_volume = mock.Mock(return_value=new_size) + + mock_qemu_img_info.side_effect = exception.DiskNotFound( + location=mock.sentinel.device_path) + mock_get_encryption_metadata.return_value = { + 'provider': 'luks', + 'control_location': 'front-end'} + + # Assert that DiskNotFound is raised + self.assertRaises(exception.DiskNotFound, drvr.extend_volume, + self.context, connection_info, instance, new_size) + + # Assert that resize is not called + block_device.resize.assert_not_called() + + @mock.patch('os_brick.encryptors.get_encryption_metadata') + @mock.patch('nova.virt.images.privileged_qemu_img_info') + def test_extend_volume_luksv1_block(self, mock_qemu_img_info, + mock_get_encryption_metadata): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + connection_info = { + 'serial': uuids.volume_id, + 'driver_volume_type': 'fake', + 'data': {'device_path': mock.sentinel.device_path, + 'access_mode': 'rw'} + } + + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk) + guest = mock.Mock(spec=libvirt_guest.Guest) + guest.get_block_device.return_value = block_device + guest.get_power_state.return_value = power_state.RUNNING + + # The requested_size is provided to extend_volume in bytes. + new_size = 20 * units.Gi + # The LUKSv1 payload offset as reported by qemu-img info in bytes. + payload_offset = 2048 * units.Ki + # The new size is provided to Libvirt virDomainBlockResize + new_size_minus_offset = new_size - payload_offset + + drvr._host.get_guest = mock.Mock(return_value=guest) + drvr._extend_volume = mock.Mock(return_value=new_size) + + mock_qemu_img_info.return_value = mock.Mock( + format_specific={'data': {'payload-offset': payload_offset}}) + mock_get_encryption_metadata.return_value = { + 'provider': 'luks', + 'control_location': 'front-end'} + + # Extend the volume to new_size + drvr.extend_volume(self.context, connection_info, instance, new_size) + + # Assert that the expected calls are made prior to the device resize. + drvr._host.get_guest.assert_called_once_with(instance) + guest.get_power_state.assert_called_once_with(drvr._host) + guest.get_block_device(mock.sentinel.device_path) + + mock_get_encryption_metadata.assert_called_once_with( + self.context, drvr._volume_api, uuids.volume_id, connection_info) + mock_qemu_img_info.assert_called_once_with( + mock.sentinel.device_path, output_format='json') + + # Assert that the Libvirt call to resize the device within the instance + # is called with the LUKSv1 payload offset taken into account. 
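The offset handling asserted here (and in the rbd variant below) reduces to simple arithmetic; a worked sketch with the exact values these tests use::

    from oslo_utils import units

    new_size = 20 * units.Gi          # requested size, in bytes
    payload_offset = 2048 * units.Ki  # LUKSv1 header per qemu-img info

    # The decrypted, guest-visible disk is smaller than the Cinder
    # volume by the header size, so that is the value handed to
    # virDomainBlockResize.
    assert new_size - payload_offset == 21472739328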
+ block_device.resize.assert_called_once_with(new_size_minus_offset) + + @mock.patch('os_brick.encryptors.get_encryption_metadata') + @mock.patch('nova.virt.images.privileged_qemu_img_info') + def test_extend_volume_luksv1_rbd(self, mock_qemu_img_info, + mock_get_encryption_metadata): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + connection_info = { + 'serial': uuids.volume_id, + 'driver_volume_type': 'rbd', + 'data': {'name': 'pool/volume', + 'access_mode': 'rw'} + } + disk_1 = mock.Mock(spec=vconfig.LibvirtConfigGuestDisk, + serial=uuids.volume_id, + target_dev=mock.sentinel.disk_1_target_dev) + disk_2 = mock.Mock(spec=vconfig.LibvirtConfigGuestDisk, + serial=uuids.extra_volume_id, + target_dev=mock.sentinel.disk_2_target_dev) + block_device = mock.Mock(spec=libvirt_guest.BlockDevice, + _disk=mock.sentinel.disk_1_target_dev) + guest = mock.Mock(spec=libvirt_guest.Guest) + guest.get_block_device.return_value = block_device + guest.get_power_state.return_value = power_state.RUNNING + guest.get_all_disks.return_value = [disk_1, disk_2] + + # The requested_size is provided to extend_volume in bytes. + new_size = 20 * units.Gi + # The LUKSv1 payload offset as reported by qemu-img info in bytes. + payload_offset = 2048 * units.Ki + # The new size is provided to Libvirt virDomainBlockResize + new_size_minus_offset = new_size - payload_offset + + drvr._host.get_guest = mock.Mock(return_value=guest) + drvr._extend_volume = mock.Mock(return_value=new_size) + + mock_qemu_img_info.return_value = mock.Mock( + format_specific={'data': {'payload-offset': payload_offset}}) + mock_get_encryption_metadata.return_value = { + 'provider': 'luks', + 'control_location': 'front-end'} + + # Extend the volume to new_size + drvr.extend_volume(self.context, connection_info, instance, new_size) + + # Assert that the expected calls are made prior to the device resize. + drvr._host.get_guest.assert_called_once_with(instance) + guest.get_power_state.assert_called_once_with(drvr._host) + guest.get_block_device(mock.sentinel.disk_1_target_dev) + + mock_get_encryption_metadata.assert_called_once_with( + self.context, drvr._volume_api, uuids.volume_id, connection_info) + mock_qemu_img_info.assert_called_once_with( + 'rbd:pool/volume', output_format='json') + + # Assert that the Libvirt call to resize the device within the instance + # is called with the LUKSv1 payload offset taken into account. 
+ block_device.resize.assert_called_once_with(new_size_minus_offset) + + @mock.patch('os_brick.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') def test_use_encryptor_connection_info_incomplete(self, mock_get_encryptor, mock_get_metadata): @@ -9195,7 +9483,7 @@ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'data': {}} - drvr._attach_encryptor(self.context, connection_info, None, False) + drvr._attach_encryptor(self.context, connection_info, None) mock_get_metadata.assert_not_called() mock_get_encryptor.assert_not_called() @@ -9213,7 +9501,7 @@ connection_info = {'data': {'volume_id': uuids.volume_id}} mock_get_metadata.return_value = encryption - drvr._attach_encryptor(self.context, connection_info, None, False) + drvr._attach_encryptor(self.context, connection_info, None) mock_get_metadata.assert_called_once_with(self.context, drvr._volume_api, uuids.volume_id, connection_info) @@ -9231,8 +9519,7 @@ encryption = {} connection_info = {'data': {'volume_id': uuids.volume_id}} - drvr._attach_encryptor(self.context, connection_info, encryption, - False) + drvr._attach_encryptor(self.context, connection_info, encryption) mock_get_metadata.assert_not_called() mock_get_encryptor.assert_not_called() @@ -9247,11 +9534,12 @@ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_encryptor = mock.MagicMock() mock_get_encryptor.return_value = mock_encryptor - encryption = {'provider': 'luks', 'control_location': 'front-end'} + encryption = {'provider': encryptors.PLAIN, + 'control_location': 'front-end'} mock_get_metadata.return_value = encryption connection_info = {'data': {'volume_id': uuids.volume_id}} - drvr._attach_encryptor(self.context, connection_info, None, False) + drvr._attach_encryptor(self.context, connection_info, None) mock_get_metadata.assert_called_once_with(self.context, drvr._volume_api, uuids.volume_id, connection_info) @@ -9271,11 +9559,11 @@ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_encryptor = mock.MagicMock() mock_get_encryptor.return_value = mock_encryptor - encryption = {'provider': 'luks', 'control_location': 'front-end'} + encryption = {'provider': encryptors.PLAIN, + 'control_location': 'front-end'} connection_info = {'data': {'volume_id': uuids.volume_id}} - drvr._attach_encryptor(self.context, connection_info, - encryption, False) + drvr._attach_encryptor(self.context, connection_info, encryption) mock_get_metadata.assert_not_called() mock_get_encryptor.assert_called_once_with(connection_info, @@ -9307,10 +9595,10 @@ mock_key_mgr.get.return_value = mock_key mock_key.get_encoded.return_value = key_encoded - with mock.patch.object(drvr, '_use_native_luks', return_value=True): + with mock.patch.object(drvr, '_is_luks_v1', return_value=True): with mock.patch.object(drvr._host, 'create_secret') as crt_scrt: drvr._attach_encryptor(self.context, connection_info, - encryption, allow_native_luks=True) + encryption) mock_get_metadata.assert_not_called() mock_get_encryptor.assert_not_called() @@ -9369,9 +9657,9 @@ @mock.patch('os_brick.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1') def test_detach_encryptor_encrypted_volume_meta_missing(self, - mock_use_native_luks, mock_get_encryptor, mock_get_metadata): + mock_is_luks_v1, 
mock_get_encryptor, mock_get_metadata): """Assert that if missing the encryption metadata of an encrypted volume is fetched and then used to detach the encryptor for the volume. """ @@ -9381,7 +9669,7 @@ encryption = {'provider': 'luks', 'control_location': 'front-end'} mock_get_metadata.return_value = encryption connection_info = {'data': {'volume_id': uuids.volume_id}} - mock_use_native_luks.return_value = False + mock_is_luks_v1.return_value = False drvr._detach_encryptor(self.context, connection_info, None) @@ -9393,9 +9681,9 @@ @mock.patch('os_brick.encryptors.get_encryption_metadata') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1') def test_detach_encryptor_encrypted_volume_meta_provided(self, - mock_use_native_luks, mock_get_encryptor, mock_get_metadata): + mock_is_luks_v1, mock_get_encryptor, mock_get_metadata): """Assert that when provided there are no further attempts to fetch the encryption metadata for the volume and that the provided metadata is then used to detach the volume. @@ -9405,7 +9693,7 @@ mock_get_encryptor.return_value = mock_encryptor encryption = {'provider': 'luks', 'control_location': 'front-end'} connection_info = {'data': {'volume_id': uuids.volume_id}} - mock_use_native_luks.return_value = False + mock_is_luks_v1.return_value = False drvr._detach_encryptor(self.context, connection_info, encryption) @@ -9415,10 +9703,10 @@ mock_encryptor.detach_volume.assert_called_once_with(**encryption) @mock.patch('nova.virt.libvirt.host.Host.find_secret') - @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._use_native_luks') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._is_luks_v1') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor') def test_detach_encryptor_native_luks_device_path_secret_missing(self, - mock_get_encryptor, mock_use_native_luks, mock_find_secret): + mock_get_encryptor, mock_is_luks_v1, mock_find_secret): """Assert that the encryptor is not built when native LUKS is available, the associated volume secret is missing and device_path is also missing from the connection_info. @@ -9428,33 +9716,28 @@ 'encryption_key_id': uuids.encryption_key_id} connection_info = {'data': {'volume_id': uuids.volume_id}} mock_find_secret.return_value = False - mock_use_native_luks.return_value = True + mock_is_luks_v1.return_value = True drvr._detach_encryptor(self.context, connection_info, encryption) mock_find_secret.assert_called_once_with('volume', uuids.volume_id) mock_get_encryptor.assert_not_called() - @mock.patch.object(host.Host, "has_min_version") - def test_use_native_luks(self, mock_has_min_version): + def test_is_luks_v1(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - # The required QEMU and Libvirt versions are always available - # on the host and a valid LUKS provider is present within the - # encryption metadata dict. 
- mock_has_min_version.return_value = True - self.assertFalse(drvr._use_native_luks({})) - self.assertFalse(drvr._use_native_luks({ + self.assertFalse(drvr._is_luks_v1({})) + self.assertFalse(drvr._is_luks_v1({ 'provider': 'nova.volume.encryptors.cryptsetup.CryptSetupEncryptor' })) - self.assertFalse(drvr._use_native_luks({ + self.assertFalse(drvr._is_luks_v1({ 'provider': 'CryptSetupEncryptor'})) - self.assertFalse(drvr._use_native_luks({ + self.assertFalse(drvr._is_luks_v1({ 'provider': encryptors.PLAIN})) - self.assertTrue(drvr._use_native_luks({ + self.assertTrue(drvr._is_luks_v1({ 'provider': 'nova.volume.encryptors.luks.LuksEncryptor'})) - self.assertTrue(drvr._use_native_luks({ + self.assertTrue(drvr._is_luks_v1({ 'provider': 'LuksEncryptor'})) - self.assertTrue(drvr._use_native_luks({ + self.assertTrue(drvr._is_luks_v1({ 'provider': encryptors.LUKS})) def test_multi_nic(self): @@ -10915,7 +11198,8 @@ drvr._live_migration_uri(target_connection), params=params, flags=0) mock_updated_guest_xml.assert_called_once_with( - guest, migrate_data, mock.ANY, get_vif_config=None) + guest, migrate_data, mock.ANY, get_vif_config=None, + new_resources=None) def test_live_migration_update_vifs_xml(self): """Tests that when migrate_data.vifs is populated, the destination @@ -10942,7 +11226,8 @@ fake_xml = '' def fake_get_updated_guest_xml(guest, migrate_data, get_volume_config, - get_vif_config=None): + get_vif_config=None, + new_resources=None): self.assertIsNotNone(get_vif_config) return fake_xml @@ -12887,7 +13172,6 @@ is_shared_block_storage=False, is_shared_instance_path=False, serial_listen_ports=[], - src_supports_native_luks=True, supported_perf_events=[], graphics_listen_addr_spice='127.0.0.1', graphics_listen_addr_vnc='127.0.0.1', @@ -12921,21 +13205,13 @@ self._test_pre_live_migration_works_correctly_mocked( target_ret=target_ret) - def test_pre_live_migration_only_dest_supports_native_luks(self): - # Assert that allow_native_luks is False when src_supports_native_luks - # is missing from migrate data during a P to Q LM. 
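The test_is_luks_v1 assertions above pin down what the renamed helper must classify: the legacy CryptSetupEncryptor names and the plain provider are rejected, while the LuksEncryptor spellings match. A minimal sketch of provider matching consistent with those assertions (an assumption for illustration, not the driver's actual implementation)::

    from os_brick import encryptors

    _LUKS_V1_PROVIDERS = (
        'nova.volume.encryptors.luks.LuksEncryptor',
        'LuksEncryptor',
        encryptors.LUKS,
    )

    def is_luks_v1(encryption):
        # PLAIN and the CryptSetupEncryptor spellings must not match.
        return (encryption or {}).get('provider') in _LUKS_V1_PROVIDERS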
- self._test_pre_live_migration_works_correctly_mocked( - src_supports_native_luks=None, dest_supports_native_luks=True, - allow_native_luks=False) - @mock.patch.object(libvirt_driver.LibvirtDriver, 'plug_vifs') @mock.patch.object(libvirt_driver.LibvirtDriver, '_connect_volume') @mock.patch('nova.virt.libvirt.utils.file_open', side_effect=[io.BytesIO(b''), io.BytesIO(b'')]) def _test_pre_live_migration_works_correctly_mocked( self, mock_file_open, mock_connect, mock_plug, - target_ret=None, src_supports_native_luks=True, - dest_supports_native_luks=True, allow_native_luks=True): + target_ret=None): # Creating testdata c = context.get_admin_context() instance = objects.Instance(root_device_name='/dev/vda', @@ -12989,8 +13265,7 @@ expected_connect_calls = [] for v in block_device_info['block_device_mapping']: expected_connect_calls.append( - mock.call(c, v['connection_info'], instance, - allow_native_luks=allow_native_luks)) + mock.call(c, v['connection_info'], instance)) migrate_data = migrate_data_obj.LibvirtLiveMigrateData( block_migration=False, @@ -13004,10 +13279,6 @@ if not target_ret: target_ret = self._generate_target_ret() - if src_supports_native_luks: - migrate_data.src_supports_native_luks = True - else: - target_ret.pop('src_supports_native_luks') result = drvr.pre_live_migration( c, instance, block_device_info, nw_info, None, migrate_data=migrate_data) @@ -13168,7 +13439,6 @@ disk_available_mb=123, image_type='qcow2', filename='foo', - src_supports_native_luks=True, ) expected_migrate_data = migrate_data_obj.LibvirtLiveMigrateData( @@ -13184,7 +13454,6 @@ serial_listen_ports=[], supported_perf_events=[], target_connect_addr=None, - src_supports_native_luks=True ) bdmi_vol1 = migrate_data_obj.LibvirtLiveMigrateBDMInfo() @@ -13222,7 +13491,7 @@ expected_connect_volume_calls = [] for bdm in block_device_info['block_device_mapping']: expected_call = mock.call(self.context, bdm['connection_info'], - inst_ref, allow_native_luks=True) + inst_ref) expected_connect_volume_calls.append(expected_call) mock_connect_volume.assert_has_calls(expected_connect_volume_calls) @@ -13857,6 +14126,49 @@ def test_spawn_power_on_false(self): self.test_spawn_with_network_info(power_on=False) + @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') + def _test_spawn_accels(self, accel_info, mock_get_disk_info): + mock_get_disk_info.return_value = {'mapping': None} + self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.' + '_create_image', lambda *a, **kw: None) + self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.' + '_ensure_console_log_for_instance', + lambda *a, **kw: None) + self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.' + '_allocate_mdevs', lambda *a, **kw: None) + self.stub_out('nova.virt.libvirt.driver.LibvirtDriver.' 
+                      '_create_domain_and_network', lambda *a, **kw: None)
+
+        instance = objects.Instance(**self.test_instance)
+        instance.image_ref = uuids.image_ref
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+        drvr.spawn(self.context, instance, image_meta, [], None, {},
+                   accel_info=accel_info, power_on=False)
+        return instance
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
+    def test_spawn_accels_no_accel_info(self, mock_get_guest_xml):
+        # accel_info should be passed to get_guest_xml even if it is []
+        accel_info = []
+        instance = self._test_spawn_accels(accel_info)
+        mock_get_guest_xml.assert_called_once_with(
+            self.context, instance, mock.ANY, mock.ANY, mock.ANY,
+            block_device_info=None, mdevs=mock.ANY,
+            accel_info=[])
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
+    def test_spawn_accels_with_accel_info(self, mock_get_guest_xml):
+        # accel_info should be passed to get_guest_xml if it is not []
+        accel_info = nova_fixtures.CyborgFixture.bound_arq_list
+        instance = self._test_spawn_accels(accel_info)
+        mock_get_guest_xml.assert_called_once_with(
+            self.context, instance, mock.ANY, mock.ANY, mock.ANY,
+            block_device_info=None, mdevs=mock.ANY,
+            accel_info=accel_info)
+
     # Methods called directly by spawn()
     @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
     @mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -15265,9 +15577,10 @@
 
         backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
 
+        accel_info = [{'k1': 'v1', 'k2': 'v2'}]
         with mock.patch('os.path.exists', return_value=True):
             drvr._hard_reboot(self.context, instance, network_info,
-                              block_device_info)
+                              block_device_info, accel_info=accel_info)
 
         disks = backend.disks
 
@@ -15292,7 +15605,8 @@
 
         mock_get_guest_xml.assert_called_once_with(self.context, instance,
             network_info, mock.ANY, mock.ANY,
-            block_device_info=block_device_info, mdevs=[uuids.mdev1])
+            block_device_info=block_device_info, mdevs=[uuids.mdev1],
+            accel_info=accel_info)
         mock_create_domain_and_network.assert_called_once_with(self.context,
             dummyxml, instance, network_info,
             block_device_info=block_device_info, vifs_already_plugged=True)
@@ -15796,8 +16110,11 @@
         mock_domain.undefine.assert_called_once_with()
         mock_save.assert_called_once_with()
 
+    @mock.patch('nova.utils.get_image_from_system_metadata')
     @mock.patch.object(objects.Instance, 'save')
-    def test_destroy_removes_nvram(self, mock_save):
+    def test_destroy_removes_nvram_host_support_uefi(self,
+                                                     mock_save,
+                                                     mock_image):
         mock_domain = mock.Mock(fakelibvirt.virDomain)
         mock_domain.ID.return_value = 123
 
@@ -15809,15 +16126,47 @@
             state=power_state.SHUTDOWN, internal_id=-1))
 
         instance = objects.Instance(self.context, **self.test_instance)
+        mock_image.return_value = {"properties": {
+            "hw_firmware_type": "bios"}}
         drvr.destroy(self.context, instance, [])
 
         self.assertEqual(1, mock_domain.ID.call_count)
         mock_domain.destroy.assert_called_once_with()
-        # undefineFlags should now be called with 5 as uefi us supported
+        # The NVRAM flag should not be passed when only the host supports uefi
         mock_domain.undefineFlags.assert_called_once_with(
+            fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
+        )
+        mock_domain.undefine.assert_not_called()
+        mock_save.assert_called_once_with()
+
+    @mock.patch('nova.utils.get_image_from_system_metadata')
+    @mock.patch.object(objects.Instance, 'save')
+    def test_destroy_removes_nvram_host_and_guest_support_uefi(self,
+                                                               mock_save,
+                                                               mock_image):
+        mock_domain = mock.Mock(fakelibvirt.virDomain)
+        mock_domain.ID.return_value = 123
+
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        drvr._host._get_domain = mock.Mock(return_value=mock_domain)
+        drvr._has_uefi_support = mock.Mock(return_value=True)
+        drvr.delete_instance_files = mock.Mock(return_value=None)
+        drvr.get_info = mock.Mock(return_value=hardware.InstanceInfo(
+            state=power_state.SHUTDOWN, internal_id=-1))
+
+        instance = objects.Instance(self.context, **self.test_instance)
+        mock_image.return_value = {"properties": {
+            "hw_firmware_type": "uefi"}}
+        drvr.destroy(self.context, instance, [])
+
+        self.assertEqual(1, mock_domain.ID.call_count)
+        mock_domain.destroy.assert_called_once_with()
+        # undefineFlags should now be called with 5 as uefi is supported
+        # by both host and guest
+        mock_domain.undefineFlags.assert_has_calls([mock.call(
             fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE |
             fakelibvirt.VIR_DOMAIN_UNDEFINE_NVRAM
-        )
+        )])
         mock_domain.undefine.assert_not_called()
         mock_save.assert_called_once_with()
 
@@ -16058,10 +16407,10 @@
                              'over_committed_disk_size': '10653532160'}],
             'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2',
-                                 'virt_disk_size': '0',
-                                 'backing_file': '/somepath/disk2',
-                                 'disk_size': '10737418240',
-                                 'over_committed_disk_size': '0'}]}
+                                 'virt_disk_size': '10737418240',
+                                 'backing_file': '',
+                                 'disk_size': '5350000000',
+                                 'over_committed_disk_size': '5387418240'}]}
 
         def get_info(cfg, block_device_info):
             return fake_disks.get(cfg.name)
@@ -16081,7 +16430,7 @@
             mock_info.side_effect = get_info
 
             result = drvr._get_disk_over_committed_size_total()
-            self.assertEqual(result, 10653532160)
+            self.assertEqual(result, 16040950400)
             mock_list.assert_called_once_with(only_running=False)
             self.assertEqual(2, mock_info.call_count)
 
@@ -16300,6 +16649,35 @@
             self.assertEqual(disk_info[0]['disk_size'],
                              disk_info[0]['virt_disk_size'])
 
+    @mock.patch('os.stat')
+    @mock.patch('os.path.getsize')
+    def test_get_instance_disk_info_from_config_raw_files(self,
+                                      mock_getsize, mock_stat):
+        """Test that over_committed_disk_size is calculated also for the raw
+        images_type, since the disk can be sparsely allocated if the
+        [compute]/preallocate_images option is not set to space.
+ """ + config = vconfig.LibvirtConfigGuest() + disk_config = vconfig.LibvirtConfigGuestDisk() + disk_config.source_type = "file" + disk_config.source_path = "fake" + disk_config.driver_format = "raw" + config.devices.append(disk_config) + + disk_virtual_size = 53687091200 + disk_actual_size = 3687091200 + disk_actual_size_blocks = disk_actual_size / 512 + expected_over_committed_disk_size = disk_virtual_size -\ + disk_actual_size + + mock_getsize.return_value = disk_virtual_size + mock_stat.return_value = mock.Mock(st_blocks=disk_actual_size_blocks) + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + disk_info = drvr._get_instance_disk_info_from_config(config, None) + + self.assertEqual(expected_over_committed_disk_size, + disk_info[0]['over_committed_disk_size']) + def test_cpu_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) @@ -18497,6 +18875,19 @@ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') + def test_cleanup_pass_with_no_connection_info(self, undefine, unplug): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + drvr._disconnect_volume = mock.Mock() + fake_inst = objects.Instance(**self.test_instance) + fake_bdms = [{'connection_info': None}] + with mock.patch('nova.virt.driver' + '.block_device_info_get_mapping', + return_value=fake_bdms): + drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False) + self.assertFalse(drvr._disconnect_volume.called) + + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') def test_cleanup_pass_with_no_mount_device(self, undefine, unplug): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) drvr._disconnect_volume = mock.Mock() @@ -18566,11 +18957,11 @@ save.assert_called_once_with() @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_volume_encryption') - @mock.patch.object(libvirt_driver.LibvirtDriver, '_use_native_luks') - def test_swap_volume_native_luks_blocked(self, mock_use_native_luks, + @mock.patch.object(libvirt_driver.LibvirtDriver, '_is_luks_v1') + def test_swap_volume_native_luks_blocked(self, mock_is_luks_v1, mock_get_encryption): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) - mock_use_native_luks.return_value = True + mock_is_luks_v1.return_value = True # dest volume is encrypted mock_get_encryption.side_effect = [{}, {'provider': 'luks'}] @@ -18588,11 +18979,102 @@ self.assertRaises(NotImplementedError, drvr.swap_volume, self.context, {}, {}, None, None, None) + @mock.patch.object(fakelibvirt.Connection, 'getVersion') + @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') + @mock.patch('nova.virt.libvirt.host.Host.write_instance_config') + def test_swap_volume_copy(self, mock_write_instance_config, + mock_libvirt_ver, mock_qemu_ver): + """Assert the happy path of calling virDomainBlockCopy to swap""" + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + + mock_guest = mock.MagicMock(spec=libvirt_guest.Guest) + mock_dev = mock.MagicMock(spec=libvirt_guest.BlockDevice) + mock_conf = mock.MagicMock( + spec=vconfig.LibvirtConfigGuestDisk, + source_type=mock.sentinel.source_type, + target_dev=mock.sentinel.target_dev, + source_path=None) + + mock_libvirt_ver.return_value = versionutils.convert_version_to_int( + libvirt_driver.MIN_LIBVIRT_BLOCKDEV) + mock_qemu_ver.return_value = versionutils.convert_version_to_int( + libvirt_driver.MIN_QEMU_BLOCKDEV) + 
mock_dev.is_job_complete.return_value = True
+        mock_guest.get_block_device.return_value = mock_dev
+        mock_guest.get_xml_desc.side_effect = [
+            mock.sentinel.original_xml_desc,
+            mock.sentinel.new_xml_desc]
+        mock_guest.has_persistent_configuration.return_value = False
+        mock_conf.to_xml.return_value = mock.sentinel.conf_xml
+
+        resize_to = 1
+        expected_resize_to = resize_to * units.Gi
+
+        drvr._swap_volume(mock_guest, 'vdb', mock_conf, resize_to, None)
+
+        # Assert that virDomainBlockCopy is called
+        mock_dev.copy.assert_called_once_with(
+            mock.sentinel.conf_xml, reuse_ext=True)
+
+        # Assert that we abort once and then pivot
+        mock_dev.abort_job.assert_has_calls([
+            mock.call(), mock.call(pivot=True)])
+
+        # Assert that virDomainBlockResize is called
+        mock_dev.resize.assert_called_once_with(expected_resize_to)
+
+        # Assert the new domain XML is written to disk on success
+        mock_write_instance_config.assert_called_once_with(
+            mock.sentinel.new_xml_desc)
+
+    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
+    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
+    @mock.patch('nova.virt.libvirt.host.Host.write_instance_config')
+    def test_swap_volume_copy_failure(self, mock_write_instance_config,
+                                      mock_libvirt_ver, mock_qemu_ver):
+        """Assert that exception.VolumeRebaseFailed is raised on failure"""
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+        mock_guest = mock.MagicMock(spec=libvirt_guest.Guest)
+        mock_dev = mock.MagicMock(spec=libvirt_guest.BlockDevice)
+        mock_conf = mock.MagicMock(
+            spec=vconfig.LibvirtConfigGuestDisk,
+            source_type=mock.sentinel.source_type,
+            target_dev=mock.sentinel.target_dev,
+            source_path=None)
+
+        mock_libvirt_ver.return_value = versionutils.convert_version_to_int(
+            libvirt_driver.MIN_LIBVIRT_BLOCKDEV)
+        mock_qemu_ver.return_value = versionutils.convert_version_to_int(
+            libvirt_driver.MIN_QEMU_BLOCKDEV)
+        mock_dev.copy.side_effect = test.TestingException()
+        mock_guest.get_block_device.return_value = mock_dev
+        mock_guest.get_xml_desc.side_effect = [
+            mock.sentinel.original_xml_desc,
+            mock.sentinel.new_xml_desc]
+        mock_guest.has_persistent_configuration.return_value = False
+        mock_conf.to_xml.return_value = mock.sentinel.conf_xml
+
+        # Assert that exception.VolumeRebaseFailed is raised
+        self.assertRaises(exception.VolumeRebaseFailed, drvr._swap_volume,
+                          mock_guest, 'vdb', mock_conf, 0, None)
+
+        # Assert that virDomainBlockCopy is called
+        mock_dev.copy.assert_called_once_with(
+            mock.sentinel.conf_xml, reuse_ext=True)
+
+        # Assert the original domain XML is written back to disk on failure
+        mock_write_instance_config.assert_called_once_with(
+            mock.sentinel.original_xml_desc)
+
     @mock.patch('nova.virt.libvirt.guest.BlockDevice.is_job_complete',
                 return_value=True)
-    def _test_swap_volume(self, mock_is_job_complete, source_type,
-                          resize=False, fail=False):
+    def _test_swap_volume_rebase(self, mock_is_job_complete, source_type,
+                                 resize=False, fail=False):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+        hw_firmware_type = image_meta.properties.get(
+            'hw_firmware_type')

         mock_dom = mock.MagicMock()
         guest = libvirt_guest.Guest(mock_dom)
@@ -18632,10 +19114,11 @@
         mock_conf = mock.MagicMock(source_type=source_type,
                                    source_path=dstfile)
         if not fail:
-            drvr._swap_volume(guest, srcfile, mock_conf, 1)
+            drvr._swap_volume(guest, srcfile, mock_conf, 1,
+                              hw_firmware_type)
         else:
             self.assertRaises(expected_exception, drvr._swap_volume, guest,
-                              srcfile, mock_conf, 1)
+
srcfile, mock_conf, 1, hw_firmware_type) # Verify we read the original persistent config. expected_call_count = 1 @@ -18665,23 +19148,23 @@ # Verify we called resize with the correct args. if resize: mock_dom.blockResize.assert_called_once_with( - srcfile, 1 * units.Gi / units.Ki) + srcfile, 1 * units.Gi, flags=1) - def test_swap_volume_file(self): - self._test_swap_volume('file') + def test_swap_volume_rebase_file(self): + self._test_swap_volume_rebase('file') - def test_swap_volume_block(self): + def test_swap_volume_rebase_block(self): """If the swapped volume is type="block", make sure that we give libvirt the correct VIR_DOMAIN_BLOCK_REBASE_COPY_DEV flag to ensure the correct type="block" XML is generated (bug 1691195) """ - self._test_swap_volume('block') + self._test_swap_volume_rebase('block') def test_swap_volume_rebase_fail(self): - self._test_swap_volume('block', fail=True) + self._test_swap_volume_rebase('block', fail=True) - def test_swap_volume_resize_fail(self): - self._test_swap_volume('file', resize=True, fail=True) + def test_swap_volume_rebase_resize_fail(self): + self._test_swap_volume_rebase('file', resize=True, fail=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') @@ -18692,6 +19175,9 @@ swap_volume, disconnect_volume): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) + image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + hw_firmware_type = image_meta.properties.get( + 'hw_firmware_type') old_connection_info = {'driver_volume_type': 'fake', 'serial': 'old-volume-id', 'data': {'device_path': '/fake-old-volume', @@ -18724,7 +19210,90 @@ connect_volume.assert_called_once_with(self.context, new_connection_info, instance) - swap_volume.assert_called_once_with(guest, 'vdb', conf, 1) + swap_volume.assert_called_once_with(guest, 'vdb', + conf, 1, hw_firmware_type) + disconnect_volume.assert_called_once_with(self.context, + old_connection_info, + instance) + + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume') + @mock.patch('nova.virt.libvirt.host.Host.get_guest') + def test_swap_volume_without_device_path_blocked(self, get_guest, + connect_volume, get_volume_config, swap_volume, disconnect_volume): + """Assert that NotImplementedError is raised when swap_volume is called + without a source_path prior to MIN_LIBVIRT_BLOCKDEV. 
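+
+        (Prior to -blockdev support, _swap_volume presumably has to fall
+        back to virDomainBlockRebase, which needs a local path to pivot
+        to, so network-backed volumes such as the rbd ones used here
+        cannot be swapped.)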
+ """ + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + instance = objects.Instance(**self.test_instance) + old_connection_info = {'driver_volume_type': 'rbd', + 'serial': 'old-volume-id', + 'data': {'access_mode': 'rw'}} + new_connection_info = {'driver_volume_type': 'rbd', + 'serial': 'new-volume-id', + 'data': {'access_mode': 'rw'}} + + mock_guest = mock.MagicMock() + mock_guest.get_disk.return_value = True + get_guest.return_value = mock_guest + get_volume_config.return_value = mock.MagicMock(source_path=None) + + self.assertRaises(NotImplementedError, conn.swap_volume, self.context, + old_connection_info, new_connection_info, instance, + '/dev/vdb', 1) + + @mock.patch.object(fakelibvirt.Connection, 'getVersion') + @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume') + @mock.patch('nova.virt.libvirt.host.Host.get_guest') + def test_swap_volume_blockdev_without_device_path(self, get_guest, + connect_volume, get_volume_config, swap_volume, disconnect_volume, + lib_version, qemu_version): + """Assert that swap_volume correctly calls down to _swap_volume when + source_path isn't provided after MIN_LIBVIRT_BLOCKDEV. + """ + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) + lib_version.return_value = versionutils.convert_version_to_int( + libvirt_driver.MIN_LIBVIRT_BLOCKDEV) + qemu_version.return_value = versionutils.convert_version_to_int( + libvirt_driver.MIN_QEMU_BLOCKDEV) + instance = objects.Instance(**self.test_instance) + old_connection_info = {'driver_volume_type': 'rbd', + 'serial': 'old-volume-id', + 'data': {'access_mode': 'rw'}} + new_connection_info = {'driver_volume_type': 'rbd', + 'serial': 'new-volume-id', + 'data': {'access_mode': 'rw'}} + mock_dom = mock.MagicMock() + guest = libvirt_guest.Guest(mock_dom) + mock_dom.XMLDesc.return_value = """ + + + + + + + + """ + mock_dom.name.return_value = 'inst' + mock_dom.UUIDString.return_value = 'uuid' + get_guest.return_value = guest + conf = mock.MagicMock(source_path='/fake-new-volume') + get_volume_config.return_value = conf + + conn.swap_volume(self.context, old_connection_info, + new_connection_info, instance, '/dev/vdb', 1) + + get_guest.assert_called_once_with(instance) + connect_volume.assert_called_once_with(self.context, + new_connection_info, instance) + + swap_volume.assert_called_once_with(guest, 'vdb', conf, 1, None) disconnect_volume.assert_called_once_with(self.context, old_connection_info, instance) @@ -19021,13 +19590,15 @@ if rescue: rescue_data = ct_instance + disk_info = {'mapping': {'root': {'dev': 'hda'}, + 'disk.rescue': {'dev': 'hda'}}} else: rescue_data = None + disk_info = {'mapping': {'disk': {}}} cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self), - image_meta, {'mapping': {'disk': {}}}, - rescue_data) + image_meta, disk_info, rescue_data) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) @@ -19120,6 +19691,47 @@ self._test_get_guest_config_parallels_volume(fields.VMMode.EXE, 4) self._test_get_guest_config_parallels_volume(fields.VMMode.HVM, 6) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
+ '_guest_add_accel_pci_devices') + def test_get_guest_config_accel_pci(self, mock_add_accel): + # For an ARQ list with attach handle type 'PCI', the list should + # be passed intact to _guest_add_accel_pci_devices. + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + instance.image_ref = uuids.image_ref + instance.config_drive = '' + image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + disk_info = {'mapping': {}} + + accel_info = copy.deepcopy(nova_fixtures.CyborgFixture.bound_arq_list) + for arq in accel_info: + arq['attach_handle_type'] = 'PCI' + drvr._get_guest_config(instance, network_info=[], + image_meta=image_meta, disk_info=disk_info, accel_info=accel_info) + mock_add_accel.assert_called_once_with(mock.ANY, accel_info) + + @mock.patch.object(libvirt_driver.LOG, 'info') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' + '_guest_add_accel_pci_devices') + def test_get_guest_config_accel_nonpci(self, mock_add_accel, mock_log): + # For an ARQ list with attach handle type != 'PCI', + # _guest_add_accel_pci_devices should get []. + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + instance = objects.Instance(**self.test_instance) + instance.image_ref = uuids.image_ref + instance.config_drive = '' + image_meta = objects.ImageMeta.from_dict(self.test_image_meta) + disk_info = {'mapping': {}} + + # This list has ARQs with attach handle type 'TEST_PCI'. + accel_info = nova_fixtures.CyborgFixture.bound_arq_list + + drvr._get_guest_config(instance, network_info=[], + image_meta=image_meta, disk_info=disk_info, accel_info=accel_info) + mock_add_accel.assert_called_once_with(mock.ANY, []) + self.assertIn('Ignoring accelerator requests for instance', + six.text_type(mock_log.call_args[0])) + def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self): # New config drives are stored in rbd but existing instances have # config drives in the old location under the instances path. 
@@ -19289,6 +19901,35 @@ vpmem_amount += 1 self.assertEqual(2, vpmem_amount) + @mock.patch.object(host.Host, "get_capabilities") + def test_get_cpu_model_mapping(self, mock_cap): + expected = { + fields.Architecture.X86_64: ["Haswell", "IvyBridge"], + fields.Architecture.I686: ["Haswell"], + fields.Architecture.PPC: ["601_v1"], + fields.Architecture.PPC64: ["power7"], + fields.Architecture.PPC64LE: ["power8"], + fields.Architecture.AARCH64: None, + } + for guestarch, expect_model in expected.items(): + if guestarch == fields.Architecture.AARCH64: + self.flags(cpu_models="max", group='libvirt') + caps = vconfig.LibvirtConfigCaps() + caps.host = vconfig.LibvirtConfigCapsHost() + caps.host.cpu = vconfig.LibvirtConfigCPU() + caps.host.cpu.arch = guestarch + mock_cap.return_value = caps + + with mock.patch.object(host.Host, + "get_cpu_model_names", + return_value=expect_model): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + if guestarch == fields.Architecture.AARCH64: + drvr._get_cpu_model_mapping(None) + else: + cpu_model = drvr._get_cpu_model_mapping(expect_model[0]) + self.assertEqual(cpu_model, expect_model[0]) + class TestGuestConfigSysinfoSerialOS(test.NoDBTestCase): def setUp(self): @@ -19616,8 +20257,8 @@ self._test_update_provider_tree() self.assertEqual(self._get_inventory(), (self.pt.data(self.cn_rp['uuid'])).inventory) - self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']), - self.pt.data(self.cn_rp['uuid']).traits) + for trait in ['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']: + self.assertIn(trait, self.pt.data(self.cn_rp['uuid']).traits) def test_update_provider_tree_with_vgpus(self): pci_devices = ['pci_0000_06_00_0', 'pci_0000_07_00_0'] @@ -19780,8 +20421,8 @@ self.pt.add_traits(self.cn_rp['uuid'], 'HW_CPU_X86_VMX', 'HW_CPU_X86_XOP') self._test_update_provider_tree() - self.assertEqual(set(['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']), - self.pt.data(self.cn_rp['uuid']).traits) + for trait in ['HW_CPU_X86_AVX512F', 'HW_CPU_X86_BMI']: + self.assertIn(trait, self.pt.data(self.cn_rp['uuid']).traits) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_get_mediated_device_information') @@ -21808,6 +22449,32 @@ self.assertIn('the device is no longer found on the guest', six.text_type(mock_log.warning.call_args[0])) + @mock.patch('nova.virt.libvirt.driver.LOG') + def test_detach_interface_guest_not_found_after_detach(self, mock_log): + # Asserts that we don't raise an exception when the guest is gone + # after a libvirt error during detach. + instance = self._create_instance() + vif = _fake_network_info(self, 1)[0] + guest = mock.MagicMock() + guest.get_power_state.return_value = power_state.RUNNING + guest.get_interface_by_cfg.return_value = ( + vconfig.LibvirtConfigGuestInterface()) + get_guest_mock = mock.Mock() + # Host.get_guest should be called twice: the first time it is found, + # the second time it is gone. 
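+        # (an iterable side_effect makes mock return its items in turn,
+        # raising any item that is an exception instance)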
+ get_guest_mock.side_effect = ( + guest, exception.InstanceNotFound(instance_id=instance.uuid)) + self.drvr._host.get_guest = get_guest_mock + error = fakelibvirt.libvirtError( + 'internal error: End of file from qemu monitor') + error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,) + guest.detach_device_with_retry.side_effect = error + self.drvr.detach_interface(self.context, instance, vif) + self.assertEqual(1, mock_log.info.call_count) + self.assertIn('Instance disappeared while detaching interface', + mock_log.info.call_args[0][0]) + get_guest_mock.assert_has_calls([mock.call(instance)] * 2) + @mock.patch.object(FakeVirtDomain, 'info') @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags') @mock.patch.object(host.Host, '_get_domain') @@ -21867,6 +22534,9 @@ mock_detach.assert_called_once_with(expected.to_xml(), flags=expected_flags) + @mock.patch('nova.objects.block_device.BlockDeviceMapping.save', + new=mock.Mock()) + @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref') @mock.patch('nova.virt.libvirt.LibvirtDriver.' '_get_all_assigned_mediated_devices') @mock.patch('nova.virt.libvirt.utils.write_to_file') @@ -21876,13 +22546,12 @@ @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata') @mock.patch('nova.privsep.utils.supports_direct_io') @mock.patch('nova.api.metadata.base.InstanceMetadata') - def _test_rescue(self, instance, - mock_instance_metadata, mock_supports_direct_io, - mock_build_device_metadata, mock_set_host_enabled, - mock_write_to_file, - mock_get_mdev, - image_meta_dict=None, - exists=None): + def _test_rescue(self, instance, mock_instance_metadata, + mock_supports_direct_io, mock_build_device_metadata, + mock_set_host_enabled, mock_write_to_file, mock_get_mdev, + mock_get_image_meta_by_ref, image_meta_dict=None, exists=None, + instance_image_meta_dict=None, block_device_info=None): + self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) mock_build_device_metadata.return_value = None mock_supports_direct_io.return_value = True @@ -21896,6 +22565,10 @@ image_meta_dict = {'id': uuids.image_id, 'name': 'fake'} image_meta = objects.ImageMeta.from_dict(image_meta_dict) + if instance_image_meta_dict: + meta = objects.ImageMeta.from_dict(instance_image_meta_dict) + mock_get_image_meta_by_ref.return_value = meta + network_info = _fake_network_info(self) rescue_password = 'fake_password' @@ -21907,11 +22580,15 @@ if post_xml_callback is not None: post_xml_callback() - with mock.patch.object( - self.drvr, '_create_domain', - side_effect=fake_create_domain) as mock_create_domain: + with test.nested( + mock.patch.object(self.drvr, '_create_domain', + side_effect=fake_create_domain), + mock.patch.object(self.drvr, '_connect_volume'), + ) as (mock_create_domain, mock_connect_volume): + self.drvr.rescue(self.context, instance, - network_info, image_meta, rescue_password) + network_info, image_meta, rescue_password, + block_device_info) self.assertTrue(mock_create_domain.called) @@ -22028,6 +22705,124 @@ self.assertEqual(expected_kernel_ramdisk_paths, kernel_ramdisk_paths) + @mock.patch('nova.virt.libvirt.utils.write_to_file') + def test_rescue_stable_device_unsupported_virt_types(self, + mock_libvirt_write_to_file): + network_info = _fake_network_info(self, 1) + instance = self._create_instance({'config_drive': str(True)}) + rescue_image_meta_dict = {'id': uuids.rescue_image_id, + 'name': 'rescue', + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'virtio'}} + rescue_image_meta = 
objects.ImageMeta.from_dict(rescue_image_meta_dict) + + # Assert that InstanceNotRescuable is raised for xen and lxc virt_types + self.flags(virt_type='xen', group='libvirt') + self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue, + self.context, instance, network_info, + rescue_image_meta, None, None) + + self.flags(virt_type='lxc', group='libvirt') + self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue, + self.context, instance, network_info, + rescue_image_meta, None, None) + + def test_rescue_stable_device(self): + # Assert the imagebackend behaviour and domain device layout + instance = self._create_instance({'config_drive': str(True)}) + inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'} + rescue_image_meta_dict = {'id': uuids.rescue_image_id, + 'name': 'rescue', + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'virtio'}} + block_device_info = {'root_device_name': '/dev/vda', + 'ephemerals': [ + {'guest_format': None, + 'disk_bus': 'virtio', + 'device_name': '/dev/vdb', + 'size': 20, + 'device_type': 'disk'}], + 'swap': None, + 'block_device_mapping': None} + + backend, domain = self._test_rescue( + instance, + image_meta_dict=rescue_image_meta_dict, + instance_image_meta_dict=inst_image_meta_dict, + block_device_info=block_device_info) + + # Assert that we created the expected set of disks, and no others + self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'], + sorted(backend.created_disks.keys())) + + # Assert that the original disks are presented first with the rescue + # disk attached as the final device in the domain. + expected_disk_paths = [backend.disks[name].path for name + in ('disk', 'disk.eph0', 'disk.config', + 'disk.rescue')] + disk_paths = domain.xpath('devices/disk/source/@file') + self.assertEqual(expected_disk_paths, disk_paths) + + # Assert that the disk.rescue device has a boot order of 1 + disk_path = backend.disks['disk.rescue'].path + query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path + self.assertEqual('1', domain.xpath(query)[0]) + + def test_rescue_stable_device_with_volume_attached(self): + # Assert the imagebackend behaviour and domain device layout + instance = self._create_instance({'config_drive': str(True)}) + inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'} + rescue_image_meta_dict = {'id': uuids.rescue_image_id, + 'name': 'rescue', + 'properties': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'virtio'}} + conn_info = {'driver_volume_type': 'iscsi', + 'data': {'device_path': '/dev/sdb'}} + bdm = objects.BlockDeviceMapping( + self.context, + **fake_block_device.FakeDbBlockDeviceDict({ + 'id': 1, + 'source_type': 'volume', + 'destination_type': 'volume', + 'device_name': '/dev/vdd'})) + bdms = driver_block_device.convert_volumes([bdm]) + block_device_info = {'root_device_name': '/dev/vda', + 'ephemerals': [ + {'guest_format': None, + 'disk_bus': 'virtio', + 'device_name': '/dev/vdb', + 'size': 20, + 'device_type': 'disk'}], + 'swap': None, + 'block_device_mapping': bdms} + bdm = block_device_info['block_device_mapping'][0] + bdm['connection_info'] = conn_info + + backend, domain = self._test_rescue( + instance, + image_meta_dict=rescue_image_meta_dict, + instance_image_meta_dict=inst_image_meta_dict, + block_device_info=block_device_info) + + # Assert that we created the expected set of disks, and no others + self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'], + sorted(backend.created_disks.keys())) + + # Assert that the original disks 
are presented first with the rescue + # disk attached as the final device in the domain. + expected_disk_paths = [ + backend.disks['disk'].path, backend.disks['disk.eph0'].path, + backend.disks['disk.config'].path, '/dev/sdb', + backend.disks['disk.rescue'].path] + query = 'devices/disk/source/@*[name()="file" or name()="dev"]' + disk_paths = domain.xpath(query) + self.assertEqual(expected_disk_paths, disk_paths) + + # Assert that the disk.rescue device has a boot order of 1 + disk_path = backend.disks['disk.rescue'].path + query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path + self.assertEqual('1', domain.xpath(query)[0]) + @mock.patch.object(libvirt_utils, 'get_instance_path') @mock.patch.object(libvirt_utils, 'load_file') @mock.patch.object(host.Host, '_get_domain') @@ -22610,7 +23405,8 @@ '._get_mediated_devices') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._get_mdev_capable_devices') - def test_get_gpu_inventories(self, get_mdev_capable_devs, + def _test_get_gpu_inventories(self, drvr, expected, expected_types, + get_mdev_capable_devs, get_mediated_devices): get_mdev_capable_devs.return_value = [ {"dev_id": "pci_0000_06_00_0", @@ -22625,6 +23421,9 @@ "types": {'nvidia-11': {'availableInstances': 7, 'name': 'GRID M60-0B', 'deviceAPI': 'vfio-pci'}, + 'nvidia-12': {'availableInstances': 10, + 'name': 'GRID M60-8Q', + 'deviceAPI': 'vfio-pci'}, } }, ] @@ -22638,13 +23437,19 @@ 'parent': "pci_0000_07_00_0", 'type': 'nvidia-11', 'iommu_group': 1}] - drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + self.assertEqual(expected, drvr._get_gpu_inventories()) + get_mdev_capable_devs.assert_called_once_with(types=expected_types) + get_mediated_devices.assert_called_once_with(types=expected_types) + + def test_get_gpu_inventories_with_a_single_type(self): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # If the operator doesn't provide GPU types self.assertEqual({}, drvr._get_gpu_inventories()) - # Now, set a specific GPU type + # Now, set a specific GPU type and restart the driver self.flags(enabled_vgpu_types=['nvidia-11'], group='devices') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) expected = { # the first GPU also has one mdev allocated against it 'pci_0000_06_00_0': {'total': 15 + 1, @@ -22663,9 +23468,158 @@ 'allocation_ratio': 1.0, }, } - self.assertEqual(expected, drvr._get_gpu_inventories()) - get_mdev_capable_devs.assert_called_once_with(types=['nvidia-11']) - get_mediated_devices.assert_called_once_with(types=['nvidia-11']) + self._test_get_gpu_inventories(drvr, expected, ['nvidia-11']) + + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' + '._get_mdev_capable_devices') + def test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs): + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:06:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:07:00.0'], group='vgpu_nvidia-12') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + expected = { + # the first GPU supports nvidia-11 and has one mdev with this type + 'pci_0000_06_00_0': {'total': 15 + 1, + 'max_unit': 15 + 1, + 'min_unit': 1, + 'step_size': 1, + 'reserved': 0, + 'allocation_ratio': 1.0, + }, + # the second GPU supports nvidia-12 but the existing mdev is not + # using this type, 
so we only count the availableInstances value + # for nvidia-12. + 'pci_0000_07_00_0': {'total': 10, + 'max_unit': 10, + 'min_unit': 1, + 'step_size': 1, + 'reserved': 0, + 'allocation_ratio': 1.0, + }, + } + self._test_get_gpu_inventories(drvr, expected, ['nvidia-11', + 'nvidia-12']) + + @mock.patch.object(libvirt_driver.LOG, 'warning') + def test_get_supported_vgpu_types(self, mock_warning): + # Verify that by default we don't support vGPU types + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + self.assertEqual([], drvr._get_supported_vgpu_types()) + + # Now, provide only one supported vGPU type + self.flags(enabled_vgpu_types=['nvidia-11'], group='devices') + self.assertEqual(['nvidia-11'], drvr._get_supported_vgpu_types()) + # Given we only support one vGPU type, we don't have any map for PCI + # devices *yet* + self.assertEqual({}, drvr.pgpu_type_mapping) + # Since the operator wanted to only support one type, it's fine to not + # provide config groups + mock_warning.assert_not_called() + # For further checking + mock_warning.reset_mock() + + # Now two types without forgetting to provide the pGPU addresses + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11') + self.assertEqual(['nvidia-11'], drvr._get_supported_vgpu_types()) + self.assertEqual({}, drvr.pgpu_type_mapping) + msg = ("The vGPU type '%(type)s' was listed in '[devices] " + "enabled_vgpu_types' but no corresponding " + "'[vgpu_%(type)s]' group or " + "'[vgpu_%(type)s] device_addresses' " + "option was defined. Only the first type '%(ftype)s' " + "will be used." % {'type': 'nvidia-12', + 'ftype': 'nvidia-11'}) + mock_warning.assert_called_once_with(msg) + # For further checking + mock_warning.reset_mock() + + # And now do it correctly ! 
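+        # i.e. provide a '[vgpu_<type>] device_addresses' option for every
+        # type listed in '[devices] enabled_vgpu_types'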
+        self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11')
+        self.flags(device_addresses=['0000:85:00.0'], group='vgpu_nvidia-12')
+        self.assertEqual(['nvidia-11', 'nvidia-12'],
+                         drvr._get_supported_vgpu_types())
+        self.assertEqual({'0000:84:00.0': 'nvidia-11',
+                          '0000:85:00.0': 'nvidia-12'}, drvr.pgpu_type_mapping)
+        mock_warning.assert_not_called()
+
+    def test_get_supported_vgpu_types_with_duplicate_types(self):
+        self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'],
+                   group='devices')
+        # we need to call the below again to ensure the updated
+        # 'device_addresses' value is read and the new groups created
+        nova.conf.devices.register_dynamic_opts(CONF)
+        # Provide the same pGPU PCI ID for two different types
+        self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11')
+        self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-12')
+        self.assertRaises(exception.InvalidLibvirtGPUConfig,
+                          libvirt_driver.LibvirtDriver,
+                          fake.FakeVirtAPI(), False)
+
+    def test_get_supported_vgpu_types_with_invalid_pci_address(self):
+        self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
+        # we need to call the below again to ensure the updated
+        # 'device_addresses' value is read and the new groups created
+        nova.conf.devices.register_dynamic_opts(CONF)
+        # Fat-finger the PCI address
+        self.flags(device_addresses=['whoops'], group='vgpu_nvidia-11')
+        self.assertRaises(exception.InvalidLibvirtGPUConfig,
+                          libvirt_driver.LibvirtDriver,
+                          fake.FakeVirtAPI(), False)
+
+    def test_get_vgpu_type_per_pgpu(self):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        device = 'pci_0000_84_00_0'
+        self.assertIsNone(drvr._get_vgpu_type_per_pgpu(device))
+
+        # By default, we return the first type if we only support one.
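+        # (no per-type '[vgpu_*]' config group is needed in that case)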
+ self.flags(enabled_vgpu_types=['nvidia-11'], group='devices') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + self.assertEqual('nvidia-11', drvr._get_vgpu_type_per_pgpu(device)) + + # Now, make sure we provide the right vGPU type for the device + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:85:00.0'], group='vgpu_nvidia-12') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + # the libvirt name pci_0000_84_00_0 matches 0000:84:00.0 + self.assertEqual('nvidia-11', drvr._get_vgpu_type_per_pgpu(device)) + + def test_get_vgpu_type_per_pgpu_with_incorrect_pci_addr(self): + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:85:00.0'], group='vgpu_nvidia-12') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + # 'whoops' is not a correct libvirt name corresponding to a PCI address + self.assertIsNone(drvr._get_vgpu_type_per_pgpu('whoops')) + + def test_get_vgpu_type_per_pgpu_with_unconfigured_pgpu(self): + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:84:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:85:00.0'], group='vgpu_nvidia-12') + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) + # 0000:86:00.0 wasn't configured + self.assertIsNone(drvr._get_vgpu_type_per_pgpu('pci_0000_86_00_0')) @mock.patch.object(host.Host, 'device_lookup_by_name') @mock.patch.object(host.Host, 'list_mdev_capable_devices') @@ -22851,8 +23805,8 @@ drvr.provider_tree = self._get_fake_provider_tree_with_vgpu() self.assertEqual([uuids.mdev1], drvr._allocate_mdevs(allocations=allocations)) - get_unassigned_mdevs.assert_called_once_with(['nvidia-11'], - 'pci_0000_06_00_0') + get_unassigned_mdevs.assert_called_once_with('pci_0000_06_00_0', + ['nvidia-11']) @mock.patch.object(nova.privsep.libvirt, 'create_mdev') @mock.patch.object(libvirt_driver.LibvirtDriver, @@ -22863,7 +23817,13 @@ unallocated_mdevs, get_mdev_capable_devs, privsep_create_mdev): - self.flags(enabled_vgpu_types=['nvidia-11'], group='devices') + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:06:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:07:00.0'], group='vgpu_nvidia-12') allocations = { uuids.rp1: { 'resources': { @@ -22875,7 +23835,12 @@ get_mdev_capable_devs.return_value = [ {"dev_id": "pci_0000_06_00_0", "vendor_id": 0x10de, - "types": {'nvidia-11': {'availableInstances': 16, + # This pGPU can support both types but the operator only wanted + # to use nvidia-11 for it. 
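+             # (so only the nvidia-11 entry should be used when an mdev is
+             # created for this device)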
+ "types": {'nvidia-10': {'availableInstances': 16, + 'name': 'GRID M60-8Q', + 'deviceAPI': 'vfio-pci'}, + 'nvidia-11': {'availableInstances': 16, 'name': 'GRID M60-0B', 'deviceAPI': 'vfio-pci'}, } @@ -22956,11 +23921,13 @@ "pGPU device name %(name)s can't be guessed from the ProviderTree " "roots %(roots)s", {'name': 'oops_I_did_it_again', 'roots': 'cn'}) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vgpu_type_per_pgpu') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_mediated_devices') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_all_assigned_mediated_devices') def test_get_existing_mdevs_not_assigned(self, get_all_assigned_mdevs, - get_mediated_devices): + get_mediated_devices, + get_vgpu_type_per_pgpu): # mdev2 is assigned to instance1 get_all_assigned_mdevs.return_value = {uuids.mdev2: uuids.inst1} # there is a total of 2 mdevs, mdev1 and mdev2 @@ -22973,25 +23940,45 @@ 'uuid': uuids.mdev2, 'parent': "pci_some", 'type': 'nvidia-11', - 'iommu_group': 1}] + 'iommu_group': 1}, + {'dev_id': 'mdev_some_uuid3', + 'uuid': uuids.mdev3, + 'parent': "pci_some", + 'type': 'nvidia-12', + 'iommu_group': 1}, + ] + + def _fake_get_vgpu_type_per_pgpu(parent_addr): + # Always return the same vGPU type so we avoid mdev3 + return 'nvidia-11' + get_vgpu_type_per_pgpu.side_effect = _fake_get_vgpu_type_per_pgpu drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - # Since mdev2 is assigned to inst1, only mdev1 is available + # Since mdev3 type is not supported and mdev2 is assigned to inst1, + # only mdev1 is available self.assertEqual(set([uuids.mdev1]), - drvr._get_existing_mdevs_not_assigned()) + drvr._get_existing_mdevs_not_assigned(parent=None)) @mock.patch('nova.compute.utils.get_machine_ips', new=mock.Mock(return_value=[])) @mock.patch.object(nova.privsep.libvirt, 'create_mdev') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_mdev_capable_devices') + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' + '_get_mediated_device_information') @mock.patch.object(os.path, 'exists') @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_all_assigned_mediated_devices') def test_recreate_mediated_device_on_init_host( - self, get_all_assigned_mdevs, exists, + self, get_all_assigned_mdevs, exists, mock_get_mdev_info, get_mdev_capable_devs, privsep_create_mdev): - self.flags(enabled_vgpu_types=['nvidia-11'], group='devices') + self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'], + group='devices') + # we need to call the below again to ensure the updated + # 'device_addresses' value is read and the new groups created + nova.conf.devices.register_dynamic_opts(CONF) + self.flags(device_addresses=['0000:06:00.0'], group='vgpu_nvidia-11') + self.flags(device_addresses=['0000:07:00.0'], group='vgpu_nvidia-12') get_all_assigned_mdevs.return_value = {uuids.mdev1: uuids.inst1, uuids.mdev2: uuids.inst2} @@ -23006,6 +23993,13 @@ return True if uuids.mdev1 in path else False exists.side_effect = _exists + mock_get_mdev_info.side_effect = [ + {"dev_id": "mdev_fake", + "uuid": uuids.mdev2, + "parent": "pci_0000_06_00_0", + "type": "nvidia-11", + "iommu_group": 12 + }] get_mdev_capable_devs.return_value = [ {"dev_id": "pci_0000_06_00_0", "vendor_id": 0x10de, @@ -23017,9 +24011,35 @@ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host(host='foo') + # Only mdev2 will be recreated as mdev1 already exists. privsep_create_mdev.assert_called_once_with( "0000:06:00.0", 'nvidia-11', uuid=uuids.mdev2) + @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
+                '_get_mediated_device_information')
+    @mock.patch.object(os.path, 'exists')
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       '_get_all_assigned_mediated_devices')
+    def test_recreate_mediated_device_on_init_host_with_wrong_config(
+            self, get_all_assigned_mdevs, exists, mock_get_mdev_info):
+        self.flags(enabled_vgpu_types=['nvidia-11', 'nvidia-12'],
+                   group='devices')
+        get_all_assigned_mdevs.return_value = {uuids.mdev1: uuids.inst1}
+        # We pretend this mdev doesn't exist, hence it needs recreation
+        exists.return_value = False
+        mock_get_mdev_info.side_effect = [
+            {"dev_id": "mdev_fake",
+             "uuid": uuids.mdev1,
+             "parent": "pci_0000_06_00_0",
+             "type": "nvidia-99",
+             "iommu_group": 12
+             }]
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        # mdev1 was originally created for nvidia-99 but the operator messed
+        # up the configuration by removing this type, so we want to hard stop.
+        self.assertRaises(exception.InvalidLibvirtGPUConfig,
+                          drvr.init_host, host='foo')
+
     @mock.patch.object(libvirt_guest.Guest, 'detach_device')
     def _test_detach_mediated_devices(self, side_effect, detach_device):
@@ -23073,6 +24093,69 @@
         self.assertRaises(test.TestingException,
                           self._test_detach_mediated_devices, exc)

+    def test_storage_bus_traits__qemu_kvm(self):
+        """Test getting storage bus traits per virt type.
+        """
+        self.flags(hw_machine_type='pc', group='libvirt')
+        for virt_type in ('qemu', 'kvm'):
+            self.flags(virt_type=virt_type, group='libvirt')
+            bus_traits = self.drvr._get_storage_bus_traits()
+            dom_caps = self.drvr._host.get_domain_capabilities()
+            buses = dom_caps['x86_64']['pc'].devices.disk.buses
+            for bus in buses:
+                name = bus.replace('-', '_').upper()
+                trait = f'COMPUTE_STORAGE_BUS_{name}'
+                self.assertIn(trait, bus_traits)
+                self.assertTrue(bus_traits[trait])
+                bus_traits.pop(trait)
+            self.assertTrue(all(not bus for bus in bus_traits.values()))
+
+            valid_traits = ot.check_traits(bus_traits)
+            self.assertEqual(len(bus_traits), len(valid_traits[0]))
+            self.assertEqual(0, len(valid_traits[1]))
+
+    def test_storage_bus_traits__non_qemu_kvm(self):
+        """Test getting storage bus traits per virt type."""
+        all_traits = set(ot.get_traits('COMPUTE_STORAGE_BUS_'))
+        # ensure each virt type reports the correct bus types
+        for virt_type, buses in blockinfo.SUPPORTED_STORAGE_BUSES.items():
+            if virt_type in ('qemu', 'kvm'):
+                continue
+
+            self.flags(virt_type=virt_type, group='libvirt')
+            bus_traits = self.drvr._get_storage_bus_traits()
+            # Ensure all bus traits are accounted for
+            self.assertEqual(all_traits, set(bus_traits))
+            for trait, val in bus_traits.items():
+                bus_from_trait = trait.rsplit('_', 1)[1].lower()
+                self.assertEqual(bus_from_trait in buses, bus_traits[trait])
+
+    def test_vif_model_traits(self):
+        """Test getting vif model traits per virt type."""
+        for virt_type, models in libvirt_vif.SUPPORTED_VIF_MODELS.items():
+            self.flags(virt_type=virt_type, group='libvirt')
+            vif_models = self.drvr._get_vif_model_traits()
+            for model in models:
+                trait = 'COMPUTE_NET_VIF_MODEL_%s' % (
+                    model.replace('-', '_').upper()
+                )
+                self.assertIn(trait, vif_models)
+                self.assertTrue(vif_models[trait])
+                vif_models.pop(trait)
+            self.assertTrue(all(not model for model in vif_models.values()))
+
+    def test_video_model_traits(self):
+        """Test getting video model traits per virt type."""
+        # NOTE(sean-k-mooney): we do not have static tables of which video
+        # models are supported by each virt type so just assert that traits are
+        # available for all models but not if the traits are mapped to 
true or + # false. + self.flags(virt_type='qemu', group='libvirt') + model_traits = self.drvr._get_video_model_traits() + for model in fields.VideoModel.ALL: + trait = f'COMPUTE_GRAPHICS_MODEL_{model.upper()}' + self.assertIn(trait, model_traits) + @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_cpu_feature_traits', new=mock.Mock(return_value={})) def test_cpu_traits__sev_support(self): @@ -25177,6 +26260,29 @@ self.assertEqual('SMALL', vpmems[1].label) self.assertEqual('SMALL', vpmems[2].label) + @mock.patch('nova.virt.hardware.get_vpmems') + def test_sorted_migrating_vpmem_resources(self, mock_labels): + drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance = fake_instance.fake_instance_obj(self.context) + instance.flavor = objects.Flavor( + name='m1.small', memory_mb=2048, vcpus=2, root_gb=10, + ephemeral_gb=20, swap=0, extra_specs={ + 'hw:pmem': 'SMALL,4GB,SMALL'}) + mock_labels.return_value = ['SMALL', '4GB', 'SMALL'] + migr_context = objects.MigrationContext() + # original order is '4GB' 'SMALL' 'SMALL' + migr_context.new_resources = objects.ResourceList(objects=[ + self.resource_0, self.resource_1, self.resource_2]) + instance.migration_context = migr_context + + new_resources = drvr._sorted_migrating_resources( + instance, instance.flavor) + # ordered vpmems are 'SMAL' '4GB' 'SMALL' + expected_new_resources = objects.ResourceList(objects=[ + self.resource_1, self.resource_0, self.resource_2]) + for i in range(3): + self.assertEqual(expected_new_resources[i], new_resources[i]) + @mock.patch('nova.privsep.libvirt.cleanup_vpmem') def test_cleanup_vpmems(self, mock_cleanup_vpmem): vpmems = [self.vpmem_0, self.vpmem_1, self.vpmem_2] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_guest.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_guest.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_guest.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_guest.py 2020-04-10 17:57:58.000000000 +0000 @@ -749,7 +749,7 @@ def test_resize(self): self.gblock.resize(10) - self.domain.blockResize.assert_called_once_with('vda', 10) + self.domain.blockResize.assert_called_once_with('vda', 10, flags=1) def test_rebase(self): self.gblock.rebase("foo") diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_imagebackend.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_imagebackend.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_imagebackend.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_imagebackend.py 2020-04-10 17:57:58.000000000 +0000 @@ -1582,11 +1582,28 @@ ["server1:1899", "server2:1920", "[::1]:1930"]), model) + @mock.patch.object(rbd_utils.RBDDriver, 'parent_info') @mock.patch.object(rbd_utils.RBDDriver, 'flatten') - def test_flatten(self, mock_flatten): + def test_flatten(self, mock_flatten, mock_parent_info): image = self.image_class(self.INSTANCE, self.NAME) image.flatten() mock_flatten.assert_called_once_with(image.rbd_name, pool=self.POOL) + mock_parent_info.assert_called_once_with( + image.rbd_name, pool=self.POOL) + + @mock.patch.object(imagebackend, 'LOG') + @mock.patch.object(rbd_utils.RBDDriver, 'parent_info') + @mock.patch.object(rbd_utils.RBDDriver, 'flatten') + def test_flatten_already_flat( + self, mock_flatten, mock_parent_info, mock_log): + 
mock_parent_info.side_effect = exception.ImageUnacceptable( + image_id=1, reason='foo') + image = self.image_class(self.INSTANCE, self.NAME) + image.flatten() + mock_log.debug.assert_called_once() + mock_flatten.assert_not_called() + mock_parent_info.assert_called_once_with( + image.rbd_name, pool=self.POOL) def test_import_file(self): image = self.image_class(self.INSTANCE, self.NAME) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_migration.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_migration.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_migration.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_migration.py 2020-04-10 17:57:58.000000000 +0000 @@ -116,6 +116,70 @@ mock_memory_backing.assert_called_once_with(mock.ANY, data) self.assertEqual(1, mock_tostring.called) + def test_update_device_resources_xml_vpmem(self): + # original xml for vpmems, /dev/dax0.1 and /dev/dax0.2 here + # are vpmem device path on source host + old_xml = textwrap.dedent(""" + + + + + /dev/dax0.1 + 2048 + on + + + 4192256 + + 0 + + + + + /dev/dax0.2 + 2048 + on + + + 4192256 + + 0 + + + + """) + doc = etree.fromstring(old_xml) + vpmem_resource_0 = objects.Resource( + provider_uuid=uuids.rp_uuid, + resource_class="CUSTOM_PMEM_NAMESPACE_4GB", + identifier='ns_0', + metadata= objects.LibvirtVPMEMDevice( + label='4GB', name='ns_0', devpath='/dev/dax1.0', + size=4292870144, align=2097152)) + vpmem_resource_1 = objects.Resource( + provider_uuid=uuids.rp_uuid, + resource_class="CUSTOM_PMEM_NAMESPACE_4GB", + identifier='ns_0', + metadata= objects.LibvirtVPMEMDevice( + label='4GB', name='ns_1', devpath='/dev/dax2.0', + size=4292870144, align=2097152)) + # new_resources contains vpmems claimed on destination, + # /dev/dax1.0 and /dev/dax2.0 are where vpmem data is migrated to + new_resources = objects.ResourceList( + objects=[vpmem_resource_0, vpmem_resource_1]) + res = etree.tostring(migration._update_device_resources_xml( + copy.deepcopy(doc), new_resources), + encoding='unicode') + # we expect vpmem info will be updated in xml after invoking + # _update_device_resources_xml + new_xml = old_xml.replace("/dev/dax0.1", "/dev/dax1.0") + new_xml = new_xml.replace("/dev/dax0.2", "/dev/dax2.0") + self.assertXmlEqual(res, new_xml) + def test_update_numa_xml(self): xml = textwrap.dedent(""" diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/libvirt/test_utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/libvirt/test_utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -32,6 +32,7 @@ from nova import objects from nova.objects import fields as obj_fields import nova.privsep.fs +import nova.privsep.qemu from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.unit import fake_instance @@ -107,18 +108,18 @@ }) mock_execute.return_value = (output, '') d_backing = libvirt_utils.get_disk_backing_file(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) 
mock_exists.assert_called_once_with(path) self.assertIsNone(d_backing) def _test_disk_size(self, mock_execute, path, expected_size): d_size = libvirt_utils.get_disk_size(path) self.assertEqual(expected_size, d_size) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) @mock.patch('os.path.exists', return_value=True) def test_disk_size(self, mock_exists): @@ -152,20 +153,22 @@ @mock.patch('os.path.exists', return_value=True) @mock.patch('oslo_concurrency.processutils.execute') - def test_qemu_info_canon(self, mock_execute, mock_exists): + def test_qemu_img_info_json(self, mock_execute, mock_exists): path = "disk.config" - example_output = """image: disk.config -file format: raw -virtual size: 64M (67108864 bytes) -cluster_size: 65536 -disk size: 96K -blah BLAH: bb + example_output = """{ + "virtual-size": 67108864, + "filename": "disk.config", + "cluster-size": 65536, + "format": "raw", + "actual-size": 98304 +} """ mock_execute.return_value = (example_output, '') - image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + image_info = images.qemu_img_info(path, output_format='json') + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', '--output=json', + prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) @@ -175,8 +178,7 @@ @mock.patch('os.path.exists', return_value=True) @mock.patch('oslo_concurrency.processutils.execute') - def test_qemu_info_canon_qemu_2_10(self, mock_execute, mock_exists): - images.QEMU_VERSION = images.QEMU_VERSION_REQ_SHARED + def test_qemu_info_canon(self, mock_execute, mock_exists): path = "disk.config" example_output = """image: disk.config file format: raw @@ -187,10 +189,9 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - '--force-share', - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) @@ -211,9 +212,9 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('qcow2', image_info.file_format) @@ -235,10 +236,10 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', - os.path.join(path, 'root.hds'), - prlimit=images.QEMU_IMG_LIMITS) + 
mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', + os.path.join(path, 'root.hds'), '--force-share', + prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_isdir.assert_called_once_with(path) self.assertEqual(2, mock_exists.call_count) self.assertEqual(path, mock_exists.call_args_list[0][0][0]) @@ -266,9 +267,9 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) @@ -295,9 +296,9 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) @@ -320,9 +321,9 @@ """ mock_execute.return_value = (example_output, '') image_info = images.qemu_img_info(path) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) self.assertEqual('disk.config', image_info.image) self.assertEqual('raw', image_info.file_format) @@ -354,16 +355,21 @@ @mock.patch('os.path.exists', return_value=True) @mock.patch('oslo_concurrency.processutils.execute') - def test_create_cow_image(self, mock_execute, mock_exists): + @mock.patch('nova.virt.images.qemu_img_info') + def test_create_cow_image(self, mock_info, mock_execute, mock_exists): mock_execute.return_value = ('stdout', None) - libvirt_utils.create_cow_image('/some/path', '/the/new/cow') - expected_args = [(('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', '/some/path'), - {'prlimit': images.QEMU_IMG_LIMITS}), - (('qemu-img', 'create', '-f', 'qcow2', - '-o', 'backing_file=/some/path', - '/the/new/cow'),)] - self.assertEqual(expected_args, mock_execute.call_args_list) + mock_info.return_value = mock.Mock( + file_format=mock.sentinel.backing_fmt, + cluster_size=mock.sentinel.cluster_size) + libvirt_utils.create_cow_image(mock.sentinel.backing_path, + mock.sentinel.new_path) + mock_info.assert_called_once_with(mock.sentinel.backing_path) + mock_execute.assert_has_calls([mock.call( + 'qemu-img', 'create', '-f', 'qcow2', '-o', + 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % ( + mock.sentinel.backing_path, mock.sentinel.backing_fmt, + mock.sentinel.cluster_size), + mock.sentinel.new_path)]) @ddt.unpack @ddt.data({'fs_type': 'some_fs_type', @@ -472,9 +478,9 @@ """ mock_execute.return_value = (example_output, '') self.assertEqual(4592640, disk.get_disk_size('/some/path')) - mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', - 'qemu-img', 'info', path, - prlimit=images.QEMU_IMG_LIMITS) + mock_execute.assert_called_once_with( + 'env', 
'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, + '--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS) mock_exists.assert_called_once_with(path) def test_copy_image(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/disk/test_localdisk.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/disk/test_localdisk.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/disk/test_localdisk.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/disk/test_localdisk.py 2020-04-10 17:57:58.000000000 +0000 @@ -171,7 +171,7 @@ mock_upload_image.assert_called_once_with( 'context', 'instance', mock_image_meta) - @mock.patch('nova.image.api.API.download') + @mock.patch('nova.image.glance.API.download') @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter') @mock.patch('pypowervm.tasks.storage.upload_new_vdisk') @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/disk/test_ssp.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/disk/test_ssp.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/disk/test_ssp.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/disk/test_ssp.py 2020-04-10 17:57:58.000000000 +0000 @@ -164,7 +164,7 @@ new_callable=mock.PropertyMock) @mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True) @mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True) - @mock.patch('nova.image.api.API.download') + @mock.patch('nova.image.glance.API.download') @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter', autospec=True) def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/test_driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/test_driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/test_driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/test_driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -82,7 +82,7 @@ self.assertTrue(self.drv.capabilities['supports_extend_volume']) self.assertFalse(self.drv.capabilities['supports_multiattach']) - @mock.patch('nova.image.API') + @mock.patch('nova.image.glance.API') @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True) @mock.patch('oslo_utils.importutils.import_object_ns', autospec=True) @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True) @@ -589,7 +589,8 @@ @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter') def test_extend_volume(self, mock_vscsi_adpt): mock_bdm = self._fake_bdms()['block_device_mapping'][0] - self.drv.extend_volume(mock_bdm.get('connection_info'), self.inst, 0) + self.drv.extend_volume( + 'context', mock_bdm.get('connection_info'), self.inst, 0) mock_vscsi_adpt.return_value.extend_volume.assert_called_once_with() def test_vol_drv_iter(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/test_image.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/test_image.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/powervm/test_image.py 2020-02-10 08:50:32.000000000 +0000 +++ 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/powervm/test_image.py 2020-04-10 17:57:58.000000000 +0000 @@ -25,7 +25,7 @@ class TestImage(test.TestCase): @mock.patch('nova.utils.temporary_chown', autospec=True) - @mock.patch('nova.image.api.API', autospec=True) + @mock.patch('nova.image.glance.API', autospec=True) def test_stream_blockdev_to_glance(self, mock_api, mock_chown): mock_open = mock.mock_open() with mock.patch.object(six.moves.builtins, 'open', new=mock_open): @@ -36,7 +36,7 @@ mock_api.update.assert_called_with('context', 'image_id', 'metadata', mock_open.return_value) - @mock.patch('nova.image.api.API', autospec=True) + @mock.patch('nova.image.glance.API', autospec=True) def test_generate_snapshot_metadata(self, mock_api): mock_api.get.return_value = {'name': 'image_name'} mock_instance = mock.Mock() diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_hardware.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_hardware.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_hardware.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_hardware.py 2020-04-10 17:57:58.000000000 +0000 @@ -14,6 +14,7 @@ import collections import copy +import ddt import mock import testtools @@ -4376,3 +4377,18 @@ hw.get_pci_numa_policy_constraint, flavor, image_meta) with testtools.ExpectedException(ValueError): image_meta.properties.hw_pci_numa_affinity_policy = "fake" + + +@ddt.ddt +class RescuePropertyTestCase(test.NoDBTestCase): + + @ddt.unpack + @ddt.data({'props': {'hw_rescue_device': 'disk', + 'hw_rescue_bus': 'virtio'}, 'expected': True}, + {'props': {'hw_rescue_device': 'disk'}, 'expected': True}, + {'props': {'hw_rescue_bus': 'virtio'}, 'expected': True}, + {'props': {'hw_disk_bus': 'virtio'}, 'expected': False}) + def test_check_hw_rescue_props(self, props=None, expected=None): + meta = objects.ImageMeta.from_dict({'disk_format': 'raw'}) + meta.properties = objects.ImageMetaProps.from_dict(props) + self.assertEqual(expected, hw.check_hw_rescue_props(meta)) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_images.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_images.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_images.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_images.py 2020-04-10 17:57:58.000000000 +0000 @@ -49,6 +49,15 @@ self.assertTrue(image_info) self.assertTrue(str(image_info)) + @mock.patch('oslo_concurrency.processutils.execute', + return_value=('stdout', None)) + def test_qemu_info_with_rbd_path(self, utils_execute): + # Assert that the use of a RBD URI as the path doesn't raise + # exception.DiskNotFound + image_info = images.qemu_img_info('rbd:volume/pool') + self.assertTrue(image_info) + self.assertTrue(str(image_info)) + @mock.patch.object(compute_utils, 'disk_ops_semaphore') @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True) @mock.patch.object(processutils, 'execute', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_virt_drivers.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_virt_drivers.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/test_virt_drivers.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/test_virt_drivers.py 2020-04-10 17:57:58.000000000 +0000 @@ -298,7 
+298,7 @@ image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, - image_meta, '') + image_meta, '', None) @catch_notimplementederror @mock.patch('os.unlink') @@ -313,7 +313,7 @@ image_meta = objects.ImageMeta.from_dict({}) instance_ref, network_info = self._get_running_instance() self.connection.rescue(self.ctxt, instance_ref, network_info, - image_meta, '') + image_meta, '', None) self.connection.unrescue(instance_ref, network_info) @catch_notimplementederror @@ -572,6 +572,7 @@ @catch_notimplementederror def test_get_serial_console(self): + self.flags(enabled=True, group='serial_console') instance_ref, network_info = self._get_running_instance() serial_console = self.connection.get_serial_console(self.ctxt, instance_ref) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/vmwareapi/test_vmops.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/vmwareapi/test_vmops.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/vmwareapi/test_vmops.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/vmwareapi/test_vmops.py 2020-04-10 17:57:58.000000000 +0000 @@ -383,7 +383,7 @@ def test_get_datacenter_ref_and_name_with_no_datastore(self): self._test_get_datacenter_ref_and_name() - @mock.patch('nova.image.api.API.get') + @mock.patch('nova.image.glance.API.get') @mock.patch.object(vm_util, 'power_off_instance') @mock.patch.object(ds_util, 'disk_copy') @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref') @@ -1138,7 +1138,7 @@ mock_attach_cdrom_to_vm.assert_called_once_with( vm_ref, self._instance, self._ds.ref, str(upload_iso_path)) - @mock.patch('nova.image.api.API.get') + @mock.patch('nova.image.glance.API.get') @mock.patch.object(vmops.LOG, 'debug') @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing') @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info') @@ -1650,7 +1650,7 @@ mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid'), mock.patch.object(images, 'fetch_image'), - mock.patch('nova.image.api.API.get'), + mock.patch('nova.image.glance.API.get'), mock.patch.object(vutil, 'get_inventory_path', return_value=self._dc_info.name), mock.patch.object(self._vmops, '_get_extra_specs', diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/vmwareapi/test_volumeops.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/vmwareapi/test_volumeops.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/vmwareapi/test_volumeops.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/vmwareapi/test_volumeops.py 2020-04-10 17:57:58.000000000 +0000 @@ -226,9 +226,15 @@ disk_uuid) def test_detach_volume_vmdk(self): + client_factory = self._volumeops._session.vim.client.factory + + virtual_controller = client_factory.create( + 'ns0:VirtualLsiLogicController') + virtual_controller.key = 100 + + virtual_disk = client_factory.create('ns0:VirtualDisk') + virtual_disk.controllerKey = virtual_controller.key - vmdk_info = vm_util.VmdkInfo('fake-path', 'lsiLogic', 'thin', - 1024, 'fake-device') with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=mock.sentinel.vm_ref), @@ -236,15 +242,17 @@ return_value=mock.sentinel.volume_ref), mock.patch.object(self._volumeops, '_get_vmdk_backed_disk_device', - return_value=mock.sentinel.device), - mock.patch.object(vm_util, 
'get_vmdk_info', - return_value=vmdk_info), + return_value=virtual_disk), + mock.patch.object(vm_util, '_get_device_disk_type', + return_value='fake-disk-type'), mock.patch.object(self._volumeops, '_consolidate_vmdk_volume'), mock.patch.object(self._volumeops, 'detach_disk_from_vm'), mock.patch.object(self._volumeops, '_update_volume_details'), + mock.patch.object(self._volumeops._session, '_call_method', + return_value=[virtual_controller]) ) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device, - get_vmdk_info, consolidate_vmdk_volume, detach_disk_from_vm, - update_volume_details): + _get_device_disk_type, consolidate_vmdk_volume, + detach_disk_from_vm, update_volume_details, session_call_method): connection_info = {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', @@ -262,39 +270,46 @@ connection_info['data']['volume']) get_vmdk_backed_disk_device.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']) - get_vmdk_info.assert_called_once_with(self._volumeops._session, - mock.sentinel.volume_ref) + adapter_type = vm_util.CONTROLLER_TO_ADAPTER_TYPE.get( + virtual_controller.__class__.__name__) consolidate_vmdk_volume.assert_called_once_with( - instance, mock.sentinel.vm_ref, mock.sentinel.device, - mock.sentinel.volume_ref, adapter_type=vmdk_info.adapter_type, - disk_type=vmdk_info.disk_type) + instance, mock.sentinel.vm_ref, virtual_disk, + mock.sentinel.volume_ref, adapter_type=adapter_type, + disk_type='fake-disk-type') detach_disk_from_vm.assert_called_once_with(mock.sentinel.vm_ref, instance, - mock.sentinel.device) + virtual_disk) update_volume_details.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']['volume_id'], "") def test_detach_volume_vmdk_invalid(self): + client_factory = self._volumeops._session.vim.client.factory + + virtual_controller = client_factory.create( + 'ns0:VirtualIDEController') + virtual_controller.key = 100 + + virtual_disk = client_factory.create('ns0:VirtualDisk') + virtual_disk.controllerKey = virtual_controller.key + connection_info = {'driver_volume_type': 'vmdk', 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE) - vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE, - constants.DISK_TYPE_PREALLOCATED, 1024, - 'fake-device') with test.nested( mock.patch.object(vm_util, 'get_vm_ref', return_value=mock.sentinel.vm_ref), mock.patch.object(self._volumeops, '_get_volume_ref'), mock.patch.object(self._volumeops, - '_get_vmdk_backed_disk_device'), - mock.patch.object(vm_util, 'get_vmdk_info', - return_value=vmdk_info), + '_get_vmdk_backed_disk_device', + return_value=virtual_disk), mock.patch.object(vm_util, 'get_vm_state', - return_value=power_state.RUNNING) + return_value=power_state.RUNNING), + mock.patch.object(self._volumeops._session, '_call_method', + return_value=[virtual_controller]) ) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device, - get_vmdk_info, get_vm_state): + get_vm_state, session_call_method): self.assertRaises(exception.Invalid, self._volumeops._detach_volume_vmdk, connection_info, instance) @@ -305,7 +320,6 @@ connection_info['data']['volume']) get_vmdk_backed_disk_device.assert_called_once_with( mock.sentinel.vm_ref, connection_info['data']) - self.assertTrue(get_vmdk_info.called) get_vm_state.assert_called_once_with(self._volumeops._session, instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py 2020-04-10 17:57:58.000000000 +0000 @@ -20,7 +20,7 @@ from nova import context from nova import exception -from nova.image.api import API as image_api +from nova.image.glance import API as image_api from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi.image import utils from nova.virt.xenapi.image import vdi_stream diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/xenapi/test_xenapi.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/xenapi/test_xenapi.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/xenapi/test_xenapi.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/xenapi/test_xenapi.py 2020-04-10 17:57:58.000000000 +0000 @@ -1315,7 +1315,7 @@ {'id': IMAGE_VHD, 'disk_format': 'vhd', 'properties': {'vm_mode': 'xen'}}) - conn.rescue(self.context, instance, [], image_meta, '') + conn.rescue(self.context, instance, [], image_meta, '', None) vm = xenapi_fake.get_record('VM', vm_ref) rescue_name = "%s-rescue" % vm["name_label"] @@ -1351,7 +1351,7 @@ self.conn._vmops, '_start', side_effect=test.TestingException('Start Error')): self.assertRaises(test.TestingException, self.conn.rescue, - self.context, instance, [], image_meta, '') + self.context, instance, [], image_meta, '', []) # confirm original disk still exists: vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/zvm/test_driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/zvm/test_driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/tests/unit/virt/zvm/test_driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/tests/unit/virt/zvm/test_driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -136,6 +136,7 @@ self.assertEqual(0, results['vcpus']) self.assertEqual(0, results['memory_mb_used']) self.assertEqual(0, results['disk_available_least']) + self.assertEqual(0, results['hypervisor_version']) self.assertEqual('TESTHOST', results['hypervisor_hostname']) def test_driver_template_validation(self): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -1079,49 +1079,6 @@ yield [stack.enter_context(c) for c in contexts] -def run_once(message, logger, cleanup=None): - """This is a utility function decorator to ensure a function - is run once and only once in an interpreter instance. - The decorated function object can be reset by calling its - reset function. All exceptions raised by the wrapped function, - logger and cleanup function will be propagated to the caller. - """ - def outer_wrapper(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - if not wrapper.called: - # Note(sean-k-mooney): the called state is always - # updated even if the wrapped function completes - # by raising an exception. 
If the caller catches - # the exception it is their responsibility to call - # reset if they want to re-execute the wrapped function. - try: - return func(*args, **kwargs) - finally: - wrapper.called = True - else: - logger(message) - - wrapper.called = False - - def reset(wrapper, *args, **kwargs): - # Note(sean-k-mooney): we conditionally call the - # cleanup function if one is provided only when the - # wrapped function has been called previously. We catch - # and reraise any exception that may be raised and update - # the called state in a finally block to ensure its - # always updated if reset is called. - try: - if cleanup and wrapper.called: - return cleanup(*args, **kwargs) - finally: - wrapper.called = False - - wrapper.reset = functools.partial(reset, wrapper) - return wrapper - return outer_wrapper - - def normalize_rc_name(rc_name): """Normalize a resource class name to standard form.""" if rc_name is None: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -110,6 +110,7 @@ "supports_multiattach": os_traits.COMPUTE_VOLUME_MULTI_ATTACH, # Added in os-traits 0.8.0. "supports_trusted_certs": os_traits.COMPUTE_TRUSTED_CERTS, + "supports_accelerators": os_traits.COMPUTE_ACCELERATORS, # Image type support flags, added in os-traits 0.12.0 "supports_image_type_aki": os_traits.COMPUTE_IMAGE_TYPE_AKI, @@ -176,6 +177,7 @@ "supports_multiattach": False, "supports_trusted_certs": False, "supports_pcpus": False, + "supports_accelerators": False, # Image type support flags "supports_image_type_aki": False, @@ -349,7 +351,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): """Create a new instance/VM/domain on the virtualization platform. Once this successfully completes, the instance should be @@ -376,6 +378,22 @@ attached to the instance. :param power_on: True if the instance should be powered on, False otherwise + :param accel_info: List of bound accelerator requests for this instance. + [ + {'uuid': $arq_uuid, + 'device_profile_name': $dp_name, + 'device_profile_group_id': $dp_request_group_index, + 'state': 'Bound', + 'device_rp_uuid': $resource_provider_uuid, + 'hostname': $host_nodename, + 'instance_uuid': $instance_uuid, + 'attach_handle_info': { # PCI bdf + 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '0'}, + 'attach_handle_type': 'PCI' + # or 'TEST_PCI' for Cyborg fake driver + } + ] + Also doc'd in nova/accelerator/cyborg.py::get_arqs_for_instance() """ raise NotImplementedError() @@ -414,7 +432,8 @@ raise NotImplementedError() def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot the specified instance. After this is called successfully, the instance's state @@ -429,6 +448,8 @@ :param block_device_info: Info pertaining to attached volumes :param bad_volumes_callback: Function to handle any bad volumes encountered + :param accel_info: List of accelerator request dicts. The exact + data struct is doc'd in nova/virt/driver.py::spawn().
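For concreteness, a bound ARQ shaped as in the spawn() docstring above can be handled like this (a minimal sketch: all values are placeholders and arq_pci_address is a hypothetical helper, not part of this change):

    arq = {
        'uuid': '11111111-2222-3333-4444-555555555555',            # placeholder
        'device_profile_name': 'mydp',
        'device_profile_group_id': 0,
        'state': 'Bound',
        'device_rp_uuid': '66666666-7777-8888-9999-aaaaaaaaaaaa',  # placeholder
        'hostname': 'compute-1',
        'instance_uuid': 'bbbbbbbb-cccc-dddd-eeee-ffffffffffff',   # placeholder
        'attach_handle_type': 'PCI',
        'attach_handle_info': {'domain': '0000', 'bus': '0c',
                               'device': '0', 'function': '0'},
    }

    def arq_pci_address(arq):
        # Render the PCI bdf from attach_handle_info as a single string.
        return '%(domain)s:%(bus)s:%(device)s.%(function)s' % (
            arq['attach_handle_info'])

    assert arq_pci_address(arq) == '0000:0c:0.0'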
""" raise NotImplementedError() @@ -577,9 +598,11 @@ """ raise NotImplementedError() - def extend_volume(self, connection_info, instance, requested_size): + def extend_volume(self, context, connection_info, instance, + requested_size): """Extend the disk attached to the instance. + :param context: The request context. :param dict connection_info: The connection for the extended volume. :param nova.objects.instance.Instance instance: @@ -827,7 +850,7 @@ raise NotImplementedError() def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): """Rescue the specified instance. :param nova.context.RequestContext context: @@ -839,6 +862,8 @@ :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param rescue_password: new root password to set for rescue. + :param dict block_device_info: + The block device mapping of the instance. """ raise NotImplementedError() @@ -868,10 +893,14 @@ raise NotImplementedError() def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance. :param instance: nova.objects.instance.Instance + :param network_info: instance network information + :param block_device_info: instance volume block device info + :param accel_info: List of accelerator request dicts. The exact + data struct is doc'd in nova/virt/driver.py::spawn(). """ raise NotImplementedError() @@ -1442,9 +1471,6 @@ raise NotImplementedError() def unplug_vifs(self, instance, network_info): - # NOTE(markus_z): 2015-08-18 - # The compute manager doesn't use this interface, which seems odd - # since the manager should be the controlling thing here. """Unplug virtual interfaces (VIFs) from networks. The counter action is :func:`plug_vifs`. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/fake.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/fake.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/fake.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/fake.py 2020-04-10 17:57:58.000000000 +0000 @@ -113,6 +113,7 @@ "supports_multiattach": True, "supports_trusted_certs": True, "supports_pcpus": False, + "supports_accelerators": True, # Supported image types "supports_image_type_raw": True, @@ -181,7 +182,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): if network_info: for vif in network_info: @@ -208,7 +209,8 @@ update_task_state(task_state=task_states.IMAGE_UPLOADING) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): # If the guest is not on the hypervisor and we're doing a hard reboot # then mimic the libvirt driver by spawning the guest. 
if (instance.uuid not in self.instances and @@ -236,7 +238,7 @@ pass def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): pass def unrescue(self, instance, network_info): @@ -275,7 +277,7 @@ raise exception.InstanceNotFound(instance_id=instance.uuid) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): if instance.uuid in self.instances: self.instances[instance.uuid].state = power_state.RUNNING else: @@ -348,7 +350,8 @@ self._mounts[instance_name] = {} self._mounts[instance_name][mountpoint] = new_connection_info - def extend_volume(self, connection_info, instance, requested_size): + def extend_volume(self, context, connection_info, instance, + requested_size): """Extend the disk attached to the instance.""" pass @@ -783,7 +786,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): if not self.rescheduled.get(instance.uuid, False): # We only reschedule on the first time something hits spawn(). self.rescheduled[instance.uuid] = True @@ -806,7 +809,7 @@ """ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): raise exception.BuildAbortException( instance_uuid=instance.uuid, reason='FakeBuildAbortDriver') @@ -822,7 +825,7 @@ """ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): if instance.vm_state == vm_states.SHELVED_OFFLOADED: raise exception.VirtualInterfaceCreateException( 'FakeUnshelveSpawnFailDriver') diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/hardware.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/hardware.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/hardware.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/hardware.py 2020-04-10 17:57:58.000000000 +0000 @@ -1060,7 +1060,8 @@ pagesize = _numa_cell_supports_pagesize_request( host_cell, instance_cell) if not pagesize: - LOG.debug('Host does not support requested memory pagesize. ' + LOG.debug('Host does not support requested memory pagesize, ' + 'or not enough free pages of the requested size. ' 'Requested: %d kB', instance_cell.pagesize) return LOG.debug('Selected memory pagesize: %(selected_mem_pagesize)d kB. ' @@ -2258,3 +2259,10 @@ if formed_label: formed_labels.append(formed_label) return formed_labels + + +def check_hw_rescue_props(image_meta): + """Confirm that hw_rescue_* image properties are present. 
+ """ + hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus'] + return any(key in image_meta.properties for key in hw_rescue_props) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/hyperv/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/hyperv/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/hyperv/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/hyperv/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -101,6 +101,7 @@ "supports_multiattach": False, "supports_trusted_certs": False, "supports_pcpus": False, + "supports_accelerators": False, # Supported image types "supports_image_type_vhd": True, @@ -160,12 +161,13 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): self._vmops.reboot(instance, network_info, reboot_type) def destroy(self, context, instance, network_info, block_device_info=None, @@ -223,7 +225,7 @@ self._vmops.power_off(instance, timeout, retry_interval) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): self._vmops.power_on(instance, block_device_info, network_info) def resume_state_on_host_boot(self, context, instance, network_info, @@ -355,7 +357,7 @@ return self._vmops.detach_interface(instance, vif) def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): self._vmops.rescue_instance(context, instance, network_info, image_meta, rescue_password) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/images.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/images.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/images.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/images.py 2020-04-10 17:57:58.000000000 +0000 @@ -19,80 +19,50 @@ Handling of VM disk images. """ -import operator import os from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import imageutils -from oslo_utils import units from nova.compute import utils as compute_utils import nova.conf from nova import exception from nova.i18n import _ -from nova import image +from nova.image import glance import nova.privsep.qemu LOG = logging.getLogger(__name__) CONF = nova.conf.CONF -IMAGE_API = image.API() +IMAGE_API = glance.API() -QEMU_IMG_LIMITS = processutils.ProcessLimits( - cpu_time=30, - address_space=1 * units.Gi) -# This is set by the libvirt driver on startup. The version is used to -# determine what flags need to be set on the command line. 
-QEMU_VERSION = None -QEMU_VERSION_REQ_SHARED = 2010000 +def qemu_img_info(path, format=None, output_format=None): + """Return an object containing the parsed output from qemu-img info.""" + if not os.path.exists(path) and not path.startswith('rbd:'): + raise exception.DiskNotFound(location=path) + info = nova.privsep.qemu.unprivileged_qemu_img_info( + path, format=format, output_format=output_format) + if output_format: + return imageutils.QemuImgInfo(info, format=output_format) + else: + return imageutils.QemuImgInfo(info) -def qemu_img_info(path, format=None): + +def privileged_qemu_img_info(path, format=None, output_format=None): """Return an object containing the parsed output from qemu-img info.""" - # TODO(mikal): this code should not be referring to a libvirt specific - # flag. - if not os.path.exists(path) and CONF.libvirt.images_type != 'rbd': + if not os.path.exists(path) and not path.startswith('rbd:'): raise exception.DiskNotFound(location=path) - try: - # The following check is about ploop images that reside within - # directories and always have DiskDescriptor.xml file beside them - if (os.path.isdir(path) and - os.path.exists(os.path.join(path, "DiskDescriptor.xml"))): - path = os.path.join(path, "root.hds") - - cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) - if format is not None: - cmd = cmd + ('-f', format) - # Check to see if the qemu version is >= 2.10 because if so, we need - # to add the --force-share flag. - if QEMU_VERSION and operator.ge(QEMU_VERSION, QEMU_VERSION_REQ_SHARED): - cmd = cmd + ('--force-share',) - out, err = processutils.execute(*cmd, prlimit=QEMU_IMG_LIMITS) - except processutils.ProcessExecutionError as exp: - if exp.exit_code == -9: - # this means we hit prlimits, make the exception more specific - msg = (_("qemu-img aborted by prlimits when inspecting " - "%(path)s : %(exp)s") % {'path': path, 'exp': exp}) - elif exp.exit_code == 1 and 'No such file or directory' in exp.stderr: - # The os.path.exists check above can race so this is a simple - # best effort at catching that type of failure and raising a more - # specific error. 
- raise exception.DiskNotFound(location=path) - else: - msg = (_("qemu-img failed to execute on %(path)s : %(exp)s") % - {'path': path, 'exp': exp}) - raise exception.InvalidDiskInfo(reason=msg) - - if not out: - msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") % - {'path': path, 'error': err}) - raise exception.InvalidDiskInfo(reason=msg) - - return imageutils.QemuImgInfo(out) + info = nova.privsep.qemu.privileged_qemu_img_info( + path, format=format, output_format=output_format) + if output_format: + return imageutils.QemuImgInfo(info, format=output_format) + else: + return imageutils.QemuImgInfo(info) def convert_image(source, dest, in_format, out_format, run_as_root=False, diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/ironic/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/ironic/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/ironic/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/ironic/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -163,6 +163,7 @@ "supports_multiattach": False, "supports_trusted_certs": False, "supports_pcpus": False, + "supports_accelerators": False, # Image type support flags "supports_image_type_aki": False, @@ -773,14 +774,15 @@ if peer_list is None or svc.host in peer_list: is_up = self.servicegroup_api.service_is_up(svc) if is_up: - services.add(svc.host) + services.add(svc.host.lower()) # NOTE(jroll): always make sure this service is in the list, because # only services that have something registered in the compute_nodes # table will be here so far, and we might be brand new. - services.add(CONF.host) + services.add(CONF.host.lower()) self.hash_ring = hash_ring.HashRing(services, partitions=_HASH_RING_PARTITIONS) + LOG.debug('Hash ring members are %s', services) def _refresh_cache(self): ctxt = nova_context.get_admin_context() @@ -824,7 +826,7 @@ # nova while the service was down, and not yet reaped, will not be # reported until the periodic task cleans it up. elif (node.instance_uuid is None and - CONF.host in + CONF.host.lower() in self.hash_ring.get_nodes(node.uuid.encode('utf-8'))): node_cache[node.uuid] = node @@ -1140,7 +1142,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): """Deploy an instance. :param context: The security context. @@ -1157,6 +1159,7 @@ :param network_info: Instance network information. :param block_device_info: Instance block device information. + :param accel_info: Accelerator requests for this instance. :param power_on: True if the instance should be powered on, False otherwise """ @@ -1369,7 +1372,8 @@ node.uuid, instance=instance) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot the specified instance. NOTE: Unlike the libvirt driver, this method does not delete @@ -1384,7 +1388,8 @@ Ignored by this driver. :param bad_volumes_callback: Function to handle any bad volumes encountered. Ignored by this driver. - + :param accel_info: List of accelerator request dicts. The exact + data struct is doc'd in nova/virt/driver.py::spawn().
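With the nova/virt/images.py rewrite above, both helpers defer to nova.privsep.qemu and accept RBD URIs without consulting libvirt configuration; typical calls look like this (paths are illustrative):

    from nova.virt import images

    # Local file: existence is still checked before qemu-img runs.
    info = images.qemu_img_info('/var/lib/nova/instances/some-uuid/disk')
    print(info.file_format, info.virtual_size)

    # RBD URI: the os.path.exists() check is skipped for 'rbd:' paths.
    info = images.qemu_img_info('rbd:volume/pool')

    # JSON output exposes format-specific fields such as the LUKSv1
    # payload offset used later in this patch.
    info = images.privileged_qemu_img_info('/dev/sdb', output_format='json')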
""" LOG.debug('Reboot(type %s) called for instance', reboot_type, instance=instance) @@ -1470,7 +1475,7 @@ node.uuid, instance=instance) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance. NOTE: Unlike the libvirt driver, this method does not delete @@ -1482,7 +1487,8 @@ this driver. :param block_device_info: Instance block device information. Ignored by this driver. - + :param accel_info: List of accelerator requests for this instance. + Ignored by this driver. """ LOG.debug('Power on called for instance', instance=instance) node = self._validate_instance_and_node(instance) @@ -2114,7 +2120,7 @@ version=max_version) def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): """Rescue the specified instance. :param nova.context.RequestContext context: @@ -2127,6 +2133,8 @@ :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. Ignored by this driver. :param rescue_password: new root password to set for rescue. + :param dict block_device_info: + The block device mapping of the instance. :raise InstanceRescueFailure if rescue fails. """ LOG.debug('Rescue called for instance', instance=instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/blockinfo.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/blockinfo.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/blockinfo.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/blockinfo.py 2020-04-10 17:57:58.000000000 +0000 @@ -87,9 +87,17 @@ CONF = cfg.CONF - -SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun') BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'} +# NOTE(aspiers): If you change this, don't forget to update the docs and +# metadata for hw_*_bus in glance. +SUPPORTED_DEVICE_BUS = { + 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], + 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], + 'xen': ['xen', 'ide'], + 'uml': ['uml'], + 'lxc': ['lxc'], + 'parallels': ['ide', 'scsi']} +SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun') def has_disk_dev(mapping, disk_dev): @@ -200,22 +208,24 @@ raise exception.TooManyDiskDevices(maximum=max_dev) -def is_disk_bus_valid_for_virt(virt_type, disk_bus): - # NOTE(aspiers): If you change this, don't forget to update the - # docs and metadata for hw_*_bus in glance. - valid_bus = { - 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], - 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], - 'xen': ['xen', 'ide'], - 'uml': ['uml'], - 'lxc': ['lxc'], - 'parallels': ['ide', 'scsi'] - } +# NOTE(aspiers): If you change this, don't forget to update the docs and +# metadata for hw_*_bus in glance. In addition, these bus names map directly to +# standard os-traits as 'foo' => 'COMPUTE_STORAGE_BUS_FOO'. If adding a new bus +# name, make sure the standard trait conforms to this rule. 
+SUPPORTED_STORAGE_BUSES = { + 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], + 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'], + 'xen': ['xen', 'ide'], + 'uml': ['uml'], + 'lxc': ['lxc'], + 'parallels': ['ide', 'scsi'] +} - if virt_type not in valid_bus: - raise exception.UnsupportedVirtType(virt=virt_type) - return disk_bus in valid_bus[virt_type] +def is_disk_bus_valid_for_virt(virt_type, disk_bus): + if virt_type not in SUPPORTED_STORAGE_BUSES: + raise exception.UnsupportedVirtType(virt=virt_type) + return disk_bus in SUPPORTED_STORAGE_BUSES[virt_type] def get_disk_bus_for_device_type(instance, @@ -509,11 +519,9 @@ info['bus'], info['type'])))) -def get_disk_mapping(virt_type, instance, - disk_bus, cdrom_bus, - image_meta, - block_device_info=None, - rescue=False): +def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta, + block_device_info=None, rescue=False, + rescue_image_meta=None): """Determine how to map default disks to the virtual machine. This is about figuring out whether the default 'disk', @@ -525,7 +533,7 @@ mapping = {} - if rescue: + if rescue and rescue_image_meta is None: rescue_info = get_next_disk_info(mapping, disk_bus, boot_index=1) mapping['disk.rescue'] = rescue_info @@ -631,11 +639,21 @@ device_type) mapping['disk.config'] = config_info + # NOTE(lyarwood): This can only be a stable device rescue so add the rescue + # disk as the final disk in the mapping. + if rescue and rescue_image_meta: + rescue_device = get_rescue_device(rescue_image_meta) + rescue_bus = get_rescue_bus(instance, virt_type, rescue_image_meta, + rescue_device) + rescue_info = get_next_disk_info(mapping, rescue_bus, + device_type=rescue_device) + mapping['disk.rescue'] = rescue_info + return mapping -def get_disk_info(virt_type, instance, image_meta, - block_device_info=None, rescue=False): +def get_disk_info(virt_type, instance, image_meta, block_device_info=None, + rescue=False, rescue_image_meta=None): """Determine guest disk mapping info. 
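The stable rescue path above resolves the rescue disk's device type and bus through get_rescue_device() and get_rescue_bus(), added at the end of this file; a sketch of what they yield for a cdrom/ide rescue image (instance goes unused when hw_rescue_bus is set and valid, so None suffices in this sketch):

    from nova import objects
    from nova.virt.libvirt import blockinfo

    rescue_meta = objects.ImageMeta.from_dict(
        {'disk_format': 'raw',
         'properties': {'hw_rescue_device': 'cdrom',
                        'hw_rescue_bus': 'ide'}})

    device = blockinfo.get_rescue_device(rescue_meta)                 # 'cdrom'
    bus = blockinfo.get_rescue_bus(None, 'kvm', rescue_meta, device)  # 'ide'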
This is a wrapper around get_disk_mapping, which @@ -656,8 +674,9 @@ mapping = get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta, - block_device_info, - rescue) + block_device_info=block_device_info, + rescue=rescue, + rescue_image_meta=rescue_image_meta) return {'disk_bus': disk_bus, 'cdrom_bus': cdrom_bus, @@ -676,3 +695,25 @@ return [el for el in lst if el not in s and not s.add(el)] return uniq(boot_devs_dup) + + +def get_rescue_device(rescue_image_meta): + # Find and validate the hw_rescue_device rescue device + rescue_device = rescue_image_meta.properties.get("hw_rescue_device", + "disk") + if rescue_device not in SUPPORTED_DEVICE_TYPES: + raise exception.UnsupportedRescueDevice(device=rescue_device) + return rescue_device + + +def get_rescue_bus(instance, virt_type, rescue_image_meta, rescue_device): + # Find and validate the hw_rescue_bus + rescue_bus = rescue_image_meta.properties.get("hw_rescue_bus") + if rescue_bus is not None: + if is_disk_bus_valid_for_virt(virt_type, rescue_bus): + return rescue_bus + else: + raise exception.UnsupportedRescueBus(bus=rescue_bus, + virt=virt_type) + return get_disk_bus_for_device_type(instance, virt_type, rescue_image_meta, + device_type=rescue_device) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/config.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/config.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/config.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/config.py 2020-04-10 17:57:58.000000000 +0000 @@ -122,6 +122,7 @@ self._features = None self._machine = None self._alias = None + self._devices = None def parse_dom(self, xmldoc): super(LibvirtConfigDomainCaps, self).parse_dom(xmldoc) @@ -133,6 +134,10 @@ self._features = features elif c.tag == "machine": self._machine = c.text + elif c.tag == "devices": + devices = LibvirtConfigDomainCapsDevices() + devices.parse_dom(c) + self._devices = devices @property def features(self): @@ -156,6 +161,79 @@ def machine_type_alias(self, alias): self._alias = alias + @property + def devices(self): + if self._devices is None: + return [] + return self._devices + + +class LibvirtConfigDomainCapsVideoModels(LibvirtConfigObject): + + def __init__(self, **kwargs): + super().__init__(root_name='video', **kwargs) + self.supported = False + self.models = set() + + def parse_dom(self, xmldoc): + super().parse_dom(xmldoc) + + if xmldoc.get('supported') == 'yes': + self.supported = True + self.models = {str(node) for node in + xmldoc.xpath("//enum[@name='modelType']/value/text()")} + + +class LibvirtConfigDomainCapsDiskBuses(LibvirtConfigObject): + + def __init__(self, **kwargs): + super().__init__(root_name='disk', **kwargs) + self.supported = False + self.buses = set() + + def parse_dom(self, xmldoc): + super(LibvirtConfigDomainCapsDiskBuses, self).parse_dom(xmldoc) + + if xmldoc.get('supported') == 'yes': + self.supported = True + self.buses = {str(node) for node in + xmldoc.xpath("//enum[@name='bus']/value/text()")} + + +class LibvirtConfigDomainCapsDevices(LibvirtConfigObject): + DEVICE_PARSERS = { + 'video': LibvirtConfigDomainCapsVideoModels, + 'disk': LibvirtConfigDomainCapsDiskBuses, + } + + def __init__(self, **kwargs): + super().__init__(root_name='devices', **kwargs) + self.devices = set() + + def parse_dom(self, xmldoc): + super().parse_dom(xmldoc) + + for c in xmldoc.getchildren(): + device = self.DEVICE_PARSERS.get(c.tag) + if device: + device = device() + device.parse_dom(c) 
+ self.devices.add(device) + + def _get_device(self, device_type): + for device in self.devices: + if type(device) == self.DEVICE_PARSERS.get(device_type): + return device + return None + + @property + def disk(self): + return self._get_device('disk') + + @property + def video(self): + return self._get_device('video') + class LibvirtConfigDomainCapsFeatures(LibvirtConfigObject): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -87,7 +87,7 @@ from nova.db import constants as db_const from nova import exception from nova.i18n import _ -from nova import image +from nova.image import glance from nova.network import model as network_model from nova import objects from nova.objects import diagnostics as diagnostics_obj @@ -282,6 +282,10 @@ MIN_LIBVIRT_PMEM_SUPPORT = (5, 0, 0) MIN_QEMU_PMEM_SUPPORT = (3, 1, 0) +# -blockdev support (replacing -drive) +MIN_LIBVIRT_BLOCKDEV = (6, 0, 0) +MIN_QEMU_BLOCKDEV = (4, 2, 0) + class LibvirtDriver(driver.ComputeDriver): def __init__(self, virtapi, read_only=False): @@ -328,6 +332,7 @@ "supports_image_type_qcow2": not requires_raw_image, "supports_image_type_ploop": requires_ploop_image, "supports_pcpus": True, + "supports_accelerators": True, } super(LibvirtDriver, self).__init__(virtapi) @@ -379,7 +384,7 @@ self.disk_cachemodes[disk_type] = cache_mode self._volume_api = cinder.API() - self._image_api = image.API() + self._image_api = glance.API() # The default choice for the sysinfo_serial config option is "unique" # which does not have a special function since the value is just the @@ -411,6 +416,10 @@ # intended to be updatable directly self.provider_tree = None + # driver traits will not change during the runtime of the agent + # so calculate them once and save them + self._static_traits = None + # The CPU models in the configuration are case-insensitive, but the CPU # model in the libvirt is case-sensitive, therefore create a mapping to # map the lower case CPU model name to normal CPU model name. @@ -420,6 +429,10 @@ self._vpmems_by_name, self._vpmems_by_rc = self._discover_vpmems( vpmem_conf=CONF.libvirt.pmem_namespaces) + # We default to not support vGPUs unless the configuration is set. + self.pgpu_type_mapping = collections.defaultdict(str) + self.supported_vgpu_types = self._get_supported_vgpu_types() + def _discover_vpmems(self, vpmem_conf=None): """Discover vpmems on host and configuration. @@ -649,11 +662,7 @@ libvirt_utils.version_to_string(MIN_LIBVIRT_VERSION)) if CONF.libvirt.virt_type in ("qemu", "kvm"): - if self._host.has_min_version(hv_ver=MIN_QEMU_VERSION): - # "qemu-img info" calls are version dependent, so we need to - # store the version in the images module. - images.QEMU_VERSION = self._host.get_connection().getVersion() - else: + if not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION): raise exception.InternalError( _('Nova requires QEMU version %s or greater.') % libvirt_utils.version_to_string(MIN_QEMU_VERSION)) @@ -739,12 +748,6 @@ cpu = vconfig.LibvirtConfigGuestCPU() for model in models: cpu.model = self._get_cpu_model_mapping(model) - if not cpu.model: - msg = (_("Configured CPU model: %(model)s is not correct, " - "or your host CPU arch does not suuport this "
Please correct your config and try " - "again.") % {'model': model}) - raise exception.InvalidCPUInfo(msg) try: self._compare_cpu(cpu, self._get_cpu_info(), None) except exception.InvalidCPUInfo as e: @@ -783,15 +786,31 @@ """Recreate assigned mdevs that could have disappeared if we reboot the host. """ - # FIXME(sbauza): We blindly recreate mediated devices without checking - # which ResourceProvider was allocated for the instance so it would use - # another pGPU. - # TODO(sbauza): Pass all instances' allocations here. + # NOTE(sbauza): This method just calls sysfs to recreate mediated + # devices by looking up existing guest XMLs and doesn't use + # the Placement API so it works with or without a vGPU reshape. mdevs = self._get_all_assigned_mediated_devices() - requested_types = self._get_supported_vgpu_types() for (mdev_uuid, instance_uuid) in six.iteritems(mdevs): if not self._is_existing_mdev(mdev_uuid): - self._create_new_mediated_device(requested_types, mdev_uuid) + dev_name = libvirt_utils.mdev_uuid2name(mdev_uuid) + dev_info = self._get_mediated_device_information(dev_name) + parent = dev_info['parent'] + parent_type = self._get_vgpu_type_per_pgpu(parent) + if dev_info['type'] != parent_type: + # NOTE(sbauza): The mdev was created by using a different + # vGPU type. We can't recreate the mdev until the operator + # modifies the configuration. + parent = "{}:{}:{}.{}".format(*parent[4:].split('_')) + msg = ("The instance UUID %(inst)s uses a VGPU that " + "its parent pGPU %(parent)s no longer " + "supports as the instance vGPU type %(type)s " + "is not accepted for the pGPU. Please correct " + "the configuration accordingly." % + {'inst': instance_uuid, + 'parent': parent, + 'type': dev_info['type']}) + raise exception.InvalidLibvirtGPUConfig(reason=msg) + self._create_new_mediated_device(parent, uuid=mdev_uuid) def _set_multiattach_support(self): # Check to see if multiattach is supported. Based on bugzilla @@ -1270,7 +1289,10 @@ try: guest = self._host.get_guest(instance) try: - support_uefi = self._has_uefi_support() + hw_firmware_type = instance.image_meta.properties.get( + 'hw_firmware_type') + support_uefi = (self._has_uefi_support() and + hw_firmware_type == fields.FirmwareType.UEFI) guest.delete_configuration(support_uefi) except libvirt.libvirtError as e: with excutils.save_and_reraise_exception() as ctxt: @@ -1308,6 +1330,11 @@ block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] + if not connection_info: + # if booting from a volume, creation could have failed meaning + # this would be unset + continue + disk_dev = vol['mount_device'] if disk_dev is not None: disk_dev = disk_dev.rpartition("/")[2] @@ -1499,12 +1526,11 @@ return self.volume_drivers[driver_type] def _connect_volume(self, context, connection_info, instance, - encryption=None, allow_native_luks=True): + encryption=None): vol_driver = self._get_volume_driver(connection_info) vol_driver.connect_volume(connection_info, instance) try: - self._attach_encryptor( - context, connection_info, encryption, allow_native_luks) + self._attach_encryptor(context, connection_info, encryption) except Exception: # Encryption failed so rollback the volume connection. 
with excutils.save_and_reraise_exception(logger=LOG): @@ -1565,8 +1591,8 @@ return vol_driver.extend_volume(connection_info, instance, requested_size) - def _use_native_luks(self, encryption=None): - """Check if LUKS is the required 'provider' + def _is_luks_v1(self, encryption=None): + """Check if LUKS (v1) is the encryption 'provider' """ provider = None if encryption: @@ -1598,22 +1624,18 @@ self._volume_api, volume_id, connection_info) return encryption - def _attach_encryptor(self, context, connection_info, encryption, - allow_native_luks): + def _attach_encryptor(self, context, connection_info, encryption): """Attach the frontend encryptor if one is required by the volume. The request context is only used when an encryption metadata dict is not provided. The encryption metadata dict being populated is then used to determine if an attempt to attach the encryptor should be made. - If native LUKS decryption is enabled then create a Libvirt volume - secret containing the LUKS passphrase for the volume. """ if encryption is None: encryption = self._get_volume_encryption(context, connection_info) - if (encryption and allow_native_luks and - self._use_native_luks(encryption)): + if encryption and self._is_luks_v1(encryption=encryption): # NOTE(lyarwood): Fetch the associated key for the volume and # decode the passphrase from the key. # FIXME(lyarwood): c-vol currently creates symmetric keys for use @@ -1664,11 +1686,11 @@ if encryption is None: encryption = self._get_volume_encryption(context, connection_info) # NOTE(lyarwood): Handle bug #1821696 where volume secrets have been - # removed manually by returning if native LUKS decryption is available + # removed manually by returning if a LUKS provider is being used # and device_path is not present in the connection_info. This avoids # VolumeEncryptionNotSupported being thrown when we incorrectly build # the encryptor below due to the secrets not being present above. - if (encryption and self._use_native_luks(encryption) and + if (encryption and self._is_luks_v1(encryption=encryption) and not connection_info['data'].get('device_path')): return if encryption: @@ -1768,9 +1790,19 @@ self._disconnect_volume(context, connection_info, instance, encryption=encryption) - def _swap_volume(self, guest, disk_path, conf, resize_to): - """Swap existing disk with a new block device.""" - dev = guest.get_block_device(disk_path) + def _swap_volume(self, guest, disk_dev, conf, resize_to, hw_firmware_type): + """Swap existing disk with a new block device. + + Call virDomainBlockRebase (or virDomainBlockCopy with Libvirt >= 6.0.0) + to copy and then pivot to a new volume. + + :param guest: Guest object representing the guest domain + :param disk_dev: Device within the domain that is being swapped + :param conf: LibvirtConfigGuestDisk object representing the new volume + :param resize_to: Size of the dst volume, 0 if the same as the src + :param hw_firmware_type: fields.FirmwareType if set in the image meta + """ + dev = guest.get_block_device(disk_dev) # Save a copy of the domain's persistent XML file. We'll use this # to redefine the domain if anything fails during the volume swap. @@ -1784,35 +1816,50 @@ pass try: - # NOTE (rmk): blockRebase cannot be executed on persistent - # domains, so we need to temporarily undefine it. - # If any part of this block fails, the domain is - # re-defined regardless. + # NOTE (rmk): virDomainBlockRebase and virDomainBlockCopy cannot be + # executed on persistent domains, so we need to temporarily + undefine it.
If any part of this block fails, the domain is + # re-defined regardless. if guest.has_persistent_configuration(): - support_uefi = self._has_uefi_support() + support_uefi = (self._has_uefi_support() and + hw_firmware_type == fields.FirmwareType.UEFI) guest.delete_configuration(support_uefi) try: - # Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to - # allow writing to existing external volume file. Use - # VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device to - # make sure XML is generated correctly (bug 1691195) - copy_dev = conf.source_type == 'block' - dev.rebase(conf.source_path, copy=True, reuse_ext=True, - copy_dev=copy_dev) + # NOTE(lyarwood): Use virDomainBlockCopy from libvirt >= 6.0.0 + # and QEMU >= 4.2.0 with -blockdev domains allowing QEMU to + # copy to remote disks. + if self._host.has_min_version(lv_ver=MIN_LIBVIRT_BLOCKDEV, + hv_ver=MIN_QEMU_BLOCKDEV): + dev.copy(conf.to_xml(), reuse_ext=True) + else: + # TODO(lyarwood): Remove the following use of + # virDomainBlockRebase once MIN_LIBVIRT_VERSION hits >= + # 6.0.0 and MIN_QEMU_VERSION hits >= 4.2.0. + # Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to + # allow writing to existing external volume file. Use + # VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device + # to make sure XML is generated correctly (bug 1691195) + copy_dev = conf.source_type == 'block' + dev.rebase(conf.source_path, copy=True, reuse_ext=True, + copy_dev=copy_dev) while not dev.is_job_complete(): time.sleep(0.5) dev.abort_job(pivot=True) except Exception as exc: + # NOTE(lyarwood): conf.source_path is not set for RBD disks so + # fallback to conf.target_dev when None. + new_path = conf.source_path or conf.target_dev + old_path = disk_dev LOG.exception("Failure rebasing volume %(new_path)s on " - "%(old_path)s.", {'new_path': conf.source_path, - 'old_path': disk_path}) + "%(old_path)s.", {'new_path': new_path, + 'old_path': old_path}) raise exception.VolumeRebaseFailed(reason=six.text_type(exc)) if resize_to: - dev.resize(resize_to * units.Gi / units.Ki) + dev.resize(resize_to * units.Gi) # Make sure we will redefine the domain using the updated # configuration after the volume was swapped. The dump_inactive @@ -1830,8 +1877,8 @@ # NOTE(lyarwood): https://bugzilla.redhat.com/show_bug.cgi?id=760547 old_encrypt = self._get_volume_encryption(context, old_connection_info) new_encrypt = self._get_volume_encryption(context, new_connection_info) - if ((old_encrypt and self._use_native_luks(old_encrypt)) or - (new_encrypt and self._use_native_luks(new_encrypt))): + if ((old_encrypt and self._is_luks_v1(old_encrypt)) or + (new_encrypt and self._is_luks_v1(new_encrypt))): raise NotImplementedError(_("Swap volume is not supported for " "encrypted volumes when native LUKS decryption is enabled.")) @@ -1855,12 +1902,20 @@ # eventually do this for us. 
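Note the unit change in the dev.resize() call above: the old call passed KiB, the new one passes bytes (presumably paired with a byte-granularity resize flag in the Guest block device wrapper). For a 2 GiB target:

    from oslo_utils import units

    resize_to = 2                                # GiB

    old_arg = resize_to * units.Gi // units.Ki   # 2097152, in KiB
    new_arg = resize_to * units.Gi               # 2147483648, in bytes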
self._connect_volume(context, new_connection_info, instance) conf = self._get_volume_config(new_connection_info, disk_info) - if not conf.source_path: + if (not conf.source_path and not + self._host.has_min_version(lv_ver=MIN_LIBVIRT_BLOCKDEV, + hv_ver=MIN_QEMU_BLOCKDEV)): self._disconnect_volume(context, new_connection_info, instance) - raise NotImplementedError(_("Swap only supports host devices")) + raise NotImplementedError(_("Swap only supports host devices and " + "files with Libvirt < 6.0.0 or QEMU " + "< 4.2.0")) + + hw_firmware_type = instance.image_meta.properties.get( + 'hw_firmware_type') try: - self._swap_volume(guest, disk_dev, conf, resize_to) + self._swap_volume(guest, disk_dev, conf, + resize_to, hw_firmware_type) except exception.VolumeRebaseFailed: with excutils.save_and_reraise_exception(): self._disconnect_volume(context, new_connection_info, instance) @@ -1927,7 +1982,62 @@ self._disconnect_volume(context, connection_info, instance, encryption=encryption) - def extend_volume(self, connection_info, instance, requested_size): + def _resize_attached_volume(self, new_size, block_device, instance): + LOG.debug('Resizing target device %(dev)s to %(size)u', + {'dev': block_device._disk, 'size': new_size}, + instance=instance) + block_device.resize(new_size) + + def _resize_attached_encrypted_volume(self, original_new_size, + block_device, instance, + connection_info, encryption): + # TODO(lyarwood): Also handle the dm-crypt encryption providers of + # plain and LUKSv2, for now just use the original_new_size. + decrypted_device_new_size = original_new_size + + # NOTE(lyarwood): original_new_size currently refers to the total size + # of the extended volume in bytes. With natively decrypted LUKSv1 + # volumes we need to ensure this now takes the LUKSv1 header and key + # material into account. Otherwise QEMU will attempt and fail to grow + # host block devices and remote RBD volumes. + if self._is_luks_v1(encryption): + try: + # NOTE(lyarwood): Find the path to provide to qemu-img + if 'device_path' in connection_info['data']: + path = connection_info['data']['device_path'] + elif connection_info['driver_volume_type'] == 'rbd': + path = 'rbd:%s' % (connection_info['data']['name']) + else: + path = 'unknown' + raise exception.DiskNotFound(location='unknown') + + info = images.privileged_qemu_img_info( + path, output_format='json') + format_specific_data = info.format_specific['data'] + payload_offset = format_specific_data['payload-offset'] + + # NOTE(lyarwood): Ensure the underlying device is not resized + # by subtracting the LUKSv1 payload_offset (where the user's + # encrypted data starts) from the original_new_size (the total + # size of the underlying volume). Both are reported in bytes. + decrypted_device_new_size = original_new_size - payload_offset + + except exception.DiskNotFound: + with excutils.save_and_reraise_exception(): + LOG.exception('Unable to access the encrypted disk %s.', + path, instance=instance) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception('Unknown error when attempting to find the ' + 'payload_offset for LUKSv1 encrypted disk ' + '%s.', path, instance=instance) + # NOTE(lyarwood): Resize the decrypted device within the instance to + # the calculated size as with normal volumes.
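As a worked example of the subtraction above (2 MiB is a typical LUKSv1 payload offset, not a fixed constant; qemu-img reports the real value per volume):

    from oslo_utils import units

    original_new_size = 4 * units.Gi   # extended volume size, in bytes
    payload_offset = 2 * units.Mi      # 'payload-offset' from qemu-img info
    decrypted_device_new_size = original_new_size - payload_offset
    assert decrypted_device_new_size == 4292870144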
+ self._resize_attached_volume( + decrypted_device_new_size, block_device, instance) + + def extend_volume(self, context, connection_info, instance, + requested_size): try: new_size = self._extend_volume(connection_info, instance, requested_size) @@ -1939,6 +2049,7 @@ try: guest = self._host.get_guest(instance) state = guest.get_power_state(self._host) + volume_id = driver_block_device.get_volume_id(connection_info) active_state = state in (power_state.RUNNING, power_state.PAUSED) if active_state: if 'device_path' in connection_info['data']: @@ -1947,8 +2058,6 @@ # Some drivers (eg. net) don't put the device_path # into the connection_info. Match disks by their serial # number instead - volume_id = driver_block_device.get_volume_id( - connection_info) disk = next(iter([ d for d in guest.get_all_disks() if d.serial == volume_id @@ -1956,11 +2065,16 @@ if not disk: raise exception.VolumeNotFound(volume_id=volume_id) disk_path = disk.target_dev - - LOG.debug('resizing block device %(dev)s to %(size)u kb', - {'dev': disk_path, 'size': new_size}) dev = guest.get_block_device(disk_path) - dev.resize(new_size // units.Ki) + encryption = encryptors.get_encryption_metadata( + context, self._volume_api, volume_id, connection_info) + if encryption: + self._resize_attached_encrypted_volume( + new_size, dev, instance, + connection_info, encryption) + else: + self._resize_attached_volume( + new_size, dev, instance) else: LOG.debug('Skipping block device resize, guest is not running', instance=instance) @@ -2083,6 +2197,13 @@ # then we can just log it as a warning rather than tracing an # error. mac = vif.get('address') + # Get a fresh instance of the guest in case it is gone. + try: + guest = self._host.get_guest(instance) + except exception.InstanceNotFound: + LOG.info("Instance disappeared while detaching interface " + "%s", vif['id'], instance=instance) + return interface = guest.get_interface_by_cfg(cfg) if interface: LOG.error('detaching network adapter failed.', @@ -2463,7 +2584,10 @@ # If any part of this block fails, the domain is # re-defined regardless. if guest.has_persistent_configuration(): - support_uefi = self._has_uefi_support() + hw_firmware_type = image_meta.properties.get( + 'hw_firmware_type') + support_uefi = (self._has_uefi_support() and + hw_firmware_type == fields.FirmwareType.UEFI) guest.delete_configuration(support_uefi) # NOTE (rmk): Establish a temporary mirror of our root disk and @@ -2963,7 +3087,8 @@ self._volume_refresh_connection_info(context, instance, volume_id) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot a virtual machine, given an instance reference.""" if reboot_type == 'SOFT': # NOTE(vish): This will attempt to do a graceful shutdown/restart. @@ -2984,7 +3109,7 @@ "Trying hard reboot.", instance=instance) return self._hard_reboot(context, instance, network_info, - block_device_info) + block_device_info, accel_info) def _soft_reboot(self, instance): """Attempt to shutdown and restart the instance gracefully. @@ -3036,7 +3161,7 @@ return False def _hard_reboot(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Reboot a virtual machine, given an instance reference. Performs a Libvirt reset (if supported) on the domain. 
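The extend_volume hunks above size the decrypted device by subtracting the LUKSv1 payload offset reported by qemu-img from the new total volume size, so that QEMU is never asked to grow the underlying host block device or RBD volume past its real size. A minimal standalone sketch of that arithmetic (illustration only, not nova code; it assumes a qemu-img binary on PATH and uses the field names emitted by `qemu-img info --output=json` for a LUKS image):

    # Illustration only: compute the size a natively decrypted LUKSv1
    # device may be grown to, mirroring _resize_attached_encrypted_volume().
    import json
    import subprocess

    def decrypted_device_new_size(path, new_volume_size_bytes):
        out = subprocess.check_output(
            ['qemu-img', 'info', '--output=json', path])
        info = json.loads(out)
        # The encrypted payload starts after the LUKSv1 header and key
        # material; both values are reported in bytes.
        payload_offset = info['format-specific']['data']['payload-offset']
        return new_volume_size_bytes - payload_offset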
@@ -3078,7 +3203,7 @@ xml = self._get_guest_xml(context, instance, network_info, disk_info, instance.image_meta, block_device_info=block_device_info, - mdevs=mdevs) + mdevs=mdevs, accel_info=accel_info) # NOTE(mdbooth): context.auth_token will not be set when we call # _hard_reboot from resume_state_on_host_boot() @@ -3205,12 +3330,13 @@ self._destroy(instance) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance.""" # We use _hard_reboot here to ensure that all backing files, # network, and block device connections, etc. are established # and available before we attempt to start the instance. - self._hard_reboot(context, instance, network_info, block_device_info) + self._hard_reboot(context, instance, network_info, block_device_info, + accel_info) def trigger_crash_dump(self, instance): @@ -3283,7 +3409,7 @@ self._hard_reboot(context, instance, network_info, block_device_info) def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the + should not edit or over-ride the original image, only allow for data recovery. + Two modes are provided when rescuing an instance with this driver. + + The original and default rescue mode, where the rescue boot disk, + original root disk and optional regenerated config drive are attached + to the instance. + + A second stable device rescue mode is also provided where all of the + original devices are attached to the instance during the rescue attempt + with the addition of the rescue boot disk. This second mode is + controlled by the hw_rescue_device and hw_rescue_bus image properties + on the rescue image provided to this method via image_meta. + + :param nova.context.RequestContext context: + The context for the rescue. + :param nova.objects.instance.Instance instance: + The instance being rescued. + :param nova.network.model.NetworkInfo network_info: + Necessary network information for the rescue. + :param nova.objects.ImageMeta image_meta: + The metadata of the image of the instance. + :param rescue_password: new root password to set for rescue. + :param dict block_device_info: + The block device mapping of the instance. """ instance_dir = libvirt_utils.get_instance_path(instance) unrescue_xml = self._get_existing_domain_xml(instance, network_info) @@ -3298,6 +3447,7 @@ libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml) rescue_image_id = None + rescue_image_meta = None if image_meta.obj_attr_is_set("id"): rescue_image_id = image_meta.id @@ -3309,10 +3459,43 @@ 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or instance.ramdisk_id), } - disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, - instance, - image_meta, - rescue=True) + + virt_type = CONF.libvirt.virt_type + if hardware.check_hw_rescue_props(image_meta): + LOG.info("Attempting a stable device rescue", instance=instance) + # NOTE(lyarwood): Stable device rescue is not supported when using + # the LXC and Xen virt_types as they do not support the required + # definitions allowing an instance to boot from the + # rescue device added as a final device to the domain.
+ if virt_type in ('lxc', 'xen'): + reason = ("Stable device rescue is not supported by virt_type " + "%s" % virt_type) + raise exception.InstanceNotRescuable(instance_id=instance.uuid, + reason=reason) + # NOTE(lyarwood): Stable device rescue provides the original disk + # mapping of the instance with the rescue device appended to the + # end. As a result we need to provide the original image_meta, the + # new rescue_image_meta and block_device_info when calling + # get_disk_info. + rescue_image_meta = image_meta + if instance.image_ref: + image_meta = objects.ImageMeta.from_image_ref( + context, self._image_api, instance.image_ref) + else: + image_meta = objects.ImageMeta.from_dict({}) + + else: + LOG.info("Attempting an unstable device rescue", instance=instance) + # NOTE(lyarwood): An unstable rescue only provides the rescue + # device and the original root device so we don't need to provide + # block_device_info to the get_disk_info call. + block_device_info = None + + disk_info = blockinfo.get_disk_info(virt_type, instance, image_meta, + rescue=True, block_device_info=block_device_info, + rescue_image_meta=rescue_image_meta) + LOG.debug("rescue generated disk_info: %s", disk_info) + injection_info = InjectionInfo(network_info=network_info, admin_pass=rescue_password, files=None) @@ -3328,7 +3511,8 @@ disk_images=rescue_images) xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, rescue=rescue_images, - mdevs=mdevs) + mdevs=mdevs, + block_device_info=block_device_info) self._destroy(instance) self._create_domain(xml, post_xml_callback=gen_confdrive) @@ -3366,7 +3550,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta, @@ -3390,7 +3574,7 @@ xml = self._get_guest_xml(context, instance, network_info, disk_info, image_meta, block_device_info=block_device_info, - mdevs=mdevs) + mdevs=mdevs, accel_info=accel_info) self._create_domain_and_network( context, xml, instance, network_info, block_device_info=block_device_info, @@ -4146,14 +4330,32 @@ name. :param model: Case-insensitive CPU model name. - :return: Case-sensitive CPU model name, or None(Only when configured - CPU model name not correct) - """ + :return: It will validate and return the case-sensitive CPU model name + if on a supported platform, otherwise it will just return + what was provided + :raises: exception.InvalidCPUInfo if the CPU model is not supported. + """ + cpu_info = self._get_cpu_info() + if cpu_info['arch'] not in (fields.Architecture.I686, + fields.Architecture.X86_64, + fields.Architecture.PPC64, + fields.Architecture.PPC64LE, + fields.Architecture.PPC): + return model + if not self.cpu_models_mapping: cpu_models = self._host.get_cpu_model_names() for cpu_model in cpu_models: self.cpu_models_mapping[cpu_model.lower()] = cpu_model - return self.cpu_models_mapping.get(model.lower(), None) + + if model.lower() not in self.cpu_models_mapping: + msg = (_("Configured CPU model: %(model)s is not correct, " + "or your host CPU arch does not support this " + "model. 
Please correct your config and try " + "again.") % {'model': model}) + raise exception.InvalidCPUInfo(msg) + + return self.cpu_models_mapping.get(model.lower()) def _get_guest_cpu_model_config(self, flavor=None): mode = CONF.libvirt.cpu_mode @@ -4164,8 +4366,8 @@ if (CONF.libvirt.virt_type == "kvm" or CONF.libvirt.virt_type == "qemu"): + caps = self._host.get_capabilities() if mode is None: - caps = self._host.get_capabilities() # AArch64 lacks 'host-model' support because neither libvirt # nor QEMU are able to tell what the host CPU model exactly is. # And there is no CPU description code for ARM(64) at this @@ -4184,6 +4386,13 @@ mode = "host-model" if mode == "none": return vconfig.LibvirtConfigGuestCPU() + # On the AArch64 platform _get_cpu_model_mapping will not return + # a default CPU model. + if mode == "custom": + if caps.host.cpu.arch == fields.Architecture.AARCH64: + if not models: + models = ['max'] + else: if mode is None or mode == "none": return None @@ -4256,7 +4465,7 @@ return cpu def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type, - image_type=None): + image_type=None, boot_order=None): disk_unit = None disk = self.image_backend.by_name(instance, name, image_type) if (name == 'disk.config' and image_type == 'rbd' and @@ -4279,7 +4488,8 @@ conf = disk.libvirt_info(disk_info, self.disk_cachemode, inst_type['extra_specs'], self._host.get_version(), - disk_unit=disk_unit) + disk_unit=disk_unit, + boot_order=boot_order) return conf def _get_guest_fs_config(self, instance, name, image_type=None): @@ -4351,7 +4561,7 @@ devices = devices + _get_ephemeral_devices() else: - if rescue: + if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']: diskrescue = self._get_guest_disk_config(instance, 'disk.rescue', disk_mapping, @@ -4391,7 +4601,10 @@ instance.default_swap_device = ( block_device.prepend_dev(diskswap.target_dev)) - config_name = 'disk.config.rescue' if rescue else 'disk.config' + config_name = 'disk.config' + if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']: + config_name = 'disk.config.rescue' + if config_name in disk_mapping: diskconfig = self._get_guest_disk_config( instance, config_name, disk_mapping, inst_type, @@ -4424,6 +4637,12 @@ if scsi_controller: devices.append(scsi_controller) + if rescue and disk_mapping['disk.rescue'] != disk_mapping['root']: + diskrescue = self._get_guest_disk_config(instance, 'disk.rescue', + disk_mapping, inst_type, + boot_order='1') + devices.append(diskrescue) + return devices @staticmethod @@ -4493,17 +4712,19 @@ return sysinfo + def _set_managed_mode(self, pcidev): + # only kvm supports managed mode + if CONF.libvirt.virt_type in ('xen', 'parallels',): + pcidev.managed = 'no' + if CONF.libvirt.virt_type in ('kvm', 'qemu'): + pcidev.managed = 'yes' + def _get_guest_pci_device(self, pci_device): dbsf = pci_utils.parse_address(pci_device.address) dev = vconfig.LibvirtConfigGuestHostdevPCI() dev.domain, dev.bus, dev.slot, dev.function = dbsf - - # only kvm support managed mode - if CONF.libvirt.virt_type in ('xen', 'parallels',): - dev.managed = 'no' - if CONF.libvirt.virt_type in ('kvm', 'qemu'): - dev.managed = 'yes' + self._set_managed_mode(dev) return dev @@ -5567,7 +5788,7 @@ def _get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None, - context=None, mdevs=None): + context=None, mdevs=None, accel_info=None): """Get config data for parameters. 
:param rescue: optional dictionary that should contain the key @@ -5575,6 +5796,7 @@ 'kernel_id' if a kernel is needed for the rescue image. :param mdevs: optional list of mediated devices to assign to the guest. + :param accel_info: optional list of accelerator requests (ARQs) """ flavor = instance.flavor inst_path = libvirt_utils.get_instance_path(instance) @@ -5683,6 +5905,26 @@ self._guest_add_pci_devices(guest, instance) + pci_arq_list = [] + if accel_info: + # NOTE(Sundar): We handle only the case where all attach handles + # are of type 'PCI'. The Cyborg fake driver used for testing + # returns attach handles of type 'TEST_PCI' and so its ARQs will + # not get composed into the VM's domain XML. For now, we do not + # expect a mixture of different attach handles for the same + # instance; but that case also gets ignored by this logic. + ah_types_set = {arq['attach_handle_type'] for arq in accel_info} + supported_types_set = {'PCI'} + if ah_types_set == supported_types_set: + pci_arq_list = accel_info + else: + LOG.info('Ignoring accelerator requests for instance %s. ' + 'Supported Attach handle types: %s. ' + 'But got these unsupported types: %s.', + instance.uuid, supported_types_set, + ah_types_set.difference(supported_types_set)) + self._guest_add_accel_pci_devices(guest, pci_arq_list) + self._guest_add_watchdog_action(guest, flavor, image_meta) self._guest_add_memory_balloon(guest) @@ -5700,30 +5942,18 @@ return guest def _get_ordered_vpmems(self, instance, flavor): - ordered_vpmems = [] - vpmems = self._get_vpmems(instance) - labels = hardware.get_vpmems(flavor) - for label in labels: - for vpmem in vpmems: - if vpmem.label == label: - ordered_vpmems.append(vpmem) - vpmems.remove(vpmem) - break + resources = self._get_resources(instance) + ordered_vpmem_resources = self._get_ordered_vpmem_resources( + resources, flavor) + ordered_vpmems = [self._vpmems_by_name[resource.identifier] + for resource in ordered_vpmem_resources] return ordered_vpmems def _get_vpmems(self, instance, prefix=None): - vpmems = [] - resources = instance.resources - if prefix == 'old' and instance.migration_context: - if 'old_resources' in instance.migration_context: - resources = instance.migration_context.old_resources - if not resources: - return vpmems - for resource in resources: - rc = resource.resource_class - if rc.startswith("CUSTOM_PMEM_NAMESPACE_"): - vpmem = self._vpmems_by_name[resource.identifier] - vpmems.append(vpmem) + resources = self._get_resources(instance, prefix=prefix) + vpmem_resources = self._get_vpmem_resources(resources) + vpmems = [self._vpmems_by_name[resource.identifier] + for resource in vpmem_resources] return vpmems def _guest_add_vpmems(self, guest, vpmems): @@ -5880,6 +6110,18 @@ if pci_manager.get_instance_pci_devs(instance, 'all'): raise exception.PciDeviceUnsupportedHypervisor(type=virt_type) + def _guest_add_accel_pci_devices(self, guest, accel_info): + """Add all accelerator PCI functions from ARQ list.""" + for arq in accel_info: + dev = vconfig.LibvirtConfigGuestHostdevPCI() + pci_addr = arq['attach_handle_info'] + dev.domain, dev.bus, dev.slot, dev.function = ( + pci_addr['domain'], pci_addr['bus'], + pci_addr['device'], pci_addr['function']) + self._set_managed_mode(dev) + + guest.add_device(dev) + @staticmethod def _guest_add_video_device(guest): # NB some versions of libvirt support both SPICE and VNC @@ -5964,7 +6206,7 @@ def _get_guest_xml(self, context, instance, network_info, disk_info, image_meta, rescue=None, block_device_info=None, - mdevs=None): + 
mdevs=None, accel_info=None): # NOTE(danms): Stringifying a NetworkInfo will take a lock. Do # this ahead of time so that we don't acquire it while also # holding the logging lock. @@ -5982,7 +6224,7 @@ LOG.debug(strutils.mask_password(msg), instance=instance) conf = self._get_guest_config(instance, network_info, image_meta, disk_info, rescue, block_device_info, - context, mdevs) + context, mdevs, accel_info) xml = conf.to_xml() LOG.debug('End _get_guest_xml xml=%(xml)s', @@ -6326,12 +6568,80 @@ def _get_supported_vgpu_types(self): if not CONF.devices.enabled_vgpu_types: return [] - # TODO(sbauza): Move this check up to compute_manager.init_host - if len(CONF.devices.enabled_vgpu_types) > 1: - LOG.warning('libvirt only supports one GPU type per compute node,' - ' only first type will be used.') - requested_types = CONF.devices.enabled_vgpu_types[:1] - return requested_types + + for vgpu_type in CONF.devices.enabled_vgpu_types: + group = getattr(CONF, 'vgpu_%s' % vgpu_type, None) + if group is None or not group.device_addresses: + first_type = CONF.devices.enabled_vgpu_types[0] + if len(CONF.devices.enabled_vgpu_types) > 1: + # Only provide the warning if the operator provided more + # than one type as it's not needed to provide groups + # if you only use one vGPU type. + msg = ("The vGPU type '%(type)s' was listed in '[devices] " + "enabled_vgpu_types' but no corresponding " + "'[vgpu_%(type)s]' group or " + "'[vgpu_%(type)s] device_addresses' " + "option was defined. Only the first type " + "'%(ftype)s' will be used." % {'type': vgpu_type, + 'ftype': first_type}) + LOG.warning(msg) + # We need to reset the mapping table that we started to populate + # with keys and values from previously processed vGPU types; + # since there is a problem with this vGPU type, we only want to + # support the first type. + self.pgpu_type_mapping.clear() + return [first_type] + for device_address in group.device_addresses: + if device_address in self.pgpu_type_mapping: + raise exception.InvalidLibvirtGPUConfig( + reason="duplicate types for PCI ID %s" % device_address + ) + # Just checking whether the operator fat-fingered the address. + # If it's wrong, parse_address will raise an exception + try: + pci_utils.parse_address(device_address) + except exception.PciDeviceWrongAddressFormat: + raise exception.InvalidLibvirtGPUConfig( + reason="incorrect PCI address: %s" % device_address + ) + self.pgpu_type_mapping[device_address] = vgpu_type + return CONF.devices.enabled_vgpu_types + + def _get_vgpu_type_per_pgpu(self, device_address): + """Provides the vGPU type the pGPU supports. + + :param device_address: the libvirt PCI device name, + eg. 'pci_0000_84_00_0' + """ + # Bail out quickly if we don't support vGPUs + if not self.supported_vgpu_types: + return + + if len(self.supported_vgpu_types) == 1: + # The operator wanted to only support one single type so we can + # blindly return it for every single pGPU + return self.supported_vgpu_types[0] + # The libvirt name is like 'pci_0000_84_00_0' + try: + device_address = "{}:{}:{}.{}".format( + *device_address[4:].split('_')) + # Validates whether it's a PCI ID... 
+ pci_utils.parse_address(device_address) + # .format() can return IndexError + except (exception.PciDeviceWrongAddressFormat, IndexError): + # this is not a valid PCI address + LOG.warning("The PCI address %s was invalid for getting the " + "related vGPU type", device_address) + return + try: + return self.pgpu_type_mapping[device_address] + except KeyError: + LOG.warning("No vGPU type was configured for PCI address: %s", + device_address) + # We accept returning None instead of raising an exception + # because we prefer the callers to raise the existing exceptions + # in case we can't find a specific pGPU + return def _count_mediated_devices(self, enabled_vgpu_types): """Counts the sysfs objects (handles) that represent a mediated device @@ -6347,6 +6657,12 @@ counts_per_parent = collections.defaultdict(int) mediated_devices = self._get_mediated_devices(types=enabled_vgpu_types) for mdev in mediated_devices: + parent_vgpu_type = self._get_vgpu_type_per_pgpu(mdev['parent']) + if mdev['type'] != parent_vgpu_type: + # Even if some mdev was created for another vGPU type, only + # count the mdevs whose type matches the one configured for + # their parent pGPU + continue counts_per_parent[mdev['parent']] += 1 return counts_per_parent @@ -6365,10 +6681,13 @@ # dev_id is the libvirt name for the PCI device, # eg. pci_0000_84_00_0 which matches a PCI address of 0000:84:00.0 dev_name = dev['dev_id'] + dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name) for _type in dev['types']: + if _type != dev_supported_type: + # This is not the type the operator wanted to support for + # this physical GPU + continue available = dev['types'][_type]['availableInstances'] - # TODO(sbauza): Once we support multiple types, check which - # PCI devices are set for this type # NOTE(sbauza): Even if we support multiple types, Nova will # only use one per physical GPU. counts_per_dev[dev_name] += available @@ -6391,7 +6710,7 @@ """ # Bail out early if operator doesn't care about providing vGPUs - enabled_vgpu_types = self._get_supported_vgpu_types() + enabled_vgpu_types = self.supported_vgpu_types if not enabled_vgpu_types: return {} inventories = {} @@ -6740,59 +7059,60 @@ vgpu_allocations[rp] = {'resources': {RC_VGPU: res[RC_VGPU]}} return vgpu_allocations - def _get_existing_mdevs_not_assigned(self, requested_types=None, - parent=None): + def _get_existing_mdevs_not_assigned(self, parent, requested_types=None): """Returns the already created mediated devices that are not assigned to a guest yet. + :param parent: Filter out result for only mdevs from the parent device. :param requested_types: Filter out the result for only mediated devices having those types. - :param parent: Filter out result for only mdevs from the parent device. """ allocated_mdevs = self._get_all_assigned_mediated_devices() mdevs = self._get_mediated_devices(requested_types) available_mdevs = set() for mdev in mdevs: + parent_vgpu_type = self._get_vgpu_type_per_pgpu(mdev['parent']) + if mdev['type'] != parent_vgpu_type: + # This mdev is using a vGPU type that is not supported by the + # configuration option for its pGPU parent, so we can't use it. 
+ continue + # FIXME(sbauza): No longer accept the parent value to be nullable + # once we fix the reshape functional test if parent is None or mdev['parent'] == parent: available_mdevs.add(mdev["uuid"]) available_mdevs -= set(allocated_mdevs) return available_mdevs - def _create_new_mediated_device(self, requested_types, uuid=None, - parent=None): + def _create_new_mediated_device(self, parent, uuid=None): """Find a physical device that can support a new mediated device and create it. - :param requested_types: Filter only capable devices supporting those - types. + :param parent: The libvirt name of the parent GPU, eg. pci_0000_06_00_0 :param uuid: The possible mdev UUID we want to create again - :param parent: Only create a mdev for this device :returns: the newly created mdev UUID or None if not possible """ + supported_types = self.supported_vgpu_types # Try to see if we can still create a new mediated device - devices = self._get_mdev_capable_devices(requested_types) + devices = self._get_mdev_capable_devices(supported_types) for device in devices: - # For the moment, the libvirt driver only supports one - # type per host - # TODO(sbauza): Once we support more than one type, make - # sure we look at the flavor/trait for the asked type. - asked_type = requested_types[0] - if device['types'][asked_type]['availableInstances'] > 0: + dev_name = device['dev_id'] + # FIXME(sbauza): No longer accept the parent value to be nullable + # once we fix the reshape functional test + if parent is not None and dev_name != parent: + # The device is not the one that was called, not creating + # the mdev + continue + dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name) + if dev_supported_type and device['types'][ + dev_supported_type]['availableInstances'] > 0: # That physical GPU has enough room for a new mdev - dev_name = device['dev_id'] - # the parent attribute can be None - if parent is not None and dev_name != parent: - # The device is not the one that was called, not creating - # the mdev - continue # We need the PCI address, not the libvirt name # The libvirt name is like 'pci_0000_84_00_0' pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_')) - chosen_mdev = nova.privsep.libvirt.create_mdev(pci_addr, - asked_type, - uuid=uuid) + chosen_mdev = nova.privsep.libvirt.create_mdev( + pci_addr, dev_supported_type, uuid=uuid) return chosen_mdev @utils.synchronized(VGPU_RESOURCE_SEMAPHORE) @@ -6813,12 +7133,8 @@ vgpu_allocations = self._vgpu_allocations(allocations) if not vgpu_allocations: return - # TODO(sbauza): Once we have nested resource providers, find which one - # is having the related allocation for the specific VGPU type. - # For the moment, we should only have one allocation for - # ResourceProvider. - # TODO(sbauza): Iterate over all the allocations once we have - # nested Resource Providers. For the moment, just take the first. + # TODO(sbauza): For the moment, we only support allocations for only + # one pGPU. if len(vgpu_allocations) > 1: LOG.warning('More than one allocation was passed over to libvirt ' 'while at the moment libvirt only supports one. 
Only ' @@ -6836,8 +7152,12 @@ # exception raise exception.ComputeResourcesUnavailable( reason='vGPU resource is not available') - # TODO(sbauza): Remove this conditional in Train once all VGPU - # inventories are related to a child RP + # FIXME(sbauza): The functional reshape test assumes that we could + # run _allocate_mdevs() against non-nested RPs but this is impossible + # as all inventories have been reshaped *before now* since it's done + # on init_host() (when the compute restarts or whatever else calls it). + # That said, since fixing the functional test isn't easy yet, let's + # assume we still support a non-nested RP for now. if allocated_rp.parent_uuid is None: # We are on a root RP parent_device = None @@ -6863,10 +7183,10 @@ raise exception.ComputeResourcesUnavailable( reason='vGPU resource is not available') - requested_types = self._get_supported_vgpu_types() + supported_types = self.supported_vgpu_types # Which mediated devices are created but not assigned to a guest ? mdevs_available = self._get_existing_mdevs_not_assigned( - requested_types, parent_device) + parent_device, supported_types) chosen_mdevs = [] for c in six.moves.range(vgpus_asked): @@ -6875,8 +7195,7 @@ # Take the first available mdev chosen_mdev = mdevs_available.pop() else: - chosen_mdev = self._create_new_mediated_device( - requested_types, parent=parent_device) + chosen_mdev = self._create_new_mediated_device(parent_device) if not chosen_mdev: # If we can't find devices having available VGPUs, just raise raise exception.ComputeResourcesUnavailable( @@ -7225,12 +7544,9 @@ 'reserved': self._get_reserved_host_disk_gb_from_config(), } - # NOTE(sbauza): For the moment, the libvirt driver only supports - # providing the total number of virtual GPUs for a single GPU type. If - # you have multiple physical GPUs, each of them providing multiple GPU - # types, only one type will be used for each of the physical GPUs. - # If one of the pGPUs doesn't support this type, it won't be used. - # TODO(sbauza): Use traits to make a better world. + # TODO(sbauza): Use traits to provide vGPU types. For the moment, + # this is only supported through documentation, explaining how to use + # osc-placement to create custom traits for each of the pGPU RPs. self._update_provider_tree_for_vgpu( provider_tree, nodename, allocations=allocations) @@ -7243,12 +7559,12 @@ provider_tree.update_inventory(nodename, result) provider_tree.update_resources(nodename, resources) - traits = self._get_cpu_traits() - # _get_cpu_traits returns a dict of trait names mapped to boolean - # values. Add traits equal to True to provider tree, remove - # those False traits from provider tree. - traits_to_add = [t for t in traits if traits[t]] - traits_to_remove = set(traits) - set(traits_to_add) + # Add supported traits i.e. 
those equal to True to provider tree while + # removing the unsupported ones + traits_to_add = [ + t for t in self.static_traits if self.static_traits[t] + ] + traits_to_remove = set(self.static_traits) - set(traits_to_add) provider_tree.add_traits(nodename, *traits_to_add) provider_tree.remove_traits(nodename, *traits_to_remove) @@ -7306,6 +7622,26 @@ else: return db_const.MAX_INT + @property + def static_traits(self): + if self._static_traits is not None: + return self._static_traits + + traits = {} + traits.update(self._get_cpu_traits()) + traits.update(self._get_storage_bus_traits()) + traits.update(self._get_video_model_traits()) + traits.update(self._get_vif_model_traits()) + + _, invalid_traits = ot.check_traits(traits) + for invalid_trait in invalid_traits: + LOG.debug("Trait '%s' is not valid; ignoring.", invalid_trait) + del traits[invalid_trait] + + self._static_traits = traits + + return self._static_traits + @staticmethod def _is_reshape_needed_vgpu_on_root(provider_tree, nodename): """Determine if root RP has VGPU inventories. @@ -7349,13 +7685,6 @@ representing that resource provider in the tree """ # Create the VGPU child providers if they do not already exist. - # TODO(mriedem): For the moment, _get_supported_vgpu_types() only - # returns one single type but that will be changed once we support - # multiple types. - # Note that we can't support multiple vgpu types until a reshape has - # been performed on the vgpu resources provided by the root provider, - # if any. - # Dict of PGPU RPs keyed by their libvirt PCI name pgpu_rps = {} for pgpu_dev_id, inventory in inventories_dict.items(): @@ -7953,6 +8282,53 @@ claim.image_meta) return migrate_data + def _get_resources(self, instance, prefix=None): + resources = [] + if prefix: + migr_context = instance.migration_context + attr_name = prefix + 'resources' + if migr_context and attr_name in migr_context: + resources = getattr(migr_context, attr_name) or [] + else: + resources = instance.resources or [] + return resources + + def _get_vpmem_resources(self, resources): + vpmem_resources = [] + for resource in resources: + if 'metadata' in resource and \ + isinstance(resource.metadata, objects.LibvirtVPMEMDevice): + vpmem_resources.append(resource) + return vpmem_resources + + def _get_ordered_vpmem_resources(self, resources, flavor): + vpmem_resources = self._get_vpmem_resources(resources) + ordered_vpmem_resources = [] + labels = hardware.get_vpmems(flavor) + for label in labels: + for vpmem_resource in vpmem_resources: + if vpmem_resource.metadata.label == label: + ordered_vpmem_resources.append(vpmem_resource) + vpmem_resources.remove(vpmem_resource) + break + return ordered_vpmem_resources + + def _sorted_migrating_resources(self, instance, flavor): + """Sort the instance.migration_context.new_resources claimed on the + dest host. The ordered new resources will then be used to update + resource info (e.g. vpmems) in the new XML used for live migration. 
+ """ + resources = self._get_resources(instance, prefix='new_') + if not resources: + return + ordered_resources = [] + ordered_vpmem_resources = self._get_ordered_vpmem_resources( + resources, flavor) + ordered_resources.extend(ordered_vpmem_resources) + ordered_resources_obj = objects.ResourceList(objects=ordered_resources) + return ordered_resources_obj + def _get_live_migrate_numa_info(self, instance_numa_topology, flavor, image_meta): """Builds a LibvirtLiveMigrateNUMAInfo object to send to the source of @@ -8071,14 +8447,6 @@ relative=True) dest_check_data.instance_relative_path = instance_path - # NOTE(lyarwood): Used to indicate to the dest that the src is capable - # of wiring up the encrypted disk configuration for the domain. - # Note that this does not require the QEMU and Libvirt versions to - # decrypt LUKS to be installed on the source node. Only the Nova - # utility code to generate the correct XML is required, so we can - # default to True here for all computes >= Queens. - dest_check_data.src_supports_native_luks = True - # TODO(artom) Set to indicate that the source (us) can perform a # NUMA-aware live migration. NUMA-aware live migration will become # unconditionally supported in RPC 6.0, so this sentinel can be removed @@ -8432,12 +8800,16 @@ host=self._host) self._detach_direct_passthrough_vifs(context, migrate_data, instance) + new_resources = None + if isinstance(instance, objects.Instance): + new_resources = self._sorted_migrating_resources( + instance, instance.flavor) new_xml_str = libvirt_migrate.get_updated_guest_xml( # TODO(sahid): It's not a really good idea to pass # the method _get_volume_config and we should to find # a way to avoid this in future. guest, migrate_data, self._get_volume_config, - get_vif_config=get_vif_config) + get_vif_config=get_vif_config, new_resources=new_resources) # NOTE(pkoniszewski): Because of precheck which blocks # tunnelled block live migration with mapped volumes we @@ -8621,6 +8993,8 @@ n = 0 start = time.time() is_post_copy_enabled = self._is_post_copy_enabled(migration_flags) + # vpmem does not support post copy + is_post_copy_enabled &= not bool(self._get_vpmems(instance)) while True: info = guest.get_job_info() @@ -9069,15 +9443,7 @@ for bdm in block_device_mapping: connection_info = bdm['connection_info'] - # NOTE(lyarwood): Handle the P to Q LM during upgrade use case - # where an instance has encrypted volumes attached using the - # os-brick encryptors. Do not attempt to attach the encrypted - # volume using native LUKS decryption on the destionation. - src_native_luks = False - if migrate_data.obj_attr_is_set('src_supports_native_luks'): - src_native_luks = migrate_data.src_supports_native_luks - self._connect_volume(context, connection_info, instance, - allow_native_luks=src_native_luks) + self._connect_volume(context, connection_info, instance) self._pre_live_migration_plug_vifs( instance, network_info, migrate_data) @@ -9393,7 +9759,7 @@ dk_size = os.stat(path).st_blocks * 512 virt_size = os.path.getsize(path) backing_file = "" - over_commit_size = 0 + over_commit_size = int(virt_size) - dk_size elif disk_type == 'block' and block_device_info: dk_size = lvm.get_volume_size(path) @@ -10355,11 +10721,90 @@ nova.privsep.fs.FS_FORMAT_EXT4, nova.privsep.fs.FS_FORMAT_XFS] + def _get_vif_model_traits(self): + """Get vif model traits based on the currently enabled virt_type. + + Not all traits generated by this function may be valid and the result + should be validated. 
+ + :return: A dict of trait names mapped to boolean values. + """ + all_models = set(itertools.chain( + *libvirt_vif.SUPPORTED_VIF_MODELS.values() + )) + supported_models = libvirt_vif.SUPPORTED_VIF_MODELS.get( + CONF.libvirt.virt_type, [] + ) + # construct the corresponding standard trait from the VIF model name + return { + f'COMPUTE_NET_VIF_MODEL_{model.replace("-", "_").upper()}': model + in supported_models for model in all_models + } + + def _get_storage_bus_traits(self): + """Get storage bus traits based on the currently enabled virt_type. + + For QEMU and KVM this function uses the information returned by the + libvirt domain capabilities API. For other virt types we generate the + traits based on the static information in the blockinfo module. + + Not all traits generated by this function may be valid and the result + should be validated. + + :return: A dict of trait names mapped to boolean values. + """ + all_buses = set(itertools.chain( + *blockinfo.SUPPORTED_STORAGE_BUSES.values() + )) + + if CONF.libvirt.virt_type in ('qemu', 'kvm'): + dom_caps = self._host.get_domain_capabilities() + supported_buses = set() + for arch_type in dom_caps: + for machine_type in dom_caps[arch_type]: + supported_buses.update( + dom_caps[arch_type][machine_type].devices.disk.buses + ) + else: + supported_buses = blockinfo.SUPPORTED_STORAGE_BUSES.get( + CONF.libvirt.virt_type, [] + ) + + # construct the corresponding standard trait from the storage bus name + return { + f'COMPUTE_STORAGE_BUS_{bus.replace("-", "_").upper()}': bus in + supported_buses for bus in all_buses + } + + def _get_video_model_traits(self): + """Get video model traits from libvirt. + + Not all traits generated by this function may be valid and the result + should be validated. + + :return: A dict of trait names mapped to boolean values. + """ + all_models = fields.VideoModel.ALL + + dom_caps = self._host.get_domain_capabilities() + supported_models = set() + for arch_type in dom_caps: + for machine_type in dom_caps[arch_type]: + supported_models.update( + dom_caps[arch_type][machine_type].devices.video.models + ) + + # construct the corresponding standard trait from the video model name + return { + f'COMPUTE_GRAPHICS_MODEL_{model.replace("-", "_").upper()}': model + in supported_models for model in all_models + } + def _get_cpu_traits(self): """Get CPU-related traits to be set and unset on the host's resource provider. - :return: A dict of trait names mapped to boolean values or None. + :return: A dict of trait names mapped to boolean values. """ traits = self._get_cpu_feature_traits() traits[ot.HW_CPU_X86_AMD_SEV] = self._host.supports_amd_sev @@ -10368,16 +10813,19 @@ return traits def _get_cpu_feature_traits(self): - """Get CPU traits of VMs based on guest CPU model config: - 1. if mode is 'host-model' or 'host-passthrough', use host's - CPU features. - 2. if mode is None, choose a default CPU model based on CPU - architecture. - 3. if mode is 'custom', use cpu_models to generate CPU features. + """Get CPU traits of VMs based on guest CPU model config. + + 1. If mode is 'host-model' or 'host-passthrough', use host's + CPU features. + 2. If mode is None, choose a default CPU model based on CPU + architecture. + 3. If mode is 'custom', use cpu_models to generate CPU features. + The code also accounts for cpu_model_extra_flags configuration when cpu_mode is 'host-model', 'host-passthrough' or 'custom', this ensures user specified CPU feature flags to be included. 
- :return: A dict of trait names mapped to boolean values or None. + + :return: A dict of trait names mapped to boolean values. """ cpu = self._get_guest_cpu_model_config() if not cpu: @@ -10413,6 +10861,11 @@ else: models = [self._get_cpu_model_mapping(model) for model in CONF.libvirt.cpu_models] + + # Aarch64 platform doesn't return the default CPU models + if caps.host.cpu.arch == fields.Architecture.AARCH64: + if not models: + models = ['max'] # For custom mode, iterate through cpu models for model in models: caps.host.cpu.model = model diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/guest.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/guest.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/guest.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/guest.py 2020-04-10 17:57:58.000000000 +0000 @@ -756,6 +756,22 @@ cur=status['cur'], end=status['end']) + def copy(self, dest_xml, shallow=False, reuse_ext=False, transient=False): + """Copy the guest-visible contents into a new disk + + http://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockCopy + + :param: dest_xml: XML describing the destination disk to copy to + :param: shallow: Limit copy to top of source backing chain + :param: reuse_ext: Reuse existing external file for a copy + :param: transient: Don't force usage of recoverable job for the copy + operation + """ + flags = shallow and libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW or 0 + flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_COPY_REUSE_EXT or 0 + flags |= transient and libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB or 0 + return self._guest._domain.blockCopy(self._disk, dest_xml, flags=flags) + def rebase(self, base, shallow=False, reuse_ext=False, copy=False, relative=False, copy_dev=False): """Copy data from backing chain into a new disk @@ -792,9 +808,15 @@ return self._guest._domain.blockCommit( self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags) - def resize(self, size_kb): - """Resize block device to KiB size""" - self._guest._domain.blockResize(self._disk, size_kb) + def resize(self, size): + """Resize block device to the given size in bytes. + + This resizes the block device within the instance to the given size. + + :param size: The size to resize the device to in bytes. + """ + flags = libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES + self._guest._domain.blockResize(self._disk, size, flags=flags) def is_job_complete(self): """Return True if the job is complete, False otherwise diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/imagebackend.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/imagebackend.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/imagebackend.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/imagebackend.py 2020-04-10 17:57:58.000000000 +0000 @@ -34,7 +34,7 @@ import nova.conf from nova import exception from nova.i18n import _ -from nova import image +from nova.image import glance import nova.privsep.libvirt import nova.privsep.path from nova import utils @@ -50,7 +50,7 @@ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) -IMAGE_API = image.API() +IMAGE_API = glance.API() # NOTE(neiljerram): Don't worry if this fails. 
This sometimes happens, with @@ -980,7 +980,17 @@ reason=reason) def flatten(self): - self.driver.flatten(self.rbd_name, pool=self.driver.pool) + # NOTE(vdrok): only flatten images if they are not already flattened, + # meaning that parent info is present + try: + self.driver.parent_info(self.rbd_name, pool=self.driver.pool) + except exception.ImageUnacceptable: + LOG.debug( + "Image %(img)s from pool %(pool)s has no parent info, " + "consider it already flat", { + 'img': self.rbd_name, 'pool': self.driver.pool}) + else: + self.driver.flatten(self.rbd_name, pool=self.driver.pool) def get_model(self, connection): secret = None diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/migration.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/migration.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/migration.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/migration.py 2020-04-10 17:57:58.000000000 +0000 @@ -25,6 +25,7 @@ from nova.compute import power_state import nova.conf from nova import exception +from nova import objects from nova.virt import hardware from nova.virt.libvirt import config as vconfig @@ -80,7 +81,7 @@ def get_updated_guest_xml(guest, migrate_data, get_volume_config, - get_vif_config=None): + get_vif_config=None, new_resources=None): xml_doc = etree.fromstring(guest.get_xml_desc(dump_migratable=True)) xml_doc = _update_graphics_xml(xml_doc, migrate_data) xml_doc = _update_serial_xml(xml_doc, migrate_data) @@ -91,9 +92,33 @@ xml_doc = _update_vif_xml(xml_doc, migrate_data, get_vif_config) if 'dst_numa_info' in migrate_data: xml_doc = _update_numa_xml(xml_doc, migrate_data) + if new_resources: + xml_doc = _update_device_resources_xml(xml_doc, new_resources) return etree.tostring(xml_doc, encoding='unicode') +def _update_device_resources_xml(xml_doc, new_resources): + vpmems = [] + for resource in new_resources: + if 'metadata' in resource: + res_meta = resource.metadata + if isinstance(res_meta, objects.LibvirtVPMEMDevice): + vpmems.append(res_meta) + # If there are other resources in the future, the xml should + # be updated here like vpmems + xml_doc = _update_vpmems_xml(xml_doc, vpmems) + return xml_doc + + +def _update_vpmems_xml(xml_doc, vpmems): + memory_devices = xml_doc.findall("./devices/memory") + for pos, memory_dev in enumerate(memory_devices): + if memory_dev.get('model') == 'nvdimm': + devpath = memory_dev.find('./source/path') + devpath.text = vpmems[pos].devpath + return xml_doc + + def _update_numa_xml(xml_doc, migrate_data): LOG.debug('_update_numa_xml input xml=%s', etree.tostring(xml_doc, encoding='unicode', pretty_print=True)) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/utils.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -114,8 +114,9 @@ base_cmd = ['qemu-img', 'create', '-f', 'qcow2'] cow_opts = [] if backing_file: - cow_opts += ['backing_file=%s' % backing_file] base_details = images.qemu_img_info(backing_file) + cow_opts += ['backing_file=%s' % backing_file] + cow_opts += ['backing_fmt=%s' % base_details.file_format] else: base_details = None # Explicitly inherit the value of 'cluster_size' property of a qcow2 @@ -536,6 +537,10 @@ mode = 'qemu32' elif arch == obj_fields.Architecture.PPC64LE: mode = 
'POWER8' + # NOTE(kevinz): On aarch64, the CPU model 'max' offers all of the + # capabilities that the host can currently emulate, both for "TCG" and + # "KVM" + elif arch == obj_fields.Architecture.AARCH64: + mode = 'max' return mode diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/vif.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/vif.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/libvirt/vif.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/libvirt/vif.py 2020-04-10 17:57:58.000000000 +0000 @@ -58,41 +58,46 @@ MIN_LIBVIRT_TX_QUEUE_SIZE = (3, 7, 0) MIN_QEMU_TX_QUEUE_SIZE = (2, 10, 0) +SUPPORTED_VIF_MODELS = { + 'qemu': [ + network_model.VIF_MODEL_VIRTIO, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_LAN9118, + network_model.VIF_MODEL_SPAPR_VLAN], + 'kvm': [ + network_model.VIF_MODEL_VIRTIO, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000, + network_model.VIF_MODEL_SPAPR_VLAN], + 'xen': [ + network_model.VIF_MODEL_NETFRONT, + network_model.VIF_MODEL_NE2K_PCI, + network_model.VIF_MODEL_PCNET, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000], + 'lxc': [], + 'uml': [], + 'parallels': [ + network_model.VIF_MODEL_VIRTIO, + network_model.VIF_MODEL_RTL8139, + network_model.VIF_MODEL_E1000], +} + def is_vif_model_valid_for_virt(virt_type, vif_model): - valid_models = { - 'qemu': [network_model.VIF_MODEL_VIRTIO, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000, - network_model.VIF_MODEL_LAN9118, - network_model.VIF_MODEL_SPAPR_VLAN], - 'kvm': [network_model.VIF_MODEL_VIRTIO, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000, - network_model.VIF_MODEL_SPAPR_VLAN], - 'xen': [network_model.VIF_MODEL_NETFRONT, - network_model.VIF_MODEL_NE2K_PCI, - network_model.VIF_MODEL_PCNET, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000], - 'lxc': [], - 'uml': [], - 'parallels': [network_model.VIF_MODEL_VIRTIO, - network_model.VIF_MODEL_RTL8139, - network_model.VIF_MODEL_E1000], - } if vif_model is None: return True - if virt_type not in valid_models: + if virt_type not in SUPPORTED_VIF_MODELS: raise exception.UnsupportedVirtType(virt=virt_type) - return vif_model in valid_models[virt_type] + return vif_model in SUPPORTED_VIF_MODELS[virt_type] def set_vf_interface_vlan(pci_addr, mac_addr, vlan=0): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/disk/localdisk.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/disk/localdisk.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/disk/localdisk.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/disk/localdisk.py 2020-04-10 17:57:58.000000000 +0000 @@ -24,13 +24,13 @@ from nova import conf from nova import exception -from nova import image +from nova.image import glance from nova.virt.powervm.disk import driver as disk_dvr from nova.virt.powervm import vm LOG = logging.getLogger(__name__) CONF = conf.CONF -IMAGE_API = image.API() +IMAGE_API = glance.API() class LocalStorage(disk_dvr.DiskAdapter): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/disk/ssp.py 
nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/disk/ssp.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/disk/ssp.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/disk/ssp.py 2020-04-10 17:57:58.000000000 +0000 @@ -26,13 +26,13 @@ import pypowervm.wrappers.storage as pvm_stg from nova import exception -from nova import image +from nova.image import glance from nova.virt.powervm.disk import driver as disk_drv from nova.virt.powervm import vm LOG = logging.getLogger(__name__) -IMAGE_API = image.API() +IMAGE_API = glance.API() class SSPDiskAdapter(disk_drv.DiskAdapter): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/powervm/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/powervm/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -34,7 +34,7 @@ from nova.console import type as console_type from nova import exception as exc from nova.i18n import _ -from nova import image +from nova.image import glance from nova.virt import configdrive from nova.virt import driver from nova.virt.powervm import host as pvm_host @@ -78,6 +78,7 @@ 'supports_multiattach': False, 'supports_trusted_certs': False, 'supports_pcpus': False, + "supports_accelerators": False, # Supported image types "supports_image_type_aki": False, @@ -118,7 +119,7 @@ self.disk_dvr = importutils.import_object_ns( DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()], self.adapter, self.host_wrapper.uuid) - self.image_api = image.API() + self.image_api = glance.API() LOG.info("The PowerVM compute driver has been initialized.") @@ -240,7 +241,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): """Create a new instance/VM/domain on the virtualization platform. Once this successfully completes, the instance should be @@ -464,7 +465,7 @@ timeout=timeout) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance. :param instance: nova.objects.instance.Instance @@ -473,7 +474,8 @@ vm.power_on(self.adapter, instance) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot the specified instance. After this is called successfully, the instance's state @@ -489,6 +491,8 @@ :param block_device_info: Info pertaining to attached volumes :param bad_volumes_callback: Function to handle any bad volumes encountered + :param accel_info: List of accelerator request dicts. The exact + data struct is doc'd in nova/virt/driver.py::spawn(). """ self._log_operation(reboot_type + ' reboot', instance) vm.reboot(self.adapter, instance, reboot_type == 'HARD') @@ -641,9 +645,11 @@ # Run the flow tf_base.run(flow, instance=instance) - def extend_volume(self, connection_info, instance, requested_size): + def extend_volume(self, context, connection_info, instance, + requested_size): """Extend the disk attached to the instance. + :param context: security context :param dict connection_info: The connection for the extended volume. 
:param nova.objects.instance.Instance instance: The instance whose volume gets extended. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -70,6 +70,7 @@ "supports_multiattach": False, "supports_trusted_certs": False, "supports_pcpus": False, + "supports_accelerators": False, # Image type support flags "supports_image_type_aki": False, @@ -200,6 +201,13 @@ self._datastore_regex = None def init_host(self, host): + LOG.warning('The vmwareapi driver is deprecated and may be removed in ' + 'a future release. The driver is not tested by the ' + 'OpenStack project nor does it have clear maintainer(s) ' + 'and thus its quality can not be ensured. If you are ' + 'using the driver in production please let us know in ' + 'freenode IRC and/or the openstack-discuss mailing list.') + vim = self._session.vim if vim is None: self._session._create_session() @@ -533,7 +541,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): """Create VM instance.""" self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) @@ -563,7 +571,8 @@ self._vmops.snapshot(context, instance, image_id, update_task_state) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot VM instance.""" self._vmops.reboot(instance, network_info, reboot_type) @@ -637,7 +646,7 @@ self._vmops.resume(instance) def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): """Rescue the specified instance.""" self._vmops.rescue(context, instance, network_info, image_meta) @@ -650,7 +659,7 @@ self._vmops.power_off(instance, timeout, retry_interval) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance.""" self._vmops.power_on(instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/images.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/images.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/images.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/images.py 2020-04-10 17:57:58.000000000 +0000 @@ -32,7 +32,7 @@ from nova import exception from nova.i18n import _ -from nova import image +from nova.image import glance from nova.objects import fields from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import vm_util @@ -45,7 +45,7 @@ CONF = cfg.CONF LOG = logging.getLogger(__name__) -IMAGE_API = image.API() +IMAGE_API = glance.API() QUEUE_BUFFER_SIZE = 10 NFC_LEASE_UPDATE_PERIOD = 60 # update NFC lease every 60sec. 
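The powervm and vmwareapi hunks above, and the xenapi and zvm hunks below, all make the same mechanical change to the virt driver interface. A minimal stub of a hypothetical out-of-tree driver (sketch only, not part of the tree) showing the shape of the new signatures; drivers that do not support accelerators simply accept and ignore the new keyword:

    # Hypothetical driver stub: accel_info (a list of Cyborg ARQ dicts) is
    # now threaded through spawn, reboot and power_on, extend_volume gains
    # a context argument, and rescue receives block_device_info.
    class StubDriver(object):
        capabilities = {'supports_accelerators': False}

        def spawn(self, context, instance, image_meta, injected_files,
                  admin_password, allocations, network_info=None,
                  block_device_info=None, power_on=True, accel_info=None):
            pass  # ignore accel_info when accelerators are unsupported

        def reboot(self, context, instance, network_info, reboot_type,
                   block_device_info=None, bad_volumes_callback=None,
                   accel_info=None):
            pass

        def power_on(self, context, instance, network_info,
                     block_device_info=None, accel_info=None):
            pass

        def extend_volume(self, context, connection_info, instance,
                          requested_size):
            pass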
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/vim_util.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/vim_util.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/vim_util.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/vim_util.py 2020-04-10 17:57:58.000000000 +0000 @@ -158,3 +158,13 @@ def get_entity_name(session, entity): return session._call_method(vutil, 'get_object_property', entity, 'name') + + +def get_array_items(array_obj): + """Get contained items if the object is a vSphere API array.""" + array_prefix = 'ArrayOf' + if array_obj.__class__.__name__.startswith(array_prefix): + attr_name = array_obj.__class__.__name__.replace(array_prefix, '', 1) + if hasattr(array_obj, attr_name): + return getattr(array_obj, attr_name) + return array_obj diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/vm_util.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/vm_util.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/vm_util.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/vm_util.py 2020-04-10 17:57:58.000000000 +0000 @@ -46,6 +46,14 @@ 'VirtualPCNet32', 'VirtualSriovEthernetCard', 'VirtualVmxnet', 'VirtualVmxnet3'] +CONTROLLER_TO_ADAPTER_TYPE = { + "VirtualLsiLogicController": constants.DEFAULT_ADAPTER_TYPE, + "VirtualBusLogicController": constants.ADAPTER_TYPE_BUSLOGIC, + "VirtualIDEController": constants.ADAPTER_TYPE_IDE, + "VirtualLsiLogicSASController": constants.ADAPTER_TYPE_LSILOGICSAS, + "ParaVirtualSCSIController": constants.ADAPTER_TYPE_PARAVIRTUAL +} + # A simple cache for storing inventory folder references. # Format: {inventory_path: folder_ref} _FOLDER_PATH_REF_MAPPING = {} @@ -674,14 +682,17 @@ return constants.DEFAULT_DISK_TYPE -def get_vmdk_info(session, vm_ref, uuid=None): - """Returns information for the primary VMDK attached to the given VM.""" +def get_hardware_devices(session, vm_ref): hardware_devices = session._call_method(vutil, "get_object_property", vm_ref, "config.hardware.device") - if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": - hardware_devices = hardware_devices.VirtualDevice + return vim_util.get_array_items(hardware_devices) + + +def get_vmdk_info(session, vm_ref, uuid=None): + """Returns information for the primary VMDK attached to the given VM.""" + hardware_devices = get_hardware_devices(session, vm_ref) vmdk_file_path = None vmdk_controller_key = None disk_type = None @@ -703,16 +714,9 @@ if root_disk and path.basename == root_disk: root_device = device vmdk_device = device - elif device.__class__.__name__ == "VirtualLsiLogicController": - adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE - elif device.__class__.__name__ == "VirtualBusLogicController": - adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC - elif device.__class__.__name__ == "VirtualIDEController": - adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE - elif device.__class__.__name__ == "VirtualLsiLogicSASController": - adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS - elif device.__class__.__name__ == "ParaVirtualSCSIController": - adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL + elif device.__class__.__name__ in CONTROLLER_TO_ADAPTER_TYPE: + adapter_type_dict[device.key] = CONTROLLER_TO_ADAPTER_TYPE[ + device.__class__.__name__] if root_disk: vmdk_device = root_device diff -Nru 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/volumeops.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/volumeops.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/vmwareapi/volumeops.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/vmwareapi/volumeops.py 2020-04-10 17:57:58.000000000 +0000 @@ -516,20 +516,26 @@ device = self._get_vmdk_backed_disk_device(vm_ref, data) - # Get details required for adding disk device such as - # adapter_type, disk_type - vmdk = vm_util.get_vmdk_info(self._session, volume_ref) + hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref) + adapter_type = None + for hw_device in hardware_devices: + if hw_device.key == device.controllerKey: + adapter_type = vm_util.CONTROLLER_TO_ADAPTER_TYPE.get( + hw_device.__class__.__name__) + break # IDE does not support disk hotplug - if vmdk.adapter_type == constants.ADAPTER_TYPE_IDE: + if adapter_type == constants.ADAPTER_TYPE_IDE: state = vm_util.get_vm_state(self._session, instance) if state != power_state.SHUTDOWN: raise exception.Invalid(_('%s does not support disk ' - 'hotplug.') % vmdk.adapter_type) + 'hotplug.') % adapter_type) + + disk_type = vm_util._get_device_disk_type(device) self._consolidate_vmdk_volume(instance, vm_ref, device, volume_ref, - adapter_type=vmdk.adapter_type, - disk_type=vmdk.disk_type) + adapter_type=adapter_type, + disk_type=disk_type) self.detach_disk_from_vm(vm_ref, instance, device) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -70,6 +70,7 @@ "supports_multiattach": False, "supports_trusted_certs": False, "supports_pcpus": False, + "supports_accelerators": False, # Image type support flags "supports_image_type_aki": False, @@ -215,7 +216,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): """Create VM instance.""" vgpu_info = self._get_vgpu_info(allocations) self._vmops.spawn(context, instance, image_meta, injected_files, @@ -252,7 +253,8 @@ self._vmops.post_interrupted_snapshot_cleanup(context, instance) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): """Reboot VM instance.""" self._vmops.reboot(instance, reboot_type, bad_volumes_callback=bad_volumes_callback) @@ -311,7 +313,7 @@ self._vmops.resume(instance) def rescue(self, context, instance, network_info, image_meta, - rescue_password): + rescue_password, block_device_info): """Rescue the specified instance.""" self._vmops.rescue(context, instance, network_info, image_meta, rescue_password) @@ -330,7 +332,7 @@ self._vmops.power_off(instance) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): """Power on the specified instance.""" self._vmops.power_on(instance) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/image/utils.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/image/utils.py --- 
nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/image/utils.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/image/utils.py 2020-04-10 17:57:58.000000000 +0000 @@ -19,11 +19,11 @@ from oslo_utils import importutils from nova import exception -from nova import image +from nova.image import glance _VDI_FORMAT_RAW = 1 -IMAGE_API = image.API() +IMAGE_API = glance.API() IMAGE_HANDLERS = {'direct_vhd': 'glance.GlanceStore', 'vdi_local_dev': 'vdi_through_dev.VdiThroughDevStore', 'vdi_remote_stream': 'vdi_stream.VdiStreamStore'} diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/image/vdi_stream.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/image/vdi_stream.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/xenapi/image/vdi_stream.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/xenapi/image/vdi_stream.py 2020-04-10 17:57:58.000000000 +0000 @@ -22,7 +22,7 @@ import nova.conf from nova import exception -from nova import image +from nova.image import glance from nova import utils as nova_utils from nova.virt.xenapi.image import utils from nova.virt.xenapi import vm_utils @@ -30,7 +30,7 @@ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) -IMAGE_API = image.API() +IMAGE_API = glance.API() class VdiStreamStore(object): diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/zvm/driver.py nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/zvm/driver.py --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova/virt/zvm/driver.py 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova/virt/zvm/driver.py 2020-04-10 17:57:58.000000000 +0000 @@ -119,7 +119,7 @@ 'local_gb_used': host_stats.get('disk_used', 0), 'hypervisor_type': host_stats.get('hypervisor_type', obj_fields.HVType.ZVM), - 'hypervisor_version': host_stats.get('hypervisor_version', ''), + 'hypervisor_version': host_stats.get('hypervisor_version', 0), 'hypervisor_hostname': host_stats.get('hypervisor_hostname', hypervisor_hostname), 'cpu_info': jsonutils.dumps(host_stats.get('cpu_info', {})), @@ -144,7 +144,7 @@ def spawn(self, context, instance, image_meta, injected_files, admin_password, allocations, network_info=None, - block_device_info=None, power_on=True): + block_device_info=None, power_on=True, accel_info=None): LOG.info("Spawning new instance %s on zVM hypervisor", instance.name, instance=instance) @@ -395,7 +395,7 @@ self._hypervisor.guest_softstop(instance.name) def power_on(self, context, instance, network_info, - block_device_info=None): + block_device_info=None, accel_info=None): self._hypervisor.guest_start(instance.name) def pause(self, instance): @@ -405,7 +405,8 @@ self._hypervisor.guest_unpause(instance.name) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None): + block_device_info=None, bad_volumes_callback=None, + accel_info=None): if reboot_type == 'SOFT': self._hypervisor.guest_reboot(instance.name) diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/entry_points.txt nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/entry_points.txt --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/entry_points.txt 2020-02-10 08:50:37.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/entry_points.txt 2020-04-10 17:58:17.000000000 +0000 @@ -14,11 +14,26 @@ nova-spicehtml5proxy = nova.cmd.spicehtml5proxy:main nova-status = nova.cmd.status:main 
+[nova.api.extra_spec_validators] +accel = nova.api.validation.extra_specs.accel +aggregate_instance_extra_specs = nova.api.validation.extra_specs.aggregate_instance_extra_specs +capabilities = nova.api.validation.extra_specs.capabilities +hw = nova.api.validation.extra_specs.hw +hw_rng = nova.api.validation.extra_specs.hw_rng +hw_video = nova.api.validation.extra_specs.hw_video +null = nova.api.validation.extra_specs.null +os = nova.api.validation.extra_specs.os +pci_passthrough = nova.api.validation.extra_specs.pci_passthrough +powervm = nova.api.validation.extra_specs.powervm +quota = nova.api.validation.extra_specs.quota +resources = nova.api.validation.extra_specs.resources +traits = nova.api.validation.extra_specs.traits +vmware = nova.api.validation.extra_specs.vmware + [nova.compute.monitors.cpu] virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor [nova.scheduler.driver] -fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler [oslo.config.opts] diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/pbr.json nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/pbr.json --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/pbr.json 2020-02-10 08:50:37.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/pbr.json 2020-04-10 17:58:17.000000000 +0000 @@ -1 +1 @@ -{"git_version": "1fcd74730d", "is_release": false} \ No newline at end of file +{"git_version": "57ff308d6d", "is_release": false} \ No newline at end of file diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/PKG-INFO nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/PKG-INFO --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/PKG-INFO 2020-02-10 08:50:37.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/PKG-INFO 2020-04-10 17:58:17.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: nova -Version: 20.1.0.dev969 +Version: 20.1.0.dev1547 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/requires.txt nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/requires.txt --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/requires.txt 2020-02-10 08:50:37.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/requires.txt 2020-04-10 17:58:17.000000000 +0000 @@ -25,12 +25,12 @@ os-brick>=2.6.2 os-resource-classes>=0.4.0 os-service-types>=1.7.0 -os-traits>=2.1.0 +os-traits>=2.2.0 os-vif>=1.14.0 os-win>=3.0.0 os-xenapi>=0.3.3 oslo.cache>=1.26.0 -oslo.concurrency>=3.26.0 +oslo.concurrency>=3.29.0 oslo.config>=6.1.0 oslo.context>=2.21.0 oslo.db>=4.44.0 @@ -45,7 +45,7 @@ oslo.serialization!=2.19.1,>=2.21.1 oslo.service>=1.40.1 oslo.upgradecheck>=0.1.1 -oslo.utils>=3.40.2 +oslo.utils>=4.1.0 oslo.versionedobjects>=1.35.0 paramiko>=2.0.0 pbr!=2.1.0,>=2.0.0 @@ -63,9 +63,12 @@ stevedore>=1.20.0 taskflow>=2.16.0 tooz>=1.58.0 -websockify>=0.8.0 +websockify>=0.9.0 zVMCloudConnector>=1.3.0 +[:(python_version=='3.6')] +dataclasses>=0.7 + [osprofiler] osprofiler>=1.4.0 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/SOURCES.txt nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/SOURCES.txt --- nova-21.0.0~b2~git2020021008.1fcd74730d/nova.egg-info/SOURCES.txt 2020-02-10 08:50:38.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/nova.egg-info/SOURCES.txt 2020-04-10 17:58:18.000000000 +0000 @@ -18,6 +18,7 @@ 
setup.py test-requirements.txt tox.ini +api-guide/source/accelerator-support.rst api-guide/source/authentication.rst api-guide/source/conf.py api-guide/source/down_cells.rst @@ -161,29 +162,6 @@ doc/api_samples/images/image-metadata-put-resp.json doc/api_samples/images/images-details-get-resp.json doc/api_samples/images/images-list-get-resp.json -doc/api_samples/keypairs/keypairs-get-resp.json -doc/api_samples/keypairs/keypairs-import-post-req.json -doc/api_samples/keypairs/keypairs-import-post-resp.json -doc/api_samples/keypairs/keypairs-list-resp.json -doc/api_samples/keypairs/keypairs-post-req.json -doc/api_samples/keypairs/keypairs-post-resp.json -doc/api_samples/keypairs/v2.10/keypairs-get-resp.json -doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json -doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json -doc/api_samples/keypairs/v2.10/keypairs-list-resp.json -doc/api_samples/keypairs/v2.10/keypairs-post-req.json -doc/api_samples/keypairs/v2.10/keypairs-post-resp.json -doc/api_samples/keypairs/v2.2/keypairs-get-resp.json -doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json -doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json -doc/api_samples/keypairs/v2.2/keypairs-list-resp.json -doc/api_samples/keypairs/v2.2/keypairs-post-req.json -doc/api_samples/keypairs/v2.2/keypairs-post-resp.json -doc/api_samples/keypairs/v2.35/keypairs-list-resp.json -doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json -doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json -doc/api_samples/keypairs/v2.35/keypairs-post-req.json -doc/api_samples/keypairs/v2.35/keypairs-post-resp.json doc/api_samples/limits/limit-get-resp.json doc/api_samples/limits/v2.36/limit-get-resp.json doc/api_samples/limits/v2.39/limit-get-resp.json @@ -366,8 +344,38 @@ doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json +doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json +doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json +doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json +doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json +doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json +doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json +doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json +doc/api_samples/os-keypairs/keypairs-get-resp.json +doc/api_samples/os-keypairs/keypairs-import-post-req.json +doc/api_samples/os-keypairs/keypairs-import-post-resp.json +doc/api_samples/os-keypairs/keypairs-list-resp.json +doc/api_samples/os-keypairs/keypairs-post-req.json +doc/api_samples/os-keypairs/keypairs-post-resp.json +doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json +doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json +doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json +doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json +doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json +doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json 
+doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json +doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json +doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json +doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json +doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json +doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json +doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json +doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json +doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json +doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json +doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json doc/api_samples/os-lock-server/lock-server.json doc/api_samples/os-lock-server/unlock-server.json doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json @@ -597,6 +605,12 @@ doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json doc/api_samples/os-volumes/v2.79/update-volume-req.json doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json +doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json +doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json +doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json +doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json +doc/api_samples/os-volumes/v2.85/update-volume-req.json +doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json doc/api_samples/server-ips/server-ips-network-resp.json doc/api_samples/server-ips/server-ips-resp.json doc/api_samples/server-metadata/server-metadata-all-req.json @@ -734,7 +748,10 @@ doc/api_samples/versions/v2-version-get-resp.json doc/api_samples/versions/v21-version-get-resp.json doc/api_samples/versions/versions-get-resp.json +doc/api_schemas/config_drive.json +doc/api_schemas/network_data.json doc/ext/__init__.py +doc/ext/extra_specs.py doc/ext/feature_matrix.py doc/ext/versioned_notifications.py doc/notification_samples/aggregate-add_host-end.json @@ -999,6 +1016,7 @@ doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.vsd doc/source/admin/figures/serial-console-flow.svg +doc/source/admin/troubleshooting/affinity-policy-violated.rst doc/source/admin/troubleshooting/orphaned-allocations.rst doc/source/admin/troubleshooting/rebuild-placement-db.rst doc/source/cli/index.rst @@ -1016,6 +1034,7 @@ doc/source/cli/nova-status.rst doc/source/common/numa-live-migration-warning.txt doc/source/configuration/config.rst +doc/source/configuration/extra-specs.rst doc/source/configuration/index.rst doc/source/configuration/policy.rst doc/source/configuration/sample-config.rst @@ -1025,6 +1044,7 @@ doc/source/contributor/api.rst doc/source/contributor/blueprints.rst doc/source/contributor/code-review.rst +doc/source/contributor/contributing.rst doc/source/contributor/development-environment.rst doc/source/contributor/documentation.rst doc/source/contributor/evacuate-vs-rebuild.rst @@ -1167,6 +1187,8 @@ nova.egg-info/pbr.json nova.egg-info/requires.txt nova.egg-info/top_level.txt +nova/accelerator/__init__.py +nova/accelerator/cyborg.py nova/api/__init__.py nova/api/auth.py nova/api/compute_req_id.py @@ -1320,6 +1342,23 @@ nova/api/validation/__init__.py nova/api/validation/parameter_types.py nova/api/validation/validators.py +nova/api/validation/extra_specs/__init__.py +nova/api/validation/extra_specs/accel.py +nova/api/validation/extra_specs/aggregate_instance_extra_specs.py 
+nova/api/validation/extra_specs/base.py +nova/api/validation/extra_specs/capabilities.py +nova/api/validation/extra_specs/hw.py +nova/api/validation/extra_specs/hw_rng.py +nova/api/validation/extra_specs/hw_video.py +nova/api/validation/extra_specs/null.py +nova/api/validation/extra_specs/os.py +nova/api/validation/extra_specs/pci_passthrough.py +nova/api/validation/extra_specs/powervm.py +nova/api/validation/extra_specs/quota.py +nova/api/validation/extra_specs/resources.py +nova/api/validation/extra_specs/traits.py +nova/api/validation/extra_specs/validators.py +nova/api/validation/extra_specs/vmware.py nova/cmd/__init__.py nova/cmd/api.py nova/cmd/api_metadata.py @@ -1377,6 +1416,7 @@ nova/conf/configdrive.py nova/conf/console.py nova/conf/consoleauth.py +nova/conf/cyborg.py nova/conf/database.py nova/conf/devices.py nova/conf/ephemeral_storage.py @@ -1719,7 +1759,6 @@ nova/hacking/__init__.py nova/hacking/checks.py nova/image/__init__.py -nova/image/api.py nova/image/glance.py nova/image/download/__init__.py nova/keymgr/__init__.py @@ -1774,9 +1813,7 @@ nova/objects/ec2.py nova/objects/external_event.py nova/objects/fields.py -nova/objects/fixed_ip.py nova/objects/flavor.py -nova/objects/floating_ip.py nova/objects/host_mapping.py nova/objects/hv_spec.py nova/objects/image_meta.py @@ -1793,7 +1830,6 @@ nova/objects/migration.py nova/objects/migration_context.py nova/objects/monitor_metric.py -nova/objects/network.py nova/objects/network_metadata.py nova/objects/network_request.py nova/objects/numa.py @@ -1803,7 +1839,6 @@ nova/objects/request_spec.py nova/objects/resource.py nova/objects/security_group.py -nova/objects/security_group_rule.py nova/objects/selection.py nova/objects/service.py nova/objects/tag.py @@ -1875,7 +1910,6 @@ nova/policies/simple_tenant_usage.py nova/policies/suspend_server.py nova/policies/tenant_networks.py -nova/policies/used_limits.py nova/policies/volumes.py nova/policies/volumes_attachments.py nova/privsep/__init__.py @@ -1955,6 +1989,7 @@ nova/tests/functional/test_cross_az_attach.py nova/tests/functional/test_cross_cell_migrate.py nova/tests/functional/test_external_networks.py +nova/tests/functional/test_flavor_extraspecs.py nova/tests/functional/test_images.py nova/tests/functional/test_instance_actions.py nova/tests/functional/test_json_filter.py @@ -1973,6 +2008,7 @@ nova/tests/functional/test_server_group.py nova/tests/functional/test_servers.py nova/tests/functional/test_servers_provider_tree.py +nova/tests/functional/test_service.py nova/tests/functional/api/__init__.py nova/tests/functional/api/client.py nova/tests/functional/api/openstack/__init__.py @@ -2103,29 +2139,6 @@ nova/tests/functional/api_sample_tests/api_samples/images/image-metadata-put-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-get-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-import-post-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-list-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/keypairs-post-resp.json.tpl 
-nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-get-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-import-post-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-list-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.10/keypairs-post-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-get-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-import-post-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-list-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.2/keypairs-post-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl -nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/v2.36/limit-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/limits/v2.39/limit-get-resp.json.tpl @@ -2284,8 +2297,38 @@ nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-get-resp.json.tpl 
+nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-list-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/keypairs-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-get-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-list-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.10/keypairs-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-get-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-list-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.2/keypairs-post-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.35/keypairs-post-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/lock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/unlock-server.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-lock-server/v2.73/lock-server-with-reason.json.tpl @@ -2499,6 +2542,12 @@ nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.79/update-volume-req.json.tpl nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/update-volume-req.json.tpl +nova/tests/functional/api_sample_tests/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-network-resp.json.tpl 
nova/tests/functional/api_sample_tests/api_samples/server-ips/server-ips-resp.json.tpl nova/tests/functional/api_sample_tests/api_samples/server-metadata/server-metadata-all-req.json.tpl @@ -2680,6 +2729,7 @@ nova/tests/functional/libvirt/test_reshape.py nova/tests/functional/libvirt/test_rt_servers.py nova/tests/functional/libvirt/test_shared_resource_provider.py +nova/tests/functional/libvirt/test_vgpu.py nova/tests/functional/libvirt/test_vpmem.py nova/tests/functional/notification_sample_tests/__init__.py nova/tests/functional/notification_sample_tests/notification_sample_base.py @@ -2742,6 +2792,7 @@ nova/tests/functional/regressions/test_bug_1825034.py nova/tests/functional/regressions/test_bug_1825537.py nova/tests/functional/regressions/test_bug_1830747.py +nova/tests/functional/regressions/test_bug_1831771.py nova/tests/functional/regressions/test_bug_1835822.py nova/tests/functional/regressions/test_bug_1837955.py nova/tests/functional/regressions/test_bug_1839560.py @@ -2751,6 +2802,7 @@ nova/tests/functional/regressions/test_bug_1849165.py nova/tests/functional/regressions/test_bug_1849409.py nova/tests/functional/regressions/test_bug_1852458.py +nova/tests/functional/regressions/test_bug_1862633.py nova/tests/functional/wsgi/__init__.py nova/tests/functional/wsgi/test_flavor_manage.py nova/tests/functional/wsgi/test_interfaces.py @@ -2820,6 +2872,8 @@ nova/tests/unit/test_weights.py nova/tests/unit/test_wsgi.py nova/tests/unit/utils.py +nova/tests/unit/accelerator/__init__.py +nova/tests/unit/accelerator/test_cyborg.py nova/tests/unit/api/__init__.py nova/tests/unit/api/test_auth.py nova/tests/unit/api/test_compute_req_id.py @@ -2913,6 +2967,9 @@ nova/tests/unit/api/openstack/compute/test_urlmap.py nova/tests/unit/api/openstack/compute/test_versions.py nova/tests/unit/api/openstack/compute/test_volumes.py +nova/tests/unit/api/validation/__init__.py +nova/tests/unit/api/validation/extra_specs/__init__.py +nova/tests/unit/api/validation/extra_specs/test_validators.py nova/tests/unit/cmd/__init__.py nova/tests/unit/cmd/test_baseproxy.py nova/tests/unit/cmd/test_cmd_db_blocks.py @@ -2954,6 +3011,7 @@ nova/tests/unit/conductor/tasks/test_live_migrate.py nova/tests/unit/conductor/tasks/test_migrate.py nova/tests/unit/conf/__init__.py +nova/tests/unit/conf/test_devices.py nova/tests/unit/conf/test_neutron.py nova/tests/unit/console/__init__.py nova/tests/unit/console/test_serial.py @@ -3011,9 +3069,7 @@ nova/tests/unit/objects/test_ec2.py nova/tests/unit/objects/test_external_event.py nova/tests/unit/objects/test_fields.py -nova/tests/unit/objects/test_fixed_ip.py nova/tests/unit/objects/test_flavor.py -nova/tests/unit/objects/test_floating_ip.py nova/tests/unit/objects/test_host_mapping.py nova/tests/unit/objects/test_hv_spec.py nova/tests/unit/objects/test_image_meta.py @@ -3031,7 +3087,6 @@ nova/tests/unit/objects/test_migration.py nova/tests/unit/objects/test_migration_context.py nova/tests/unit/objects/test_monitor_metric.py -nova/tests/unit/objects/test_network.py nova/tests/unit/objects/test_network_request.py nova/tests/unit/objects/test_numa.py nova/tests/unit/objects/test_objects.py @@ -3041,7 +3096,6 @@ nova/tests/unit/objects/test_request_spec.py nova/tests/unit/objects/test_resource.py nova/tests/unit/objects/test_security_group.py -nova/tests/unit/objects/test_security_group_rule.py nova/tests/unit/objects/test_selection.py nova/tests/unit/objects/test_service.py nova/tests/unit/objects/test_tag.py @@ -3062,7 +3116,43 @@ nova/tests/unit/policies/__init__.py 
nova/tests/unit/policies/base.py nova/tests/unit/policies/test_admin_actions.py +nova/tests/unit/policies/test_admin_password.py +nova/tests/unit/policies/test_agents.py +nova/tests/unit/policies/test_aggregates.py +nova/tests/unit/policies/test_assisted_volume_snapshots.py +nova/tests/unit/policies/test_attach_interfaces.py +nova/tests/unit/policies/test_availability_zone.py +nova/tests/unit/policies/test_console_auth_tokens.py +nova/tests/unit/policies/test_console_output.py +nova/tests/unit/policies/test_create_backup.py +nova/tests/unit/policies/test_deferred_delete.py +nova/tests/unit/policies/test_evacuate.py +nova/tests/unit/policies/test_flavor_access.py +nova/tests/unit/policies/test_flavor_manage.py +nova/tests/unit/policies/test_hypervisors.py +nova/tests/unit/policies/test_instance_actions.py +nova/tests/unit/policies/test_instance_usage_audit_log.py +nova/tests/unit/policies/test_limits.py +nova/tests/unit/policies/test_lock_server.py +nova/tests/unit/policies/test_migrate_server.py +nova/tests/unit/policies/test_migrations.py +nova/tests/unit/policies/test_pause_server.py +nova/tests/unit/policies/test_remote_consoles.py +nova/tests/unit/policies/test_rescue.py +nova/tests/unit/policies/test_security_groups.py +nova/tests/unit/policies/test_server_diagnostics.py +nova/tests/unit/policies/test_server_external_events.py +nova/tests/unit/policies/test_server_groups.py +nova/tests/unit/policies/test_server_ips.py +nova/tests/unit/policies/test_server_metadata.py +nova/tests/unit/policies/test_server_migrations.py +nova/tests/unit/policies/test_server_password.py +nova/tests/unit/policies/test_server_tags.py nova/tests/unit/policies/test_services.py +nova/tests/unit/policies/test_shelve.py +nova/tests/unit/policies/test_simple_tenant_usage.py +nova/tests/unit/policies/test_suspend_server.py +nova/tests/unit/policies/test_volumes.py nova/tests/unit/privsep/__init__.py nova/tests/unit/privsep/test_fs.py nova/tests/unit/privsep/test_idmapshift.py @@ -3078,9 +3168,9 @@ nova/tests/unit/scheduler/test_filters.py nova/tests/unit/scheduler/test_host_filters.py nova/tests/unit/scheduler/test_host_manager.py +nova/tests/unit/scheduler/test_manager.py nova/tests/unit/scheduler/test_request_filter.py nova/tests/unit/scheduler/test_rpcapi.py -nova/tests/unit/scheduler/test_scheduler.py nova/tests/unit/scheduler/test_scheduler_utils.py nova/tests/unit/scheduler/test_utils.py nova/tests/unit/scheduler/client/__init__.py @@ -3452,6 +3542,8 @@ releasenotes/notes/aarch64-set-proper-cpu-mode-8455bad7d69dc6fd.yaml releasenotes/notes/abort-live-migration-cb902bb0754b11b6.yaml releasenotes/notes/abort-live-migration-in-queue-0c917f415d6dac5a.yaml +releasenotes/notes/absolutely-non-inheritable-image-properties-85f7f304fdc20b61.yaml +releasenotes/notes/accelerator-requests-6c9a6fef77ab776a.yaml releasenotes/notes/add-action-initiator-to-instance-action-notifications-27e6a3031da274c5.yaml releasenotes/notes/add-aggregate-type-extra-specs-affinity-filter-79a2d3ee152b8ecd.yaml releasenotes/notes/add-api-config-to-api-group-af20a57a9e3e1b85.yaml @@ -3486,6 +3578,7 @@ releasenotes/notes/add-server-subresource-topology-c52e21f36497e62c.yaml releasenotes/notes/add-server-use_all_filters-policy-3ddfe1885056f0ca.yaml releasenotes/notes/add-storpool-libvirt-driver-8dfa78f46f58b034.yaml +releasenotes/notes/add-support-for-live-migration-with-vpmem-9af5057dbe551f3b.yaml releasenotes/notes/add-support-for-vgpu-libvirt-91d2983e643f5ff1.yaml releasenotes/notes/add-support-for-vpmem-libvirt-8b66add5b2d8f5f5.yaml 
releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml @@ -3501,6 +3594,7 @@ releasenotes/notes/allocation-candidates-limit-37fe5c2ce57daf7f.yaml releasenotes/notes/allocation-candidates-traits-1adf079ed0c6563c.yaml releasenotes/notes/allocation_candidates_support_member_of-92f7e1440ed63fe7.yaml +releasenotes/notes/allow-non-admin-filter-instance-more-filter-ea5abad7c32ff328.yaml releasenotes/notes/allow-reserved-equal-total-inventory-fe93584dd28c460d.yaml releasenotes/notes/always-set-dhcp-server-if-enable-dhcp-b96bf720af235902.yaml releasenotes/notes/api-consistency-cleanup-700b260ced206d92.yaml @@ -3520,6 +3614,7 @@ releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml releasenotes/notes/block_device_allocate_retries-min-0-uprade-dc97b8f0e7716a3b.yaml releasenotes/notes/boot-instance-specific-storage-backend-c34ee0a871efec3b.yaml +releasenotes/notes/bp-action-event-fault-details-8bfabc6e7390446a.yaml releasenotes/notes/bp-add-locked-reason-fb757750f7f077ef.yaml releasenotes/notes/bp-add-pagination-for-instance-actions-1c14cb3fc9887d2a.yaml releasenotes/notes/bp-add-pagination-for-os-migrations-2f8d5d257b0c5658.yaml @@ -3531,6 +3626,7 @@ releasenotes/notes/bp-cinder-new-attach-apis-eca854e27a255e3e.yaml releasenotes/notes/bp-deprecate-file-injection-feaf490524d10b3d.yaml releasenotes/notes/bp-deprecate-image-meta-proxy-api-7f21e1e6a94944ee.yaml +releasenotes/notes/bp-destroy-instance-with-datavolume-4c71b12e005832b0.yaml releasenotes/notes/bp-ephemeral-disk-ploop-a9b3af1f36ae42ed.yaml releasenotes/notes/bp-extend-in-use-rbd-volumes-8f334ce2a06ee247.yaml releasenotes/notes/bp-fix-console-auth-tokens-16b1b1b402dca362.yaml @@ -3640,10 +3736,13 @@ releasenotes/notes/bug-1834506-7c6875bbdc32ab0b.yaml releasenotes/notes/bug-1837877-cve-fault-message-exposure-5360d794f4976b7c.yaml releasenotes/notes/bug-1840978-nova-manage-255-88df61a0b69c21c7.yaml +releasenotes/notes/bug-1842149-5ba20d57872e9996.yaml +releasenotes/notes/bug-1845628-3152e73a1e4856b2.yaml releasenotes/notes/bug-1845986-70730d9f6c09e68b.yaml releasenotes/notes/bug-1852458-cell0-instance-action-e3112cf17bcc7c64.yaml releasenotes/notes/bug-1852610-service-delete-with-migrations-ca0565fc0b503519.yaml releasenotes/notes/bug-1856925-check-source-compute-resize-16e9c3b24cf72301.yaml +releasenotes/notes/bug-1864588-737c29560effd16e.yaml releasenotes/notes/bug-hyperv-1629040-e1eb35a7b31d9af8.yaml releasenotes/notes/bug-volume-attach-policy-1635358-671ce4d4ee8c211b.yaml releasenotes/notes/bug_1659328-73686be497f5f85a.yaml @@ -3656,6 +3755,7 @@ releasenotes/notes/check_destination_when_evacuating-37b52ebe8b5b086c.yaml releasenotes/notes/check_destination_when_livemig-e69d32e02d7a18c9.yaml releasenotes/notes/cinder-backend-report-discard-1def1c28140def9b.yaml +releasenotes/notes/cinder-detect-nonbootable-image-6fad7f865b45f879.yaml releasenotes/notes/cold-migration-with-target-queens-2dcd09c3a3414302.yaml releasenotes/notes/complex-anti-affinity-policies-dcf4719e859093be.yaml releasenotes/notes/compute-node-auto-disable-303eb9b0fdb4f3f1.yaml @@ -3721,9 +3821,11 @@ releasenotes/notes/deprecate-old-auth-parameters-948d70045335b312.yaml releasenotes/notes/deprecate-remap_vbd_dev-opt-c1690c5b447f0053.yaml releasenotes/notes/deprecate-retry-filter-4d1dba39a2c21836.yaml +releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml releasenotes/notes/deprecate-snapshot-name-template-46966b0f5e6cabeb.yaml releasenotes/notes/deprecate-the-cinder-v2-support-0cebc90580a3e80f.yaml 
releasenotes/notes/deprecate-topic-opts-68b1a752dba1eb24.yaml +releasenotes/notes/deprecate-vmware-ussuri-39e0215eca80ffd7.yaml releasenotes/notes/deprecate-vmware-wsdl-location-97af576f53fef771.yaml releasenotes/notes/deprecate-xen-driver-train-bd57a16fa51ab679.yaml releasenotes/notes/deprecate-xenserver-vif-driver-option-12eb279c0c93c157.yaml @@ -3781,6 +3883,7 @@ releasenotes/notes/flavor-api-policy-granularity-f563d621c615fd64.yaml releasenotes/notes/flavor-description-02f8b8626da71a25.yaml releasenotes/notes/flavor-extra-spec-image-property-validation-7310954ba3822477.yaml +releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml releasenotes/notes/flavor_hw_watchdog_action-512d79155c91cb84.yaml releasenotes/notes/flavors-moved-to-api-database-b33489ed3b1b246b.yaml releasenotes/notes/forbidden-traits-in-nova-478f1884a06e50e7.yaml @@ -3806,6 +3909,7 @@ releasenotes/notes/hyperv_2k8_drop-fb309f811767c7c4.yaml releasenotes/notes/idempotent-put-resource-class-dc7a267c823b7995.yaml releasenotes/notes/image-id-in-snapshot-notification-7e1e10435475a1af.yaml +releasenotes/notes/image-metadata-prefiltering-2921c1d38951f7a9.yaml releasenotes/notes/image-precaching-d46506568fefa1ea.yaml releasenotes/notes/imageRef-as-uuid-only-0164c04206a42683.yaml releasenotes/notes/image_cache-conf-opts-moved-e552e4a2d59e056e.yaml @@ -3889,6 +3993,7 @@ releasenotes/notes/network-allocate-retries-min-a5288476b11bfe55.yaml releasenotes/notes/network-api-class-removed-a4a754ca24c02bde.yaml releasenotes/notes/network-templates-ignore-use_ipv6-6d93c26f52a5b487.yaml +releasenotes/notes/neutron-connection-retries-c276010afe238abc.yaml releasenotes/notes/neutron-mtu-6a7edd9e396107d7.yaml releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml releasenotes/notes/neutron-via-ksa-9f386b09cff98a9e.yaml @@ -3944,6 +4049,7 @@ releasenotes/notes/placement-allocations-link-in-get-resource-providers-0b1d26a264eceb4b.yaml releasenotes/notes/placement-api-endpoint-interface-set-29af8b9400ce7775.yaml releasenotes/notes/placement-api-member-of-d8a08d0d0c5700d7.yaml +releasenotes/notes/placement-audit-59a00dcfb188c6ac.yaml releasenotes/notes/placement-claims-844540aa7bf52b33.yaml releasenotes/notes/placement-config-section-59891ba38e0749e7.yaml releasenotes/notes/placement-cors-c7a83e8c63787736.yaml @@ -3993,6 +4099,7 @@ releasenotes/notes/recheck-quota-conf-043a5d6057b33282.yaml releasenotes/notes/records-list-skip-down-cells-84d995e75c77c041.yaml releasenotes/notes/refresh-quotas-usage-362b239171c75f5f.yaml +releasenotes/notes/register-allocation-per-cell-9177b3e2161a632c.yaml releasenotes/notes/reject-interface-attach-with-port-resource-request-17473ddc5a989a2a.yaml releasenotes/notes/reject-networks-with-qos-policy-2746c74fd1f3ff26.yaml releasenotes/notes/remove-api-rate-limit-option-91a17e057081381a.yaml @@ -4129,6 +4236,7 @@ releasenotes/notes/scheduler-upcalls-with-isolated-cells-0100eb5d1f212210.yaml releasenotes/notes/scheduling-to-disabled-hosts-79f5b5d20a42875a.yaml releasenotes/notes/send-notification-when-instance-tag-changes-67c08000b6e0cd2a.yaml +releasenotes/notes/separate-update-and-swap-volume-policy-for-attachment-e4c20d4907a52fa7.yaml releasenotes/notes/server_migrations-30519b35d3ea6763.yaml releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml releasenotes/notes/service-uuid-online-migration-17d48f198a6d4deb.yaml @@ -4150,11 +4258,10 @@ releasenotes/notes/stein-remove-hide_server_address_states-edbc36bc02e1df52.yaml 
releasenotes/notes/stop-scheduling-to-disabled-cells-eadbfe30d1f6be65.yaml releasenotes/notes/support-cold-migrating-neutron-ports-with-resource-request-6d23be654a253625.yaml -releasenotes/notes/support-evacuting-servers-with-neutron-ports-with-resource-request-04cf8c721cbc376f.yaml -releasenotes/notes/support-live-migrating-servers-with-neutron-ports-with-resource-request-cf9a21dacb9c5ece.yaml releasenotes/notes/support-neutron-ports-with-resource-request-cb9ad5e9757792d0.yaml releasenotes/notes/support-novnc-1.1.0-ce677fe3381b2a11.yaml releasenotes/notes/support-qemu-native-tls-for-migration-31d8b0ae9eb2c893.yaml +releasenotes/notes/support-server-move-operations-with-neutron-ports-with-resource-request-c41598d0e4aef37b.yaml releasenotes/notes/support-tag-when-boot-4dd124371e3ef446.yaml releasenotes/notes/support-to-query-nova-resources-filter-by-changes-before-e4942cde61070e28.yaml releasenotes/notes/supported-virtuozzo-version-569db9259a7ee579.yaml @@ -4171,6 +4278,7 @@ releasenotes/notes/trusted-certs-microversion-589b75f0180d4d51.yaml releasenotes/notes/trusted-metatada-b999f1417f678c44.yaml releasenotes/notes/trusted-vfs-abee6dff7c9b6940.yaml +releasenotes/notes/unauthed-version-discovery-cc38986617dc1c02.yaml releasenotes/notes/undeprecate-dhcp_domain-opt-77c9154c5b06e0ff.yaml releasenotes/notes/unsettable-keymap-settings-fa831c02e4158507.yaml releasenotes/notes/unversioned-as-default-notification_format-f149db44b319aa07.yaml @@ -4194,6 +4302,7 @@ releasenotes/notes/versioned-notification-interface-is-complete-06725d7d4d761849.yaml releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml releasenotes/notes/vgpu-18da86834c90f041.yaml +releasenotes/notes/vgpu-multiple-types-2b1ded7d1cc28880.yaml releasenotes/notes/vhost-user-mtu-23d0af36a8adfa56.yaml releasenotes/notes/vif-vrouter-multiqueue-077785e1a2d242a0.yaml releasenotes/notes/virt-device-tagged-attach-53e214d3b3fdd183.yaml diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/PKG-INFO nova-21.0.0~b3~git2020041013.57ff308d6d/PKG-INFO --- nova-21.0.0~b2~git2020021008.1fcd74730d/PKG-INFO 2020-02-10 08:50:38.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/PKG-INFO 2020-04-10 17:58:19.330579500 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: nova -Version: 20.1.0.dev969 +Version: 20.1.0.dev1547 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/absolutely-non-inheritable-image-properties-85f7f304fdc20b61.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/absolutely-non-inheritable-image-properties-85f7f304fdc20b61.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/absolutely-non-inheritable-image-properties-85f7f304fdc20b61.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/absolutely-non-inheritable-image-properties-85f7f304fdc20b61.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,37 @@ +--- +issues: + - | + In prior releases, an attempt to boot an instance directly from an image + that was created by the Block Storage Service from an encrypted volume + resulted in the instance going ACTIVE but being unusable. 
If a user then + performed the image-create action on such an instance, the new image would + inherit the ``cinder_encryption_key_id`` and, beginning with the 20.0.0 + (Train) release, the ``cinder_encryption_key_deletion_policy`` image + properties, assuming these were not included in the + ``non_inheritable_image_properties`` configuration option. (The default + setting for that option does *not* include these.) Beginning with 20.0.0 + (Train), when the new image was deleted, the encryption key for the + *original* image would be deleted, thereby rendering it unusable for the + normal workflow of creating a volume from the image and booting an instance + from the volume. Beginning with this release: + + * The Compute API will return a 400 (Bad Request) response to a request + to directly boot an image created from an encrypted volume. + * The image properties ``cinder_encryption_key_id`` and + ``cinder_encryption_key_deletion_policy`` are absolutely non-inheritable + regardless of the ``non_inheritable_image_properties`` setting. +upgrade: + - | + The ``non_inheritable_image_properties`` configuration option inhibits + the transfer of image properties from the image an instance was created + from to images created from that instance. There are, however, image + properties (for example, the properties used for image signature + validation) that should *never* be transferred to an instance snapshot. + Prior to this release, such properties were included in the default + setting for this configuration option, but this allowed the possibility + that they might be removed by mistake, thereby resulting in a poor user + experience. To prevent that from happening, nova now maintains an + internal list of image properties that are absolutely non-inheritable + regardless of the setting of the configuration option. See the help + text for ``non_inheritable_image_properties`` in the sample nova + configuration file for details. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/accelerator-requests-6c9a6fef77ab776a.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/accelerator-requests-6c9a6fef77ab776a.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/accelerator-requests-6c9a6fef77ab776a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/accelerator-requests-6c9a6fef77ab776a.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,14 @@ +--- +features: + - | + Handling accelerator requests for an instance is now supported (where + supported by the underlying virt driver) as of microversion + 2.82. The Cyborg service generates a binding-completion event + for each accelerator request (ARQ) associated with an instance.
+ A new event, ``accelerator_request_bound``, is added for this purpose + to the API ``POST /os-server-external-events``. + + The operations that are supported or unsupported for + instances with accelerators are listed in the + `accelerator operation guide + `_. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/add-support-for-live-migration-with-vpmem-9af5057dbe551f3b.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/add-support-for-live-migration-with-vpmem-9af5057dbe551f3b.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/add-support-for-live-migration-with-vpmem-9af5057dbe551f3b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/add-support-for-live-migration-with-vpmem-9af5057dbe551f3b.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + The libvirt driver now supports live migration with virtual persistent + memory (vPMEM), which requires QEMU as the hypervisor. At the + virtualization layer, QEMU copies vPMEM over the network like volatile + memory; due to the typically large capacity of vPMEM, live migration + may take longer. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/allow-non-admin-filter-instance-more-filter-ea5abad7c32ff328.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/allow-non-admin-filter-instance-more-filter-ea5abad7c32ff328.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/allow-non-admin-filter-instance-more-filter-ea5abad7c32ff328.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/allow-non-admin-filter-instance-more-filter-ea5abad7c32ff328.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,17 @@ +--- + features: + - | + Allow the following filter parameters for ``GET /servers/detail`` + and ``GET /servers`` for non-admin users in microversion 2.83: + + - availability_zone + - config_drive + - key_name + - created_at + - launched_at + - terminated_at + - power_state + - task_state + - vm_state + - progress + - user_id diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-action-event-fault-details-8bfabc6e7390446a.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-action-event-fault-details-8bfabc6e7390446a.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-action-event-fault-details-8bfabc6e7390446a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-action-event-fault-details-8bfabc6e7390446a.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + With microversion 2.84, the + ``GET /servers/{server_id}/os-instance-actions/{request_id}`` API returns + a ``details`` parameter for each failed event with a fault message, similar + to the server ``fault.message`` parameter in ``GET /servers/{server_id}`` + for a server with status ``ERROR``.
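The microversion 2.84 change above is purely additive: a client opts in through the microversion header and reads ``details`` from failed action events. A hedged sketch using ``requests`` (the endpoint, token and IDs are placeholders, not real values)::

    import requests

    NOVA = 'http://controller:8774/v2.1'  # hypothetical endpoint
    HEADERS = {
        'X-Auth-Token': 'gAAAA...',       # hypothetical token
        # Opt in to microversion 2.84 so failed events carry 'details'.
        'OpenStack-API-Version': 'compute 2.84',
    }
    server_id = '9eeca369-...'            # hypothetical server UUID
    request_id = 'req-f8a59f03-...'       # hypothetical request ID

    resp = requests.get(
        '%s/servers/%s/os-instance-actions/%s'
        % (NOVA, server_id, request_id), headers=HEADERS)
    for event in resp.json()['instanceAction']['events']:
        if event['result'] == 'Error':
            # 'details' carries the fault message of the failed event.
            print(event['event'], event.get('details'))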
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-destroy-instance-with-datavolume-4c71b12e005832b0.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-destroy-instance-with-datavolume-4c71b12e005832b0.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-destroy-instance-with-datavolume-4c71b12e005832b0.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-destroy-instance-with-datavolume-4c71b12e005832b0.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Microversion 2.85 adds the new API + ``PUT /servers/{server_id}/os-volume_attachments/{volume_id}``, which + supports specifying the ``delete_on_termination`` field in the request + body to reconfigure whether the attached volume is deleted when the + instance is deleted. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-support-delete-on-termination-in-server-attach-volume-5d08b4e97fdd24f9.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-support-delete-on-termination-in-server-attach-volume-5d08b4e97fdd24f9.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bp-support-delete-on-termination-in-server-attach-volume-5d08b4e97fdd24f9.yaml 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bp-support-delete-on-termination-in-server-attach-volume-5d08b4e97fdd24f9.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -5,7 +5,8 @@ field in the request body when attaching a volume to a server, to support configuring whether to delete the data volume when the server is destroyed. Also, ``delete_on_termination`` is added to the GET responses when showing - attached volumes. + attached volumes, and the ``delete_on_termination`` field is now included + in the POST API response body when attaching a volume. The affected APIs are as follows: diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1842149-5ba20d57872e9996.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1842149-5ba20d57872e9996.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1842149-5ba20d57872e9996.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1842149-5ba20d57872e9996.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,17 @@ +--- +other: + - | + A new pair of ``ssl_ciphers`` and ``ssl_minimum_version`` configuration + options have been introduced for use by the ``nova-novncproxy``, + ``nova-serialproxy``, and ``nova-spicehtml5proxy`` services. These new + options allow one to configure the allowed TLS ciphers and minimum protocol + version to enforce for incoming client connections to the proxy services. + + This aims to address the issues reported in `bug 1842149`_, which + describes how the proxy services can inherit insecure TLS ciphers + and protocol versions from the compiled-in defaults of the OpenSSL + library on the underlying system. The proxy services provided no way + to override such insecure defaults with current, generally accepted + secure TLS settings. + + ..
_bug 1842149: https://bugs.launchpad.net/nova/+bug/1842149 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1845628-3152e73a1e4856b2.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1845628-3152e73a1e4856b2.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1845628-3152e73a1e4856b2.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1845628-3152e73a1e4856b2.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,13 @@ +--- +fixes: + - | + Prior to this release, Nova determined whether ``UEFI`` support should + be enabled solely by checking host support, as reported in `bug 1845628`_. + + Nova now correctly checks for guest OS support via the ``hw_firmware_type`` + image metadata property when spawning new instances and only + enables ``UEFI`` if both the guest and the host support it. + Guest deletion has also been updated to correctly clean up based on + the ``UEFI`` or ``BIOS`` configuration of the VM. + + .. _bug 1845628: https://bugs.launchpad.net/nova/+bug/1845628 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1864588-737c29560effd16e.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1864588-737c29560effd16e.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/bug-1864588-737c29560effd16e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/bug-1864588-737c29560effd16e.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + For AArch64, Nova now sets ``max`` as the default CPU model. This does + the right thing in the context of both QEMU TCG (plain emulation) and + KVM (hardware acceleration). diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/cinder-detect-nonbootable-image-6fad7f865b45f879.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/cinder-detect-nonbootable-image-6fad7f865b45f879.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/cinder-detect-nonbootable-image-6fad7f865b45f879.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/cinder-detect-nonbootable-image-6fad7f865b45f879.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,9 @@ +--- +fixes: + - | + The Compute service has never supported direct booting of an instance from + an image that was created by the Block Storage service from an encrypted + volume. Previously, this operation would result in an ACTIVE instance that + was unusable. Beginning with this release, an attempt to boot from such an + image will result in the Compute API returning a 400 (Bad Request) + response. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,11 @@ +--- +deprecations: + - | + The ``[scheduler] driver`` config option has been deprecated. This was + previously used to switch between different scheduler drivers including + custom, out-of-tree ones.
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-scheduler-driver-opt-4d6a266590b52e2c.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,11 @@ +--- +deprecations: + - | + The ``[scheduler] driver`` config option has been deprecated. This was + previously used to switch between different scheduler drivers, including + custom, out-of-tree ones. However, only the ``FilterScheduler`` has been + supported in-tree since 19.0.0 (Stein), and nova increasingly relies on + placement for basic functionality, meaning developing and maintaining + out-of-tree drivers is increasingly difficult. Users who still rely on a + custom scheduler driver should migrate to the filter scheduler, using + custom filters and weighers where necessary. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-vmware-ussuri-39e0215eca80ffd7.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-vmware-ussuri-39e0215eca80ffd7.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/deprecate-vmware-ussuri-39e0215eca80ffd7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/deprecate-vmware-ussuri-39e0215eca80ffd7.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,6 @@ +--- +deprecations: + - | + The vmwareapi driver is deprecated in this release and may be + removed in a future one. The driver is not tested by the OpenStack + Nova project and does not have a clear maintainer. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/flavor-extra-spec-validators-76d1f2e52ba753db.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,21 @@ +--- +features: + - | + The 2.86 microversion adds support for flavor extra spec validation when + creating or updating flavor extra specs. Use of an unrecognized or invalid + flavor extra spec in the following namespaces will result in an HTTP 400 + response (see the example below). + + - ``accel`` + - ``aggregate_instance_extra_specs`` + - ``capabilities`` + - ``hw`` + - ``hw_rng`` + - ``hw_video`` + - ``os`` + - ``pci_passthrough`` + - ``powervm`` + - ``quota`` + - ``resources`` (including ``_{group}`` suffixes) + - ``trait`` (including ``_{group}`` suffixes) + - ``vmware`` diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/image-metadata-prefiltering-2921c1d38951f7a9.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/image-metadata-prefiltering-2921c1d38951f7a9.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/image-metadata-prefiltering-2921c1d38951f7a9.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/image-metadata-prefiltering-2921c1d38951f7a9.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,11 @@ +--- +features: + - | + A new image metadata prefilter has been added to allow translation of + hypervisor-specific device model requests to standard traits. When this + feature is enabled, nova can use placement to select hosts that + are capable of emulating the requested devices, avoiding hosts that + could not support the request. This feature is currently supported by the + libvirt driver and can be enabled by setting + ``[scheduler]/image_metadata_prefilter`` to ``True`` in the controller + ``nova.conf``.
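A sketch of the microversion 2.86 extra spec validation described in the flavor-extra-spec-validators note above. The flavor name and the deliberately misspelled property are illustrative, and it is assumed the client forwards the requested microversion to the extra spec API:

.. code::

   # expected to fail with an HTTP 400: ``hw:cpu_polcy`` is not a
   # recognized spec in the validated ``hw`` namespace
   openstack --os-compute-api-version 2.86 flavor set \
       --property hw:cpu_polcy=dedicated $FLAVOR

The same command with the correct ``hw:cpu_policy`` key should succeed.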
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/neutron-connection-retries-c276010afe238abc.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/neutron-connection-retries-c276010afe238abc.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/neutron-connection-retries-c276010afe238abc.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/neutron-connection-retries-c276010afe238abc.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,12 @@ +--- +fixes: + - | + A new config option, ``[neutron]http_retries``, is added, which defaults + to 3. It controls how many times to retry a Neutron API call in response + to an HTTP connection failure. An example scenario where it helps is when + a deployment is using HAProxy and connections get closed after idle time. + If an incoming request tries to re-use a connection that is simultaneously + being torn down, an HTTP connection failure will occur; previously, Nova + would fail the entire request. With retries, Nova can be more resilient in + this scenario and complete the request if a retry succeeds. Refer to + https://launchpad.net/bugs/1866937 for more details. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/placement-audit-59a00dcfb188c6ac.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/placement-audit-59a00dcfb188c6ac.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/placement-audit-59a00dcfb188c6ac.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/placement-audit-59a00dcfb188c6ac.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,11 @@ +--- +other: + - | + A new ``nova-manage`` command, ``placement audit``, has been added. + This can be used to identify and optionally remove compute allocations in + placement that are no longer referenced by existing instances or + migrations. Such orphaned allocations typically occur due to race + conditions during instance migration or removal and will result in + capacity issues if not addressed (see the command sketch below). + For more details on CLI usage, see the man page entry: + https://docs.openstack.org/nova/latest/cli/nova-manage.html#placement diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/register-allocation-per-cell-9177b3e2161a632c.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/register-allocation-per-cell-9177b3e2161a632c.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/register-allocation-per-cell-9177b3e2161a632c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/register-allocation-per-cell-9177b3e2161a632c.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,4 @@ +--- +features: + - Add a ``--cell`` option to the ``nova-manage placement heal_allocations`` + command. This option allows healing instance allocations within a specific cell.
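A brief sketch of the ``nova-manage`` workflows from the placement notes above, using the documented ``--verbose`` and ``--delete`` flags of ``placement audit``; the cell UUID is a placeholder:

.. code::

   # report orphaned placement allocations without changing anything
   nova-manage placement audit --verbose

   # remove the orphaned allocations that the audit finds
   nova-manage placement audit --delete

   # heal instance allocations within a single cell only
   nova-manage placement heal_allocations --cell $CELL_UUID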
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/separate-update-and-swap-volume-policy-for-attachment-e4c20d4907a52fa7.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/separate-update-and-swap-volume-policy-for-attachment-e4c20d4907a52fa7.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/separate-update-and-swap-volume-policy-for-attachment-e4c20d4907a52fa7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/separate-update-and-swap-volume-policy-for-attachment-e4c20d4907a52fa7.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + With microversion 2.85, the existing ``os-volumes-attachments:update`` + policy is used for updating volume attachments, and its default value + changes from ``SYSTEM_ADMIN`` to ``PROJECT_MEMBER_OR_SYSTEM_ADMIN``. + A new ``os-volumes-attachments:swap`` policy is introduced for swapping + server volume attachments, with a default of ``SYSTEM_ADMIN``. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-evacuting-servers-with-neutron-ports-with-resource-request-04cf8c721cbc376f.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-evacuting-servers-with-neutron-ports-with-resource-request-04cf8c721cbc376f.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-evacuting-servers-with-neutron-ports-with-resource-request-04cf8c721cbc376f.yaml 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-evacuting-servers-with-neutron-ports-with-resource-request-04cf8c721cbc376f.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ ---- -features: - - | - The server ``evacute`` action API now supports servers with neutron - ports having resource requests, e.g. ports that have QoS minimum bandwidth - rules attached. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-live-migrating-servers-with-neutron-ports-with-resource-request-cf9a21dacb9c5ece.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-live-migrating-servers-with-neutron-ports-with-resource-request-cf9a21dacb9c5ece.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-live-migrating-servers-with-neutron-ports-with-resource-request-cf9a21dacb9c5ece.yaml 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-live-migrating-servers-with-neutron-ports-with-resource-request-cf9a21dacb9c5ece.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ ---- -features: - - | - The server ``os-migrateLive`` action API now supports servers with neutron - ports having resource requests, e.g. ports that have QoS minimum bandwidth - rules attached.
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-server-move-operations-with-neutron-ports-with-resource-request-c41598d0e4aef37b.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-server-move-operations-with-neutron-ports-with-resource-request-c41598d0e4aef37b.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/support-server-move-operations-with-neutron-ports-with-resource-request-c41598d0e4aef37b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/support-server-move-operations-with-neutron-ports-with-resource-request-c41598d0e4aef37b.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,6 @@ +--- +features: + - | + The server ``evacuate``, ``os-migrateLive`` and ``unshelve`` action APIs + now support servers with neutron ports having resource requests, e.g. + ports that have QoS minimum bandwidth rules attached. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/unauthed-version-discovery-cc38986617dc1c02.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/unauthed-version-discovery-cc38986617dc1c02.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/unauthed-version-discovery-cc38986617dc1c02.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/unauthed-version-discovery-cc38986617dc1c02.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,22 @@ +--- +upgrade: + - | + New paste pipelines and middleware have been created to allow API version + discovery to be performed without authentication or redirects. Because this + involves an ``api-paste.ini`` change, you will need to manually update your + ``api-paste.ini`` with the one from the release to get this functionality. +fixes: + - | + When using the ``api-paste.ini`` from the release, version discovery + requests without a trailing slash will no longer receive a 302 redirect to + the corresponding URL with a trailing slash (e.g. a request for ``/v2.1`` + will no longer redirect to ``/v2.1/``). Instead, such requests will respond + with the version discovery document regardless of the presence of the + trailing slash. See + `bug 1728732 <https://bugs.launchpad.net/nova/+bug/1728732>`_ for details. + - | + When using the ``api-paste.ini`` from the release, requests to the + versioned discovery endpoints (``/v2.1`` and ``/v2``) no longer require + authentication. When using the compute API through certain clients, such as + openstacksdk, this eliminates an unnecessary additional query (see the + request example below). See + `bug 1845530 <https://bugs.launchpad.net/nova/+bug/1845530>`_ for details. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/vgpu-multiple-types-2b1ded7d1cc28880.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/vgpu-multiple-types-2b1ded7d1cc28880.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/releasenotes/notes/vgpu-multiple-types-2b1ded7d1cc28880.yaml 1970-01-01 00:00:00.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/releasenotes/notes/vgpu-multiple-types-2b1ded7d1cc28880.yaml 2020-04-10 17:57:58.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + The libvirt driver now supports defining different virtual GPU types for + each physical GPU. See the ``[devices]/enabled_vgpu_types`` configuration + option for details on how to configure this, and refer to + https://docs.openstack.org/nova/latest/admin/virtual-gpu.html for further + documentation.
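To illustrate the unauthenticated version discovery described in the note above, a bare request with no token and no trailing slash should now return the discovery document directly rather than a 302 redirect or an authentication error; the endpoint is a placeholder:

.. code::

   # no X-Auth-Token header needed; responds with the version document
   curl -s http://controller:8774/v2.1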
diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/requirements.txt nova-21.0.0~b3~git2020041013.57ff308d6d/requirements.txt --- nova-21.0.0~b2~git2020021008.1fcd74730d/requirements.txt 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/requirements.txt 2020-04-10 17:57:58.000000000 +0000 @@ -32,16 +32,16 @@ requests>=2.14.2 # Apache-2.0 six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 -websockify>=0.8.0 # LGPLv3 +websockify>=0.9.0 # LGPLv3 oslo.cache>=1.26.0 # Apache-2.0 -oslo.concurrency>=3.26.0 # Apache-2.0 +oslo.concurrency>=3.29.0 # Apache-2.0 oslo.config>=6.1.0 # Apache-2.0 oslo.context>=2.21.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.21.1 # Apache-2.0 oslo.upgradecheck>=0.1.1 -oslo.utils>=3.40.2 # Apache-2.0 +oslo.utils>=4.1.0 # Apache-2.0 oslo.db>=4.44.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.messaging>=10.3.0 # Apache-2.0 @@ -55,7 +55,7 @@ oslo.versionedobjects>=1.35.0 # Apache-2.0 os-brick>=2.6.2 # Apache-2.0 os-resource-classes>=0.4.0 # Apache-2.0 -os-traits>=2.1.0 # Apache-2.0 +os-traits>=2.2.0 # Apache-2.0 os-vif>=1.14.0 # Apache-2.0 os-win>=3.0.0 # Apache-2.0 castellan>=0.16.0 # Apache-2.0 @@ -71,3 +71,4 @@ zVMCloudConnector>=1.3.0;sys_platform!='win32' # Apache 2.0 License futurist>=1.8.0 # Apache-2.0 openstacksdk>=0.35.0 # Apache-2.0 +dataclasses>=0.7;python_version=='3.6' # Apache 2.0 License diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/setup.cfg nova-21.0.0~b3~git2020041013.57ff308d6d/setup.cfg --- nova-21.0.0~b2~git2020021008.1fcd74730d/setup.cfg 2020-02-10 08:50:38.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/setup.cfg 2020-04-10 17:58:19.330579500 +0000 @@ -41,16 +41,26 @@ oslo.policy.enforcer = nova = nova.policy:get_enforcer oslo.policy.policies = - # The sample policies will be ordered by entry point and then by list - # returned from that entry point. If more control is desired split out each - # list_rules method into a separate entry point rather than using the - # aggregate method. 
nova = nova.policies:list_rules +nova.api.extra_spec_validators = + accel = nova.api.validation.extra_specs.accel + aggregate_instance_extra_specs = nova.api.validation.extra_specs.aggregate_instance_extra_specs + capabilities = nova.api.validation.extra_specs.capabilities + hw = nova.api.validation.extra_specs.hw + hw_rng = nova.api.validation.extra_specs.hw_rng + hw_video = nova.api.validation.extra_specs.hw_video + null = nova.api.validation.extra_specs.null + os = nova.api.validation.extra_specs.os + pci_passthrough = nova.api.validation.extra_specs.pci_passthrough + powervm = nova.api.validation.extra_specs.powervm + quota = nova.api.validation.extra_specs.quota + resources = nova.api.validation.extra_specs.resources + traits = nova.api.validation.extra_specs.traits + vmware = nova.api.validation.extra_specs.vmware nova.compute.monitors.cpu = virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor nova.scheduler.driver = filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler - fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler console_scripts = nova-api = nova.cmd.api:main nova-api-metadata = nova.cmd.api_metadata:main @@ -84,9 +94,6 @@ mapping_file = babel.cfg output_file = nova/locale/nova.pot -[bdist_wheel] -universal = 1 - [egg_info] tag_build = tag_date = 0 diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/tox.ini nova-21.0.0~b3~git2020041013.57ff308d6d/tox.ini --- nova-21.0.0~b2~git2020021008.1fcd74730d/tox.ini 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/tox.ini 2020-04-10 17:57:58.000000000 +0000 @@ -238,7 +238,7 @@ # E731 temporarily skipped because of the number of # these that have to be fixed enable-extensions = H106,H203,H904 -ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W504,E731 +ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W504,E731,H238 exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,tools/xenserver*,releasenotes # To get a list of functions that are more complex than 25, set max-complexity # to 25 and run 'tox -epep8'. diff -Nru nova-21.0.0~b2~git2020021008.1fcd74730d/.zuul.yaml nova-21.0.0~b3~git2020041013.57ff308d6d/.zuul.yaml --- nova-21.0.0~b2~git2020021008.1fcd74730d/.zuul.yaml 2020-02-10 08:50:32.000000000 +0000 +++ nova-21.0.0~b3~git2020041013.57ff308d6d/.zuul.yaml 2020-04-10 17:57:57.000000000 +0000 @@ -21,6 +21,7 @@ - ^doc/.*$ - ^nova/hacking/.*$ - ^nova/locale/.*$ + - ^nova/policies/.*$ - ^nova/tests/.*$ - ^nova/test.py$ - ^releasenotes/.*$ @@ -243,7 +244,7 @@ - job: name: nova-tempest-full-oslo.versionedobjects - parent: tempest-full + parent: tempest-full-py3 description: | Run test with git version of oslo.versionedobjects to check that changes to nova will work with the next released version of @@ -378,11 +379,33 @@ - nova-next - nova-tox-functional-py36 - tempest-integrated-compute: - irrelevant-files: *dsvm-irrelevant-files + # NOTE(gmann): Policy changes do not need to run all the + # integration test jobs. Running only the tempest and grenade + # common jobs is enough, along with nova functional + # and unit tests.
+ irrelevant-files: &policies-irrelevant-files + - ^api-.*$ + - ^(test-|)requirements.txt$ + - ^.*\.rst$ + - ^.git.*$ + - ^doc/.*$ + - ^nova/hacking/.*$ + - ^nova/locale/.*$ + - ^nova/tests/.*$ + - ^nova/test.py$ + - ^releasenotes/.*$ + - ^setup.cfg$ + - ^tools/.*$ + - ^tox.ini$ - grenade-py3: - irrelevant-files: *dsvm-irrelevant-files + irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: irrelevant-files: *dsvm-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *dsvm-irrelevant-files + - cyborg-tempest: + irrelevant-files: *dsvm-irrelevant-files + voting: false gate: jobs: - nova-grenade-multinode @@ -391,11 +414,13 @@ - nova-multi-cell - nova-next - tempest-integrated-compute: - irrelevant-files: *dsvm-irrelevant-files + irrelevant-files: *policies-irrelevant-files - grenade-py3: - irrelevant-files: *dsvm-irrelevant-files + irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: irrelevant-files: *dsvm-irrelevant-files + - openstacksdk-functional-devstack: + irrelevant-files: *dsvm-irrelevant-files experimental: jobs: - ironic-tempest-bfv: @@ -430,3 +455,7 @@ # NOTE(mriedem): Consider moving nova-tox-functional-py37 to the # check and gate queues once it's stable (like openstack-python37-jobs) - nova-tox-functional-py37 + - devstack-platform-fedora-latest: + irrelevant-files: *dsvm-irrelevant-files + - devstack-platform-fedora-latest-virt-preview: + irrelevant-files: *dsvm-irrelevant-files