diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/baremetal-api-v1-nodes.inc ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/baremetal-api-v1-nodes.inc --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/baremetal-api-v1-nodes.inc 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/baremetal-api-v1-nodes.inc 2020-04-10 17:06:41.000000000 +0000 @@ -101,6 +101,9 @@ .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. +.. versionadded:: 1.65 + Introduced the ``lessee`` field. + Normal response codes: 201 Error codes: 400,403,406 @@ -131,6 +134,7 @@ - vendor_interface: req_vendor_interface - owner: owner - description: n_description + - lessee: lessee **Example Node creation request with a dynamic driver:** @@ -201,6 +205,7 @@ - protected_reason: protected_reason - conductor: conductor - owner: owner + - lessee: lessee - description: n_description - allocation_uuid: allocation_uuid @@ -260,6 +265,9 @@ .. versionadded:: 1.51 Introduced the ``description`` field. +.. versionadded:: 1.65 + Introduced the ``lessee`` field. + Normal response codes: 200 Error codes: 400,403,406 @@ -279,6 +287,7 @@ - conductor: r_conductor - fault: r_fault - owner: owner + - lessee: lessee - description_contains: r_description_contains - fields: fields - limit: limit @@ -347,6 +356,9 @@ .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. +.. versionadded:: 1.65 + Introduced the ``lessee`` field. + Normal response codes: 200 Error codes: 400,403,406 @@ -366,6 +378,7 @@ - conductor_group: r_conductor_group - conductor: r_conductor - owner: owner + - lessee: lessee - description_contains: r_description_contains - limit: limit - marker: marker @@ -423,6 +436,7 @@ - protected: protected - protected_reason: protected_reason - owner: owner + - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid @@ -474,6 +488,9 @@ .. versionadded:: 1.61 Introduced the ``retired`` and ``retired_reason`` fields. +.. versionadded:: 1.65 + Introduced the ``lessee`` field. + Normal response codes: 200 Error codes: 400,403,404,406 @@ -537,6 +554,7 @@ - protected: protected - protected_reason: protected_reason - owner: owner + - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid @@ -632,6 +650,7 @@ - protected: protected - protected_reason: protected_reason - owner: owner + - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/parameters.yaml ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/parameters.yaml --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/parameters.yaml 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/parameters.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -910,6 +910,12 @@ in: body required: true type: string +lessee: + description: | + A string or UUID of the tenant who is leasing the object. + in: body + required: false + type: string links: description: | A list of relative links. 
Includes the self and diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-create-response.json ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-create-response.json --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-create-response.json 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-create-response.json 2020-04-10 17:06:41.000000000 +0000 @@ -23,6 +23,7 @@ "instance_info": {}, "instance_uuid": null, "last_error": null, + "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-show-response.json ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-show-response.json --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-show-response.json 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-show-response.json 2020-04-10 17:06:41.000000000 +0000 @@ -26,6 +26,7 @@ "instance_info": {}, "instance_uuid": null, "last_error": null, + "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/nodes-list-details-response.json ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/nodes-list-details-response.json --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/nodes-list-details-response.json 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/nodes-list-details-response.json 2020-04-10 17:06:41.000000000 +0000 @@ -28,6 +28,7 @@ "instance_info": {}, "instance_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "last_error": null, + "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", @@ -132,6 +133,7 @@ "instance_info": {}, "instance_uuid": null, "last_error": null, + "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428", diff -Nru ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-update-driver-info-response.json ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-update-driver-info-response.json --- ironic-14.0.1~git2020032415.de2d907fc/api-ref/source/samples/node-update-driver-info-response.json 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/api-ref/source/samples/node-update-driver-info-response.json 2020-04-10 17:06:41.000000000 +0000 @@ -27,6 +27,7 @@ "instance_info": {}, "instance_uuid": null, "last_error": null, + "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", diff -Nru ironic-14.0.1~git2020032415.de2d907fc/AUTHORS ironic-14.0.1~git2020041013.af9e6ba90/AUTHORS --- ironic-14.0.1~git2020032415.de2d907fc/AUTHORS 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/AUTHORS 2020-04-10 17:06:45.000000000 +0000 @@ -528,6 +528,7 @@ shuangyang.qian sjing sonu.kumar +spranjali srobert stephane suichangyin diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ChangeLog ironic-14.0.1~git2020041013.af9e6ba90/ChangeLog --- ironic-14.0.1~git2020032415.de2d907fc/ChangeLog 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ChangeLog 2020-04-10 17:06:45.000000000 +0000 @@ -1,14 +1,53 @@ CHANGES ======= +* Change default ram value +* 
Added node multitenancy doc +* Support burning configdrive into boot ISO +* Add sushy-cli to client libraries release list +* Fix AttributeError in check allowed port fields +* Fix gunicorn name on Py3@CentOS7 in devstack +* Add node lessee field +* [doc] Images need some metadata for software RAID +* Drop netaddr - use netutils.is\_valid\_ipv6() +* Allow INSPECTWAIT state for lookup +* Improve \`redfish\` set-boot-mode implementation +* Change multinode job to voting +* Cleanup Python 2.7 support +* Use auth values from neutron conf when managing Neutron ports +* Fetch netmiko session log +* Doc - IPv6 Provisioning +* Additional IP addresses to IPv6 stateful ports +* Add network\_type to port local\_link\_connection +* Make oslo.i18n an optional dependency +* Make oslo.reports an optional dependency +* Do not autoescape all Jinja2 templates +* Make deploy step failure logging indicate the error +* Fix the remaining hacking issues +* Bump hacking to 3.0.0 +* Extend install\_bootloader command timeout +* Document deploy\_boot\_mode and boot\_option for standalone deployments +* Remove future usage +* Fix enabled\_hardware\_types from idrac-wsman to idrac +* Document our policies for stable branches +* Retry agent get\_command\_status upon failures +* Add troubleshooting on IPMI section +* Default IRONIC\_RAMDISK\_TYPE to dib +* Generalize clean step functions to support deploy steps +* Raise human-friendly messages on attempt to use pre-deploy steps drivers +* Hash the rescue\_password +* DRAC: Fix a failure to create virtual disk bug +* [doc] Add documentation for retirement support * Add info on how to enable ironic-tempest-plugin * Follow-up releasenote use\_secrets * Add indicators REST API endpoints * Do not use random to generate token +* Signal agent token is required * Support centos 7 rootwrap data directory * Refactoring: split out wrap\_ipv6 * Refactoring: move iSCSI deploy code to iscsi\_deploy.py * Clean up nits from adding additional node update policies +* Allow specifying target devices for software RAID * Documentation clarifications for software RAID * Drop rootwrap.d/ironic-lib.filters file * Expand user-image doc @@ -18,6 +57,7 @@ * Remove compatibility with pre-deploy steps drivers * Extend power sync timeout for Ericsson SDI * Skip clean steps from 'fake' interfaces in the documentation +* Rename ironic-tox-unit-with-driver-libs-python3 * Send our token back to the agent * Enable agent\_token for virtual media boot * Add separate policies for updating node instance\_info and extra @@ -76,6 +116,7 @@ * Deprecate ibmc * Fix incorrect ibmc\_address parsing on Python 3.8 * Fix entry paths for cleaning and deployment +* Nodes in maintenance didn't fail, when they should have * Fix API docs for target\_power\_state response * Document using CentOS 8 DIB IPA images for Ussuri and newer * Lower RAM for DIB jobs to 2 GiB diff -Nru ironic-14.0.1~git2020032415.de2d907fc/debian/changelog ironic-14.0.1~git2020041013.af9e6ba90/debian/changelog --- ironic-14.0.1~git2020032415.de2d907fc/debian/changelog 2020-03-24 19:12:51.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/debian/changelog 2020-04-17 08:57:51.000000000 +0000 @@ -1,3 +1,19 @@ +ironic (1:14.0.1~git2020041013.af9e6ba90-0ubuntu2) focal; urgency=medium + + * d/tests/ironic-daemons: increase sleep time between API http checks + to allow more time for service to start, avoiding races on slower + architectures. 
+ + -- James Page Fri, 17 Apr 2020 09:57:51 +0100 + +ironic (1:14.0.1~git2020041013.af9e6ba90-0ubuntu1) focal; urgency=medium + + * New upstream snapshot for OpenStack Ussuri. + * d/ironic-common.postinst: Set ownership and permissions for all /var/lib + files and directories. + + -- Corey Bryant Fri, 10 Apr 2020 13:07:22 -0400 + ironic (1:14.0.1~git2020032415.de2d907fc-0ubuntu1) focal; urgency=medium * New upstream snapshot for OpenStack Ussuri. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/debian/ironic-common.postinst ironic-14.0.1~git2020041013.af9e6ba90/debian/ironic-common.postinst --- ironic-14.0.1~git2020032415.de2d907fc/debian/ironic-common.postinst 2020-03-24 19:12:51.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/debian/ironic-common.postinst 2020-04-17 08:57:51.000000000 +0000 @@ -18,8 +18,8 @@ find /etc/ironic/rootwrap.d -exec chown root:root "{}" + find /etc/ironic/rootwrap.d -type f -exec chmod 0644 "{}" + -o -type d -exec chmod 0755 "{}" + - chown ironic:ironic /var/lib/ironic - chmod 0750 /var/lib/ironic + find /var/lib/ironic -exec chown ironic:ironic "{}" + + find /var/lib/ironic -type f -exec chmod 0640 "{}" + -o -type d -exec chmod 0750 "{}" + default_db=/var/lib/ironic/ironic.db fi diff -Nru ironic-14.0.1~git2020032415.de2d907fc/debian/tests/ironic-daemons ironic-14.0.1~git2020041013.af9e6ba90/debian/tests/ironic-daemons --- ironic-14.0.1~git2020032415.de2d907fc/debian/tests/ironic-daemons 2020-03-24 19:12:51.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/debian/tests/ironic-daemons 2020-04-17 08:57:51.000000000 +0000 @@ -25,14 +25,14 @@ ret=0 timeout_loop () { - TIMEOUT=100 + TIMEOUT=90 while [ "$TIMEOUT" -gt 0 ]; do if $1 > /dev/null 2>&1; then echo "OK" break fi TIMEOUT=$((TIMEOUT - 1)) - sleep 0.1 + sleep 1 done if [ "$TIMEOUT" -le 0 ]; then diff -Nru ironic-14.0.1~git2020032415.de2d907fc/devstack/lib/ironic ironic-14.0.1~git2020041013.af9e6ba90/devstack/lib/ironic --- ironic-14.0.1~git2020032415.de2d907fc/devstack/lib/ironic 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/devstack/lib/ironic 2020-04-10 17:06:41.000000000 +0000 @@ -238,7 +238,7 @@ # Ironic IPA ramdisk type, supported types are: IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(tinyipa|dib)$" -IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-tinyipa} +IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-dib} # Confirm we have a supported ramdisk type or fail early. if [[ ! "$IRONIC_RAMDISK_TYPE" =~ $IRONIC_SUPPORTED_RAMDISK_TYPES_RE ]]; then @@ -479,6 +479,9 @@ # Fast track option IRONIC_DEPLOY_FAST_TRACK=${IRONIC_DEPLOY_FAST_TRACK:-False} +# Agent Token requirement +IRONIC_REQUIRE_AGENT_TOKEN=${IRONIC_REQUIRE_AGENT_TOKEN:-True} + # Define baremetal min_microversion in tempest config. Default value None is picked from tempest. 
TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-}
@@ -913,11 +916,10 @@
     # TODO(lucasagomes): Use Apache WSGI instead of gunicorn
     gunicorn=gunicorn
-    if python3_enabled; then
-        gunicorn=${gunicorn}3
-    fi
-
     if is_ubuntu; then
+        if python3_enabled; then
+            gunicorn=${gunicorn}3
+        fi
         install_package $gunicorn
     else
         pip_install_gr "gunicorn"
@@ -1294,6 +1296,8 @@
     # Set fast track options
     iniset $IRONIC_CONF_FILE deploy fast_track $IRONIC_DEPLOY_FAST_TRACK
+    # Set requirement for agent tokens
+    iniset $IRONIC_CONF_FILE DEFAULT require_agent_token $IRONIC_REQUIRE_AGENT_TOKEN
 
     # No need to check if RabbitMQ is enabled, this call does it in a smart way
     if [[ "$IRONIC_RPC_TRANSPORT" == "oslo" ]]; then
         iniset_rpc_backend ironic $IRONIC_CONF_FILE
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/requirements.txt ironic-14.0.1~git2020041013.af9e6ba90/doc/requirements.txt
--- ironic-14.0.1~git2020032415.de2d907fc/doc/requirements.txt 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/requirements.txt 2020-04-10 17:06:41.000000000 +0000
@@ -2,7 +2,7 @@
 openstackdocstheme>=1.31.2 # Apache-2.0
 os-api-ref>=1.4.0 # Apache-2.0
 reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
 sphinxcontrib-apidoc>=0.2.0 # BSD
 sphinxcontrib-pecanwsme>=0.10.0 # Apache-2.0
 sphinxcontrib-seqdiag>=0.8.4 # BSD
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/agent-token.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/agent-token.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/agent-token.rst 1970-01-01 00:00:00.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/agent-token.rst 2020-04-10 17:06:41.000000000 +0000
@@ -0,0 +1,121 @@
+.. _agent_token:
+
+===========
+Agent Token
+===========
+
+Purpose
+=======
+
+The concept of agent tokens is to provide a mechanism by which the
+relationship between an operating deployment of the Bare Metal Service
+and an instance of the ``ironic-python-agent`` is verified. In a sense,
+this token can be viewed as a session identifier or authentication token.
+
+.. warning::
+   This functionality does not remove the risk of a man-in-the-middle attack
+   that could occur from connection intercept or when TLS is not used for
+   all communication.
+
+This becomes useful in the case of deploying an "edge" node where intermediate
+networks are not trustworthy.
+
+How it works
+============
+
+These tokens are provided in one of two ways to the running agent:
+
+1. A pre-generated token which is embedded into virtual media ISOs.
+2. A one-time generated token that is provided upon the first "lookup"
+   of the node.
+
+In both cases, the tokens are randomly generated strings of 128 characters.
+
+Once the token has been provided, the token cannot be retrieved or accessed.
+It remains available to the conductors, and is stored in memory of the
+``ironic-python-agent``.
+
+.. note::
+   In the case of the token being embedded with virtual media, it is read
+   from a configuration file within the image. Ideally this should be paired
+   with Swift temporary URLs.
+
+Once the token is available in memory in the agent, it is embedded in
+``heartbeat`` operations to the ironic API endpoint. This enables the API to
+authenticate the heartbeat request, and refuse ``heartbeat`` requests that
+arrive without the expected token.
+With the ``Ussuri`` release, the configuration option
+``[DEFAULT]require_agent_token`` can be set to ``True`` to explicitly require
+token use.
+
+.. warning::
+   If the Bare Metal Service is updated, the version of
+   ``ironic-python-agent`` should also be updated to enable this feature.
+
+In addition to heartbeats being verified, commands from the
+``ironic-conductor`` service to the ``ironic-python-agent`` also include the
+token, allowing the agent to authenticate the caller.
+
+
+With Virtual Media
+------------------
+
+.. seqdiag::
+   :scale: 80
+
+   diagram {
+      API; Conductor; Baremetal; Swift; IPA;
+      activation = none;
+      span_height = 1;
+      edge_length = 250;
+      default_note_color = white;
+      default_fontsize = 14;
+
+      Conductor -> Conductor [label = "Generates a random token"];
+      Conductor -> Conductor [label = "Generates configuration for IPA ramdisk"];
+      Conductor -> Swift [label = "IPA image, with configuration, is uploaded"];
+      Conductor -> Baremetal [label = "Attach IPA virtual media in Swift as virtual CD"];
+      Conductor -> Baremetal [label = "Conductor turns power on"];
+      Baremetal -> Swift [label = "Baremetal reads virtual media"];
+      Baremetal -> Baremetal [label = "Boots IPA virtual media image"];
+      Baremetal -> Baremetal [label = "IPA is started"];
+      IPA -> Baremetal [label = "IPA loads configuration and agent token into memory"];
+      IPA -> API [label = "Lookup node"];
+      API -> IPA [label = "API responds with node UUID and token value of '******'"];
+      IPA -> API [label = "Heartbeat with agent token"];
+   }
+
+With PXE/iPXE/etc.
+------------------
+
+.. seqdiag::
+   :scale: 80
+
+   diagram {
+      API; Conductor; Baremetal; iPXE; IPA;
+      activation = none;
+      span_height = 1;
+      edge_length = 250;
+      default_note_color = white;
+      default_fontsize = 14;
+
+      Conductor -> Baremetal [label = "Conductor turns power on"];
+      Baremetal -> iPXE [label = "Baremetal reads kernel/ramdisk and starts boot"];
+      Baremetal -> Baremetal [label = "Boots IPA ramdisk image"];
+      Baremetal -> Baremetal [label = "IPA is started"];
+      IPA -> Baremetal [label = "IPA loads configuration"];
+      IPA -> API [label = "Lookup node"];
+      API -> Conductor [label = "API requests conductor to generate a random token"];
+      API -> IPA [label = "API responds with node UUID and token value"];
+      IPA -> API [label = "Heartbeat with agent token"];
+   }
+
+Agent Configuration
+===================
+
+An additional setting which may be leveraged with the ``ironic-python-agent``
+is an ``agent_token_required`` setting. Under normal circumstances, this
+setting can be asserted via the configuration supplied from the Bare Metal
+service deployment upon the ``lookup`` action, but can also be asserted via
+the embedded configuration for the agent in the ramdisk. This setting is also
+available via the kernel command line as ``ipa-agent-token-required``.
+
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/drivers/idrac.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/drivers/idrac.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/drivers/idrac.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/drivers/idrac.rst 2020-04-10 17:06:41.000000000 +0000
@@ -70,7 +70,7 @@
 ..
code-block:: ini [DEFAULT] - enabled_hardware_types=idrac-wsman + enabled_hardware_types=idrac enabled_management_interfaces=idrac-wsman enabled_power_interfaces=idrac-wsman diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/gmr.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/gmr.rst --- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/gmr.rst 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/gmr.rst 2020-04-10 17:06:41.000000000 +0000 @@ -12,6 +12,16 @@ interface for any eventlet based process, allowing an administrator to telnet to a pre-defined port and execute a variety of commands. +Configuration +------------- + +The GMR feature is optional and requires the oslo.reports_ package to be +installed. For example, using pip:: + + pip install 'oslo.reports>=1.18.0' + +.. _oslo.reports: https://opendev.org/openstack/oslo.reports + Generating a GMR ---------------- diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/index.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/index.rst --- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/index.rst 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/index.rst 2020-04-10 17:06:41.000000000 +0000 @@ -14,6 +14,7 @@ Node Deployment Node Cleaning Node Adoption + Node Retirement RAID Configuration BIOS Settings Node Rescuing @@ -32,6 +33,8 @@ Windows Images Troubleshooting FAQ Power Sync with the Compute Service + Agent Token + Node Multi-Tenancy .. toctree:: :hidden: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/node-multitenancy.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/node-multitenancy.rst --- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/node-multitenancy.rst 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/node-multitenancy.rst 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,145 @@ +================== +Node Multi-Tenancy +================== + +This guide explains the steps needed to enable node multi-tenancy. This +feature enables non-admins to perform API actions on nodes, limited by +policy configuration. The Bare Metal service supports two kinds of +non-admin users: + +* Owner: owns specific nodes and performs administrative actions on them +* Lessee: receives temporary and limited access to a node + +Setting the Owner and Lessee +============================ + +Non-administrative access to a node is controlled through a node's ``owner`` +or ``lessee`` attribute:: + + openstack baremetal node set --owner 080925ee2f464a2c9dce91ee6ea354e2 node-7 + openstack baremetal node set --lessee 2a210e5ff114c8f2b6e994218f51a904 node-10 + + +Configuring the Bare Metal Service Policy +========================================= + +By default, the Bare Metal service policy is configured so that a node +owner or lessee has no access to any node APIs. 
However, the policy +:doc:`policy file ` contains rules that +can be used to enable node API access:: + + # Owner of node + #"is_node_owner": "project_id:%(node.owner)s" + + # Lessee of node + #"is_node_lessee": "project_id:%(node.lessee)s" + +An administrator can then modify the policy file to expose individual node +APIs as follows:: + + # Change Node provision status + # PUT /nodes/{node_ident}/states/provision + #"baremetal:node:set_provision_state": "rule:is_admin" + "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner or rule:is_node_lessee" + + # Update Node records + # PATCH /nodes/{node_ident} + #"baremetal:node:update": "rule:is_admin or rule:is_node_owner" + +In addition, it is safe to expose the ``baremetal:node:list`` rule, as the +node list function now filters non-admins by owner and lessee:: + + # Retrieve multiple Node records, filtered by owner + # GET /nodes + # GET /nodes/detail + #"baremetal:node:list": "rule:baremetal:node:get" + "baremetal:node:list": "" + +Note that ``baremetal:node:list_all`` permits users to see all nodes +regardless of owner/lessee, so it should remain restricted to admins. + +Ports +----- + +Port APIs can be similarly exposed to node owners and lessees:: + + # Retrieve Port records + # GET /ports/{port_id} + # GET /nodes/{node_ident}/ports + # GET /nodes/{node_ident}/ports/detail + # GET /portgroups/{portgroup_ident}/ports + # GET /portgroups/{portgroup_ident}/ports/detail + #"baremetal:port:get": "rule:is_admin or rule:is_observer" + "baremetal:port:get": "rule:is_admin or rule:is_observer or rule:is_node_owner or rule:is_node_lessee" + + # Retrieve multiple Port records, filtered by owner + # GET /ports + # GET /ports/detail + #"baremetal:port:list": "rule:baremetal:port:get" + "baremetal:port:list": "" + + +Allocations +----------- + +Allocations respect node tenancy as well. A restricted allocation creates +an allocation tied to a project, and that can only match nodes where that +project is the owner or lessee. 
Here is a sample set of allocation policy +rules that allow non-admins to use allocations effectively:: + + # Retrieve Allocation records + # GET /allocations/{allocation_id} + # GET /nodes/{node_ident}/allocation + #"baremetal:allocation:get": "rule:is_admin or rule:is_observer" + "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" + + # Retrieve multiple Allocation records, filtered by owner + # GET /allocations + #"baremetal:allocation:list": "rule:baremetal:allocation:get" + "baremetal:allocation:list": "" + + # Retrieve multiple Allocation records + # GET /allocations + #"baremetal:allocation:list_all": "rule:baremetal:allocation:get" + + # Create Allocation records + # POST /allocations + #"baremetal:allocation:create": "rule:is_admin" + + # Create Allocation records that are restricted to an owner + # POST /allocations + #"baremetal:allocation:create_restricted": "rule:baremetal:allocation:create" + "baremetal:allocation:create_restricted": "" + + # Delete Allocation records + # DELETE /allocations/{allocation_id} + # DELETE /nodes/{node_ident}/allocation + #"baremetal:allocation:delete": "rule:is_admin" + "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" + + # Change name and extra fields of an allocation + # PATCH /allocations/{allocation_id} + #"baremetal:allocation:update": "rule:is_admin" + "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" + +Deployment and Metalsmith +------------------------- + +Provisioning a node requires a specific set of APIs to be made available. +The following policy specifications are enough to allow a node owner to +use :metalsmith-doc:`Metalsmith ` to deploy upon a node:: + + "baremetal:node:get": "rule:is_admin or rule:is_observer or rule:is_node_owner" + "baremetal:node:list": "" + "baremetal:node:update_extra": "rule:is_admin or rule:is_node_owner" + "baremetal:node:update_instance_info": "rule:is_admin or rule:is_node_owner" + "baremetal:node:validate": "rule:is_admin or rule:is_node_owner" + "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner" + "baremetal:node:vif:list": "rule:is_admin or rule:is_node_owner" + "baremetal:node:vif:attach": "rule:is_admin or rule:is_node_owner" + "baremetal:node:vif:detach": "rule:is_admin or rule:is_node_owner" + "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" + "baremetal:allocation:list": "" + "baremetal:allocation:create_restricted": "" + "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" + "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/raid.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/raid.rst --- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/raid.rst 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/raid.rst 2020-04-10 17:06:41.000000000 +0000 @@ -143,7 +143,12 @@ In order to trigger the setup of a Software RAID via the Ironic Python Agent, the value of this property needs to be set to ``software``. - ``physical_disks`` - A list of physical disks to use as read by the - RAID interface. Not supported for software RAID. + RAID interface. + + For software RAID ``physical_disks`` is a list of device hints in the same + format as used for :ref:`root-device-hints`. 
The number of provided hints
+  must match the expected number of backing devices (repeat the same hint if
+  necessary).
 
 .. note::
    If properties from both "Backing physical disk hints" or
@@ -260,6 +265,25 @@
     ]
   }
 
+*Example 6*. Software RAID, limiting backing block devices to exactly two
+devices with the size exceeding 100 GiB:
+
+.. code-block:: json
+
+  {
+    "logical_disks": [
+      {
+        "size_gb": "MAX",
+        "raid_level": "0",
+        "controller": "software",
+        "physical_disks": [
+          {"size": "> 100"},
+          {"size": "> 100"}
+        ]
+      }
+    ]
+  }
+
 Current RAID configuration
 --------------------------
 After target RAID configuration is applied on the bare metal node, Ironic
@@ -380,6 +404,21 @@
 * There is no support for partition images, only whole-disk images are
   supported with Software RAID. See :doc:`/install/configure-glance-images`.
 
+Image requirements
+------------------
+
+Since Ironic needs to perform additional steps when deploying nodes
+with software RAID, there are some requirements the deployed images need
+to fulfill. Up to and including the Train release, the image needs to
+have its root file system on the first partition. Starting with Ussuri,
+the image can also have additional metadata to point Ironic to the
+partition with the root file system: for this, the image needs to set
+the ``rootfs_uuid`` property to the file system UUID of the root file
+system. The pre-Ussuri approach, i.e. to have the root file system on
+the first partition, is kept as a fallback and hence allows software
+RAID deployments where Ironic does not have access to any image metadata
+(e.g. Ironic stand-alone).
+
 Using RAID in nova flavor for scheduling
 ========================================
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/retirement.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/retirement.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/retirement.rst 1970-01-01 00:00:00.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/retirement.rst 2020-04-10 17:06:41.000000000 +0000
@@ -0,0 +1,64 @@
+.. _retirement:
+
+===============
+Node retirement
+===============
+
+Overview
+========
+
+Retiring nodes is a natural part of a server’s life cycle, for
+instance when the end of the warranty is reached and the physical
+space is needed for new deliveries to install replacement capacity.
+
+However, depending on the type of the deployment, removing nodes
+from service can be a full workflow by itself, as it may include
+steps like moving applications to other hosts, cleaning sensitive
+data from disks or the BMC, or tracking the dismantling of servers
+from their racks.
+
+Ironic provides some means to support such workflows by allowing
+operators to tag nodes as ``retired``, which will prevent any further
+scheduling of instances, but will still allow other operations,
+such as cleaning, to happen (this marks an important difference from
+nodes which have the ``maintenance`` flag set).
+
+How to use
+==========
+
+When it is known that a node shall be retired, set the ``retired``
+flag on the node with::
+
+  openstack baremetal node set --retired node-001
+
+This can be done irrespective of the state the node is in, so in
+particular while the node is ``active``.
+
+.. NOTE::
+   Nodes in the ``available`` state are an exception. For backwards
+   compatibility reasons, these nodes need to be moved to
+   ``manageable`` first. Trying to set the ``retired`` flag for
+   ``available`` nodes will result in an error.
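+
+For example, a node in ``available`` can first be moved to ``manageable``
+and then be retired with::
+
+  openstack baremetal node manage node-001
+  openstack baremetal node set --retired node-001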
+
+Optionally, a reason can be specified when a node is retired, e.g.::
+
+  openstack baremetal node set --retired node-001 \
+    --retired-reason "End of warranty for delivery abc123"
+
+Upon instance deletion, an ``active`` node with the ``retired`` flag
+set will not move to ``available``, but to ``manageable``. The node
+will hence not be eligible for scheduling of new instances.
+
+Equally, nodes with ``retired`` set to True cannot move from ``manageable``
+to ``available``: the ``provide`` verb is blocked. This is to prevent
+accidental re-use of nodes tagged for removal from the fleet. In order
+to move these nodes to ``available`` nonetheless, the ``retired`` field
+needs to be removed first. This can be done via::
+
+  openstack baremetal node unset --retired node-001
+
+In order to facilitate the identification of nodes marked for retirement,
+e.g. by other teams, ironic also allows listing all nodes which have the
+``retired`` flag set::
+
+  openstack baremetal node list --retired
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/troubleshooting.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/troubleshooting.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/admin/troubleshooting.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/admin/troubleshooting.rst 2020-04-10 17:06:41.000000000 +0000
@@ -358,3 +358,36 @@
     $ config terminal
     $ (config) interface eth1/11
     $ (config-if) spanning-tree port type edge
+
+
+IPMI errors
+===========
+
+When working with IPMI, several settings may need to be enabled, depending
+on the vendor.
+
+Enable IPMI over LAN
+--------------------
+
+Machines may not have IPMI access over LAN enabled by default. This could
+cause the IPMI port to be unreachable through ipmitool, as shown:
+
+.. code-block:: bash
+
+   $ ipmitool -I lan -H ipmi_host -U ipmi_user -P ipmi_pass chassis power status
+   Error: Unable to establish LAN session
+
+To fix this, enable the `IPMI over LAN` setting using your BMC tool or web
+app.
+
+Troubleshooting lanplus interface
+---------------------------------
+
+When working with lanplus interfaces, you may encounter the following error:
+
+.. code-block:: bash
+
+   $ ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status
+   Error in open session response message : insufficient resources for session
+   Error: Unable to establish IPMI v2 / RMCP+ session
+
+To fix that issue, enable the `RMCP+ Cipher Suite3 Configuration` setting
+using your BMC tool or web app.
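+
+Once the setting has been enabled, the same command should report the chassis
+power state (the exact output may vary with the BMC), for example:
+
+.. code-block:: bash
+
+   $ ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status
+   Chassis Power is on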
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/conf.py ironic-14.0.1~git2020041013.af9e6ba90/doc/source/conf.py
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/conf.py 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/conf.py 2020-04-10 17:06:41.000000000 +0000
@@ -54,6 +54,7 @@
     'db/sqlalchemy/alembic/env',
     'db/sqlalchemy/alembic/versions/*',
     'drivers/modules/ansible/playbooks*',
+    'hacking',
     'tests',
 ]
 apidoc_separate_modules = True
@@ -72,6 +73,7 @@
     'ironic-ui',
     'keystone',
     'keystonemiddleware',
+    'metalsmith',
     'networking-baremetal',
     'neutron',
     'nova',
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/index.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/index.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/index.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/index.rst 2020-04-10 17:06:41.000000000 +0000
@@ -113,3 +113,25 @@
    Job roles in the CI
    How to add a new job?
    How to debug failures in CI jobs
+
+Our policy for stable branches
+------------------------------
+
+Stable branches that are in `Extended Maintenance`_ and haven't received
+backports in a while can be tagged as ``Unmaintained``, after discussion
+within the ironic community. If such a decision is taken, an email will
+be sent to the OpenStack mailing list.
+
+What does ``Unmaintained`` mean? The branch still exists, but the ironic
+upstream community will not actively backport patches from maintained
+branches. Fixes can still be merged, though, if pushed into review by
+operators or other downstream developers. It also means that branchless
+projects (e.g. ironic-tempest-plugin) may not have configurations that are
+compatible with those branches.
+
+As of 09 March 2020, the list of ``Unmaintained`` branches includes:
+
+* Ocata (last commit - Jun 28, 2019)
+* Pike (last commit - Oct 2, 2019)
+
+.. _Extended Maintenance: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/releasing.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/releasing.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/releasing.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/releasing.rst 2020-04-10 17:06:41.000000000 +0000
@@ -61,6 +61,7 @@
 
 * python-ironicclient
 * python-ironic-inspector-client
+* sushy-cli
 
 Normal release
 --------------
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/webapi-version-history.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/webapi-version-history.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/contributor/webapi-version-history.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/contributor/webapi-version-history.rst 2020-04-10 17:06:41.000000000 +0000
@@ -2,6 +2,23 @@
 REST API Version History
 ========================
 
+1.65 (Ussuri, master)
+---------------------
+
+Added the ``lessee`` field to the node object. The field should match the
+``project_id`` of the intended lessee. If an allocation has an owner,
+then the allocation process will only match the allocation with a node
+that has the same ``owner`` or ``lessee``.
+
+1.64 (Ussuri, master)
+---------------------
+
+Added the ``network_type`` to the port object's ``local_link_connection`` field.
+The ``network_type`` can be set to either ``managed`` or ``unmanaged``. When
+the type is ``unmanaged``, other fields are not required. Use ``unmanaged``
+when the neutron ``network_interface`` is required, but the network is in
+fact a flat network where no actual switch management is done.
+
 1.63 (Ussuri, master)
 ---------------------
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/configure-integration.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/configure-integration.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/configure-integration.rst 2020-03-24 19:12:17.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/configure-integration.rst 2020-04-10 17:06:41.000000000 +0000
@@ -8,6 +8,7 @@
    configure-identity
    configure-compute
    configure-networking
+   configure-ipv6-networking
    configure-glance-swift
    enabling-https
    configure-cleaning
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/configure-ipv6-networking.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/configure-ipv6-networking.rst
--- ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/configure-ipv6-networking.rst 1970-01-01 00:00:00.000000000 +0000
+++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/configure-ipv6-networking.rst 2020-04-10 17:06:41.000000000 +0000
@@ -0,0 +1,120 @@
+Configuring services for bare metal provisioning using IPv6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Use of IPv6 addressing for baremetal provisioning requires additional
+configuration. This page covers the IPv6 specifics only. Please refer to
+:doc:`/install/configure-tenant-networks` and
+:doc:`/install/configure-networking` for general networking configuration.
+
+
+Configure ironic PXE driver for provisioning using IPv6 addressing
+==================================================================
+
+The ironic PXE driver operates in either IPv4 or IPv6 mode (IPv4 is the
+default). To enable IPv6 mode, set the ``[pxe]/ip_version`` option in the Bare
+Metal Service's configuration file (``/etc/ironic/ironic.conf``) to ``6``.
+
+.. Note:: Support for dual mode IPv4 and IPv6 operations is planned for a
+          future version of ironic.
+
+
+Provisioning with IPv6 stateless addressing
+-------------------------------------------
+
+When using stateless addressing, DHCPv6 does not provide addresses to the
+client. DHCPv6 does, however, provide other configuration via DHCPv6 options,
+such as the bootfile-url and bootfile-parameters.
+
+Once the PXE driver is set to operate in IPv6 mode, no further configuration
+is required in the Bare Metal Service.
+
+Creating networks and subnets in the Networking Service
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When creating the Bare Metal Service network(s) and subnet(s) in the
+Networking Service, subnets should have ``ipv6-address-mode`` set to
+``dhcpv6-stateless`` and ``ip-version`` set to ``6``. Depending on whether a
+router in the Networking Service is providing RAs (Router Advertisements) or
+not, the ``ipv6-ra-mode`` for the subnet(s) should either be set to
+``dhcpv6-stateless`` or be left unset.
+
+.. Note:: If ``ipv6-ra-mode`` is left unset, an external router on the network
+          is expected to provide RAs with the appropriate flags set for
+          automatic addressing and other configuration.
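+
+For example, a provisioning network and a stateless IPv6 subnet could be
+created along the following lines, where the network and subnet names and
+the prefix are illustrative placeholders to be adapted to the deployment
+(leave out ``--ipv6-ra-mode`` if an external router provides RAs)::
+
+   openstack subnet create --network ironic-provision-net \
+       --ip-version 6 --subnet-range fd00:1::/64 \
+       --ipv6-address-mode dhcpv6-stateless \
+       --ipv6-ra-mode dhcpv6-stateless ironic-provision-subnet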
+
+
+Provisioning with IPv6 stateful addressing
+------------------------------------------
+
+When using stateful addressing, DHCPv6 provides both addresses and other
+configuration via DHCPv6 options, such as the bootfile-url and
+bootfile-parameters.
+
+The "identity-association" (IA) construct used by DHCPv6 is challenging when
+booting over the network. Firmware and ramdisks typically end up using
+different DUID/IAID combinations, and it is not always possible for one
+chain-booting stage to release its address before giving control to the next
+step. If the DHCPv6 server is configured with static reservations only, the
+result is that booting will fail because the DHCPv6 server has no addresses
+available. To get past this issue, either configure the DHCPv6 server with
+multiple address reservations for each host, or use a dynamic range.
+
+.. Note:: Support for multiple address reservations requires dnsmasq version
+          2.81 or later. Some distributions may backport this feature to
+          earlier dnsmasq versions as part of the packaging; check the
+          distribution's release notes.
+
+          If a different (not dnsmasq) DHCPv6 server backend is used with the
+          Networking service, use of multiple address reservations might not
+          work.
+
+Using the ``flat`` network interface
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Due to the "identity-association" challenges with DHCPv6, provisioning using
+the ``flat`` network interface is not recommended. When ironic operates with
+the ``flat`` network interface, the server instance port is used for
+provisioning and other operations. Ironic will not use multiple address
+reservations in this scenario. Because of this, **it will not work in most
+cases**.
+
+Using the ``neutron`` network interface
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When using the ``neutron`` network interface, the Bare Metal Service will
+allocate multiple IPv6 addresses (4 addresses per port by default) on the
+service networks used for provisioning, cleaning, rescue and introspection.
+The number of addresses allocated can be controlled via the
+``[neutron]/dhcpv6_stateful_address_count`` option in the Bare Metal
+Service's configuration file (``/etc/ironic/ironic.conf``). Using multiple
+address reservations ensures that the DHCPv6 server can lease addresses to
+each step.
+
+To enable IPv6 provisioning on neutron *flat* provider networks with no switch
+management, the ``local_link_connection`` field of baremetal ports must be set
+to ``{'network_type': 'unmanaged'}``. The following example shows how to set
+the local_link_connection for operation on unmanaged networks::
+
+   openstack baremetal port set <port> \
+       --local-link-connection network_type=unmanaged
+
+The use of multiple IPv6 addresses must also be enabled in the Networking
+Service's dhcp agent configuration (``/etc/neutron/dhcp_agent.ini``) by
+setting the option ``[DEFAULT]/dnsmasq_enable_addr6_list`` to ``True``
+(default ``False`` in the Ussuri release).
+
+.. Note:: Support for multiple IPv6 address reservations in the dnsmasq
+          backend was added to the Networking Service in the Ussuri release.
+          It was also backported to the stable Train release.
+
+
+Creating networks and subnets in the Networking Service
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When creating the ironic service network(s) and subnet(s) in the Networking
+Service, subnets should have ``ipv6-address-mode`` set to ``dhcpv6-stateful``
+and ``ip-version`` set to ``6``.
Depending on whether a router in the Networking +Service is providing RA's (Router Advertisements) or not, the ``ipv6-ra-mode`` +for the subnet(s) should be set to either ``dhcpv6-stateful`` or be left +unset. + +.. Note:: If ``ipv6-ra-mode`` is left unset, an external router on the network + is expected to provide RA's with the appropriate flags set for managed + addressing and other configuration. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/standalone.rst ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/standalone.rst --- ironic-14.0.1~git2020032415.de2d907fc/doc/source/install/standalone.rst 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/doc/source/install/standalone.rst 2020-04-10 17:06:41.000000000 +0000 @@ -159,6 +159,28 @@ --instance-info image_source=$IMG \ --instance-info image_checksum=$MD5HASH +#. :ref:`Boot mode ` can be specified per instance:: + + openstack baremetal node set $NODE_UUID \ + --instance-info deploy_boot_mode=uefi + + Otherwise, the ``boot_mode`` capability from the node's ``properties`` will + be used. + + .. warning:: + The two settings must not contradict each other. + + .. note:: + The ``boot_mode`` capability is only used in the node's ``properties``, + not in ``instance_info`` like most other capabilities. Use the separate + ``instance_info/deploy_boot_mode`` field instead. + +#. To override the :ref:`boot option ` used for + this instance, set the ``boot_option`` capability:: + + openstack baremetal node set $NODE_UUID \ + --instance-info capabilities='{"boot_option": "local"}' + #. Starting with the Ussuri release, you can set :ref:`root device hints ` per instance:: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/driver-requirements.txt ironic-14.0.1~git2020041013.af9e6ba90/driver-requirements.txt --- ironic-14.0.1~git2020032415.de2d907fc/driver-requirements.txt 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/driver-requirements.txt 2020-04-10 17:06:41.000000000 +0000 @@ -11,7 +11,7 @@ python-xclarityclient>=0.1.6 # The Redfish hardware type uses the Sushy library -sushy>=3.1.0 +sushy>=3.2.0 # Ansible-deploy interface ansible>=2.7 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/allocation.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/allocation.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/allocation.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/allocation.py 2020-04-10 17:06:41.000000000 +0000 @@ -360,16 +360,8 @@ return Allocation.convert_with_links(rpc_allocation, fields=fields) - @METRICS.timer('AllocationsController.post') - @expose.expose(Allocation, body=Allocation, - status_code=http_client.CREATED) - def post(self, allocation): - """Create a new allocation. - - :param allocation: an allocation within the request body. - """ - context = api.request.context - cdict = context.to_policy_values() + def _authorize_create_allocation(self, allocation): + cdict = api.request.context.to_policy_values() try: policy.authorize('baremetal:allocation:create', cdict, cdict) @@ -383,6 +375,19 @@ self._check_allowed_allocation_fields(allocation.as_dict()) allocation.owner = owner + return allocation + + @METRICS.timer('AllocationsController.post') + @expose.expose(Allocation, body=Allocation, + status_code=http_client.CREATED) + def post(self, allocation): + """Create a new allocation. 
+ + :param allocation: an allocation within the request body. + """ + context = api.request.context + allocation = self._authorize_create_allocation(allocation) + if (allocation.name and not api_utils.is_valid_logical_name(allocation.name)): msg = _("Cannot create allocation with invalid name " diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/node.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/node.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/node.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/node.py 2020-04-10 17:06:41.000000000 +0000 @@ -869,8 +869,8 @@ raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) - if (rpc_node.provision_state == ir_states.INSPECTWAIT and - target == ir_states.VERBS['abort']): + if (rpc_node.provision_state == ir_states.INSPECTWAIT + and target == ir_states.VERBS['abort']): if not api_utils.allow_inspect_abort(): raise exception.NotAcceptable() @@ -1250,7 +1250,10 @@ owner = wsme.wsattr(str) """Field for storage of physical node owner""" - description = wsme.wsattr(str) + lessee = wsme.wsattr(wtypes.text) + """Field for storage of physical node lessee""" + + description = wsme.wsattr(wtypes.text) """Field for node description""" allocation_uuid = wsme.wsattr(types.uuid, readonly=True) @@ -1282,8 +1285,8 @@ value = [t['trait'] for t in kwargs['traits']['objects']] # NOTE(jroll) this is special-cased to "" and not Unset, # because it is used in hash ring calculations - elif k == 'conductor_group' and (k not in kwargs or - kwargs[k] is wtypes.Unset): + elif (k == 'conductor_group' + and (k not in kwargs or kwargs[k] is wtypes.Unset)): value = '' else: value = kwargs.get(k, wtypes.Unset) @@ -1341,8 +1344,8 @@ def convert_with_links(cls, rpc_node, fields=None, sanitize=True): node = Node(**rpc_node.as_dict()) - if (api_utils.allow_expose_conductors() and - (fields is None or 'conductor' in fields)): + if (api_utils.allow_expose_conductors() + and (fields is None or 'conductor' in fields)): # NOTE(kaifeng) It is possible a node gets orphaned in certain # circumstances, set conductor to None in such case. 
try: @@ -1482,7 +1485,7 @@ automated_clean=None, protected=False, protected_reason=None, owner=None, allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5', - retired=False, retired_reason=None) + retired=False, retired_reason=None, lessee=None) # NOTE(matty_dubs): The chassis_uuid getter() is based on the # _chassis_uuid variable: sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12' @@ -1767,8 +1770,8 @@ and not api_utils.allow_portgroups_subcontrollers()) or (remainder[0] == 'vifs' and not api_utils.allow_vifs_subcontroller()) - or (remainder[0] == 'bios' and - not api_utils.allow_bios_interface()) + or (remainder[0] == 'bios' + and not api_utils.allow_bios_interface()) or (remainder[0] == 'allocation' and not api_utils.allow_allocations())): pecan.abort(http_client.NOT_FOUND) @@ -1801,6 +1804,7 @@ resource_class=None, resource_url=None, fields=None, fault=None, conductor_group=None, detail=None, conductor=None, owner=None, + lessee=None, project=None, description_contains=None): if self.from_chassis and not chassis_uuid: raise exception.MissingParameterValue( @@ -1844,6 +1848,8 @@ 'fault': fault, 'conductor_group': conductor_group, 'owner': owner, + 'lessee': lessee, + 'project': project, 'description_contains': description_contains, 'retired': retired, } @@ -1970,13 +1976,14 @@ types.boolean, types.boolean, str, types.uuid, int, str, str, str, types.listtype, str, str, str, types.boolean, str, - str, str) + str, str, str, str) def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, retired=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, fields=None, resource_class=None, fault=None, conductor_group=None, detail=None, conductor=None, - owner=None, description_contains=None): + owner=None, description_contains=None, lessee=None, + project=None): """Retrieve a list of nodes. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -2010,6 +2017,10 @@ that conductor. :param owner: Optional string value that set the owner whose nodes are to be retrurned. + :param lessee: Optional string value that set the lessee whose nodes + are to be returned. + :param project: Optional string value that set the project - lessee or + owner - whose nodes are to be returned. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param fault: Optional string value to get only nodes with that fault. @@ -2017,7 +2028,7 @@ with description field contains matching value. 
""" - owner = api_utils.check_list_policy('node', owner) + project = api_utils.check_list_policy('node', project) api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) @@ -2029,6 +2040,7 @@ api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_conductor(conductor) api_utils.check_allow_filter_by_owner(owner) + api_utils.check_allow_filter_by_lessee(lessee) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) @@ -2044,20 +2056,22 @@ conductor_group=conductor_group, detail=detail, conductor=conductor, - owner=owner, + owner=owner, lessee=lessee, + project=project, **extra_args) @METRICS.timer('NodesController.detail') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, types.boolean, str, types.uuid, int, str, str, str, str, str, - str, str, str, str) + str, str, str, str, + str, str) def detail(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, retired=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, resource_class=None, fault=None, conductor_group=None, conductor=None, owner=None, - description_contains=None): + description_contains=None, lessee=None, project=None): """Retrieve a list of nodes with detail. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -2090,11 +2104,15 @@ that conductor_group. :param owner: Optional string value that set the owner whose nodes are to be retrurned. + :param lessee: Optional string value that set the lessee whose nodes + are to be returned. + :param project: Optional string value that set the project - lessee or + owner - whose nodes are to be returned. :param description_contains: Optional string value to get only nodes with description field contains matching value. """ - owner = api_utils.check_list_policy('node', owner) + project = api_utils.check_list_policy('node', project) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) @@ -2102,6 +2120,7 @@ api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_owner(owner) + api_utils.check_allow_filter_by_lessee(lessee) api_utils.check_allowed_fields([sort_key]) # /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] @@ -2122,7 +2141,8 @@ fault=fault, conductor_group=conductor_group, conductor=conductor, - owner=owner, + owner=owner, lessee=lessee, + project=project, **extra_args) @METRICS.timer('NodesController.validate') @@ -2195,14 +2215,14 @@ "be set via the node traits API.") raise exception.Invalid(msg) - if (node.protected is not wtypes.Unset or - node.protected_reason is not wtypes.Unset): + if (node.protected is not wtypes.Unset + or node.protected_reason is not wtypes.Unset): msg = _("Cannot specify protected or protected_reason on node " "creation. 
These fields can only be set for active nodes") raise exception.Invalid(msg) - if (node.description is not wtypes.Unset and - len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH): + if (node.description is not wtypes.Unset + and len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH): msg = _("Cannot create node with description exceeding %s " "characters") % _NODE_DESCRIPTION_MAX_LENGTH raise exception.Invalid(msg) @@ -2273,6 +2293,25 @@ "characters") % _NODE_DESCRIPTION_MAX_LENGTH raise exception.Invalid(msg) + def _authorize_patch_and_get_node(self, node_ident, patch): + # deal with attribute-specific policy rules + policy_checks = [] + generic_update = False + for p in patch: + if p['path'].startswith('/instance_info'): + policy_checks.append('baremetal:node:update_instance_info') + elif p['path'].startswith('/extra'): + policy_checks.append('baremetal:node:update_extra') + else: + generic_update = True + + # always do at least one check + if generic_update or not policy_checks: + policy_checks.append('baremetal:node:update') + + return api_utils.check_multiple_node_policies_and_retrieve( + policy_checks, node_ident, with_suffix=True) + @METRICS.timer('NodesController.patch') @wsme.validate(types.uuid, types.boolean, [NodePatchType]) @expose.expose(Node, types.uuid_or_name, types.boolean, @@ -2292,24 +2331,7 @@ self._validate_patch(patch, reset_interfaces) context = api.request.context - - # deal with attribute-specific policy rules - policy_checks = [] - generic_update = False - for p in patch: - if p['path'].startswith('/instance_info'): - policy_checks.append('baremetal:node:update_instance_info') - elif p['path'].startswith('/extra'): - policy_checks.append('baremetal:node:update_extra') - else: - generic_update = True - - # always do at least one check - if generic_update or not policy_checks: - policy_checks.append('baremetal:node:update') - - rpc_node = api_utils.check_multiple_node_policies_and_retrieve( - policy_checks, node_ident, with_suffix=True) + rpc_node = self._authorize_patch_and_get_node(node_ident, patch) remove_inst_uuid_patch = [{'op': 'remove', 'path': '/instance_uuid'}] if rpc_node.maintenance and patch == remove_inst_uuid_patch: @@ -2339,7 +2361,7 @@ api_utils.check_owner_policy( 'node', 'baremetal:node:update_owner_provisioned', - rpc_node['owner']) + rpc_node['owner'], rpc_node['lessee']) except exception.HTTPForbidden: msg = _('Cannot update owner of node "%(node)s" while it ' 'is in state "%(state)s".') % { diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/port.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/port.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/port.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/port.py 2020-04-10 17:06:41.000000000 +0000 @@ -440,6 +440,14 @@ if ('is_smartnic' in fields and not api_utils.allow_port_is_smartnic()): raise exception.NotAcceptable() + if ('local_link_connection/network_type' in fields + and not api_utils.allow_local_link_connection_network_type()): + raise exception.NotAcceptable() + if (isinstance(fields, dict) + and fields.get('local_link_connection') is not None): + if (not api_utils.allow_local_link_connection_network_type() + and 'network_type' in fields['local_link_connection']): + raise exception.NotAcceptable() @METRICS.timer('PortsController.get_all') @expose.expose(PortCollection, types.uuid_or_name, types.uuid, @@ -668,11 +676,10 @@ 'baremetal:port:update', port_uuid) context = 
api.request.context - fields_to_check = set() for field in (self.advanced_net_fields + ['portgroup_uuid', 'physical_network', - 'is_smartnic']): + 'is_smartnic', 'local_link_connection/network_type']): field_path = '/%s' % field if (api_utils.get_patch_values(patch, field_path) or api_utils.is_path_removed(patch, field_path)): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/ramdisk.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/ramdisk.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/ramdisk.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/ramdisk.py 2020-04-10 17:06:41.000000000 +0000 @@ -53,7 +53,10 @@ 'statsd_port': CONF.metrics_statsd.agent_statsd_port }, 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout, - 'agent_token': token + 'agent_token': token, + # Not an API version based indicator; passed as configuration, + # as its presence indicates that support should also be present. + 'agent_token_required': CONF.require_agent_token, } @@ -202,15 +205,14 @@ agent_url = dii.get('agent_url') # If we have an agent_url on file, and we get something different # we should fail because this is unexpected behavior of the agent. - if (agent_url is not None - and agent_url != callback_url): - LOG.error('Received heartbeat for node %(node)s with ' - 'callback URL %(url)s. This is not expected, ' - 'and the heartbeat will not be processed.', - {'node': rpc_node.uuid, 'url': callback_url}) - raise exception.Invalid( - _('Detected change in ramdisk provided ' - '"callback_url"')) + if agent_url is not None and agent_url != callback_url: + LOG.error('Received heartbeat for node %(node)s with ' + 'callback URL %(url)s. This is not expected, ' + 'and the heartbeat will not be processed.', + {'node': rpc_node.uuid, 'url': callback_url}) + raise exception.Invalid( + _('Detected change in ramdisk provided ' + '"callback_url"')) # NOTE(TheJulia): If tokens are required, let's go ahead and fail the # heartbeat very early on. token_required = CONF.require_agent_token diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/types.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/types.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/types.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/types.py 2020-04-10 17:06:41.000000000 +0000 @@ -274,8 +274,9 @@ smart_nic_mandatory_fields = {'port_id', 'hostname'} mandatory_fields_list = [local_link_mandatory_fields, smart_nic_mandatory_fields] - optional_field = {'switch_info'} - valid_fields = set.union(optional_field, *mandatory_fields_list) + optional_fields = {'switch_info', 'network_type'} + valid_fields = set.union(optional_fields, *mandatory_fields_list) + valid_network_types = {'managed', 'unmanaged'} @staticmethod def validate(value): @@ -318,6 +319,25 @@ if invalid: raise exception.Invalid(_('%s are invalid keys') % (invalid)) + # If network_type is 'unmanaged', this is a network with no switch + # management, i.e. local_link_connection details are not required. 
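+        # For illustration (an editor's sketch, assuming the usual
+        # local_link_mandatory_fields of port_id and switch_id for
+        # managed ports), the checks below treat payloads as follows:
+        #
+        #   {'network_type': 'unmanaged'}           # valid on its own
+        #   {'network_type': 'managed',
+        #    'port_id': 'Gig0/1',
+        #    'switch_id': 'aa:bb:cc:dd:ee:ff'}      # valid, mandatory fields present
+        #   {'network_type': 'other'}               # rejected, not a valid network_type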
+ if 'network_type' in keys: + if (value['network_type'] not in + LocalLinkConnectionType.valid_network_types): + msg = _( + 'Invalid network_type %(type)s, valid network_types are ' + '%(valid_network_types)s.') % { + 'type': value['network_type'], + 'valid_network_types': + LocalLinkConnectionType.valid_network_types} + raise exception.Invalid(msg) + + if (value['network_type'] == 'unmanaged' + and not (keys - {'network_type'})): + # Only the valid network_type 'unmanaged' is set, no further + # validation is required. + return value + # Check any mandatory fields sets are present for mandatory_set in LocalLinkConnectionType.mandatory_fields_list: if mandatory_set <= keys: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -490,6 +490,7 @@ 'events': versions.MINOR_54_EVENTS, 'retired': versions.MINOR_61_NODE_RETIRED, 'retired_reason': versions.MINOR_61_NODE_RETIRED, + 'lessee': versions.MINOR_65_NODE_LESSEE, } for field in V31_FIELDS: @@ -659,8 +660,8 @@ 'opr': versions.MINOR_56_BUILD_CONFIGDRIVE} raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) - if ('vendor_data' in configdrive and - not allow_configdrive_vendor_data()): + if ('vendor_data' in configdrive + and not allow_configdrive_vendor_data()): msg = _('Providing vendor_data in configdrive is only supported' ' starting with API version %(base)s.%(opr)s') % { 'base': versions.BASE_VERSION, @@ -717,6 +718,20 @@ 'opr': versions.MINOR_50_NODE_OWNER}) + +def check_allow_filter_by_lessee(lessee): + """Check if filtering nodes by lessee is allowed. + + Version 1.65 of the API allows filtering nodes by lessee. + """ + if (lessee is not None and api.request.version.minor + < versions.MINOR_65_NODE_LESSEE): + raise exception.NotAcceptable(_( + "Request not acceptable. The minimal required API version " + "should be %(base)s.%(opr)s") % + {'base': versions.BASE_VERSION, + 'opr': versions.MINOR_65_NODE_LESSEE}) + + def initial_node_provision_state(): """Return node state to use by default when creating new nodes. @@ -1057,14 +1072,12 @@ Version 1.43 allows a user to pass the detail query string to list the resource with all the fields. """ - return (api.request.version.minor >= - versions.MINOR_43_ENABLE_DETAIL_QUERY) + return api.request.version.minor >= versions.MINOR_43_ENABLE_DETAIL_QUERY def allow_reset_interfaces(): """Check if passing a reset_interfaces query string is allowed.""" - return (api.request.version.minor >= - versions.MINOR_45_RESET_INTERFACES) + return api.request.version.minor >= versions.MINOR_45_RESET_INTERFACES def get_request_return_fields(fields, detail, default_fields): @@ -1167,12 +1180,13 @@ policy.authorize(policy_name, cdict, cdict) -def check_owner_policy(object_type, policy_name, owner): +def check_owner_policy(object_type, policy_name, owner, lessee=None): """Check if the policy authorizes this request on an object. :param: object_type: type of object being checked :param: policy_name: Name of the policy to check. :param: owner: the owner + :param: lessee: the lessee :raises: HTTPForbidden if the policy forbids access. 
""" @@ -1180,6 +1194,8 @@ target_dict = dict(cdict) target_dict[object_type + '.owner'] = owner + if lessee: + target_dict[object_type + '.lessee'] = lessee policy.authorize(policy_name, target_dict, cdict) @@ -1207,7 +1223,8 @@ policy.authorize(policy_name, cdict, cdict) raise - check_owner_policy('node', policy_name, rpc_node['owner']) + check_owner_policy('node', policy_name, + rpc_node['owner'], rpc_node['lessee']) return rpc_node @@ -1255,7 +1272,8 @@ node_ident, with_suffix) else: - check_owner_policy('node', policy_name, rpc_node['owner']) + check_owner_policy('node', policy_name, + rpc_node['owner'], rpc_node['lessee']) return rpc_node @@ -1305,6 +1323,7 @@ rpc_node = objects.Node.get_by_id(context, rpc_port.node_id) target_dict = dict(cdict) target_dict['node.owner'] = rpc_node['owner'] + target_dict['node.lessee'] = rpc_node['lessee'] policy.authorize(policy_name, target_dict, cdict) return rpc_port, rpc_node @@ -1340,8 +1359,8 @@ Version 1.59 of the API added support for configdrive vendor_data. """ - return (api.request.version.minor >= - versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA) + return (api.request.version.minor + >= versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA) def allow_allocation_update(): @@ -1371,3 +1390,9 @@ def allow_agent_token(): """Check if agent token is available.""" return api.request.version.minor >= versions.MINOR_62_AGENT_TOKEN + + +def allow_local_link_connection_network_type(): + """Check if network_type is allowed in ports link_local_connection""" + return (api.request.version.minor + >= versions.MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/versions.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/versions.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/controllers/v1/versions.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/controllers/v1/versions.py 2020-04-10 17:06:41.000000000 +0000 @@ -101,6 +101,8 @@ # v1.61: Add retired and retired_reason to the node object. # v1.62: Add agent_token support for agent communication. # v1.63: Add support for indicators +# v1.64: Add network_type to port.local_link_connection +# v1.65: Add lessee to the node object. 
MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -166,6 +168,8 @@ MINOR_61_NODE_RETIRED = 61 MINOR_62_AGENT_TOKEN = 62 MINOR_63_INDICATORS = 63 +MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE = 64 +MINOR_65_NODE_LESSEE = 65 # When adding another version, update: # - MINOR_MAX_VERSION @@ -173,7 +177,7 @@ # explanation of what changed in the new version # - common/release_mappings.py, RELEASE_MAPPING['master']['api'] -MINOR_MAX_VERSION = MINOR_63_INDICATORS +MINOR_MAX_VERSION = MINOR_65_NODE_LESSEE # String representations of the minor and maximum versions _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/api/wsgi.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/wsgi.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/api/wsgi.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/api/wsgi.py 2020-04-10 17:06:41.000000000 +0000 @@ -14,10 +14,10 @@ import sys from oslo_config import cfg -import oslo_i18n as i18n from oslo_log import log from ironic.api import app +from ironic.common import i18n from ironic.common import service diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/api.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/api.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/api.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/api.py 2020-04-10 17:06:41.000000000 +0000 @@ -20,7 +20,11 @@ import sys from oslo_config import cfg -from oslo_reports import guru_meditation_report as gmr +from oslo_log import log +try: + from oslo_reports import guru_meditation_report as gmr +except ImportError: + gmr = None from ironic.common import profiler from ironic.common import service as ironic_service @@ -29,12 +33,18 @@ CONF = cfg.CONF +LOG = log.getLogger(__name__) + def main(): # Parse config file and command line options, then start logging ironic_service.prepare_service(sys.argv) - gmr.TextGuruMeditation.setup_autorun(version) + if gmr is not None: + gmr.TextGuruMeditation.setup_autorun(version) + else: + LOG.debug('Guru meditation reporting is disabled ' + 'because oslo.reports is not installed') profiler.setup('ironic_api', CONF.host) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/conductor.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/conductor.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/conductor.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/conductor.py 2020-04-10 17:06:41.000000000 +0000 @@ -23,7 +23,10 @@ from oslo_config import cfg from oslo_log import log -from oslo_reports import guru_meditation_report as gmr +try: + from oslo_reports import guru_meditation_report as gmr +except ImportError: + gmr = None from oslo_service import service from ironic.common import profiler @@ -83,7 +86,11 @@ # Parse config file and command line options, then start logging ironic_service.prepare_service(sys.argv) - gmr.TextGuruMeditation.setup_autorun(version) + if gmr is not None: + gmr.TextGuruMeditation.setup_autorun(version) + else: + LOG.debug('Guru meditation reporting is disabled ' + 'because oslo.reports is not installed') mgr = rpc_service.RPCService(CONF.host, 'ironic.conductor.manager', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/dbsync.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/dbsync.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/dbsync.py 2020-03-24 19:12:17.000000000 +0000 +++ 
ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/dbsync.py 2020-04-10 17:06:41.000000000 +0000 @@ -17,8 +17,6 @@ Run storage database migration. """ -from __future__ import print_function - import sys from oslo_config import cfg diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/__init__.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/__init__.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/__init__.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/__init__.py 2020-04-10 17:06:41.000000000 +0000 @@ -25,6 +25,6 @@ eventlet.monkey_patch(os=False) -import oslo_i18n as i18n # noqa for I202 due to 'import eventlet' above +from ironic.common import i18n # noqa for I202 due to 'import eventlet' above i18n.install('ironic') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/status.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/status.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/cmd/status.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/cmd/status.py 2020-04-10 17:06:41.000000000 +0000 @@ -61,5 +61,6 @@ return upgradecheck.main( cfg.CONF, project='ironic', upgrade_command=Checks()) + if __name__ == '__main__': sys.exit(main()) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/glance_service/service_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/glance_service/service_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/glance_service/service_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/glance_service/service_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -115,8 +115,8 @@ if getattr(image, 'visibility', None) == 'public' or context.is_admin: return True - return (context.project_id and - getattr(image, 'owner', None) == context.project_id) + return (context.project_id + and getattr(image, 'owner', None) == context.project_id) def is_image_active(image): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/i18n.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/i18n.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/i18n.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/i18n.py 2020-04-10 17:06:41.000000000 +0000 @@ -19,9 +19,20 @@ """ -import oslo_i18n as i18n +try: + import oslo_i18n as i18n +except ImportError: + def _(msg): + return msg -_translators = i18n.TranslatorFactory(domain='ironic') + def install(domain): + # NOTE(dtantsur): this is called before logging is initialized, so we + # cannot really log a warning here. + pass +else: + _translators = i18n.TranslatorFactory(domain='ironic') -# The primary translation function using the well-known name "_" -_ = _translators.primary + # The primary translation function using the well-known name "_" + _ = _translators.primary + + install = i18n.install diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/images.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/images.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/images.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/images.py 2020-04-10 17:06:41.000000000 +0000 @@ -19,6 +19,7 @@ Handling of VM disk images. 
""" +import contextlib import os import shutil @@ -154,8 +155,67 @@ return utils.render_template(template, options) -def create_isolinux_image_for_bios(output_file, kernel, ramdisk, - kernel_params=None): +def _read_dir(root_dir, prefix_dir=None): + """Gather files under given directory. + + :param root_dir: a directory to traverse. + :returns: a dict mapping absolute paths to relative to the `root_dir`. + """ + files_info = {} + + if not prefix_dir: + prefix_dir = root_dir + + for entry in os.listdir(root_dir): + path = os.path.join(root_dir, entry) + if os.path.isdir(path): + files_info.update(_read_dir(path, prefix_dir)) + + else: + files_info[path] = path[len(prefix_dir) + 1:] + + return files_info + + +@contextlib.contextmanager +def _collect_files(image_path): + """Mount image and return a dictionary of paths found there. + + Mounts given image under a temporary directory, walk its contents + and produce a dictionary of absolute->relative paths found on the + image. + + :param image_path: ISO9660 or FAT-formatted image to mount. + :raises: ImageCreationFailed, if image inspection failed. + :returns: a dict mapping absolute paths to relative to the mount point. + """ + if not image_path: + yield {} + return + + with utils.tempdir() as mount_dir: + try: + utils.mount(image_path, mount_dir, '-o', 'loop') + + except processutils.ProcessExecutionError as e: + LOG.exception("Mounting filesystem image %(image)s " + "failed", {'image': image_path}) + raise exception.ImageCreationFailed(image_type='iso', error=e) + + try: + yield _read_dir(mount_dir) + + except EnvironmentError as e: + LOG.exception( + "Examining image %(images)s failed: ", {'image': image_path}) + _umount_without_raise(mount_dir) + raise exception.ImageCreationFailed(image_type='iso', error=e) + + _umount_without_raise(mount_dir) + + +def create_isolinux_image_for_bios( + output_file, kernel, ramdisk, kernel_params=None, configdrive=None): """Creates an isolinux image on the specified file. Copies the provided kernel, ramdisk to a directory, generates the isolinux @@ -169,6 +229,8 @@ :param kernel_params: a list of strings(each element being a string like 'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added as the kernel cmdline. + :param configdrive: ISO9660 or FAT-formatted OpenStack config drive + image. This image will be written onto the built ISO image. Optional. :raises: ImageCreationFailed, if image creation failed while copying files or while running command to generate iso. 
""" @@ -200,11 +262,15 @@ if ldlinux_src: files_info[ldlinux_src] = LDLINUX_BIN - try: - _create_root_fs(tmpdir, files_info) - except (OSError, IOError) as e: - LOG.exception("Creating the filesystem root failed.") - raise exception.ImageCreationFailed(image_type='iso', error=e) + with _collect_files(configdrive) as cfgdrv_files: + files_info.update(cfgdrv_files) + + try: + _create_root_fs(tmpdir, files_info) + + except EnvironmentError as e: + LOG.exception("Creating the filesystem root failed.") + raise exception.ImageCreationFailed(image_type='iso', error=e) cfg = _generate_cfg(kernel_params, CONF.isolinux_config_template, options) @@ -213,7 +279,8 @@ utils.write_to_file(isolinux_cfg, cfg) try: - utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", + utils.execute('mkisofs', '-r', '-V', + 'config-2' if configdrive else 'VMEDIA_BOOT_ISO', '-cache-inodes', '-J', '-l', '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table', '-b', ISOLINUX_BIN, '-o', output_file, tmpdir) @@ -222,9 +289,9 @@ raise exception.ImageCreationFailed(image_type='iso', error=e) -def create_esp_image_for_uefi(output_file, kernel, ramdisk, - deploy_iso=None, esp_image=None, - kernel_params=None): +def create_esp_image_for_uefi( + output_file, kernel, ramdisk, deploy_iso=None, esp_image=None, + kernel_params=None, configdrive=None): """Creates an ESP image on the specified file. Copies the provided kernel, ramdisk and EFI system partition image (ESP) to @@ -244,6 +311,8 @@ :param kernel_params: a list of strings(each element being a string like 'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added as the kernel cmdline. + :param configdrive: ISO9660 or FAT-formatted OpenStack config drive + image. This image will be written onto the built ISO image. Optional. :raises: ImageCreationFailed, if image creation failed while copying files or while running command to generate iso. """ @@ -290,16 +359,20 @@ files_info.update(uefi_path_info) - try: - _create_root_fs(tmpdir, files_info) + with _collect_files(configdrive) as cfgdrv_files: + files_info.update(cfgdrv_files) - except (OSError, IOError) as e: - LOG.exception("Creating the filesystem root failed.") - raise exception.ImageCreationFailed(image_type='iso', error=e) + try: + _create_root_fs(tmpdir, files_info) - finally: - if deploy_iso: - _umount_without_raise(mountdir) + except EnvironmentError as e: + LOG.exception("Creating the filesystem root failed.") + raise exception.ImageCreationFailed( + image_type='iso', error=e) + + finally: + if deploy_iso: + _umount_without_raise(mountdir) # Generate and copy grub config file. grub_conf = _generate_cfg(kernel_params, @@ -308,8 +381,9 @@ # Create the boot_iso. try: - utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", '-l', - '-e', e_img_rel_path, '-no-emul-boot', + utils.execute('mkisofs', '-r', '-V', + 'config-2' if configdrive else 'VMEDIA_BOOT_ISO', + '-l', '-e', e_img_rel_path, '-no-emul-boot', '-o', output_file, tmpdir) except processutils.ProcessExecutionError as e: @@ -437,7 +511,8 @@ def create_boot_iso(context, output_filename, kernel_href, ramdisk_href, deploy_iso_href=None, esp_image_href=None, - root_uuid=None, kernel_params=None, boot_mode=None): + root_uuid=None, kernel_params=None, boot_mode=None, + configdrive_href=None): """Creates a bootable ISO image for a node. Given the hrefs for kernel, ramdisk, root partition's UUID and @@ -455,12 +530,15 @@ ISO is desired. 
:param esp_image_href: URL or glance UUID of FAT12/16/32-formatted EFI system partition image containing the EFI boot loader (e.g. GRUB2) - for each hardware architecture to boot. This image will be embedded - into the ISO image. If not specified, the `deploy_iso_href` option + for each hardware architecture to boot. This image will be written + onto the ISO image. If not specified, the `deploy_iso_href` option is only required for building UEFI-bootable ISO. :param kernel_params: a string containing whitespace-separated kernel cmdline arguments of the form K=V or K (optional). :param boot_mode: the boot mode in which the deploy is to happen. + :param configdrive_href: URL to ISO9660 or FAT-formatted OpenStack config + drive image. This image will be embedded into the built ISO image. + Optional. :raises: ImageCreationFailed, if creating boot ISO failed. """ with utils.tempdir() as tmpdir: @@ -470,6 +548,14 @@ fetch(context, kernel_href, kernel_path) fetch(context, ramdisk_href, ramdisk_path) + if configdrive_href: + configdrive_path = os.path.join( + tmpdir, configdrive_href.split('/')[-1]) + fetch(context, configdrive_href, configdrive_path) + + else: + configdrive_path = None + params = [] if root_uuid: params.append('root=UUID=%s' % root_uuid) @@ -493,17 +579,15 @@ elif CONF.esp_image: esp_image_path = CONF.esp_image - create_esp_image_for_uefi(output_filename, - kernel_path, - ramdisk_path, - deploy_iso=deploy_iso_path, - esp_image=esp_image_path, - kernel_params=params) + create_esp_image_for_uefi( + output_filename, kernel_path, ramdisk_path, + deploy_iso=deploy_iso_path, esp_image=esp_image_path, + kernel_params=params, configdrive=configdrive_path) + else: - create_isolinux_image_for_bios(output_filename, - kernel_path, - ramdisk_path, - params) + create_isolinux_image_for_bios( + output_filename, kernel_path, ramdisk_path, + kernel_params=params, configdrive=configdrive_path) def is_whole_disk_image(ctx, instance_info): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -12,9 +12,11 @@ import copy +from keystoneauth1 import loading as ks_loading from neutronclient.common import exceptions as neutron_exceptions from neutronclient.v2_0 import client as clientv20 from oslo_log import log +from oslo_utils import netutils from oslo_utils import uuidutils import retrying @@ -78,6 +80,49 @@ timeout=CONF.neutron.request_timeout) +def _get_conf_client(context): + """Retrieve a neutron client connection using conf parameters. + + :param context: request context, + instance of ironic.common.context.RequestContext + :returns: A neutron client. + """ + + auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron') + session = ks_loading.load_session_from_conf_options( + CONF, + 'neutron', + auth=auth) + endpoint = keystone.get_endpoint('neutron', session=session, + auth=auth) + return clientv20.Client(session=session, + auth=auth, + endpoint_override=endpoint, + retries=CONF.neutron.retries, + global_request_id=context.global_id, + timeout=CONF.neutron.request_timeout) + + +def update_neutron_port(context, port_id, update_body, client=None): + """Update a neutron port + + Unless a client is passed in, builds a neutron client from the service + configuration and uses it to apply the update, after first verifying + with the requestor's own client that the port is visible to them. This + also allows updating ports in an unbound state. 
+ + :param context: request context, + instance of ironic.common.context.RequestContext + :param port_id: Neutron port ID. + :param update_body: Body of the port update request. + :param client: Optional Neutron client. + """ + if not client: + # verify that user can see the port before updating it + get_client(context=context).show_port(port_id) + client = _get_conf_client(context) + + return client.update_port(port_id, update_body) + + def unbind_neutron_port(port_id, client=None, context=None): """Unbind a neutron port @@ -91,20 +136,17 @@ :raises: NetworkError """ - if not client: - client = get_client(context=context) - body_unbind = {'port': {'binding:host_id': '', 'binding:profile': {}}} body_reset_mac = {'port': {'mac_address': None}} try: - client.update_port(port_id, body_unbind) + update_neutron_port(context, port_id, body_unbind, client) # NOTE(hjensas): We need to reset the mac address in a separate step. # Exception PortBound will be raised by neutron as it refuses to # update the mac address of a bound port if we attempt to unbind and # reset the mac in the same call. - client.update_port(port_id, body_reset_mac) + update_neutron_port(context, port_id, body_reset_mac, client) # NOTE(vsaienko): Ignore if port was deleted before calling vif detach. except neutron_exceptions.PortNotFoundClient: LOG.info('Port %s was not found while unbinding.', port_id) @@ -141,10 +183,10 @@ msg = (_("Failed to remove the current binding from " "Neutron port %s, while updating its MAC " "address.") % port_id) - unbind_neutron_port(port_id, client=client, context=context) + unbind_neutron_port(port_id, context=context) msg = (_("Failed to update MAC address on Neutron port %s.") % port_id) - client.update_port(port_id, port_req_body) + update_neutron_port(context, port_id, port_req_body) # Restore original binding:profile and host_id if binding_host_id: @@ -153,7 +195,7 @@ port_req_body = {'port': {'binding:host_id': binding_host_id, 'binding:profile': binding_profile}} - client.update_port(port_id, port_req_body) + update_neutron_port(context, port_id, port_req_body) except (neutron_exceptions.NeutronClientException, exception.NetworkError): LOG.exception(msg) raise exception.FailedToUpdateMacOnPort(port_id=port_id) @@ -192,6 +234,32 @@ raise exception.NetworkError(msg) + +def _add_ip_addresses_for_ipv6_stateful(context, port, client): + """Add additional IP addresses to the ipv6 stateful neutron port + + When network booting with DHCPv6-stateful we cannot control the CLID/IAID + used by the different clients (UEFI, iPXE, ironic IPA etc.). Multiple + IP address reservation is required in the DHCPv6 server to avoid + NoAddrsAvail issues. + + :param context: request context, + instance of ironic.common.context.RequestContext + :param port: A neutron port + :param client: Neutron client + """ + fixed_ips = port['port']['fixed_ips'] + if (not fixed_ips + or not netutils.is_valid_ipv6(fixed_ips[0]['ip_address'])): + return + + subnet = client.show_subnet( + port['port']['fixed_ips'][0]['subnet_id']).get('subnet') + if subnet and subnet['ipv6_address_mode'] == 'dhcpv6-stateful': + for i in range(1, CONF.neutron.dhcpv6_stateful_address_count): + fixed_ips.append({'subnet_id': subnet['id']}) + + body = {'port': {'fixed_ips': fixed_ips}} + update_neutron_port(context, port['port']['id'], body) + + def add_ports_to_network(task, network_uuid, security_groups=None): """Create neutron ports to boot the ramdisk. 
@@ -227,8 +295,13 @@ 'network_id': network_uuid, 'admin_state_up': True, 'binding:vnic_type': VNIC_BAREMETAL, - 'device_owner': 'baremetal:none', + } + } + # separate out fields that can only be updated by admins + update_body = { + 'port': { 'binding:host_id': node.uuid, + 'device_owner': 'baremetal:none', } } if security_groups: @@ -256,14 +329,15 @@ for ironic_port in ports_to_create: # Start with a clean state for each port port_body = copy.deepcopy(body) + update_port_body = copy.deepcopy(update_body) # Skip ports that are missing required information for deploy. if not validate_port_info(node, ironic_port): failures.append(ironic_port.uuid) continue - port_body['port']['mac_address'] = ironic_port.address + update_port_body['port']['mac_address'] = ironic_port.address binding_profile = {'local_link_information': [portmap[ironic_port.uuid]]} - port_body['port']['binding:profile'] = binding_profile + update_port_body['port']['binding:profile'] = binding_profile if not ironic_port.pxe_enabled: LOG.debug("Adding port %(port)s to network %(net)s for " @@ -279,7 +353,7 @@ 'port %(port_id)s, hostname %(hostname)s', {'port_id': ironic_port.uuid, 'hostname': link_info['hostname']}) - port_body['port']['binding:host_id'] = link_info['hostname'] + update_port_body['port']['binding:host_id'] = link_info['hostname'] # TODO(hamdyk): use portbindings.VNIC_SMARTNIC from neutron-lib port_body['port']['binding:vnic_type'] = VNIC_SMARTNIC @@ -292,9 +366,13 @@ port_body['port']['extra_dhcp_opts'] = extra_dhcp_opts try: if is_smart_nic: - wait_for_host_agent(client, - port_body['port']['binding:host_id']) + wait_for_host_agent( + client, update_port_body['port']['binding:host_id']) port = client.create_port(port_body) + update_neutron_port(task.context, port['port']['id'], + update_port_body) + if CONF.neutron.dhcpv6_stateful_address_count > 1: + _add_ip_addresses_for_ipv6_stateful(task.context, port, client) if is_smart_nic: wait_for_port_status(client, port['port']['id'], 'ACTIVE') except neutron_exceptions.NeutronClientException as e: @@ -703,8 +781,8 @@ LOG.debug('Agent on host %(host_id)s is %(status)s', {'host_id': host_id, 'status': 'up' if is_alive else 'down'}) - if ((target_state == 'up' and is_alive) or - (target_state == 'down' and not is_alive)): + if ((target_state == 'up' and is_alive) + or (target_state == 'down' and not is_alive)): return True raise exception.NetworkError( 'Agent on host %(host)s failed to reach state %(state)s' % { diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/policy.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/policy.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/policy.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/policy.py 2020-04-10 17:06:41.000000000 +0000 @@ -66,6 +66,9 @@ policy.RuleDefault('is_node_owner', 'project_id:%(node.owner)s', description='Owner of node'), + policy.RuleDefault('is_node_lessee', + 'project_id:%(node.lessee)s', + description='Lessee of node'), policy.RuleDefault('is_allocation_owner', 'project_id:%(allocation.owner)s', description='Owner of allocation'), diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/pxe_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/pxe_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/pxe_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/pxe_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -596,8 +596,8 @@ # NOTE(pas-ha) do not report image 
kernel and ramdisk for # local boot or whole disk images so that they are not cached if (node.driver_internal_info.get('is_whole_disk_image') - or deploy_utils.get_boot_option(node) == 'local'): - return image_info + or deploy_utils.get_boot_option(node) == 'local'): + return image_info if ipxe_enabled: root_dir = get_ipxe_root_dir() else: @@ -657,9 +657,9 @@ if ipxe_enabled: image_href = pxe_info[label][0] if (CONF.pxe.ipxe_use_swift - and service_utils.is_glance_image(image_href)): - pxe_opts[option] = images.get_temp_url_for_glance_image( - task.context, image_href) + and service_utils.is_glance_image(image_href)): + pxe_opts[option] = images.get_temp_url_for_glance_image( + task.context, image_href) else: pxe_opts[option] = '/'.join([CONF.deploy.http_url, node.uuid, label]) @@ -809,7 +809,7 @@ def __get_property(properties, key): prop = __return_item_or_first_if_list(properties.get(key, '')) - if prop is not '': + if prop != '': return prop return __return_item_or_first_if_list(properties.get(key + 's', '')) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/release_mappings.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/release_mappings.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/release_mappings.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/release_mappings.py 2020-04-10 17:06:41.000000000 +0000 @@ -214,11 +214,11 @@ } }, 'master': { - 'api': '1.63', + 'api': '1.65', 'rpc': '1.50', 'objects': { 'Allocation': ['1.1'], - 'Node': ['1.33', '1.32'], + 'Node': ['1.34', '1.33', '1.32'], 'Conductor': ['1.3'], 'Chassis': ['1.3'], 'DeployTemplate': ['1.1'], diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/states.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/states.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/states.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/states.py 2020-04-10 17:06:41.000000000 +0000 @@ -244,15 +244,16 @@ """ _LOOKUP_ALLOWED_STATES = (DEPLOYING, DEPLOYWAIT, CLEANING, CLEANWAIT, - INSPECTING, RESCUING, RESCUEWAIT) + INSPECTING, INSPECTWAIT, RESCUING, RESCUEWAIT) LOOKUP_ALLOWED_STATES = frozenset(_LOOKUP_ALLOWED_STATES) """States when API lookups are normally allowed for nodes.""" _FASTTRACK_LOOKUP_ALLOWED_STATES = (ENROLL, MANAGEABLE, AVAILABLE, - DEPLOYING, DEPLOYWAIT, CLEANING, - CLEANWAIT, INSPECTING, RESCUING, - RESCUEWAIT) + DEPLOYING, DEPLOYWAIT, + CLEANING, CLEANWAIT, + INSPECTING, INSPECTWAIT, + RESCUING, RESCUEWAIT) FASTTRACK_LOOKUP_ALLOWED_STATES = frozenset(_FASTTRACK_LOOKUP_ALLOWED_STATES) """States where API lookups are permitted with fast track enabled.""" diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/common/utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/common/utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/common/utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -472,7 +472,12 @@ else: tmpl_name = 'template' loader = jinja2.DictLoader({tmpl_name: template}) - env = jinja2.Environment(loader=loader, autoescape=True) + # NOTE(pas-ha) bandit does not seem to cope with such syntax + # and still complains with B701 for that line + # NOTE(pas-ha) not using default_for_string=False as we set the name + # of the template above for strings too. 
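+            # Editor's illustration of select_autoescape(): it decides
+            # autoescaping from the template name and only falls back to
+            # default_for_string for nameless string templates, e.g.:
+            #   jinja2.select_autoescape()('page.html')  -> True
+            #   jinja2.select_autoescape()('template')   -> False
+            #   jinja2.select_autoescape()(None)         -> True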
+ env = jinja2.Environment(loader=loader, # nosec B701 + autoescape=jinja2.select_autoescape()) tmpl = env.get_template(tmpl_name) return tmpl.render(params, enumerate=enumerate) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/allocations.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/allocations.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/allocations.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/allocations.py 2020-04-10 17:06:41.000000000 +0000 @@ -113,7 +113,7 @@ # UUIDs on the API level. filters['uuid_in'] = allocation.candidate_nodes if allocation.owner: - filters['owner'] = allocation.owner + filters['project'] = allocation.owner nodes = objects.Node.list(context, filters=filters) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/base_manager.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/base_manager.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/base_manager.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/base_manager.py 2020-04-10 17:06:41.000000000 +0000 @@ -476,15 +476,19 @@ node_iter = self.iter_nodes(filters=filters, sort_key=sort_key, sort_dir='asc') - + desired_maintenance = filters.get('maintenance') workers_count = 0 for node_uuid, driver, conductor_group in node_iter: try: with task_manager.acquire(context, node_uuid, purpose='node state check') as task: - if (task.node.maintenance - or task.node.provision_state - not in provision_state): + # Check maintenance value since it could have changed + # after the filtering was done. + if (desired_maintenance is not None + and desired_maintenance != task.node.maintenance): + continue + + if task.node.provision_state not in provision_state: continue target_state = (None if not keep_target_state else diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/deployments.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/deployments.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/deployments.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/deployments.py 2020-04-10 17:06:41.000000000 +0000 @@ -185,7 +185,7 @@ traceback=True, clean_up=False) try: - # This gets the deploy steps (if any) and puts them in the node's + # This gets the deploy steps and puts them in the node's # driver_internal_info['deploy_steps']. conductor_steps.set_node_deployment_steps(task) except exception.InstanceDeployFailure as e: @@ -196,6 +196,14 @@ '%(node)s. 
Error: %(err)s' % {'node': node.uuid, 'err': e}, _("Cannot get deploy steps; failed to deploy: %s") % e) + if not node.driver_internal_info.get('deploy_steps'): + msg = _('Error while getting deploy steps: no steps returned for ' + 'node %s') % node.uuid + utils.deploying_error_handler( + task, msg, + _("No deploy steps returned by the driver")) + raise exception.InstanceDeployFailure(msg) + do_next_deploy_step(task, 0, conductor_id) @@ -249,7 +257,10 @@ {'node': node.uuid, 'step': node.deploy_step, 'err': e}) utils.deploying_error_handler( task, log_msg, - _("Failed to deploy: %s") % node.deploy_step) + _("Failed to deploy: Deploy step %(step)s, " + "error: %(err)s.") % { + 'step': node.deploy_step, + 'err': e}) return except Exception as e: log_msg = ('Node %(node)s failed deploy step %(step)s with ' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/manager.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/manager.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/manager.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/manager.py 2020-04-10 17:06:41.000000000 +0000 @@ -610,9 +610,11 @@ # driver validation may check rescue_password, so save it on the # node early - instance_info = node.instance_info - instance_info['rescue_password'] = rescue_password - node.instance_info = instance_info + i_info = node.instance_info + i_info['rescue_password'] = rescue_password + i_info['hashed_rescue_password'] = utils.hash_password( + rescue_password) + node.instance_info = i_info node.save() try: @@ -886,8 +888,15 @@ task, skip_current_step=skip_current_step) # TODO(rloo): When deprecation period is over and node is in - # states.DEPLOYWAIT only, delete the 'if' and always 'resume'. - if node.provision_state != states.DEPLOYING: + # states.DEPLOYWAIT only, delete the check and always 'resume'. + if node.provision_state == states.DEPLOYING: + LOG.warning('Node %(node)s was found in the state %(state)s ' + 'in the continue_node_deploy RPC call. 
This is ' + 'deprecated, the driver must be updated to leave ' + 'nodes in %(new)s state instead.', + {'node': node.uuid, 'state': states.DEPLOYING, + 'new': states.DEPLOYWAIT}) + else: task.process_event('resume') task.set_spawn_error_hook(utils.spawn_deploying_error_handler, @@ -1285,10 +1294,10 @@ err_handler=utils.provisioning_error_handler) return - if (action == states.VERBS['abort'] and - node.provision_state in (states.CLEANWAIT, - states.RESCUEWAIT, - states.INSPECTWAIT)): + if (action == states.VERBS['abort'] + and node.provision_state in (states.CLEANWAIT, + states.RESCUEWAIT, + states.INSPECTWAIT)): self._do_abort(task) return @@ -1504,11 +1513,11 @@ # NOTE(dtantsur): it's also pointless (and dangerous) to # sync power state when a power action is in progress - if (task.node.provision_state == states.ENROLL or - not task.node.maintenance or - task.node.fault != faults.POWER_FAILURE or - task.node.target_power_state or - task.node.reservation): + if (task.node.provision_state == states.ENROLL + or not task.node.maintenance + or task.node.fault != faults.POWER_FAILURE + or task.node.target_power_state + or task.node.reservation): return False return True @@ -2043,14 +2052,14 @@ node = task.node vif = task.driver.network.get_current_vif(task, port) if ((node.provision_state == states.ACTIVE or node.instance_uuid) - and not node.maintenance and vif): - msg = _("Cannot delete the port %(port)s as node " - "%(node)s is active or has " - "instance UUID assigned or port is bound " - "to vif %(vif)s") - raise exception.InvalidState(msg % {'node': node.uuid, - 'port': port.uuid, - 'vif': vif}) + and not node.maintenance and vif): + msg = _("Cannot delete the port %(port)s as node " + "%(node)s is active or has " + "instance UUID assigned or port is bound " + "to vif %(vif)s") + raise exception.InvalidState(msg % {'node': node.uuid, + 'port': port.uuid, + 'vif': vif}) port.destroy() LOG.info('Successfully deleted port %(port)s. ' 'The node associated with the port was %(node)s', @@ -2318,13 +2327,13 @@ # Only allow updating MAC addresses for active nodes if maintenance # mode is on. if ((node.provision_state == states.ACTIVE or node.instance_uuid) - and 'address' in port_obj.obj_what_changed() - and not node.maintenance): - action = _("Cannot update hardware address for port " - "%(port)s as node %(node)s is active or has " - "instance UUID assigned") - raise exception.InvalidState(action % {'node': node.uuid, - 'port': port_uuid}) + and 'address' in port_obj.obj_what_changed() + and not node.maintenance): + action = _("Cannot update hardware address for port " + "%(port)s as node %(node)s is active or has " + "instance UUID assigned") + raise exception.InvalidState(action % {'node': node.uuid, + 'port': port_uuid}) # If port update is modifying the portgroup membership of the port # or modifying the local_link_connection, pxe_enabled or physical @@ -2972,6 +2981,7 @@ callback_timeout = CONF.conductor.inspect_wait_timeout filters = {'reserved': False, + 'maintenance': False, 'provision_state': states.INSPECTWAIT, 'inspection_started_before': callback_timeout} sort_key = 'inspection_started_at' @@ -3648,7 +3658,7 @@ # node_power_action will update the node record # so don't do that again here. 
utils.node_power_action(task, node.power_state) - except Exception as e: + except Exception: LOG.error( "Failed to change power state of node %(node)s " "to '%(state)s', attempt %(attempt)s of %(retries)s.", diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/steps.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/steps.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/steps.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/steps.py 2020-04-10 17:06:41.000000000 +0000 @@ -436,8 +436,8 @@ # NOTE(mgoddard): we'll need something a little more sophisticated to # track core steps once we split out the single core step. - is_core = (driver_step['interface'] == 'deploy' and - driver_step['step'] == 'deploy') + is_core = (driver_step['interface'] == 'deploy' + and driver_step['step'] == 'deploy') if is_core: error = (_('deploy step %(step)s on interface %(interface)s is a ' 'core step and cannot be overridden by user steps. It ' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/task_manager.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/task_manager.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/task_manager.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/task_manager.py 2020-04-10 17:06:41.000000000 +0000 @@ -495,8 +495,8 @@ 'target': self.node.target_provision_state, 'previous': self._prev_provision_state}) - if (self.node.provision_state.endswith('failed') or - self.node.provision_state == 'error'): + if (self.node.provision_state.endswith('failed') + or self.node.provision_state == 'error'): LOG.error(log_message) else: LOG.info(log_message) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conductor/utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conductor/utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. import contextlib +import crypt import datetime from distutils.version import StrictVersion import secrets @@ -41,6 +42,12 @@ CONF = cfg.CONF +PASSWORD_HASH_FORMAT = { + 'sha256': crypt.METHOD_SHA256, + 'sha512': crypt.METHOD_SHA512, +} + + @task_manager.require_exclusive_lock def node_set_boot_device(task, device, persistent=False): """Set the boot device for a node. @@ -437,7 +444,7 @@ task.process_event('fail', target_state=target_state) -def deploying_error_handler(task, logmsg, errmsg, traceback=False, +def deploying_error_handler(task, logmsg, errmsg=None, traceback=False, clean_up=True): """Put a failed node in DEPLOYFAIL. 
@@ -447,6 +454,7 @@ :param traceback: Boolean; True to log a traceback :param clean_up: Boolean; True to clean up """ + errmsg = errmsg or logmsg node = task.node LOG.error(logmsg, exc_info=traceback) node.last_error = errmsg @@ -706,9 +714,13 @@ instance_info = node.instance_info if 'rescue_password' in instance_info: del instance_info['rescue_password'] - node.instance_info = instance_info - if save: - node.save() + + if 'hashed_rescue_password' in instance_info: + del instance_info['hashed_rescue_password'] + + node.instance_info = instance_info + if save: + node.save() def validate_instance_info_traits(node): @@ -744,15 +756,15 @@ raise exception.InvalidParameterValue(err) -def _notify_conductor_resume_operation(task, operation, method): +def notify_conductor_resume_operation(task, operation): """Notify the conductor to resume an operation. :param task: the task :param operation: the operation, a string - :param method: The name of the RPC method, a string """ - LOG.debug('Sending RPC to conductor to resume %(op)s for node %(node)s', - {'op': operation, 'node': task.node.uuid}) + LOG.debug('Sending RPC to conductor to resume %(op)s steps for node ' + '%(node)s', {'op': operation, 'node': task.node.uuid}) + method = 'continue_node_%s' % operation from ironic.conductor import rpcapi uuid = task.node.uuid rpc = rpcapi.ConductorAPI() @@ -763,12 +775,11 @@ def notify_conductor_resume_clean(task): - _notify_conductor_resume_operation(task, 'cleaning', 'continue_node_clean') + notify_conductor_resume_operation(task, 'clean') def notify_conductor_resume_deploy(task): - _notify_conductor_resume_operation(task, 'deploying', - 'continue_node_deploy') + notify_conductor_resume_operation(task, 'deploy') def skip_automated_cleaning(node): @@ -1105,3 +1116,21 @@ """ return node.driver_internal_info.get( 'agent_secret_token_pregenerated', False) + + +def make_salt(): + """Generate a random salt with the indicator tag for password type. + + :returns: a valid salt for use with crypt.crypt + """ + return crypt.mksalt( + method=PASSWORD_HASH_FORMAT[ + CONF.conductor.rescue_password_hash_algorithm]) + + +def hash_password(password=''): + """Hashes a supplied password. + + :param password: the password to be hashed + """ + return crypt.crypt(password, make_salt()) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/agent.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/agent.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/agent.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/agent.py 2020-04-10 17:06:41.000000000 +0000 @@ -104,7 +104,11 @@ 'service.')), cfg.IntOpt('command_timeout', default=60, - help=_('Timeout (in seconds) for IPA commands.')), + help=_('Timeout (in seconds) for IPA commands. 
' 'Please note, the bootloader installation command ' 'to the agent is permitted a timeout of twice the ' 'value set here, as these are IO-heavy operations ' 'depending on the configuration of the instance.')), cfg.IntOpt('max_command_attempts', default=3, help=_('This is the maximum number of attempts that will be ' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/conductor.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/conductor.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/conductor.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/conductor.py 2020-04-10 17:06:41.000000000 +0000 @@ -252,6 +252,18 @@ mutable=True, help=_('Glance ID, http:// or file:// URL of the initramfs of ' 'the default rescue image.')), + cfg.StrOpt('rescue_password_hash_algorithm', + default='sha256', + choices=['sha256', 'sha512'], + help=_('Password hash algorithm to be used for the rescue ' + 'password.')), + cfg.BoolOpt('require_rescue_password_hashed', + # TODO(TheJulia): Change this to True in Victoria. + default=False, + help=_('Option to cause the conductor to not fall back to ' + 'an un-hashed version of the rescue password, ' + 'permitting rescue with older ironic-python-agent ' + 'ramdisks.')), cfg.StrOpt('bootloader', mutable=True, help=_('Glance ID, http:// or file:// URL of the EFI system ' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/conf/neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/conf/neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -102,6 +102,16 @@ '"neutron" network interface and not used for the ' '"flat" or "noop" network interfaces. If not ' 'specified, the default security group is used.')), + cfg.IntOpt('dhcpv6_stateful_address_count', + default=4, + help=_('Number of IPv6 addresses to allocate for ports created ' + 'for provisioning, cleaning, rescue or inspection on ' + 'DHCPv6-stateful networks. Different stages of the ' + 'chain-loading process will request addresses with ' + 'different CLID/IAID. Due to non-identical identifiers, ' + 'multiple addresses must be reserved for the host to ' + 'ensure each step of the boot process can successfully ' + 'lease addresses.')) ] diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py 2020-04-10 17:06:41.000000000 +0000 @@ -18,13 +18,13 @@ """ +from alembic import op +import sqlalchemy as sa + # revision identifiers, used by Alembic. 
revision = '1e15e7122cc9' down_revision = '2aac7e0872f6' -from alembic import op -import sqlalchemy as sa - def upgrade(): op.add_column('deploy_templates', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/b2ad35726bb0_add_node_lessee.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/b2ad35726bb0_add_node_lessee.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/b2ad35726bb0_add_node_lessee.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/b2ad35726bb0_add_node_lessee.py 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,32 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""add node lessee + +Revision ID: b2ad35726bb0 +Revises: cd2c80feb331 +Create Date: 2020-01-07 20:49:50.851441 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'b2ad35726bb0' +down_revision = 'cd2c80feb331' + + +def upgrade(): + op.add_column('nodes', sa.Column('lessee', sa.String(255), + nullable=True)) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py 2020-04-10 17:06:41.000000000 +0000 @@ -18,13 +18,13 @@ """ +from alembic import op +import sqlalchemy as sa + # revision identifiers, used by Alembic. 
revision = 'cd2c80feb331' down_revision = 'ce6c4b3cf5a2' -from alembic import op -import sqlalchemy as sa - def upgrade(): op.add_column('nodes', sa.Column('retired', sa.Boolean(), nullable=True, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/api.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/api.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/api.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/api.py 2020-04-10 17:06:41.000000000 +0000 @@ -288,7 +288,7 @@ _NODE_QUERY_FIELDS = {'console_enabled', 'maintenance', 'retired', 'driver', 'resource_class', 'provision_state', 'uuid', 'id', 'fault', 'conductor_group', - 'owner'} + 'owner', 'lessee'} _NODE_IN_QUERY_FIELDS = {'%s_in' % field: field for field in ('uuid', 'provision_state')} _NODE_NON_NULL_FILTERS = {'associated': 'instance_uuid', @@ -296,7 +296,7 @@ 'with_power_state': 'power_state'} _NODE_FILTERS = ({'chassis_uuid', 'reserved_by_any_of', 'provisioned_before', 'inspection_started_before', - 'description_contains'} + 'description_contains', 'project'} | _NODE_QUERY_FIELDS | set(_NODE_IN_QUERY_FIELDS) | set(_NODE_NON_NULL_FILTERS)) @@ -354,6 +354,10 @@ if keyword is not None: query = query.filter( models.Node.description.like(r'%{}%'.format(keyword))) + if 'project' in filters: + project = filters['project'] + query = query.filter((models.Node.owner == project) + | (models.Node.lessee == project)) return query diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/models.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/models.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/db/sqlalchemy/models.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/db/sqlalchemy/models.py 2020-04-10 17:06:41.000000000 +0000 @@ -180,6 +180,7 @@ server_default=false()) protected_reason = Column(Text, nullable=True) owner = Column(String(255), nullable=True) + lessee = Column(String(255), nullable=True) allocation_id = Column(Integer, ForeignKey('allocations.id'), nullable=True) description = Column(Text, nullable=True) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/dhcp/neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/dhcp/neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/dhcp/neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/dhcp/neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -61,8 +61,7 @@ port_id, dhcp_options, token=token, context=context) port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}} try: - neutron.get_client(token=token, context=context).update_port( - port_id, port_req_body) + neutron.update_neutron_port(context, port_id, port_req_body) except neutron_client_exc.NeutronClientException: LOG.exception("Failed to update Neutron port %s.", port_id) raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id) @@ -212,7 +211,7 @@ except (exception.FailedToGetIPAddressOnPort, exception.InvalidIPv4Address, exception.NetworkError): - failures.append(obj.uuid) + failures.append(obj.uuid) if failures: obj_name = 'portgroups' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/base.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/base.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/base.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/base.py 2020-04-10 17:06:41.000000000 +0000 @@ -750,7 +750,7 @@ def passthru_handler(*args, 
**kwargs): try: return func(*args, **kwargs) - except exception.IronicException as e: + except exception.IronicException: with excutils.save_and_reraise_exception(): LOG.exception(passthru_logmessage, api_method) except Exception as e: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent_base.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent_base.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent_base.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent_base.py 2020-04-10 17:06:41.000000000 +0000 @@ -42,26 +42,27 @@ METRICS = metrics_utils.get_metrics_logger(__name__) -# This contains a nested dictionary containing the post clean step -# hooks registered for each clean step of every interface. -# Every key of POST_CLEAN_STEP_HOOKS is an interface and its value -# is a dictionary. For this inner dictionary, the key is the name of -# the clean-step method in the interface, and the value is the post -# clean-step hook -- the function that is to be called after successful -# completion of the clean step. +# This contains a nested dictionary containing the post clean/deploy step hooks +# registered for each clean/deploy step of every interface. +# Every key is an interface and its value is a dictionary. For this inner +# dictionary, the key is the name of the clean-/deploy-step method in the +# interface, and the value is the post clean-/deploy-step hook -- the function +# that is to be called after successful completion of the clean/deploy step. # # For example: -# POST_CLEAN_STEP_HOOKS = +# _POST_STEP_HOOKS = +# {'clean': # { # 'raid': {'create_configuration': <post-create hook>, # 'delete_configuration': <post-delete hook>} # } +# } # # It means that method '<post-create hook>' is to be called after # successfully completing the clean step 'create_configuration' of # raid interface. '<post-delete hook>' is to be called after # completing 'delete_configuration' of raid interface. -POST_CLEAN_STEP_HOOKS = {} +_POST_STEP_HOOKS = {'clean': {}, 'deploy': {}} VENDOR_PROPERTIES = { 'deploy_forces_oob_reboot': _( @@ -114,39 +115,70 @@ step hook. """ def decorator(func): - POST_CLEAN_STEP_HOOKS.setdefault(interface, {})[step] = func + _POST_STEP_HOOKS['clean'].setdefault(interface, {})[step] = func return func return decorator -def _get_post_clean_step_hook(node): - """Get post clean step hook for the currently executing clean step. +@METRICS.timer('post_deploy_step_hook') +def post_deploy_step_hook(interface, step): + """Decorator method for adding a post deploy step hook. + + This is a mechanism for adding a post deploy step hook for a particular + deploy step. The hook will get executed after the deploy step gets + executed successfully. The hook is not invoked on failure of the deploy + step. + + Any method to be made as a hook may be decorated with + @post_deploy_step_hook mentioning the interface and step after which the + hook should be executed. A TaskManager instance and the object for the + last completed command (provided by agent) will be passed to the hook + method. The return value of this method will be ignored. Any exception + raised by this method will be treated as a failure of the deploy step and + the node will be moved to DEPLOYFAIL state. - This method reads node.clean_step and returns the post clean - step hook for the currently executing clean step. + :param interface: name of the interface + :param step: The name of the step after which it should be executed.
+ :returns: A method which registers the given method as a post deploy + step hook. + """ + def decorator(func): + _POST_STEP_HOOKS['deploy'].setdefault(interface, {})[step] = func + return func + + return decorator + + +def _get_post_step_hook(node, step_type): + """Get post clean/deploy step hook for the currently executing step. :param node: a node object + :param step_type: 'clean' or 'deploy' :returns: a method if there is a post clean step hook for this clean step; None otherwise """ - interface = node.clean_step.get('interface') - step = node.clean_step.get('step') + step_obj = node.clean_step if step_type == 'clean' else node.deploy_step + interface = step_obj.get('interface') + step = step_obj.get('step') try: - return POST_CLEAN_STEP_HOOKS[interface][step] + return _POST_STEP_HOOKS[step_type][interface][step] except KeyError: pass -def _cleaning_reboot(task): - """Reboots a node out of band after a clean step that requires it. +def _post_step_reboot(task, step_type): + """Reboots a node out of band after a clean/deploy step that requires it. - If an agent clean step has 'reboot_requested': True, reboots the - node when the step is completed. Will put the node in CLEANFAIL - if the node cannot be rebooted. + If an agent step has 'reboot_requested': True, reboots the node when + the step is completed. Will put the node in CLEANFAIL/DEPLOYFAIL if + the node cannot be rebooted. :param task: a TaskManager instance + :param step_type: 'clean' or 'deploy' """ + current_step = (task.node.clean_step if step_type == 'clean' + else task.node.deploy_step) try: # NOTE(fellypefca): Call prepare_ramdisk on ensure that the # baremetal node boots back into the ramdisk after reboot. @@ -154,19 +186,25 @@ task.driver.boot.prepare_ramdisk(task, deploy_opts) manager_utils.node_power_action(task, states.REBOOT) except Exception as e: - msg = (_('Reboot requested by clean step %(step)s failed for ' + msg = (_('Reboot requested by %(type)s step %(step)s failed for ' 'node %(node)s: %(err)s') % - {'step': task.node.clean_step, + {'step': current_step, 'node': task.node.uuid, - 'err': e}) + 'err': e, + 'type': step_type}) LOG.error(msg, exc_info=not isinstance(e, exception.IronicException)) # do not set cleaning_reboot if we didn't reboot - manager_utils.cleaning_error_handler(task, msg) + if step_type == 'clean': + manager_utils.cleaning_error_handler(task, msg) + else: + manager_utils.deploying_error_handler(task, msg) return # Signify that we've rebooted driver_internal_info = task.node.driver_internal_info - driver_internal_info['cleaning_reboot'] = True + field = ('cleaning_reboot' if step_type == 'clean' + else 'deployment_reboot') + driver_internal_info[field] = True if not driver_internal_info.get('agent_secret_token_pregenerated', False): # Wipes out the existing recorded token because the machine will # need to re-establish the token. @@ -175,8 +213,8 @@ task.node.save() -def _get_completed_cleaning_command(task, commands): - """Returns None or a completed cleaning command from the agent. +def _get_completed_command(task, commands, step_type): + """Returns None or a completed clean/deploy command from the agent. :param task: a TaskManager instance to act on. 
:param commands: a set of command results from the agent, typically @@ -187,28 +225,32 @@ last_command = commands[-1] - if last_command['command_name'] != 'execute_clean_step': - # catches race condition where execute_clean_step is still + if last_command['command_name'] != 'execute_%s_step' % step_type: + # catches race condition where execute_step is still # processing so the command hasn't started yet - LOG.debug('Expected agent last command to be "execute_clean_step" ' + LOG.debug('Expected agent last command to be "execute_%(type)s_step" ' 'for node %(node)s, instead got "%(command)s". Waiting ' 'for next heartbeat.', {'node': task.node.uuid, - 'command': last_command['command_name']}) + 'command': last_command['command_name'], + 'type': step_type}) return last_result = last_command.get('command_result') or {} - last_step = last_result.get('clean_step') + last_step = last_result.get('%s_step' % step_type) + current_step = (task.node.clean_step if step_type == 'clean' + else task.node.deploy_step) if last_command['command_status'] == 'RUNNING': - LOG.debug('Clean step still running for node %(node)s: %(step)s', - {'step': last_step, 'node': task.node.uuid}) + LOG.debug('%(type)s step still running for node %(node)s: %(step)s', + {'step': last_step, 'node': task.node.uuid, + 'type': step_type.capitalize()}) return elif (last_command['command_status'] == 'SUCCEEDED' - and last_step != task.node.clean_step): - # A previous clean_step was running, the new command has not yet - # started. - LOG.debug('Clean step not yet started for node %(node)s: %(step)s', - {'step': last_step, 'node': task.node.uuid}) + and last_step != current_step): + # A previous step was running, the new command has not yet started. + LOG.debug('%(type)s step not yet started for node %(node)s: %(step)s', + {'step': last_step, 'node': task.node.uuid, + 'type': step_type.capitalize()}) return else: return last_command @@ -226,39 +268,36 @@ CONF.agent.deploy_logs_collect config option. :param exc: Exception that caused the failure. """ - log_traceback = (exc is not None and - not isinstance(exc, exception.IronicException)) + log_traceback = (exc is not None + and not isinstance(exc, exception.IronicException)) LOG.error(msg, exc_info=log_traceback) deploy_utils.set_failed_state(task, msg, collect_logs=collect_logs) raise exception.InstanceDeployFailure(msg) -def get_clean_steps(task, interface=None, override_priorities=None): - """Get the list of cached clean steps from the agent. - - #TODO(JoshNang) move to BootInterface +def get_steps(task, step_type, interface=None, override_priorities=None): + """Get the list of cached clean or deploy steps from the agent. - The clean steps cache is updated at the beginning of cleaning. + The steps cache is updated at the beginning of cleaning or deploy. :param task: a TaskManager object containing the node - :param interface: The interface for which clean steps + :param step_type: 'clean' or 'deploy' + :param interface: The interface for which clean/deploy steps are to be returned. If this is not provided, it returns the - clean steps for all interfaces. + steps for all interfaces. :param override_priorities: a dictionary with keys being step names and values being new priorities for them. If a step isn't in this dictionary, the step's original priority is used. - :raises NodeCleaningFailure: if the clean steps are not yet cached, - for example, when a node has just been enrolled and has not been - cleaned yet. 
- :returns: A list of clean step dictionaries + :returns: A list of clean/deploy step dictionaries """ node = task.node try: - all_steps = node.driver_internal_info['agent_cached_clean_steps'] + all_steps = node.driver_internal_info['agent_cached_%s_steps' + % step_type] except KeyError: - raise exception.NodeCleaningFailure(_('Cleaning steps are not yet ' - 'available for node %(node)s') - % {'node': node.uuid}) + LOG.debug('%(type)s steps are not yet available for node %(node)s', + {'type': step_type.capitalize(), 'node': node.uuid}) + return [] if interface: steps = [step.copy() for step in all_steps.get(interface, [])] @@ -277,26 +316,40 @@ return steps -def execute_clean_step(task, step): - """Execute a clean step asynchronously on the agent. +def _raise(step_type, msg): + assert step_type in ('clean', 'deploy') + exc = (exception.NodeCleaningFailure if step_type == 'clean' + else exception.InstanceDeployFailure) + raise exc(msg) - #TODO(JoshNang) move to BootInterface + +def execute_step(task, step, step_type): + """Execute a clean or deploy step asynchronously on the agent. :param task: a TaskManager object containing the node - :param step: a clean step dictionary to execute - :raises: NodeCleaningFailure if the agent does not return a command status - :returns: states.CLEANWAIT to signify the step will be completed async + :param step: a step dictionary to execute + :param step_type: 'clean' or 'deploy' + :raises: NodeCleaningFailure (clean step) or InstanceDeployFailure (deploy + step) if the agent does not return a command status. + :returns: states.CLEANWAIT/DEPLOYWAIT to signify the step will be + completed async """ client = _get_client() ports = objects.Port.list_by_node_id( task.context, task.node.id) - result = client.execute_clean_step(step, task.node, ports) + call = getattr(client, 'execute_%s_step' % step_type) + result = call(step, task.node, ports) if not result.get('command_status'): - raise exception.NodeCleaningFailure(_( + _raise(step_type, _( 'Agent on node %(node)s returned bad command result: ' '%(result)s') % {'node': task.node.uuid, 'result': result.get('command_error')}) - return states.CLEANWAIT + return states.CLEANWAIT if step_type == 'clean' else states.DEPLOYWAIT + + +def execute_clean_step(task, step): + # NOTE(dtantsur): left for compatibility with agent-based hardware types. + return execute_step(task, step, 'clean') class HeartbeatMixin(object): @@ -346,19 +399,33 @@ """ + def refresh_steps(self, task, step_type): + """Refresh the node's cached clean steps + + :param task: a TaskManager instance + :param step_type: "clean" or "deploy" + """ + def refresh_clean_steps(self, task): """Refresh the node's cached clean steps :param task: a TaskManager instance + """ + return self.refresh_steps(task, 'clean') + def process_next_step(self, task, step_type): + """Start the next clean/deploy step if the previous one is complete. + + :param task: a TaskManager instance + :param step_type: "clean" or "deploy" """ def continue_cleaning(self, task): """Start the next cleaning step if the previous one is complete. 
:param task: a TaskManager instance """ + return self.process_next_step(task, 'clean') @property def heartbeat_allowed_states(self): @@ -555,53 +622,60 @@ 'erase_devices_metadata': CONF.deploy.erase_devices_metadata_priority, } - return get_clean_steps( - task, interface='deploy', + return get_steps( + task, 'clean', interface='deploy', override_priorities=new_priorities) - @METRICS.timer('AgentDeployMixin.refresh_clean_steps') - def refresh_clean_steps(self, task): - """Refresh the node's cached clean steps from the booted agent. + @METRICS.timer('AgentDeployMixin.refresh_steps') + def refresh_steps(self, task, step_type): + """Refresh the node's cached clean/deploy steps from the booted agent. - Gets the node's clean steps from the booted agent and caches them. + Gets the node's steps from the booted agent and caches them. The steps are cached to make get_clean_steps() calls synchronous, and - should be refreshed as soon as the agent boots to start cleaning or - if cleaning is restarted because of a cleaning version mismatch. + should be refreshed as soon as the agent boots to start cleaning/deploy + or if cleaning is restarted because of a hardware manager version + mismatch. :param task: a TaskManager instance - :raises: NodeCleaningFailure if the agent returns invalid results + :param step_type: 'clean' or 'deploy' + :raises: NodeCleaningFailure or InstanceDeployFailure if the agent + returns invalid results """ node = task.node previous_steps = node.driver_internal_info.get( - 'agent_cached_clean_steps') - LOG.debug('Refreshing agent clean step cache for node %(node)s. ' + 'agent_cached_%s_steps' % step_type) + LOG.debug('Refreshing agent %(type)s step cache for node %(node)s. ' 'Previously cached steps: %(steps)s', - {'node': node.uuid, 'steps': previous_steps}) + {'node': node.uuid, 'type': step_type, + 'steps': previous_steps}) - agent_result = self._client.get_clean_steps(node, task.ports).get( - 'command_result', {}) - missing = set(['clean_steps', 'hardware_manager_version']).difference( - agent_result) + call = getattr(self._client, 'get_%s_steps' % step_type) + agent_result = call(node, task.ports).get('command_result', {}) + missing = set(['%s_steps' % step_type, + 'hardware_manager_version']).difference(agent_result) if missing: - raise exception.NodeCleaningFailure(_( - 'agent get_clean_steps for node %(node)s returned an invalid ' - 'result. Keys: %(keys)s are missing from result: %(result)s.') + _raise(step_type, _( + 'agent get_%(type)s_steps for node %(node)s returned an ' + 'invalid result. Keys: %(keys)s are missing from result: ' + '%(result)s.') % ({'node': node.uuid, 'keys': missing, - 'result': agent_result})) + 'result': agent_result, 'type': step_type})) # agent_result['clean_steps'] looks like # {'HardwareManager': [{step1},{steps2}...], ...} steps = collections.defaultdict(list) - for step_list in agent_result['clean_steps'].values(): + for step_list in agent_result['%s_steps' % step_type].values(): for step in step_list: missing = set(['interface', 'step', 'priority']).difference( step) if missing: - raise exception.NodeCleaningFailure(_( - 'agent get_clean_steps for node %(node)s returned an ' - 'invalid clean step. Keys: %(keys)s are missing from ' - 'step: %(step)s.') % ({'node': node.uuid, - 'keys': missing, 'step': step})) + _raise(step_type, _( + 'agent get_%(type)s_steps for node %(node)s returned ' + 'an invalid %(type)s step. 
Keys: %(keys)s are missing ' 'from step: %(step)s.') % ({'node': node.uuid, + 'keys': missing, + 'step': step, + 'type': step_type})) steps[step['interface']].append(step) @@ -609,12 +683,14 @@ info = node.driver_internal_info info['hardware_manager_version'] = agent_result[ 'hardware_manager_version'] - info['agent_cached_clean_steps'] = dict(steps) - info['agent_cached_clean_steps_refreshed'] = str(timeutils.utcnow()) + info['agent_cached_%s_steps' % step_type] = dict(steps) + info['agent_cached_%s_steps_refreshed' % step_type] = str( + timeutils.utcnow()) node.driver_internal_info = info node.save() - LOG.debug('Refreshed agent clean step cache for node %(node)s: ' - '%(steps)s', {'node': node.uuid, 'steps': steps}) + LOG.debug('Refreshed agent %(type)s step cache for node %(node)s: ' + '%(steps)s', {'node': node.uuid, 'steps': steps, + 'type': step_type}) @METRICS.timer('AgentDeployMixin.execute_clean_step') def execute_clean_step(self, task, step): @@ -626,24 +702,27 @@ status :returns: states.CLEANWAIT to signify the step will be completed async """ - return execute_clean_step(task, step) + return execute_step(task, step, 'clean') - @METRICS.timer('AgentDeployMixin.continue_cleaning') - def continue_cleaning(self, task, **kwargs): - """Start the next cleaning step if the previous one is complete. + @METRICS.timer('AgentDeployMixin.process_next_step') + def process_next_step(self, task, step_type, **kwargs): + """Start the next clean/deploy step if the previous one is complete. In order to avoid errors and make agent upgrades painless, the agent compares the version of all hardware managers at the start of the - cleaning (the agent's get_clean_steps() call) and before executing - each clean step. If the version has changed between steps, the agent is - unable to tell if an ordering change will cause a cleaning issue so - it returns CLEAN_VERSION_MISMATCH. For automated cleaning, we restart - the entire cleaning cycle. For manual cleaning, we don't. + process (the agent's get_clean|deploy_steps() call) and before + executing each step. If the version has changed between steps, + the agent is unable to tell if an ordering change will cause an issue + so it returns CLEAN_VERSION_MISMATCH. For automated cleaning, we + restart the entire cleaning cycle. For manual cleaning or deploy, + we don't. - Additionally, if a clean_step includes the reboot_requested property + Additionally, if a step includes the reboot_requested property set to True, this method will coordinate the reboot once the step is completed. """ + assert step_type in ('clean', 'deploy') + node = task.node # For manual clean, the target provision state is MANAGEABLE, whereas # for automated cleaning, it is (the default) AVAILABLE. @@ -651,47 +730,61 @@ agent_commands = self._client.get_commands_status(task.node) if not agent_commands: - if task.node.driver_internal_info.get('cleaning_reboot'): + field = ('cleaning_reboot' if step_type == 'clean' + else 'deployment_reboot') + if task.node.driver_internal_info.get(field): # Node finished a cleaning step that requested a reboot, and # this is the first heartbeat after booting. Continue cleaning.
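For orientation, the per-node cache that refresh_steps() persists in driver_internal_info is keyed by interface name, each entry carrying the interface/step/priority keys validated above. A sketch of the stored shape; the step names, priorities and the reboot_requested key are purely illustrative:

    # node.driver_internal_info['agent_cached_deploy_steps'] (illustrative):
    {
        'deploy': [
            {'interface': 'deploy', 'step': 'write_image',
             'priority': 80, 'reboot_requested': False},
        ],
        'raid': [
            {'interface': 'raid', 'step': 'apply_configuration',
             'priority': 0, 'reboot_requested': False},
        ],
    }
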
info = task.node.driver_internal_info - info.pop('cleaning_reboot', None) + info.pop(field, None) task.node.driver_internal_info = info task.node.save() - manager_utils.notify_conductor_resume_clean(task) + manager_utils.notify_conductor_resume_operation(task, + step_type) return else: # Agent has no commands whatsoever return - command = _get_completed_cleaning_command(task, agent_commands) - LOG.debug('Cleaning command status for node %(node)s on step %(step)s:' + current_step = (node.clean_step if step_type == 'clean' + else node.deploy_step) + command = _get_completed_command(task, agent_commands, step_type) + LOG.debug('%(type)s command status for node %(node)s on step %(step)s:' ' %(command)s', {'node': node.uuid, - 'step': node.clean_step, - 'command': command}) + 'step': current_step, + 'command': command, + 'type': step_type}) if not command: # Agent command in progress return if command.get('command_status') == 'FAILED': - msg = (_('Agent returned error for clean step %(step)s on node ' + msg = (_('Agent returned error for %(type)s step %(step)s on node ' '%(node)s : %(err)s.') % {'node': node.uuid, 'err': command.get('command_error'), - 'step': node.clean_step}) + 'step': current_step, + 'type': step_type}) LOG.error(msg) return manager_utils.cleaning_error_handler(task, msg) - elif command.get('command_status') == 'CLEAN_VERSION_MISMATCH': + elif command.get('command_status') in ('CLEAN_VERSION_MISMATCH', + 'DEPLOY_VERSION_MISMATCH'): # Cache the new clean steps (and 'hardware_manager_version') try: - self.refresh_clean_steps(task) + self.refresh_steps(task, step_type) except exception.NodeCleaningFailure as e: msg = (_('Could not continue cleaning on node ' '%(node)s: %(err)s.') % {'node': node.uuid, 'err': e}) LOG.exception(msg) return manager_utils.cleaning_error_handler(task, msg) + except exception.InstanceDeployFailure as e: + msg = (_('Could not continue deployment on node ' + '%(node)s: %(err)s.') % + {'node': node.uuid, 'err': e}) + LOG.exception(msg) + return manager_utils.deploying_error_handler(task, msg) if manual_clean: # Don't restart manual cleaning if agent reboots to a new @@ -708,60 +801,77 @@ node.driver_internal_info = driver_internal_info node.save() else: - # Restart cleaning, agent must have rebooted to new version - LOG.info('During automated cleaning, node %s detected a ' - 'clean version mismatch. Resetting clean steps ' - 'and rebooting the node.', node.uuid) + # Restart the process, agent must have rebooted to new version + LOG.info('During %(type)s, node %(node)s detected a ' + '%(type)s version mismatch. 
Resetting %(type)s steps ' + 'and rebooting the node.', + {'type': step_type, 'node': node.uuid}) try: conductor_steps.set_node_cleaning_steps(task) - except exception.NodeCleaningFailure: + except exception.NodeCleaningFailure as e: msg = (_('Could not restart automated cleaning on node ' - '%(node)s: %(err)s.') % - {'node': node.uuid, - 'err': command.get('command_error'), + '%(node)s after step %(step)s: %(err)s.') % + {'node': node.uuid, 'err': e, 'step': node.clean_step}) LOG.exception(msg) return manager_utils.cleaning_error_handler(task, msg) + except exception.InstanceDeployFailure as e: + msg = (_('Could not restart deployment on node ' + '%(node)s after step %(step)s: %(err)s.') % + {'node': node.uuid, 'err': e, + 'step': node.deploy_step}) + LOG.exception(msg) + return manager_utils.deploying_error_handler(task, msg) - manager_utils.notify_conductor_resume_clean(task) + manager_utils.notify_conductor_resume_operation(task, step_type) elif command.get('command_status') == 'SUCCEEDED': - clean_step_hook = _get_post_clean_step_hook(node) - if clean_step_hook is not None: - LOG.debug('For node %(node)s, executing post clean step ' - 'hook %(method)s for clean step %(step)s', - {'method': clean_step_hook.__name__, + step_hook = _get_post_step_hook(node, step_type) + if step_hook is not None: + LOG.debug('For node %(node)s, executing post %(type)s step ' + 'hook %(method)s for %(type)s step %(step)s', + {'method': step_hook.__name__, 'node': node.uuid, - 'step': node.clean_step}) + 'step': current_step, + 'type': step_type}) try: - clean_step_hook(task, command) + step_hook(task, command) except Exception as e: - msg = (_('For node %(node)s, post clean step hook ' - '%(method)s failed for clean step %(step)s.' + msg = (_('For node %(node)s, post %(type)s step hook ' + '%(method)s failed for %(type)s step %(step)s.' '%(cls)s: %(error)s') % - {'method': clean_step_hook.__name__, + {'method': step_hook.__name__, 'node': node.uuid, 'error': e, 'cls': e.__class__.__name__, - 'step': node.clean_step}) + 'step': current_step, + 'type': step_type}) LOG.exception(msg) - return manager_utils.cleaning_error_handler(task, msg) + if step_type == 'clean': + return manager_utils.cleaning_error_handler(task, msg) + else: + return manager_utils.deploying_error_handler(task, msg) - if task.node.clean_step.get('reboot_requested'): - _cleaning_reboot(task) + if current_step.get('reboot_requested'): + _post_step_reboot(task, step_type) return - LOG.info('Agent on node %s returned cleaning command success, ' - 'moving to next clean step', node.uuid) - manager_utils.notify_conductor_resume_clean(task) + LOG.info('Agent on node %(node)s returned %(type)s command ' + 'success, moving to next step', + {'node': node.uuid, 'type': step_type}) + manager_utils.notify_conductor_resume_operation(task, step_type) else: - msg = (_('Agent returned unknown status for clean step %(step)s ' - 'on node %(node)s : %(err)s.') % + msg = (_('Agent returned unknown status for %(type)s step %(step)s' + ' on node %(node)s : %(err)s.') % {'node': node.uuid, 'err': command.get('command_status'), - 'step': node.clean_step}) + 'step': current_step, + 'type': step_type}) LOG.error(msg) - return manager_utils.cleaning_error_handler(task, msg) + if step_type == 'clean': + return manager_utils.cleaning_error_handler(task, msg) + else: + return manager_utils.deploying_error_handler(task, msg) @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy') def reboot_and_finish_deploy(self, task): @@ -846,6 +956,10 @@ # powered off. 
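The hook dispatch in the SUCCEEDED branch above is fed by the post_deploy_step_hook decorator introduced earlier in this file. A minimal registration sketch, mirroring the RAID create/delete_configuration example from the module comment; the interface, step name and hook body here are hypothetical:

    from ironic.drivers.modules import agent_base

    @agent_base.post_deploy_step_hook(interface='raid',
                                      step='apply_configuration')
    def _store_raid_config(task, command):
        # Called only after the agent reports this deploy step SUCCEEDED;
        # any exception raised here sends the node to DEPLOYFAIL.
        result = command.get('command_result') or {}
        task.node.raid_config = result.get('raid_config', {})
        task.node.save()
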
log_and_raise_deployment_error(task, msg, collect_logs=False, exc=e) + + # TODO(dtantsur): remove these two calls when this function becomes a + # real deploy step. + task.process_event('wait') manager_utils.notify_conductor_resume_deploy(task) @METRICS.timer('AgentDeployMixin.prepare_instance_to_boot') @@ -943,9 +1057,9 @@ # For whole disk images it is not necessary that the root_uuid # be provided since the bootloaders on the disk will be used whole_disk_image = internal_info.get('is_whole_disk_image') - if (software_raid or (root_uuid and not whole_disk_image) or - (whole_disk_image and - boot_mode_utils.get_boot_mode(node) == 'uefi')): + if (software_raid or (root_uuid and not whole_disk_image) + or (whole_disk_image + and boot_mode_utils.get_boot_mode(node) == 'uefi')): LOG.debug('Installing the bootloader for node %(node)s on ' 'partition %(part)s, EFI system partition %(efi)s', {'node': node.uuid, 'part': root_uuid, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent_client.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent_client.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent_client.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent_client.py 2020-04-10 17:06:41.000000000 +0000 @@ -61,7 +61,8 @@ retry_on_exception=( lambda e: isinstance(e, exception.AgentConnectionFailed)), stop_max_attempt_number=CONF.agent.max_command_attempts) - def _command(self, node, method, params, wait=False): + def _command(self, node, method, params, wait=False, + command_timeout_factor=1): """Sends command to agent. :param node: A Node object. @@ -71,6 +72,13 @@ body. :param wait: True to wait for the command to finish executing, False otherwise. + :param command_timeout_factor: An integer, default 1, by which to + multiply the [agent]command_timeout + value. This is intended for use with + extremely long running commands to + the agent ramdisk where a general + timeout value should not be extended + in all cases. :raises: IronicException when failed to issue the request or there was a malformed response from the agent. :raises: AgentAPIError when agent failed to execute specified command. @@ -89,8 +97,9 @@ {'node': node.uuid, 'method': method}) try: - response = self.session.post(url, params=request_params, data=body, - timeout=CONF.agent.command_timeout) + response = self.session.post( + url, params=request_params, data=body, + timeout=CONF.agent.command_timeout * command_timeout_factor) except (requests.ConnectionError, requests.Timeout) as e: msg = (_('Failed to connect to the agent running on node %(node)s ' 'for invoking command %(method)s. Error: %(error)s') % @@ -137,6 +146,10 @@ return result @METRICS.timer('AgentClient.get_commands_status') + @retrying.retry( + retry_on_exception=( + lambda e: isinstance(e, exception.AgentConnectionFailed)), + stop_max_attempt_number=CONF.agent.max_command_attempts) def get_commands_status(self, node): """Get command status from agent. 
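Two reliability tweaks meet in this file: get_commands_status() is now retried on AgentConnectionFailed (up to [agent]max_command_attempts times), and individual commands can stretch their request timeout via command_timeout_factor rather than a global bump. The arithmetic is a plain multiplication; assuming the default [agent]command_timeout of 60 seconds at the time of writing:

    # install_bootloader passes command_timeout_factor=2, so the POST waits
    # up to 60 * 2 = 120 seconds for the ramdisk to answer:
    timeout = CONF.agent.command_timeout * command_timeout_factor
    response = self.session.post(url, params=request_params, data=body,
                                 timeout=timeout)
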
@@ -159,6 +172,9 @@ * a dictionary containing keys clean_result and clean_step for the command clean.execute_clean_step; + * a dictionary containing keys deploy_result + and deploy_step for the command + deploy.execute_deploy_step; * a string representing result message for the command standby.cache_image; * None for the command standby.sync.> """ url = self._get_command_url(node) LOG.debug('Fetching status of agent commands for node %s', node.uuid) - resp = self.session.get(url, timeout=CONF.agent.command_timeout) + try: + resp = self.session.get(url, timeout=CONF.agent.command_timeout) + except (requests.ConnectionError, requests.Timeout) as e: + msg = (_('Failed to connect to the agent running on node %(node)s ' + 'to collect commands status. ' + 'Error: %(error)s') % + {'node': node.uuid, 'error': e}) + LOG.error(msg) + raise exception.AgentConnectionFailed(reason=msg) + result = resp.json()['commands'] status = '; '.join('%(cmd)s: result "%(res)s", error "%(err)s"' % {'cmd': r.get('command_name'), @@ -255,10 +280,20 @@ params = {'root_uuid': root_uuid, 'efi_system_part_uuid': efi_system_part_uuid, 'prep_boot_part_uuid': prep_boot_part_uuid} + + # NOTE(TheJulia): This command explicitly sends a larger timeout + # factor to the _command call such that the agent ramdisk has enough + # time to perform its work. + # TODO(TheJulia): We should likely split install_bootloader into many + # commands at some point, even though that would not be backwards + # compatible. We could at least begin to delineate the commands apart + # over the next cycle or two so we don't need a command timeout + # extension factor. return self._command(node=node, method='image.install_bootloader', params=params, - wait=True) + wait=True, + command_timeout_factor=2) @METRICS.timer('AgentClient.get_clean_steps') def get_clean_steps(self, node, ports): @@ -323,6 +358,69 @@ method='clean.execute_clean_step', params=params) + @METRICS.timer('AgentClient.get_deploy_steps') + def get_deploy_steps(self, node, ports): + """Get deploy steps from agent. + + :param node: A node object. + :param ports: Ports associated with the node. + :raises: IronicException when failed to issue the request or there was + a malformed response from the agent. + :raises: AgentAPIError when agent failed to execute specified command. + :returns: A dict containing command response from agent. + See :func:`get_commands_status` for a command result sample. + The value of key command_result is in the form of: + + :: + + { + 'deploy_steps': <list of deploy steps>, + 'hardware_manager_version': <hardware manager version> + } + + """ + params = { + 'node': node.as_dict(secure=True), + 'ports': [port.as_dict() for port in ports] + } + return self._command(node=node, + method='deploy.get_deploy_steps', + params=params, + wait=True) + + @METRICS.timer('AgentClient.execute_deploy_step') + def execute_deploy_step(self, step, node, ports): + """Execute specified deploy step. + + :param step: A deploy step dictionary to execute. + :param node: A Node object. + :param ports: Ports associated with the node. + :raises: IronicException when failed to issue the request or there was + a malformed response from the agent. + :raises: AgentAPIError when agent failed to execute specified command. + :returns: A dict containing command response from agent. + See :func:`get_commands_status` for a command result sample. 
+ The value of key command_result is in the form of: + + :: + + { + 'deploy_result': <result of deploy step>, + 'deploy_step': <deploy step> + } + + """ + params = { + 'step': step, + 'node': node.as_dict(secure=True), + 'ports': [port.as_dict() for port in ports], + 'deploy_version': node.driver_internal_info.get( + 'hardware_manager_version') + } + return self._command(node=node, + method='deploy.execute_deploy_step', + params=params) + @METRICS.timer('AgentClient.power_off') def power_off(self, node): """Soft powers off the bare metal node by shutting down ramdisk OS. @@ -379,15 +477,35 @@ to issue the request, or there was a malformed response from the agent. :raises: AgentAPIError when agent failed to execute specified command. + :raises: InstanceRescueFailure when the agent ramdisk is too old + to support transmission of the rescue password. :returns: A dict containing command response from agent. See :func:`get_commands_status` for a command result sample. """ - rescue_pass = node.instance_info.get('rescue_password') + rescue_pass = node.instance_info.get('hashed_rescue_password') + # TODO(TheJulia): Remove fallback to use the fallback_rescue_password + # in the Victoria cycle. + fallback_rescue_pass = node.instance_info.get( + 'rescue_password') if not rescue_pass: raise exception.IronicException(_('Agent rescue requires ' 'rescue_password in ' 'instance_info')) - params = {'rescue_password': rescue_pass} - return self._command(node=node, - method='rescue.finalize_rescue', - params=params) + params = {'rescue_password': rescue_pass, + 'hashed': True} + try: + return self._command(node=node, + method='rescue.finalize_rescue', + params=params) + except exception.AgentAPIError: + if CONF.conductor.require_rescue_password_hashed: + raise exception.InstanceRescueFailure( + _('Unable to rescue node due to an out of date agent ' + 'ramdisk. Please contact the administrator to update ' + 'the rescue ramdisk to contain an ironic-python-agent ' + 'version of at least 6.0.0.')) + else: + params = {'rescue_password': fallback_rescue_pass} + return self._command(node=node, + method='rescue.finalize_rescue', + params=params) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/agent.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/agent.py 2020-04-10 17:06:41.000000000 +0000 @@ -152,8 +152,8 @@ :raises: MissingParameterValue if required option(s) is not set.
""" image_source = node.instance_info.get('image_source') - if (not service_utils.is_glance_image(image_source) or - CONF.agent.image_download_source != 'http'): + if (not service_utils.is_glance_image(image_source) + or CONF.agent.image_download_source != 'http'): return params = { @@ -221,8 +221,8 @@ if node.instance_info.get('image_checksum'): image_info['checksum'] = node.instance_info['image_checksum'] - if (node.instance_info.get('image_os_hash_algo') and - node.instance_info.get('image_os_hash_value')): + if (node.instance_info.get('image_os_hash_algo') + and node.instance_info.get('image_os_hash_value')): image_info['os_hash_algo'] = node.instance_info[ 'image_os_hash_algo'] image_info['os_hash_value'] = node.instance_info[ diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ansible/deploy.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ansible/deploy.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ansible/deploy.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ansible/deploy.py 2020-04-10 17:06:41.000000000 +0000 @@ -608,6 +608,9 @@ self.reboot_and_finish_deploy(task) task.driver.boot.clean_up_ramdisk(task) + # TODO(dtantsur): remove these two calls when this function becomes a + # real deploy step. + task.process_event('wait') manager_utils.notify_conductor_resume_deploy(task) @METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/drac/raid.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/drac/raid.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/drac/raid.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/drac/raid.py 2020-04-10 17:06:41.000000000 +0000 @@ -769,8 +769,9 @@ for disks_count in range(min_disks, candidate_max_disks + 1): if ('number_of_physical_disks' in logical_disk - and logical_disk['number_of_physical_disks'] != disks_count): - continue + and (logical_disk['number_of_physical_disks'] + != disks_count)): + continue # skip invalid disks_count if disks_count != _usable_disks_count(logical_disk['raid_level'], @@ -839,6 +840,40 @@ 'raid_config_parameters': raid_config_parameters} +def _validate_volume_size(node, logical_disks): + new_physical_disks = list_physical_disks(node) + free_space_mb = {} + new_processed_volumes = [] + for disk in new_physical_disks: + free_space_mb[disk] = disk.free_size_mb + + for logical_disk in logical_disks: + selected_disks = [disk for disk in new_physical_disks + if disk.id in logical_disk['physical_disks']] + + spans_count = _calculate_spans( + logical_disk['raid_level'], len(selected_disks)) + + new_max_vol_size_mb = _max_volume_size_mb( + logical_disk['raid_level'], + selected_disks, + free_space_mb, + spans_count=spans_count) + + if logical_disk['size_mb'] > new_max_vol_size_mb: + logical_disk['size_mb'] = new_max_vol_size_mb + LOG.info("Logical size does not match so calculating volume " + "properties for current logical_disk") + _calculate_volume_props( + logical_disk, new_physical_disks, free_space_mb) + new_processed_volumes.append(logical_disk) + + if new_processed_volumes: + return new_processed_volumes + + return logical_disks + + def _commit_to_controllers(node, controllers, substep="completed"): """Commit changes to RAID controllers on the node. 
@@ -931,6 +966,13 @@ logical_disks_to_create = node.driver_internal_info[ 'logical_disks_to_create'] + # Check valid properties attached to volume after drives conversion + isVolValidationNeeded = node.driver_internal_info[ + 'volume_validation'] + if isVolValidationNeeded: + logical_disks_to_create = _validate_volume_size( + node, logical_disks_to_create) + controllers = list() for logical_disk in logical_disks_to_create: controller = dict() @@ -1076,8 +1118,6 @@ driver_internal_info = node.driver_internal_info driver_internal_info[ "logical_disks_to_create"] = logical_disks_to_create - node.driver_internal_info = driver_internal_info - node.save() commit_results = None if logical_disks_to_create: @@ -1091,6 +1131,11 @@ controllers_to_physical_disk_ids, substep="create_virtual_disks") + volume_validation = True if commit_results else False + driver_internal_info['volume_validation'] = volume_validation + node.driver_internal_info = driver_internal_info + node.save() + if commit_results: return commit_results else: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ilo/management.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ilo/management.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ilo/management.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ilo/management.py 2020-04-10 17:06:41.000000000 +0000 @@ -735,10 +735,10 @@ for device_type, pattern in erase_pattern.items(): if device_type == 'hdd' and pattern in ( 'overwrite', 'crypto', 'zero'): - continue + continue elif device_type == 'ssd' and pattern in ( 'block', 'crypto', 'zero'): - continue + continue else: invalid = True break diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ilo/power.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ilo/power.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ilo/power.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ilo/power.py 2020-04-10 17:06:41.000000000 +0000 @@ -127,8 +127,8 @@ use_post_state = False if _can_get_server_post_state(node): use_post_state = True - if (target_state in [states.POWER_OFF, states.SOFT_POWER_OFF] or - target_state == states.SOFT_REBOOT and not is_final_state): + if (target_state in [states.POWER_OFF, states.SOFT_POWER_OFF] + or target_state == states.SOFT_REBOOT and not is_final_state): state_to_check = ilo_common.POST_POWEROFF_STATE else: # It may not be able to finish POST if no bootable device is diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ipmitool.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ipmitool.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/ipmitool.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/ipmitool.py 2020-04-10 17:06:41.000000000 +0000 @@ -287,8 +287,8 @@ password = str(info.get('ipmi_password', '')) hex_kg_key = info.get('ipmi_hex_kg_key') dest_port = info.get('ipmi_port') - port = (info.get('ipmi_terminal_port') or - internal_info.get('allocated_ipmi_terminal_port')) + port = (info.get('ipmi_terminal_port') + or internal_info.get('allocated_ipmi_terminal_port')) priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR') bridging_type = info.get('ipmi_bridging', 'no') local_address = info.get('ipmi_local_address') @@ -527,8 +527,8 @@ with excutils.save_and_reraise_exception() as ctxt: err_list = [ x for x in ( 
- IPMITOOL_RETRYABLE_FAILURES + - CONF.ipmi.additional_retryable_ipmi_errors) + IPMITOOL_RETRYABLE_FAILURES + + CONF.ipmi.additional_retryable_ipmi_errors) if x in str(e)] if ((time.time() > end_time) or (num_tries == 0) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/network/common.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/network/common.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/network/common.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/network/common.py 2020-04-10 17:06:41.000000000 +0000 @@ -283,7 +283,7 @@ neutron.wait_for_host_agent(client, body['port']['binding:host_id']) try: - client.update_port(vif_id, body) + neutron.update_neutron_port(task.context, vif_id, body) if is_smart_nic: neutron.wait_for_port_status(client, vif_id, 'ACTIVE') except neutron_exceptions.ConnectionFailed as e: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/network/flat.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/network/flat.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/network/flat.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/network/flat.py 2020-04-10 17:06:41.000000000 +0000 @@ -55,7 +55,6 @@ def _bind_flat_ports(self, task): LOG.debug("Binding flat network ports") - client = neutron.get_client(context=task.context) for port_like_obj in task.ports + task.portgroups: vif_port_id = ( port_like_obj.internal_info.get(common.TENANT_VIF_KEY) @@ -71,7 +70,8 @@ } } try: - client.update_port(vif_port_id, body) + neutron.update_neutron_port(task.context, + vif_port_id, body) except neutron_exceptions.NeutronClientException as e: msg = (_('Unable to set binding:host_id for ' 'neutron port %(port_id)s. Error: ' @@ -87,8 +87,8 @@ portgroups = task.portgroups for port_like_obj in ports + portgroups: vif_port_id = ( - port_like_obj.internal_info.get(common.TENANT_VIF_KEY) or - port_like_obj.extra.get('vif_port_id')) + port_like_obj.internal_info.get(common.TENANT_VIF_KEY) + or port_like_obj.extra.get('vif_port_id')) if not vif_port_id: continue neutron.unbind_neutron_port(vif_port_id, context=task.context) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/boot.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/boot.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/boot.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/boot.py 2020-04-10 17:06:41.000000000 +0000 @@ -20,6 +20,7 @@ from ironic_lib import utils as ironic_utils from oslo_log import log +from oslo_serialization import base64 from oslo_utils import importutils from ironic.common import boot_devices @@ -411,7 +412,8 @@ @classmethod def _prepare_iso_image(cls, task, kernel_href, ramdisk_href, - bootloader_href=None, root_uuid=None, params=None): + bootloader_href=None, configdrive=None, + root_uuid=None, params=None): """Prepare an ISO to boot the node. Build bootable ISO out of `kernel_href` and `ramdisk_href` (and @@ -423,6 +425,9 @@ :param ramdisk_href: URL or Glance UUID of the ramdisk to use :param bootloader_href: URL or Glance UUID of the EFI bootloader image to use when creating UEFI bootbable ISO + :param configdrive: URL to or a compressed blob of a ISO9660 or + FAT-formatted OpenStack config drive image. This image will be + written onto the built ISO image. 
Optional. :param root_uuid: optional uuid of the root partition. :param params: a dictionary containing 'parameter name'->'value' mapping to be passed to kernel command line. @@ -467,24 +472,48 @@ 'params': kernel_params}) with tempfile.NamedTemporaryFile( - dir=CONF.tempdir, suffix='.iso') as fileobj: - boot_iso_tmp_file = fileobj.name - images.create_boot_iso( - task.context, boot_iso_tmp_file, - kernel_href, ramdisk_href, - esp_image_href=bootloader_href, - root_uuid=root_uuid, - kernel_params=kernel_params, - boot_mode=boot_mode) - - iso_object_name = cls._get_iso_image_name(task.node) - - image_url = cls._publish_image(boot_iso_tmp_file, iso_object_name) - - LOG.debug("Created ISO %(name)s in Swift for node %(node)s, exposed " - "as temporary URL %(url)s", {'node': task.node.uuid, - 'name': iso_object_name, - 'url': image_url}) + dir=CONF.tempdir, suffix='.iso') as boot_fileobj: + + with tempfile.NamedTemporaryFile( + dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj: + + configdrive_href = configdrive + + if configdrive: + parsed_url = urlparse.urlparse(configdrive) + if not parsed_url.scheme: + cfgdrv_blob = base64.decode_as_bytes(configdrive) + + with open(cfgdrv_fileobj.name, 'wb') as f: + f.write(cfgdrv_blob) + + configdrive_href = urlparse.urlunparse( + ('file', '', cfgdrv_fileobj.name, '', '', '')) + + LOG.info("Burning configdrive %(url)s to boot ISO image " + "for node %(node)s", {'url': configdrive_href, + 'node': task.node.uuid}) + + boot_iso_tmp_file = boot_fileobj.name + images.create_boot_iso( + task.context, boot_iso_tmp_file, + kernel_href, ramdisk_href, + esp_image_href=bootloader_href, + configdrive_href=configdrive_href, + root_uuid=root_uuid, + kernel_params=kernel_params, + boot_mode=boot_mode) + + iso_object_name = cls._get_iso_image_name(task.node) + + image_url = cls._publish_image( + boot_iso_tmp_file, iso_object_name) + + LOG.debug("Created ISO %(name)s in object store for node %(node)s, " + "exposed as temporary URL " + "%(url)s", {'node': task.node.uuid, + 'name': iso_object_name, + 'url': image_url}) return image_url @@ -758,8 +787,8 @@ self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD) self._cleanup_iso_image(task) - if (config_via_floppy and - self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)): + if (config_via_floppy + and self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)): self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY) self._cleanup_floppy_image(task) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/inspect.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/inspect.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/inspect.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/inspect.py 2020-04-10 17:06:41.000000000 +0000 @@ -120,13 +120,74 @@ "for node %(node)s", {'node': task.node.uuid, 'arch': arch}) + # TODO(etingof): should we respect root device hints here? + local_gb = self._detect_local_gb(task, system) + + if local_gb: + inspected_properties['local_gb'] = str(local_gb) + else: + LOG.warning("Could not provide a valid storage size configured " + "for node %(node)s. 
Assuming this is a disk-less node", + {'node': task.node.uuid}) + inspected_properties['local_gb'] = '0' + + if system.boot.mode: + if not drivers_utils.get_node_capability(task.node, 'boot_mode'): + capabilities = utils.get_updated_capabilities( + inspected_properties.get('capabilities', ''), + {'boot_mode': BOOT_MODE_MAP[system.boot.mode]}) + + inspected_properties['capabilities'] = capabilities + + valid_keys = self.ESSENTIAL_PROPERTIES + missing_keys = valid_keys - set(inspected_properties) + if missing_keys: + error = (_('Failed to discover the following properties: ' + '%(missing_keys)s on node %(node)s'), + {'missing_keys': ', '.join(missing_keys), + 'node': task.node.uuid}) + raise exception.HardwareInspectionFailure(error=error) + + task.node.properties = inspected_properties + task.node.save() + + LOG.debug("Node properties for %(node)s are updated as " + "%(properties)s", {'properties': inspected_properties, + 'node': task.node.uuid}) + + self._create_ports(task, system) + + return states.MANAGEABLE + + def _create_ports(self, task, system): + if (system.ethernet_interfaces + and system.ethernet_interfaces.summary): + macs = system.ethernet_interfaces.summary + + # Create ports for the discovered NICs being in 'enabled' state + enabled_macs = {nic_mac: nic_state + for nic_mac, nic_state in macs.items() + if nic_state == sushy.STATE_ENABLED} + if enabled_macs: + inspect_utils.create_ports_if_not_exist( + task, enabled_macs, get_mac_address=lambda x: x[0]) + else: + LOG.warning("Not attempting to create any port as no NICs " + "were discovered in 'enabled' state for node " + "%(node)s: %(mac_data)s", + {'mac_data': macs, 'node': task.node.uuid}) + else: + LOG.warning("No NIC information discovered " + "for node %(node)s", {'node': task.node.uuid}) + + def _detect_local_gb(self, task, system): simple_storage_size = 0 try: LOG.debug("Attempting to discover system simple storage size for " "node %(node)s", {'node': task.node.uuid}) - if (system.simple_storage and - system.simple_storage.disks_sizes_bytes): + if (system.simple_storage + and system.simple_storage.disks_sizes_bytes): simple_storage_size = [ size for size in system.simple_storage.disks_sizes_bytes if size >= 4 * units.Gi @@ -184,60 +245,4 @@ # Note(deray): Convert the received size to GiB and reduce the # value by 1 GB as consumers like Ironic requires the ``local_gb`` # to be returned 1 less than actual size. - local_gb = max(0, int(local_gb / units.Gi - 1)) - - # TODO(etingof): should we respect root device hints here? - - if local_gb: - inspected_properties['local_gb'] = str(local_gb) - else: - LOG.warning("Could not provide a valid storage size configured " - "for node %(node)s. 
Assuming this is a disk-less node", - {'node': task.node.uuid}) - inspected_properties['local_gb'] = '0' - - if system.boot.mode: - if not drivers_utils.get_node_capability(task.node, 'boot_mode'): - capabilities = utils.get_updated_capabilities( - inspected_properties.get('capabilities', ''), - {'boot_mode': BOOT_MODE_MAP[system.boot.mode]}) - - inspected_properties['capabilities'] = capabilities - - valid_keys = self.ESSENTIAL_PROPERTIES - missing_keys = valid_keys - set(inspected_properties) - if missing_keys: - error = (_('Failed to discover the following properties: ' - '%(missing_keys)s on node %(node)s'), - {'missing_keys': ', '.join(missing_keys), - 'node': task.node.uuid}) - raise exception.HardwareInspectionFailure(error=error) - - task.node.properties = inspected_properties - task.node.save() - - LOG.debug("Node properties for %(node)s are updated as " - "%(properties)s", {'properties': inspected_properties, - 'node': task.node.uuid}) - - if (system.ethernet_interfaces and - system.ethernet_interfaces.summary): - macs = system.ethernet_interfaces.summary - - # Create ports for the discovered NICs being in 'enabled' state - enabled_macs = {nic_mac: nic_state - for nic_mac, nic_state in macs.items() - if nic_state == sushy.STATE_ENABLED} - if enabled_macs: - inspect_utils.create_ports_if_not_exist( - task, enabled_macs, get_mac_address=lambda x: x[0]) - else: - LOG.warning("Not attempting to create any port as no NICs " - "were discovered in 'enabled' state for node " - "%(node)s: %(mac_data)s", - {'mac_data': macs, 'node': task.node.uuid}) - else: - LOG.warning("No NIC information discovered " - "for node %(node)s", {'node': task.node.uuid}) - - return states.MANAGEABLE + return max(0, int(local_gb / units.Gi - 1)) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/management.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/management.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/management.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/management.py 2020-04-10 17:06:41.000000000 +0000 @@ -127,7 +127,7 @@ system = redfish_utils.get_system(task.node) try: - system.set_system_boot_source( + system.set_system_boot_options( BOOT_DEVICE_MAP_REV[device], enabled=BOOT_DEVICE_PERSISTENT_MAP_REV[persistent]) except sushy.exceptions.SushyError as e: @@ -189,27 +189,8 @@ """ system = redfish_utils.get_system(task.node) - boot_device = system.boot.get('target') - if not boot_device: - error_msg = (_('Cannot change boot mode on node %(node)s ' - 'because its boot device is not set.') % - {'node': task.node.uuid}) - LOG.error(error_msg) - raise exception.RedfishError(error_msg) - - boot_override = system.boot.get('enabled') - if not boot_override: - error_msg = (_('Cannot change boot mode on node %(node)s ' - 'because its boot source override is not set.') % - {'node': task.node.uuid}) - LOG.error(error_msg) - raise exception.RedfishError(error_msg) - try: - system.set_system_boot_source( - boot_device, - enabled=boot_override, - mode=BOOT_MODE_MAP_REV[mode]) + system.set_system_boot_options(mode=BOOT_MODE_MAP_REV[mode]) except sushy.exceptions.SushyError as e: error_msg = (_('Setting boot mode to %(mode)s ' @@ -485,8 +466,9 @@ '%(error)s', {'node': task.node.uuid, 'error': e}) try: - if (component in (None, components.DISK) and - system.simple_storage and system.simple_storage.drives): + if (component in (None, components.DISK) + and 
system.simple_storage + and system.simple_storage.drives): indicators[components.DISK] = { drive.uuid: properties for drive in system.simple_storage.drives @@ -530,8 +512,9 @@ INDICATOR_MAP_REV[state]) return - elif (component == components.DISK and - system.simple_storage and system.simple_storage.drives): + elif (component == components.DISK + and system.simple_storage + and system.simple_storage.drives): for drive in system.simple_storage.drives: if drive.uuid == indicator: drive.set_indicator_led( @@ -581,8 +564,9 @@ if chassis.uuid == indicator: return INDICATOR_MAP[chassis.indicator_led] - if (component == components.DISK and - system.simple_storage and system.simple_storage.drives): + if (component == components.DISK + and system.simple_storage + and system.simple_storage.drives): for drive in system.simple_storage.drives: if drive.uuid == indicator: return INDICATOR_MAP[drive.indicator_led] diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/redfish/utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/redfish/utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -173,7 +173,7 @@ 'auth_type': auth_type, 'node_uuid': node.uuid} if root_prefix: - sushy_params['root_prefix'] = root_prefix + sushy_params['root_prefix'] = root_prefix return sushy_params @@ -223,8 +223,8 @@ if CONF.redfish.connection_cache_size: self.__class__._sessions[self._session_key] = conn - if (len(self.__class__._sessions) > - CONF.redfish.connection_cache_size): + if (len(self.__class__._sessions) + > CONF.redfish.connection_cache_size): self._expire_oldest_session() return conn diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/snmp.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/snmp.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/snmp.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/snmp.py 2020-04-10 17:06:41.000000000 +0000 @@ -404,7 +404,7 @@ def memoized(self, node_info): hashable_node_info = frozenset((key, val) for key, val in node_info.items() - if key is not 'outlet') + if key != 'outlet') if hashable_node_info not in _memoized: _memoized[hashable_node_info] = f(self) return _memoized[hashable_node_info] @@ -420,7 +420,7 @@ hashable_node_info = ( frozenset((key, val) for key, val in self.snmp_info.items() - if key is not 'outlet') + if key != 'outlet') ) del _memoized[hashable_node_info] self.driver = self._get_pdu_driver(self.snmp_info) @@ -808,8 +808,8 @@ system_id = self.oid_enterprise + getattr(obj, 'system_id') - if (system_id in drivers_map and - drivers_map[system_id] is not obj): + if (system_id in drivers_map + and drivers_map[system_id] is not obj): raise exception.InvalidParameterValue(_( "SNMPDriverAuto: duplicate driver system ID prefix " "%(system_id)s") % {'system_id': system_id}) @@ -858,6 +858,7 @@ def _fetch_driver(self): return self.client.get(self.SYS_OBJ_OID) + # A dictionary of supported drivers keyed by snmp_driver attribute DRIVER_CLASSES = { 'apc': SNMPDriverAPCMasterSwitch, @@ -953,23 +954,23 @@ if 'priv_protocol' not in snmp_info: snmp_info['priv_protocol'] = snmp_priv_protocols['des'] - if ('priv_protocol' in snmp_info and - 'auth_protocol' not in snmp_info): + if ('priv_protocol' in snmp_info + and 'auth_protocol' not in snmp_info): raise 
exception.MissingParameterValue(_( "SNMPPowerDriver: SNMPv3 privacy requires authentication. " "Please add `driver_info/auth_protocol` property to node " "%(node)s configuration.") % {'node': node.uuid}) - if ('auth_protocol' in snmp_info and - 'auth_key' not in snmp_info): + if ('auth_protocol' in snmp_info + and 'auth_key' not in snmp_info): raise exception.MissingParameterValue(_( "SNMPPowerDriver: missing SNMPv3 authentication key while " "`driver_info/snmp_auth_protocol` is present. Please " "add `driver_info/snmp_auth_key` to node %(node)s " "configuration.") % {'node': node.uuid}) - if ('priv_protocol' in snmp_info and - 'priv_key' not in snmp_info): + if ('priv_protocol' in snmp_info + and 'priv_key' not in snmp_info): raise exception.MissingParameterValue(_( "SNMPPowerDriver: missing SNMPv3 privacy key while " "`driver_info/snmp_priv_protocol` is present. Please " diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/storage/external.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/storage/external.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/modules/storage/external.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/modules/storage/external.py 2020-04-10 17:06:41.000000000 +0000 @@ -36,12 +36,12 @@ raise exception(msg) if (not self.should_write_image(task) - and not common_pxe_utils.is_ipxe_enabled(task)): - msg = _("The [pxe]/ipxe_enabled option must " - "be set to True to support network " - "booting to an iSCSI volume or the boot " - "interface must be set to ``ipxe``.") - _fail_validation(task, msg) + and not common_pxe_utils.is_ipxe_enabled(task)): + msg = _("The [pxe]/ipxe_enabled option must " + "be set to True to support network " + "booting to an iSCSI volume or the boot " + "interface must be set to ``ipxe``.") + _fail_validation(task, msg) def get_properties(self): return {} diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/raid_config_schema.json ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/raid_config_schema.json --- ironic-14.0.1~git2020032415.de2d907fc/ironic/drivers/raid_config_schema.json 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/drivers/raid_config_schema.json 2020-04-10 17:06:41.000000000 +0000 @@ -57,8 +57,10 @@ "description": "Controller to use for this logical disk. If not specified, the driver will choose a suitable RAID controller on the bare metal node. Optional." }, "physical_disks": { - "type": "array", - "items": { "type": "string" }, + "anyOf": [ + {"type": "array", "items": { "type": "string" }}, + {"type": "array", "items": { "type": "object" }, "minItems": 2} + ], "description": "The physical disks to use for this logical disk. If not specified, the driver will choose suitable physical disks to use. Optional." } }, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/hacking/checks.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/hacking/checks.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/hacking/checks.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/hacking/checks.py 2020-04-10 17:06:41.000000000 +0000 @@ -14,6 +14,8 @@ import re +from hacking import core + # N323: Found use of _() without explicit import of _! 
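For context on the two checks.py hunks around this point: they move ironic's hacking check from the old `factory(register)` registration hook to the `@core.flake8ext` decorator that hacking 2.x exposes for flake8 3.x-style plugins (local plugins are then declared to flake8 itself, typically via a `[flake8:local-plugins]` section, rather than through `factory`). A minimal, hedged sketch of the pattern; the check name, the code N999 and the driver at the bottom are hypothetical, only `hacking.core.flake8ext` is taken from this diff:

    # Sketch of a hacking/flake8 logical-line check (assumes hacking>=2.0).
    from hacking import core

    @core.flake8ext
    def check_no_print(logical_line):
        # flake8 plugin protocol: yield an (offset, message) pair per hit.
        if 'print(' in logical_line:
            yield 0, 'N999: found print() call in library code'

    if __name__ == '__main__':
        # The decorator tags and returns the function, so it can also be
        # exercised directly, outside of flake8.
        print(list(check_no_print("print('hello')")))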
@@ -29,6 +31,7 @@ custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") +@core.flake8ext def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function @@ -43,13 +46,9 @@ # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass - elif (underscore_import_check.match(logical_line) or - custom_underscore_check.match(logical_line)): + elif (underscore_import_check.match(logical_line) + or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) - elif (translated_log.match(logical_line) or - string_translation.match(logical_line)): + elif (translated_log.match(logical_line) + or string_translation.match(logical_line)): yield(0, "N323: Found use of _() without explicit import of _!") - - -def factory(register): - register(check_explicit_underscore_import) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/objects/node.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/objects/node.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/objects/node.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/objects/node.py 2020-04-10 17:06:41.000000000 +0000 @@ -74,7 +74,8 @@ # Version 1.31: Add allocation_id field # Version 1.32: Add description field # Version 1.33: Add retired and retired_reason fields - VERSION = '1.33' + # Version 1.34: Add lessee field + VERSION = '1.34' dbapi = db_api.get_instance() @@ -159,6 +160,7 @@ 'vendor_interface': object_fields.StringField(nullable=True), 'traits': object_fields.ObjectField('TraitList', nullable=True), 'owner': object_fields.StringField(nullable=True), + 'lessee': object_fields.StringField(nullable=True), 'description': object_fields.StringField(nullable=True), 'retired': objects.fields.BooleanField(nullable=True), 'retired_reason': object_fields.StringField(nullable=True), @@ -428,8 +430,8 @@ for attr_name in ('last_error', 'maintenance_reason'): attr_value = getattr(self, attr_name, '') - if (attr_value and isinstance(attr_value, str) and - len(attr_value) > CONF.log_in_db_max_size): + if (attr_value and isinstance(attr_value, str) + and len(attr_value) > CONF.log_in_db_max_size): LOG.info('Truncating too long %s to %s characters for node %s', attr_name, CONF.log_in_db_max_size, self.uuid) setattr(self, attr_name, @@ -602,6 +604,8 @@ should be set to None (or removed). Version 1.33: retired was added. For versions prior to this, it should be set to False (or removed). + Version 1.34: lessee was added. For versions prior to this, it should + be set to None or removed. :param target_version: the desired version of the object :param remove_unavailable_fields: True to remove fields that are @@ -616,7 +620,7 @@ ('bios_interface', 24), ('fault', 25), ('automated_clean', 28), ('protected_reason', 29), ('owner', 30), ('allocation_id', 31), ('description', 32), - ('retired_reason', 33)] + ('retired_reason', 33), ('lessee', 34)] for name, minor in fields: self._adjust_field_to_version(name, None, target_version, 1, minor, remove_unavailable_fields) @@ -675,6 +679,7 @@ 'storage_interface': ('node', 'storage_interface'), 'vendor_interface': ('node', 'vendor_interface'), 'owner': ('node', 'owner'), + 'lessee': ('node', 'lessee'), 'power_state': ('node', 'power_state'), 'properties': ('node', 'properties'), 'protected': ('node', 'protected'), @@ -706,7 +711,8 @@ # Version 1.12: Add node owner field. # Version 1.13: Add description field. # Version 1.14: Add retired and retired_reason fields exposed via API. 
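# (Hedged aside: these lockstep payload version bumps exist so that pinned
# peers can keep consuming objects and notifications during rolling
# upgrades. Per the Node object docstring above, downgrading a 1.34 node to
# 1.33 nulls the new field; the loop shown earlier in this diff does roughly:
#
#     for name, minor in [..., ('retired_reason', 33), ('lessee', 34)]:
#         self._adjust_field_to_version(name, None, target_version,
#                                       1, minor, remove_unavailable_fields)
#
# so 'lessee' is reset to None, or removed, for any target below 1.34.)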
- VERSION = '1.14' + # Version 1.15: Add node lessee field. + VERSION = '1.15' fields = { 'clean_step': object_fields.FlexibleDictField(nullable=True), 'conductor_group': object_fields.StringField(nullable=True), @@ -737,6 +743,7 @@ 'vendor_interface': object_fields.StringField(nullable=True), 'name': object_fields.StringField(nullable=True), 'owner': object_fields.StringField(nullable=True), + 'lessee': object_fields.StringField(nullable=True), 'power_state': object_fields.StringField(nullable=True), 'properties': object_fields.FlexibleDictField(nullable=True), 'protected': object_fields.BooleanField(nullable=True), @@ -793,7 +800,8 @@ # Version 1.12: Parent NodePayload version 1.12 # Version 1.13: Parent NodePayload version 1.13 # Version 1.14: Parent NodePayload version 1.14 - VERSION = '1.14' + # Version 1.15: Parent NodePayload version 1.15 + VERSION = '1.15' fields = { # "to_power" indicates the future target_power_state of the node. A @@ -848,7 +856,8 @@ # Version 1.12: Parent NodePayload version 1.12 # Version 1.13: Parent NodePayload version 1.13 # Version 1.14: Parent NodePayload version 1.14 - VERSION = '1.14' + # Version 1.15: Parent NodePayload version 1.15 + VERSION = '1.15' fields = { 'from_power': object_fields.StringField(nullable=True) @@ -887,7 +896,8 @@ # Version 1.12: Parent NodePayload version 1.12 # Version 1.13: Parent NodePayload version 1.13 # Version 1.14: Parent NodePayload version 1.14 - VERSION = '1.14' + # Version 1.15: Parent NodePayload version 1.15 + VERSION = '1.15' SCHEMA = dict(NodePayload.SCHEMA, **{'instance_info': ('node', 'instance_info')}) @@ -933,7 +943,8 @@ # Version 1.10: Parent NodePayload version 1.12 # Version 1.11: Parent NodePayload version 1.13 # Version 1.12: Parent NodePayload version 1.14 - VERSION = '1.12' + # Version 1.13: Parent NodePayload version 1.15 + VERSION = '1.13' SCHEMA = dict(NodePayload.SCHEMA, **{'instance_info': ('node', 'instance_info'), diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_deploy_template.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_deploy_template.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_deploy_template.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_deploy_template.py 2020-04-10 17:06:41.000000000 +0000 @@ -655,7 +655,7 @@ def test_add_root_non_existent(self, mock_save): patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}] self._test_update_bad_request( - mock_save, patch, "Adding a new attribute \(/foo\)") + mock_save, patch, "Adding a new attribute \\(/foo\\)") def test_add_too_high_index_step_fail(self, mock_save): step = { diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_expose.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_expose.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_expose.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_expose.py 2020-04-10 17:06:41.000000000 +0000 @@ -47,21 +47,23 @@ module_path = os.path.abspath(sys.modules[module].__file__) machinery.SourceFileLoader(uuidutils.generate_uuid(), module_path).load_module() + expected_calls = [ + 'api_utils.check_node_policy_and_retrieve', + 'api_utils.check_list_policy', + 'api_utils.check_multiple_node_policies_and_retrieve', + 
'self._get_node_and_topic', + 'api_utils.check_port_policy_and_retrieve', + 'api_utils.check_port_list_policy', + 'self._authorize_patch_and_get_node', + ] for func in self.exposed_methods: src = inspect.getsource(func) self.assertTrue( - ('api_utils.check_node_policy_and_retrieve' in src) or - ('api_utils.check_list_policy' in src) or - ('api_utils.check_multiple_node_policies_and_retrieve' in - src) or - ('self._get_node_and_topic' in src) or - ('api_utils.check_port_policy_and_retrieve' in src) or - ('api_utils.check_port_list_policy' in src) or - ('policy.authorize' in src and - 'context.to_policy_values' in src), - 'no policy check found in in exposed ' - 'method %s' % func) + any(call in src for call in expected_calls) + or ('policy.authorize' in src + and 'context.to_policy_values' in src), + 'no policy check found in exposed method %s' % func) def test_chassis_api_policy(self): self._test('ironic.api.controllers.v1.chassis') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_node.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_node.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_node.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_node.py 2020-04-10 17:06:41.000000000 +0000 @@ -137,6 +137,7 @@ self.assertNotIn('owner', data['nodes'][0]) self.assertNotIn('retired', data['nodes'][0]) self.assertNotIn('retired_reason', data['nodes'][0]) + self.assertNotIn('lessee', data['nodes'][0]) def test_get_one(self): node = obj_utils.create_test_node(self.context, @@ -179,6 +180,7 @@ self.assertIn('protected', data) self.assertIn('protected_reason', data) self.assertIn('owner', data) + self.assertIn('lessee', data) self.assertNotIn('allocation_id', data) self.assertIn('allocation_uuid', data) @@ -384,6 +386,23 @@ self.assertTrue(data['retired']) self.assertEqual('warranty expired', data['retired_reason']) + def test_node_lessee_hidden_in_lower_version(self): + self._test_node_field_hidden_in_lower_version('lessee', + '1.64', '1.65') + + def test_node_lessee_null_field(self): + node = obj_utils.create_test_node(self.context, lessee=None) + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: '1.65'}) + self.assertIsNone(data['lessee']) + + def test_node_lessee_present(self): + node = obj_utils.create_test_node(self.context, + lessee="some-lucky-project") + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: '1.65'}) + self.assertEqual(data['lessee'], "some-lucky-project") + def test_get_one_custom_fields(self): node = obj_utils.create_test_node(self.context, chassis_id=self.chassis.id) @@ -590,6 +609,14 @@ headers={api_base.Version.string: '1.51'}) self.assertIn('description', response) + def test_get_lessee_field(self): + node = obj_utils.create_test_node(self.context, + lessee='some-lucky-project') + fields = 'lessee' + response = self.get_json('/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: '1.65'}) + self.assertIn('lessee', response) + def test_get_with_allocation(self): allocation = obj_utils.create_test_allocation(self.context) node = obj_utils.create_test_node(self.context, @@ -650,6 +677,7 @@ self.assertIn('protected', data['nodes'][0]) self.assertIn('protected_reason', data['nodes'][0]) self.assertIn('owner', data['nodes'][0]) + self.assertIn('lessee', data['nodes'][0]) # never expose the chassis_id
self.assertNotIn('chassis_id', data['nodes'][0]) self.assertNotIn('allocation_id', data['nodes'][0]) @@ -687,6 +715,7 @@ self.assertIn('protected', data['nodes'][0]) self.assertIn('protected_reason', data['nodes'][0]) self.assertIn('owner', data['nodes'][0]) + self.assertIn('lessee', data['nodes'][0]) for field in api_utils.V31_FIELDS: self.assertIn(field, data['nodes'][0]) # never expose the chassis_id @@ -764,14 +793,14 @@ self.assertEqual(http_client.FORBIDDEN, response.status_int) @mock.patch.object(policy, 'authorize', spec=True) - def test_detail_list_all_forbid_owner_proj_mismatch(self, mock_authorize): + def test_detail_list_all_forbid_project_mismatch(self, mock_authorize): def mock_authorize_function(rule, target, creds): if rule == 'baremetal:node:list_all': raise exception.HTTPForbidden(resource='fake') return True mock_authorize.side_effect = mock_authorize_function - response = self.get_json('/nodes/detail?owner=54321', + response = self.get_json('/nodes/detail?project=54321', expect_errors=True, headers={ api_base.Version.string: '1.50', @@ -788,17 +817,27 @@ mock_authorize.side_effect = mock_authorize_function nodes = [] - for id in range(5): + for id in range(3): node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid(), owner='12345') nodes.append(node.uuid) + for id in range(3): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='12345') + nodes.append(node.uuid) for id in range(2): node = obj_utils.create_test_node(self.context, - uuid=uuidutils.generate_uuid()) + uuid=uuidutils.generate_uuid(), + owner='54321') + for id in range(2): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='54321') data = self.get_json('/nodes/detail', headers={ - api_base.Version.string: '1.50', + api_base.Version.string: '1.65', 'X-Project-Id': '12345'}) self.assertEqual(len(nodes), len(data['nodes'])) @@ -1005,14 +1044,14 @@ self.assertEqual(http_client.FORBIDDEN, response.status_int) @mock.patch.object(policy, 'authorize', spec=True) - def test_many_list_all_forbid_owner_proj_mismatch(self, mock_authorize): + def test_many_list_all_forbid_project_mismatch(self, mock_authorize): def mock_authorize_function(rule, target, creds): if rule == 'baremetal:node:list_all': raise exception.HTTPForbidden(resource='fake') return True mock_authorize.side_effect = mock_authorize_function - response = self.get_json('/nodes?owner=54321', + response = self.get_json('/nodes?project=54321', expect_errors=True, headers={ api_base.Version.string: '1.50', @@ -1029,17 +1068,27 @@ mock_authorize.side_effect = mock_authorize_function nodes = [] - for id in range(5): + for id in range(3): node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid(), owner='12345') nodes.append(node.uuid) + for id in range(3): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='12345') + nodes.append(node.uuid) for id in range(2): node = obj_utils.create_test_node(self.context, - uuid=uuidutils.generate_uuid()) + uuid=uuidutils.generate_uuid(), + owner='54321') + for id in range(2): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='54321') data = self.get_json('/nodes', headers={ - api_base.Version.string: '1.50', + api_base.Version.string: '1.65', 'X-Project-Id': '12345'}) self.assertEqual(len(nodes), len(data['nodes'])) @@ -1956,6 +2005,36 @@ self.assertIn(node2.uuid, uuids) self.assertNotIn(node1.uuid, uuids) + def 
test_get_nodes_by_lessee(self): + node1 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='project1') + node2 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='project2') + + for base_url in ('/nodes', '/nodes/detail'): + data = self.get_json(base_url + '?lessee=project1', + headers={api_base.Version.string: "1.65"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node1.uuid, uuids) + self.assertNotIn(node2.uuid, uuids) + data = self.get_json(base_url + '?lessee=project2', + headers={api_base.Version.string: "1.65"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node2.uuid, uuids) + self.assertNotIn(node1.uuid, uuids) + + def test_get_nodes_by_lessee_not_allowed(self): + for url in ('/nodes?lessee=project1', + '/nodes/detail?lessee=project1'): + response = self.get_json( + url, headers={api_base.Version.string: "1.64"}, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + self.assertTrue(response.json['error_message']) + def test_get_console_information(self): node = obj_utils.create_test_node(self.context) expected_console_info = {'test': 'test-data'} @@ -3474,6 +3553,33 @@ self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) + def test_update_lessee(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.65'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/lessee', + 'value': 'new-project', + 'op': 'replace'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_lessee_old_api(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.64'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/lessee', + 'value': 'new-project', + 'op': 'replace'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + def test_patch_allocation_forbidden(self): node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid()) @@ -4359,6 +4465,25 @@ self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) + def test_create_node_lessee(self): + ndict = test_api_utils.post_get_test_node(lessee='project') + response = self.post_json('/nodes', ndict, + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/nodes/%s' % ndict['uuid'], + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual('project', result['lessee']) + + def test_create_node_lessee_old_api_version(self): + headers = {api_base.Version.string: '1.64'} + ndict = test_api_utils.post_get_test_node(lessee='project') + response = self.post_json('/nodes', ndict, headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + class TestDelete(test_api_base.BaseApiTest): @@ -6071,15 +6196,15 
@@ headers={api_base.Version.string: self.version}) expected_json = [ - {u'created_at': ret['bios'][0]['created_at'], - u'updated_at': ret['bios'][0]['updated_at'], - u'links': [ - {u'href': u'http://localhost/v1/nodes/' + self.node.uuid + - '/bios/virtualization', u'rel': u'self'}, - {u'href': u'http://localhost/nodes/' + self.node.uuid + - '/bios/virtualization', u'rel': u'bookmark'}], u'name': - u'virtualization', u'value': u'on'}] - self.assertEqual({u'bios': expected_json}, ret) + {'created_at': ret['bios'][0]['created_at'], + 'updated_at': ret['bios'][0]['updated_at'], + 'links': [ + {'href': 'http://localhost/v1/nodes/%s/bios/virtualization' + % self.node.uuid, 'rel': 'self'}, + {'href': 'http://localhost/nodes/%s/bios/virtualization' + % self.node.uuid, 'rel': 'bookmark'}], + 'name': 'virtualization', 'value': 'on'}] + self.assertEqual({'bios': expected_json}, ret) def test_get_all_bios_fails_with_bad_version(self): ret = self.get_json('/nodes/%s/bios' % self.node.uuid, @@ -6092,15 +6217,15 @@ headers={api_base.Version.string: self.version}) expected_json = { - u'virtualization': { - u'created_at': ret['virtualization']['created_at'], - u'updated_at': ret['virtualization']['updated_at'], - u'links': [ - {u'href': u'http://localhost/v1/nodes/' + self.node.uuid + - '/bios/virtualization', u'rel': u'self'}, - {u'href': u'http://localhost/nodes/' + self.node.uuid + - '/bios/virtualization', u'rel': u'bookmark'}], - u'name': u'virtualization', u'value': u'on'}} + 'virtualization': { + 'created_at': ret['virtualization']['created_at'], + 'updated_at': ret['virtualization']['updated_at'], + 'links': [ + {'href': 'http://localhost/v1/nodes/%s/bios/virtualization' + % self.node.uuid, 'rel': 'self'}, + {'href': 'http://localhost/nodes/%s/bios/virtualization' + % self.node.uuid, 'rel': 'bookmark'}], + 'name': 'virtualization', 'value': 'on'}} self.assertEqual(expected_json, ret) def test_get_one_bios_fails_with_bad_version(self): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_port.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_port.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_port.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_port.py 2020-04-10 17:06:41.000000000 +0000 @@ -185,6 +185,15 @@ self.assertFalse(mock_allow_portgroup.called) mock_allow_physnet.assert_called_once_with() + def test__check_allowed_port_fields_local_link_connection_none_type( + self, mock_allow_port, mock_allow_portgroup, mock_allow_physnet): + mock_allow_port.return_value = True + mock_allow_physnet.return_value = True + self.assertIsNone( + self.controller._check_allowed_port_fields( + {'local_link_connection': None})) + mock_allow_port.assert_called_once_with() + class TestListPorts(test_api_base.BaseApiTest): @@ -1246,6 +1255,59 @@ self.assertTrue(response.json['error_message']) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + def test_add_local_link_connection_network_type(self, mock_upd): + response = self.patch_json( + '/ports/%s' % self.port.uuid, + [{'path': '/local_link_connection/network_type', + 'value': 'unmanaged', 'op': 'add'}], + headers={api_base.Version.string: '1.64'}) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual( + 'unmanaged', + response.json['local_link_connection']['network_type']) +
self.assertTrue(mock_upd.called) + + kargs = mock_upd.call_args[0][2] + self.assertEqual('unmanaged', + kargs.local_link_connection['network_type']) + + def test_add_local_link_connection_network_type_old_api(self, mock_upd): + response = self.patch_json( + '/ports/%s' % self.port.uuid, + [{'path': '/local_link_connection/network_type', + 'value': 'unmanaged', 'op': 'add'}], + headers={api_base.Version.string: '1.63'}, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + + def test_remove_local_link_connection_network_type(self, mock_upd): + llc = {'network_type': 'unmanaged'} + port = obj_utils.create_test_port(self.context, + node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + address='bb:bb:bb:bb:bb:bb', + local_link_connection=llc) + llc.pop('network_type') + response = self.patch_json( + '/ports/%s' % port.uuid, + [{'path': '/local_link_connection/network_type', 'op': 'remove'}], + headers={api_base.Version.string: '1.64'}) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertTrue(mock_upd.called) + self.assertEqual(llc, response.json['local_link_connection']) + + def test_remove_local_link_connection_network_type_old_api(self, mock_upd): + response = self.patch_json( + '/ports/%s' % self.port.uuid, + [{'path': '/local_link_connection/network_type', 'op': 'remove'}], + headers={api_base.Version.string: '1.63'}, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + def test_set_pxe_enabled_false_old_api(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/pxe_enabled', @@ -2261,6 +2323,26 @@ response = self.post_json('/ports', pdict, headers=headers, expect_errors=True) self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + self.assertFalse(mock_create.called) + + def test_create_port_with_network_type_in_llc(self, mock_create): + pdict = post_get_test_port( + local_link_connection={'network_type': 'unmanaged'}) + response = self.post_json('/ports', pdict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, + 'test-topic') + + def test_create_port_with_network_type_in_llc_old_api_version( + self, mock_create): + headers = {api_base.Version.string: '1.63'} + pdict = post_get_test_port( + local_link_connection={'network_type': 'unmanaged'}) + response = self.post_json('/ports', pdict, headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) self.assertFalse(mock_create.called) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_ramdisk.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_ramdisk.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_ramdisk.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_ramdisk.py 2020-04-10 17:06:41.000000000 +0000 @@ -62,7 +62,7 @@ 
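# (Hedged aside on the test_ramdisk.py hunks below: the ramdisk config
# payload handed out at lookup grows an 'agent_token_required' key next to
# the existing 'agent_token'. The expected False presumably mirrors an
# ironic option left at its default in tests; the option name here is an
# assumption, e.g.:
#
#     self.config(require_agent_token=True)  # would flip the expected value
# )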
self.mock_get_node_with_token.return_value = node def _check_config(self, data): - expected_metrics = { + expected_config = { 'metrics': { 'backend': 'statsd', 'prepend_host': CONF.metrics.agent_prepend_host, @@ -76,9 +76,10 @@ 'statsd_port': CONF.metrics_statsd.agent_statsd_port }, 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout, - 'agent_token': mock.ANY + 'agent_token': mock.ANY, + 'agent_token_required': False, } - self.assertEqual(expected_metrics, data['config']) + self.assertEqual(expected_config, data['config']) self.assertIsNotNone(data['config']['agent_token']) self.assertNotEqual('******', data['config']['agent_token']) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_types.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_types.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_types.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_types.py 2020-04-10 17:06:41.000000000 +0000 @@ -365,6 +365,23 @@ self.assertFalse(v.validate_for_smart_nic(value)) self.assertRaises(exception.Invalid, v.validate, value) + def test_local_link_connection_net_type_unmanaged(self): + v = types.locallinkconnectiontype + value = {'network_type': 'unmanaged'} + self.assertItemsEqual(value, v.validate(value)) + + def test_local_link_connection_net_type_unmanaged_combine_ok(self): + v = types.locallinkconnectiontype + value = {'network_type': 'unmanaged', + 'switch_id': '0a:1b:2c:3d:4e:5f', + 'port_id': 'rep0-0'} + self.assertItemsEqual(value, v.validate(value)) + + def test_local_link_connection_net_type_invalid(self): + v = types.locallinkconnectiontype + value = {'network_type': 'invalid'} + self.assertRaises(exception.Invalid, v.validate, value) + @mock.patch("ironic.api.request", mock.Mock(version=mock.Mock(minor=10))) class TestVifType(base.TestCase): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/api/controllers/v1/test_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/api/controllers/v1/test_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -803,6 +803,7 @@ self.valid_node_uuid = uuidutils.generate_uuid() self.node = test_api_utils.post_get_test_node() self.node['owner'] = '12345' + self.node['lessee'] = '54321' @mock.patch.object(api, 'request', spec_set=["context", "version"]) @mock.patch.object(policy, 'authorize', spec=True) @@ -813,10 +814,11 @@ mock_pr.context.to_policy_values.return_value = {} utils.check_owner_policy( - 'node', 'fake_policy', self.node['owner'] + 'node', 'fake_policy', self.node['owner'], self.node['lessee'] ) mock_authorize.assert_called_once_with( - 'fake_policy', {'node.owner': '12345'}, {}) + 'fake_policy', + {'node.owner': '12345', 'node.lessee': '54321'}, {}) @mock.patch.object(api, 'request', spec_set=["context", "version"]) @mock.patch.object(policy, 'authorize', spec=True) @@ -832,7 +834,7 @@ utils.check_owner_policy, 'node', 'fake-policy', - self.node['owner'] + self.node ) @@ -842,6 +844,7 @@ self.valid_node_uuid = uuidutils.generate_uuid() self.node = test_api_utils.post_get_test_node() self.node['owner'] = '12345' + self.node['lessee'] = '54321' @mock.patch.object(api, 'request', spec_set=["context", "version"]) 
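# (Hedged reconstruction, based on the assertions below: check_owner_policy
# now receives the lessee as well and folds both projects into the policy
# target. Illustrative only, not the verbatim helper:
#
#     def check_owner_policy(object_type, policy_name, owner, lessee=None):
#         cdict = api.request.context.to_policy_values()
#         target = dict(cdict)
#         target[object_type + '.owner'] = owner
#         target[object_type + '.lessee'] = lessee
#         policy.authorize(policy_name, target, cdict)
# )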
@mock.patch.object(policy, 'authorize', spec=True) @@ -860,7 +863,8 @@ mock_grn.assert_called_once_with(self.valid_node_uuid) mock_grnws.assert_not_called() mock_authorize.assert_called_once_with( - 'fake_policy', {'node.owner': '12345'}, {}) + 'fake_policy', + {'node.owner': '12345', 'node.lessee': '54321'}, {}) self.assertEqual(self.node, rpc_node) @mock.patch.object(api, 'request', spec_set=["context", "version"]) @@ -880,7 +884,8 @@ mock_grn.assert_not_called() mock_grnws.assert_called_once_with(self.valid_node_uuid) mock_authorize.assert_called_once_with( - 'fake_policy', {'node.owner': '12345'}, {}) + 'fake_policy', + {'node.owner': '12345', 'node.lessee': '54321'}, {}) self.assertEqual(self.node, rpc_node) @mock.patch.object(api, 'request', spec_set=["context"]) @@ -1022,6 +1027,7 @@ self.valid_node_uuid = uuidutils.generate_uuid() self.node = test_api_utils.post_get_test_node() self.node['owner'] = '12345' + self.node['lessee'] = '54321' @mock.patch.object(utils, 'check_node_policy_and_retrieve') @mock.patch.object(utils, 'check_owner_policy') @@ -1037,7 +1043,7 @@ mock_cnpar.assert_called_once_with('fake_policy_1', self.valid_node_uuid, False) mock_cop.assert_called_once_with( - 'node', 'fake_policy_2', '12345') + 'node', 'fake_policy_2', '12345', '54321') self.assertEqual(self.node, rpc_node) @mock.patch.object(utils, 'check_node_policy_and_retrieve') @@ -1075,7 +1081,7 @@ mock_cnpar.assert_called_once_with('fake_policy_1', self.valid_node_uuid, False) mock_cop.assert_called_once_with( - 'node', 'fake_policy_2', '12345') + 'node', 'fake_policy_2', '12345', '54321') class TestCheckListPolicy(base.TestCase): @@ -1190,6 +1196,7 @@ self.valid_port_uuid = uuidutils.generate_uuid() self.node = test_api_utils.post_get_test_node() self.node['owner'] = '12345' + self.node['lessee'] = '54321' self.port = objects.Port(self.context, node_id=42) @mock.patch.object(api, 'request', spec_set=["context", "version"]) @@ -1211,7 +1218,9 @@ self.valid_port_uuid) mock_ngbi.assert_called_once_with(mock_pr.context, 42) mock_authorize.assert_called_once_with( - 'fake_policy', {'node.owner': '12345'}, {}) + 'fake_policy', + {'node.owner': '12345', 'node.lessee': '54321'}, + {}) self.assertEqual(self.port, rpc_port) self.assertEqual(self.node, rpc_node) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_driver_factory.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_driver_factory.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_driver_factory.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_driver_factory.py 2020-04-10 17:06:41.000000000 +0000 @@ -385,8 +385,8 @@ return [fake.FakeVendorB, fake.FakeVendorA] -OPTIONAL_INTERFACES = (drivers_base.BareDriver().optional_interfaces + - ['vendor']) +OPTIONAL_INTERFACES = (drivers_base.BareDriver().optional_interfaces + + ['vendor']) class HardwareTypeLoadTestCase(db_base.DbTestCase): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_images.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_images.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_images.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_images.py 2020-04-10 17:06:41.000000000 +0000 @@ -413,6 +413,21 @@ options) self.assertEqual(expected_cfg, cfg) + @mock.patch.object(images, 'os', autospec=True) + def test__read_dir(self, mock_os): 
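# (Hedged sketch of the helper this new test exercises, reconstructed from
# the expected mapping asserted below rather than copied from
# ironic/common/images.py:
#
#     def _read_dir(root, prefix=''):
#         result = {}
#         for entry in os.listdir(root):
#             path = os.path.join(root, entry)
#             rel = os.path.join(prefix, entry) if prefix else entry
#             if os.path.isdir(path):
#                 result.update(_read_dir(path, rel))
#             else:
#                 result[path] = rel
#         return result
# )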
+ mock_os.path.join = os.path.join + mock_os.path.isdir.side_effect = (False, True, False) + mock_os.listdir.side_effect = [['a', 'b'], ['c']] + + file_info = images._read_dir('/mnt') + + expected = { + '/mnt/a': 'a', + '/mnt/b/c': 'b/c' + } + + self.assertEqual(expected, file_info) + @mock.patch.object(os.path, 'relpath', autospec=True) @mock.patch.object(os, 'walk', autospec=True) @mock.patch.object(utils, 'mount', autospec=True) @@ -749,8 +764,8 @@ params = ['root=UUID=root-uuid', 'kernel-params'] create_isolinux_mock.assert_called_once_with( 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', - deploy_iso='tmpdir/deploy_iso-uuid', esp_image=None, - kernel_params=params) + deploy_iso='tmpdir/deploy_iso-uuid', + esp_image=None, kernel_params=params, configdrive=None) @mock.patch.object(images, 'create_esp_image_for_uefi', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -778,7 +793,7 @@ create_isolinux_mock.assert_called_once_with( 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', deploy_iso=None, esp_image='tmpdir/efiboot-uuid', - kernel_params=params) + kernel_params=params, configdrive=None) @mock.patch.object(images, 'create_esp_image_for_uefi', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -805,8 +820,8 @@ params = ['root=UUID=root-uuid', 'kernel-params'] create_isolinux_mock.assert_called_once_with( 'output_file', 'tmpdir/kernel-href', 'tmpdir/ramdisk-href', - deploy_iso='tmpdir/deploy_iso-href', esp_image=None, - kernel_params=params) + deploy_iso='tmpdir/deploy_iso-href', + esp_image=None, kernel_params=params, configdrive=None) @mock.patch.object(images, 'create_esp_image_for_uefi', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -834,7 +849,7 @@ create_isolinux_mock.assert_called_once_with( 'output_file', 'tmpdir/kernel-href', 'tmpdir/ramdisk-href', deploy_iso=None, esp_image='tmpdir/efiboot-href', - kernel_params=params) + kernel_params=params, configdrive=None) @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -847,25 +862,27 @@ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid', 'ramdisk-uuid', 'deploy_iso-uuid', - 'efiboot-uuid', 'root-uuid', 'kernel-params', - 'bios') + 'efiboot-uuid', 'root-uuid', + 'kernel-params', 'bios', 'configdrive') fetch_images_mock.assert_any_call( 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') fetch_images_mock.assert_any_call( 'ctx', 'ramdisk-uuid', 'tmpdir/ramdisk-uuid') + fetch_images_mock.assert_any_call( + 'ctx', 'configdrive', 'tmpdir/configdrive') + # Note (NobodyCam): the original assert asserted that fetch_images # was not called with parameters, this did not # work, So I instead assert that there were only # Two calls to the mock validating the above # asserts. 
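# (Hedged aside: the changes below track create_boot_iso() gaining a
# configdrive argument. The configdrive reference is now fetched alongside
# the kernel and ramdisk, hence three fetch calls instead of two, and is
# passed through as configdrive='tmpdir/configdrive'. An illustrative call,
# with the positional order taken from the test above:
#
#     images.create_boot_iso('ctx', 'output_file', 'kernel-uuid',
#                            'ramdisk-uuid', 'deploy_iso-uuid',
#                            'efiboot-uuid', 'root-uuid',
#                            'kernel-params', 'bios', 'configdrive')
# )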
- self.assertEqual(2, fetch_images_mock.call_count) + self.assertEqual(3, fetch_images_mock.call_count) params = ['root=UUID=root-uuid', 'kernel-params'] - create_isolinux_mock.assert_called_once_with('output_file', - 'tmpdir/kernel-uuid', - 'tmpdir/ramdisk-uuid', - params) + create_isolinux_mock.assert_called_once_with( + 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', + kernel_params=params, configdrive='tmpdir/configdrive') @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -879,19 +896,20 @@ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid', 'ramdisk-uuid', 'deploy_iso-uuid', - 'efiboot-uuid', 'root-uuid', 'kernel-params', - None) + 'efiboot-uuid', 'root-uuid', + 'kernel-params', None, 'http://configdrive') fetch_images_mock.assert_any_call( 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') fetch_images_mock.assert_any_call( 'ctx', 'ramdisk-uuid', 'tmpdir/ramdisk-uuid') + fetch_images_mock.assert_any_call( + 'ctx', 'http://configdrive', 'tmpdir/configdrive') params = ['root=UUID=root-uuid', 'kernel-params'] - create_isolinux_mock.assert_called_once_with('output_file', - 'tmpdir/kernel-uuid', - 'tmpdir/ramdisk-uuid', - params) + create_isolinux_mock.assert_called_once_with( + 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', + configdrive='tmpdir/configdrive', kernel_params=params) @mock.patch.object(image_service, 'get_image_service', autospec=True) def test_get_glance_image_properties_no_such_prop(self, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -145,6 +145,104 @@ self.assertEqual(0, mock_sauth.call_count) +class TestNeutronConfClient(base.TestCase): + + def setUp(self): + super(TestNeutronConfClient, self).setUp() + # NOTE(pas-ha) register keystoneauth dynamic options manually + plugin = kaloading.get_plugin_loader('password') + opts = kaloading.get_auth_plugin_conf_options(plugin) + self.cfg_fixture.register_opts(opts, group='neutron') + self.config(retries=2, + group='neutron') + self.config(username='test-admin-user', + project_name='test-admin-tenant', + password='test-admin-password', + auth_url='test-auth-uri', + auth_type='password', + interface='internal', + service_type='network', + timeout=10, + group='neutron') + # force-reset the global session object + neutron._NEUTRON_SESSION = None + self.context = context.RequestContext(global_request_id='global') + + @mock.patch('keystoneauth1.loading.load_auth_from_conf_options', + autospec=True, return_value=mock.sentinel.auth) + @mock.patch('keystoneauth1.loading.load_session_from_conf_options', + autospec=True, return_value=mock.sentinel.session) + @mock.patch('ironic.common.keystone.get_endpoint', autospec=True, + return_value='neutron_url') + @mock.patch.object(client.Client, "__init__", return_value=None, + autospec=True) + def test_get_neutron_conf_client(self, mock_client, mock_get_endpoint, + mock_session, mock_auth): + neutron._get_conf_client(self.context) + mock_client.assert_called_once_with(mock.ANY, # this is 'self' + session=mock.sentinel.session, + auth=mock.sentinel.auth, retries=2, + endpoint_override='neutron_url', + global_request_id='global', + 
timeout=45) + + +class TestUpdateNeutronPort(base.TestCase): + def setUp(self): + super(TestUpdateNeutronPort, self).setUp() + + self.uuid = uuidutils.generate_uuid() + self.context = context.RequestContext() + self.update_body = {'port': {}} + + @mock.patch.object(neutron, 'get_client', autospec=True) + @mock.patch.object(neutron, '_get_conf_client', autospec=True) + def test_update_neutron_port(self, conf_client_mock, client_mock): + client_mock.return_value.show_port.return_value = {'port': {}} + conf_client_mock.return_value.update_port.return_value = {'port': {}} + + neutron.update_neutron_port(self.context, self.uuid, self.update_body) + + client_mock.assert_called_once_with(context=self.context) + client_mock.return_value.show_port.assert_called_once_with(self.uuid) + conf_client_mock.assert_called_once_with(self.context) + conf_client_mock.return_value.update_port.assert_called_once_with( + self.uuid, self.update_body) + + @mock.patch.object(neutron, 'get_client', autospec=True) + @mock.patch.object(neutron, '_get_conf_client', autospec=True) + def test_update_neutron_port_with_client(self, conf_client_mock, + client_mock): + client_mock.return_value.show_port.return_value = {'port': {}} + conf_client_mock.return_value.update_port.return_value = {'port': {}} + client = mock.Mock() + client.update_port.return_value = {'port': {}} + + neutron.update_neutron_port(self.context, self.uuid, self.update_body, + client) + + self.assertFalse(client_mock.called) + self.assertFalse(conf_client_mock.called) + client.update_port.assert_called_once_with(self.uuid, self.update_body) + + @mock.patch.object(neutron, 'get_client', autospec=True) + @mock.patch.object(neutron, '_get_conf_client', autospec=True) + def test_update_neutron_port_with_exception(self, conf_client_mock, + client_mock): + client_mock.return_value.show_port.side_effect = \ + neutron_client_exc.NeutronClientException + conf_client_mock.return_value.update_port.return_value = {'port': {}} + + self.assertRaises( + neutron_client_exc.NeutronClientException, + neutron.update_neutron_port, + self.context, self.uuid, self.update_body) + + client_mock.assert_called_once_with(context=self.context) + client_mock.return_value.show_port.assert_called_once_with(self.uuid) + self.assertFalse(conf_client_mock.called) + + class TestNeutronNetworkActions(db_base.DbTestCase): _CLIENT_ID = ( @@ -161,7 +259,8 @@ )] # Very simple neutron port representation self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00', - 'mac_address': '52:54:00:cf:2d:32'} + 'mac_address': '52:54:00:cf:2d:32', + 'fixed_ips': []} self.network_uuid = uuidutils.generate_uuid() self.client_mock = mock.Mock() self.client_mock.list_agents.return_value = { @@ -171,7 +270,8 @@ patcher.start() self.addCleanup(patcher.stop) - def _test_add_ports_to_network(self, is_client_id, + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) + def _test_add_ports_to_network(self, update_mock, is_client_id, security_groups=None, add_all_ports=False): # Ports will be created only if pxe_enabled is True @@ -191,14 +291,18 @@ extra['client-id'] = self._CLIENT_ID port.extra = extra port.save() - expected_body = { + expected_create_body = { 'port': { 'network_id': self.network_uuid, 'admin_state_up': True, 'binding:vnic_type': 'baremetal', + 'device_id': self.node.uuid, + } + } + expected_update_body = { + 'port': { 'device_owner': 'baremetal:none', 'binding:host_id': self.node.uuid, - 'device_id': self.node.uuid, 'mac_address': port.address, 'binding:profile': { 
'local_link_information': [port.local_link_connection] @@ -206,18 +310,20 @@ } } if security_groups: - expected_body['port']['security_groups'] = security_groups + expected_create_body['port']['security_groups'] = security_groups if is_client_id: - expected_body['port']['extra_dhcp_opts'] = ( + expected_create_body['port']['extra_dhcp_opts'] = ( [{'opt_name': '61', 'opt_value': self._CLIENT_ID}]) if add_all_ports: - expected_body2 = copy.deepcopy(expected_body) - expected_body2['port']['mac_address'] = port2.address - expected_body2['fixed_ips'] = [] + expected_create_body2 = copy.deepcopy(expected_create_body) + expected_update_body2 = copy.deepcopy(expected_update_body) + expected_update_body2['port']['mac_address'] = port2.address + expected_create_body2['fixed_ips'] = [] neutron_port2 = {'id': '132f871f-eaec-4fed-9475-0d54465e0f01', - 'mac_address': port2.address} + 'mac_address': port2.address, + 'fixed_ips': []} self.client_mock.create_port.side_effect = [ {'port': self.neutron_port}, {'port': neutron_port2} @@ -235,12 +341,21 @@ task, self.network_uuid, security_groups=security_groups) self.assertEqual(expected, ports) if add_all_ports: - calls = [mock.call(expected_body), - mock.call(expected_body2)] - self.client_mock.create_port.assert_has_calls(calls) + create_calls = [mock.call(expected_create_body), + mock.call(expected_create_body2)] + update_calls = [ + mock.call(self.context, self.neutron_port['id'], + expected_update_body), + mock.call(self.context, neutron_port2['id'], + expected_update_body2)] + self.client_mock.create_port.assert_has_calls(create_calls) + update_mock.assert_has_calls(update_calls) else: self.client_mock.create_port.assert_called_once_with( - expected_body) + expected_create_body) + update_mock.assert_called_once_with( + self.context, self.neutron_port['id'], + expected_update_body) def test_add_ports_to_network(self): self._test_add_ports_to_network(is_client_id=False, @@ -259,6 +374,38 @@ self._test_add_ports_to_network(is_client_id=False, security_groups=sg_ids) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) + def test__add_ip_addresses_for_ipv6_stateful(self, mock_update): + subnet_id = uuidutils.generate_uuid() + self.client_mock.show_subnet.return_value = { + 'subnet': { + 'id': subnet_id, + 'ip_version': 6, + 'ipv6_address_mode': 'dhcpv6-stateful' + } + } + self.neutron_port['fixed_ips'] = [{'subnet_id': subnet_id, + 'ip_address': '2001:db8::1'}] + + expected_body = { + 'port': { + 'fixed_ips': [ + {'subnet_id': subnet_id, 'ip_address': '2001:db8::1'}, + {'subnet_id': subnet_id}, + {'subnet_id': subnet_id}, + {'subnet_id': subnet_id} + ] + } + } + + neutron._add_ip_addresses_for_ipv6_stateful( + self.context, + {'port': self.neutron_port}, + self.client_mock + ) + mock_update.assert_called_once_with( + self.context, self.neutron_port['id'], expected_body) + def test_verify_sec_groups(self): sg_ids = [] for i in range(2): @@ -339,20 +486,25 @@ def test_add_ports_with_client_id_to_network(self): self._test_add_ports_to_network(is_client_id=True) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'validate_port_info', autospec=True) - def test_add_ports_to_network_instance_uuid(self, vpi_mock): + def test_add_ports_to_network_instance_uuid(self, vpi_mock, update_mock): self.node.instance_uuid = uuidutils.generate_uuid() self.node.network_interface = 'neutron' self.node.save() port = self.ports[0] - expected_body = { + expected_create_body = { 'port': { 'network_id': self.network_uuid, 
'admin_state_up': True, 'binding:vnic_type': 'baremetal', + 'device_id': self.node.instance_uuid, + } + } + expected_update_body = { + 'port': { 'device_owner': 'baremetal:none', 'binding:host_id': self.node.uuid, - 'device_id': self.node.instance_uuid, 'mac_address': port.address, 'binding:profile': { 'local_link_information': [port.local_link_connection] @@ -366,7 +518,11 @@ with task_manager.acquire(self.context, self.node.uuid) as task: ports = neutron.add_ports_to_network(task, self.network_uuid) self.assertEqual(expected, ports) - self.client_mock.create_port.assert_called_once_with(expected_body) + self.client_mock.create_port.assert_called_once_with( + expected_create_body) + update_mock.assert_called_once_with(self.context, + self.neutron_port['id'], + expected_update_body) self.assertTrue(vpi_mock.called) @mock.patch.object(neutron, 'rollback_ports', autospec=True) @@ -381,8 +537,9 @@ self.network_uuid) rollback_mock.assert_called_once_with(task, self.network_uuid) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'LOG', autospec=True) - def test_add_network_create_some_ports_fail(self, log_mock): + def test_add_network_create_some_ports_fail(self, log_mock, update_mock): object_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), @@ -655,6 +812,19 @@ self.assertFalse(res) self.assertTrue(log_mock.error.called) + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_validate_port_info_neutron_with_network_type_unmanaged( + self, log_mock): + self.node.network_interface = 'neutron' + self.node.save() + llc = {'network_type': 'unmanaged'} + port = object_utils.create_test_port( + self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:33', local_link_connection=llc) + res = neutron.validate_port_info(self.node, port) + self.assertTrue(res) + self.assertFalse(log_mock.warning.called) + def test_validate_agent_up(self): self.client_mock.list_agents.return_value = { 'agents': [{'alive': True}]} @@ -743,10 +913,11 @@ neutron.wait_for_port_status, self.client_mock, 'port_id', 'DOWN') + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron, 'wait_for_port_status', autospec=True) def test_add_smartnic_port_to_network( - self, wait_port_mock, wait_agent_mock): + self, wait_port_mock, wait_agent_mock, update_mock): # Ports will be created only if pxe_enabled is True self.node.network_interface = 'neutron' self.node.save() @@ -764,14 +935,18 @@ port.is_smartnic = True port.save() - expected_body = { + expected_create_body = { 'port': { 'network_id': self.network_uuid, 'admin_state_up': True, 'binding:vnic_type': 'smart-nic', + 'device_id': self.node.uuid, + } + } + expected_update_body = { + 'port': { 'device_owner': 'baremetal:none', 'binding:host_id': port.local_link_connection['hostname'], - 'device_id': self.node.uuid, 'mac_address': port.address, 'binding:profile': { 'local_link_information': [port.local_link_connection] @@ -787,7 +962,9 @@ ports = neutron.add_ports_to_network(task, self.network_uuid) self.assertEqual(expected, ports) self.client_mock.create_port.assert_called_once_with( - expected_body) + expected_create_body) + update_mock.assert_called_once_with( + self.context, self.neutron_port['id'], expected_update_body) wait_agent_mock.assert_called_once_with( self.client_mock, 'hostname') wait_port_mock.assert_called_once_with( @@ -890,18 +1067,20 
@@ super(TestUpdatePortAddress, self).setUp() self.context = context.RequestContext() - def test_update_port_address(self, mock_client): + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) + def test_update_port_address(self, mock_unp, mock_client): address = 'fe:54:00:77:07:d9' port_id = 'fake-port-id' expected = {'port': {'mac_address': address}} mock_client.return_value.show_port.return_value = {} neutron.update_port_address(port_id, address, context=self.context) - mock_client.return_value.update_port.assert_called_once_with(port_id, - expected) + mock_unp.assert_called_once_with(self.context, port_id, expected) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'unbind_neutron_port', autospec=True) - def test_update_port_address_with_binding(self, mock_unp, mock_client): + def test_update_port_address_with_binding(self, mock_unp, mock_update, + mock_client): address = 'fe:54:00:77:07:d9' port_id = 'fake-port-id' @@ -909,19 +1088,22 @@ 'port': {'binding:host_id': 'host', 'binding:profile': 'foo'}} - calls = [mock.call(port_id, {'port': {'mac_address': address}}), - mock.call(port_id, {'port': {'binding:host_id': 'host', + calls = [mock.call(self.context, port_id, + {'port': {'mac_address': address}}), + mock.call(self.context, port_id, + {'port': {'binding:host_id': 'host', 'binding:profile': 'foo'}})] neutron.update_port_address(port_id, address, context=self.context) mock_unp.assert_called_once_with( port_id, - client=mock_client(context=self.context), context=self.context) - mock_client.return_value.update_port.assert_has_calls(calls) + mock_update.assert_has_calls(calls) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'unbind_neutron_port', autospec=True) - def test_update_port_address_without_binding(self, mock_unp, mock_client): + def test_update_port_address_without_binding(self, mock_unp, mock_update, + mock_client): address = 'fe:54:00:77:07:d9' port_id = 'fake-port-id' expected = {'port': {'mac_address': address}} @@ -930,7 +1112,7 @@ neutron.update_port_address(port_id, address, context=self.context) self.assertFalse(mock_unp.called) - mock_client.return_value.update_port.assert_any_call(port_id, expected) + mock_update.assert_any_call(self.context, port_id, expected) def test_update_port_address_show_failed(self, mock_client): address = 'fe:54:00:77:07:d9' @@ -957,17 +1139,18 @@ port_id, address, context=self.context) mock_unp.assert_called_once_with( port_id, - client=mock_client(context=self.context), context=self.context) self.assertFalse(mock_client.return_value.update_port.called) + @mock.patch.object(neutron, 'update_neutron_port', autospec=True) @mock.patch.object(neutron, 'unbind_neutron_port', autospec=True) def test_update_port_address_with_exception(self, mock_unp, + mock_update, mock_client): address = 'fe:54:00:77:07:d9' port_id = 'fake-port-id' mock_client.return_value.show_port.return_value = {} - mock_client.return_value.update_port.side_effect = ( + mock_update.side_effect = ( neutron_client_exc.NeutronClientException()) self.assertRaises(exception.FailedToUpdateMacOnPort, @@ -975,14 +1158,14 @@ port_id, address, context=self.context) -@mock.patch.object(neutron, 'get_client', autospec=True) +@mock.patch.object(neutron, 'update_neutron_port', autospec=True) class TestUnbindPort(base.TestCase): def setUp(self): super(TestUnbindPort, self).setUp() self.context = context.RequestContext() - def test_unbind_neutron_port_client_passed(self, mock_client): + 
def test_unbind_neutron_port_client_passed(self, mock_unp): port_id = 'fake-port-id' body_unbind = { 'port': { @@ -995,20 +1178,18 @@ 'mac_address': None } } + client = mock.MagicMock() update_calls = [ - mock.call(port_id, body_unbind), - mock.call(port_id, body_reset_mac) + mock.call(self.context, port_id, body_unbind, client), + mock.call(self.context, port_id, body_reset_mac, client) ] - neutron.unbind_neutron_port(port_id, - mock_client(context=self.context), - context=self.context) - self.assertEqual(1, mock_client.call_count) - mock_client.return_value.update_port.assert_has_calls(update_calls) + neutron.unbind_neutron_port(port_id, client, context=self.context) + self.assertEqual(2, mock_unp.call_count) + mock_unp.assert_has_calls(update_calls) @mock.patch.object(neutron, 'LOG', autospec=True) - def test_unbind_neutron_port_failure(self, mock_log, mock_client): - mock_client.return_value.update_port.side_effect = ( - neutron_client_exc.NeutronClientException()) + def test_unbind_neutron_port_failure(self, mock_log, mock_unp): + mock_unp.side_effect = (neutron_client_exc.NeutronClientException()) body = { 'port': { 'binding:host_id': '', @@ -1018,12 +1199,10 @@ port_id = 'fake-port-id' self.assertRaises(exception.NetworkError, neutron.unbind_neutron_port, port_id, context=self.context) - mock_client.assert_called_once_with(context=self.context) - mock_client.return_value.update_port.assert_called_once_with(port_id, - body) + mock_unp.assert_called_once_with(self.context, port_id, body, None) mock_log.exception.assert_called_once() - def test_unbind_neutron_port(self, mock_client): + def test_unbind_neutron_port(self, mock_unp): port_id = 'fake-port-id' body_unbind = { 'port': { @@ -1037,17 +1216,16 @@ } } update_calls = [ - mock.call(port_id, body_unbind), - mock.call(port_id, body_reset_mac) + mock.call(self.context, port_id, body_unbind, None), + mock.call(self.context, port_id, body_reset_mac, None) ] neutron.unbind_neutron_port(port_id, context=self.context) - mock_client.assert_called_once_with(context=self.context) - mock_client.return_value.update_port.assert_has_calls(update_calls) + mock_unp.assert_has_calls(update_calls) @mock.patch.object(neutron, 'LOG', autospec=True) - def test_unbind_neutron_port_not_found(self, mock_log, mock_client): + def test_unbind_neutron_port_not_found(self, mock_log, mock_unp): port_id = 'fake-port-id' - mock_client.return_value.update_port.side_effect = ( + mock_unp.side_effect = ( neutron_client_exc.PortNotFoundClient()) body = { 'port': { @@ -1056,9 +1234,7 @@ } } neutron.unbind_neutron_port(port_id, context=self.context) - mock_client.assert_called_once_with(context=self.context) - mock_client.return_value.update_port.assert_called_once_with(port_id, - body) + mock_unp.assert_called_once_with(self.context, port_id, body, None) mock_log.info.assert_called_once_with('Port %s was not found while ' 'unbinding.', port_id) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_policy.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_policy.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_policy.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_policy.py 2020-04-10 17:06:41.000000000 +0000 @@ -69,6 +69,19 @@ self.assertTrue(policy.check('is_node_owner', target, c1)) self.assertFalse(policy.check('is_node_owner', target, c2)) + def test_is_node_lessee(self): + c1 = {'project_id': '1234', + 'project_name': 'demo', + 
'project_domain_id': 'default'} + c2 = {'project_id': '5678', + 'project_name': 'demo', + 'project_domain_id': 'default'} + target = dict.copy(c1) + target['node.lessee'] = '1234' + + self.assertTrue(policy.check('is_node_lessee', target, c1)) + self.assertFalse(policy.check('is_node_lessee', target, c2)) + def test_is_allocation_owner(self): c1 = {'project_id': '1234', 'project_name': 'demo', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_pxe_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_pxe_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_pxe_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_pxe_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -1072,8 +1072,8 @@ self.node.uuid) with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info, - ipxe_enabled=True) + pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info, + ipxe_enabled=True) mock_ensure_tree.assert_called_with(expected_path) mock_fetch_image.assert_called_once_with(self.context, mock.ANY, list(fake_pxe_info.values()), @@ -1177,10 +1177,9 @@ ramdisk_label)) } - if (whle_dsk_img - or deploy_utils.get_boot_option(self.node) == 'local'): - ramdisk = 'no_ramdisk' - kernel = 'no_kernel' + if whle_dsk_img or deploy_utils.get_boot_option(self.node) == 'local': + ramdisk = 'no_ramdisk' + kernel = 'no_kernel' else: image_info.update({ 'kernel': ('kernel_id', @@ -1461,10 +1460,9 @@ kernel = os.path.join(http_url, self.node.uuid, 'kernel') ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk') - if (whle_dsk_img - or deploy_utils.get_boot_option(self.node) == 'local'): - ramdisk = 'no_ramdisk' - kernel = 'no_kernel' + if whle_dsk_img or deploy_utils.get_boot_option(self.node) == 'local': + ramdisk = 'no_ramdisk' + kernel = 'no_kernel' else: image_info.update({ 'kernel': ('kernel_id', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_raid.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_raid.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_raid.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_raid.py 2020-04-10 17:06:41.000000000 +0000 @@ -35,6 +35,11 @@ raid.validate_configuration( raid_config, raid_config_schema=self.schema) + def test_validate_configuration_okay_software(self): + raid_config = json.loads(raid_constants.RAID_SW_CONFIG_OKAY) + raid.validate_configuration( + raid_config, raid_config_schema=self.schema) + def test_validate_configuration_no_logical_disk(self): self.assertRaises(exception.InvalidParameterValue, raid.validate_configuration, @@ -138,6 +143,13 @@ self.assertRaises(exception.InvalidParameterValue, raid.validate_configuration, raid_config, + raid_config_schema=self.schema) + + def test_validate_configuration_too_few_physical_disks(self): + raid_config = json.loads(raid_constants.RAID_CONFIG_TOO_FEW_PHY_DISKS) + self.assertRaises(exception.InvalidParameterValue, + raid.validate_configuration, + raid_config, raid_config_schema=self.schema) def test_validate_configuration_additional_property(self): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/common/test_utils.py 2020-03-24 
19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/common/test_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -535,6 +535,15 @@ self.params, is_file=False)) + def test_render_with_quotes(self): + """test jinja2 autoescaping for everything is disabled """ + self.expected = '"spam" ham' + self.params = {'foo': '"spam"', 'bar': 'ham'} + self.assertEqual(self.expected, + utils.render_template(self.template, + self.params, + is_file=False)) + @mock.patch('ironic.common.utils.jinja2.FileSystemLoader', autospec=True) def test_render_file(self, jinja_fsl_mock): path = '/path/to/template.j2' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_allocations.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_allocations.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_allocations.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_allocations.py 2020-04-10 17:06:41.000000000 +0000 @@ -350,14 +350,20 @@ @mock.patch.object(task_manager, 'acquire', autospec=True, side_effect=task_manager.acquire) - def test_nodes_filtered_out_owner(self, mock_acquire): - # Owner does not match + def test_nodes_filtered_out_project(self, mock_acquire): + # Owner and lessee do not match obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid(), owner='54321', resource_class='x-large', power_state='power off', provision_state='available') + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + lessee='54321', + resource_class='x-large', + power_state='power off', + provision_state='available') allocation = obj_utils.create_test_allocation(self.context, resource_class='x-large', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_base_manager.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_base_manager.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_base_manager.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_base_manager.py 2020-04-10 17:06:41.000000000 +0000 @@ -521,3 +521,26 @@ self.assertIsNone(test_node.last_error) self.assertTrue(log_mock.warning.called) self.assertFalse(mock_notify.called) + + +class MiscTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase): + def setUp(self): + super(MiscTestCase, self).setUp() + self._start_service() + + def test__fail_transient_state(self): + node = obj_utils.create_test_node(self.context, + driver='fake-hardware', + provision_state=states.DEPLOYING) + self.service._fail_transient_state(states.DEPLOYING, 'unknown err') + node.refresh() + self.assertEqual(states.DEPLOYFAIL, node.provision_state) + + def test__fail_transient_state_maintenance(self): + node = obj_utils.create_test_node(self.context, + driver='fake-hardware', + maintenance=True, + provision_state=states.DEPLOYING) + self.service._fail_transient_state(states.DEPLOYING, 'unknown err') + node.refresh() + self.assertEqual(states.DEPLOYFAIL, node.provision_state) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_deployments.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_deployments.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_deployments.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_deployments.py 
2020-04-10 17:06:41.000000000 +0000 @@ -341,6 +341,25 @@ self.assertEqual(fake_deploy_steps, task.node.driver_internal_info['deploy_steps']) + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True) + def test__do_node_deploy_driver_raises_error_old(self, mock_deploy): + # Mocking FakeDeploy.deploy before starting the service, causes + # it not to be a deploy_step. + self._start_service() + node = obj_utils.create_test_node(self.context, driver='fake-hardware', + provision_state=states.DEPLOYING, + target_provision_state=states.ACTIVE) + task = task_manager.TaskManager(self.context, node.uuid) + + self.assertRaises(exception.InstanceDeployFailure, + deployments.do_node_deploy, task, + self.service.conductor.id) + node.refresh() + self.assertEqual(states.DEPLOYFAIL, node.provision_state) + self.assertEqual(states.ACTIVE, node.target_provision_state) + self.assertIsNotNone(node.last_error) + self.assertFalse(mock_deploy.called) + @mgr_utils.mock_record_keepalive class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_manager.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_manager.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_manager.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_manager.py 2020-04-10 17:06:41.000000000 +0000 @@ -2652,6 +2652,7 @@ call_args=(self.service._do_node_rescue, task), err_handler=conductor_utils.spawn_rescue_error_handler) self.assertIn('rescue_password', task.node.instance_info) + self.assertIn('hashed_rescue_password', task.node.instance_info) self.assertNotIn('agent_url', task.node.driver_internal_info) def test_do_node_rescue_invalid_state(self): @@ -2665,6 +2666,7 @@ self.context, node.uuid, "password") node.refresh() self.assertNotIn('rescue_password', node.instance_info) + self.assertNotIn('hashed_rescue_password', node.instance_info) self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0]) def _test_do_node_rescue_when_validate_fail(self, mock_validate): @@ -2679,7 +2681,7 @@ self.service.do_node_rescue, self.context, node.uuid, "password") node.refresh() - self.assertNotIn('rescue_password', node.instance_info) + self.assertNotIn('hashed_rescue_password', node.instance_info) # Compare true exception hidden by @messaging.expected_exceptions self.assertEqual(exception.InstanceRescueFailure, exc.exc_info[0]) @@ -2714,10 +2716,11 @@ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue') def test__do_node_rescue_returns_rescuewait(self, mock_rescue): self._start_service() - node = obj_utils.create_test_node(self.context, driver='fake-hardware', - provision_state=states.RESCUING, - instance_info={'rescue_password': - 'password'}) + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.RESCUING, + instance_info={'rescue_password': 'password', + 'hashed_rescue_password': '1234'}) with task_manager.TaskManager(self.context, node.uuid) as task: mock_rescue.return_value = states.RESCUEWAIT self.service._do_node_rescue(task) @@ -2725,14 +2728,17 @@ self.assertEqual(states.RESCUEWAIT, node.provision_state) self.assertEqual(states.RESCUE, node.target_provision_state) self.assertIn('rescue_password', node.instance_info) + self.assertIn('hashed_rescue_password', node.instance_info) @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue') def test__do_node_rescue_returns_rescue(self, 
mock_rescue): self._start_service() - node = obj_utils.create_test_node(self.context, driver='fake-hardware', - provision_state=states.RESCUING, - instance_info={'rescue_password': - 'password'}) + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.RESCUING, + instance_info={ + 'rescue_password': 'password', + 'hashed_rescue_password': '1234'}) with task_manager.TaskManager(self.context, node.uuid) as task: mock_rescue.return_value = states.RESCUE self.service._do_node_rescue(task) @@ -2740,15 +2746,18 @@ self.assertEqual(states.RESCUE, node.provision_state) self.assertEqual(states.NOSTATE, node.target_provision_state) self.assertIn('rescue_password', node.instance_info) + self.assertIn('hashed_rescue_password', node.instance_info) @mock.patch.object(manager, 'LOG') @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue') def test__do_node_rescue_errors(self, mock_rescue, mock_log): self._start_service() - node = obj_utils.create_test_node(self.context, driver='fake-hardware', - provision_state=states.RESCUING, - instance_info={'rescue_password': - 'password'}) + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.RESCUING, + instance_info={ + 'rescue_password': 'password', + 'hashed_rescue_password': '1234'}) mock_rescue.side_effect = exception.InstanceRescueFailure( 'failed to rescue') with task_manager.TaskManager(self.context, node.uuid) as task: @@ -2758,6 +2767,7 @@ self.assertEqual(states.RESCUEFAIL, node.provision_state) self.assertEqual(states.RESCUE, node.target_provision_state) self.assertNotIn('rescue_password', node.instance_info) + self.assertNotIn('hashed_rescue_password', node.instance_info) self.assertTrue(node.last_error.startswith('Failed to rescue')) self.assertTrue(mock_log.error.called) @@ -2765,10 +2775,12 @@ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue') def test__do_node_rescue_bad_state(self, mock_rescue, mock_log): self._start_service() - node = obj_utils.create_test_node(self.context, driver='fake-hardware', - provision_state=states.RESCUING, - instance_info={'rescue_password': - 'password'}) + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.RESCUING, + instance_info={ + 'rescue_password': 'password', + 'hashed_rescue_password': '1234'}) mock_rescue.return_value = states.ACTIVE with task_manager.TaskManager(self.context, node.uuid) as task: self.service._do_node_rescue(task) @@ -2776,6 +2788,7 @@ self.assertEqual(states.RESCUEFAIL, node.provision_state) self.assertEqual(states.RESCUE, node.target_provision_state) self.assertNotIn('rescue_password', node.instance_info) + self.assertNotIn('hashed_rescue_password', node.instance_info) self.assertTrue(node.last_error.startswith('Failed to rescue')) self.assertTrue(mock_log.error.called) @@ -6063,6 +6076,7 @@ self.task2 = self._create_task(node=self.node2) self.filters = {'reserved': False, + 'maintenance': False, 'inspection_started_before': 300, 'provision_state': states.INSPECTWAIT} self.columns = ['uuid', 'driver', 'conductor_group'] @@ -7383,11 +7397,11 @@ self.assertEqual(expected_exc, exc.exc_info[0]) def test_update_volume_target_node_not_found(self): - self._test_update_volume_target_exception(exception.NodeNotFound) + self._test_update_volume_target_exception(exception.NodeNotFound) def test_update_volume_target_not_found(self): - self._test_update_volume_target_exception( - exception.VolumeTargetNotFound) + 
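These rescue tests now expect instance_info to carry a hashed_rescue_password alongside the plaintext rescue_password, and expect both keys to be scrubbed when rescue fails or is aborted. A sketch of that bookkeeping, with sha512-crypt as an assumed, purely illustrative hashing scheme:

    # Hedged sketch of the instance_info handling the assertions pin down.
    import crypt

    def store_rescue_password(node, password):
        # Both keys are asserted present after do_node_rescue succeeds.
        node.instance_info['rescue_password'] = password
        node.instance_info['hashed_rescue_password'] = crypt.crypt(
            password, crypt.mksalt(crypt.METHOD_SHA512))

    def wipe_rescue_password(node):
        # Both keys are asserted absent after a rescue failure.
        node.instance_info.pop('rescue_password', None)
        node.instance_info.pop('hashed_rescue_password', None)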
self._test_update_volume_target_exception( + exception.VolumeTargetNotFound) def test_update_volume_target_node_power_on(self): node = obj_utils.create_test_node(self.context, driver='fake-hardware', diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/conductor/test_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/conductor/test_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -1184,17 +1184,20 @@ @mock.patch.object(conductor_utils, 'LOG') def test_spawn_rescue_error_handler_no_worker(self, log_mock): exc = exception.NoFreeConductorWorker() - self.node.instance_info = {'rescue_password': 'pass'} + self.node.instance_info = {'rescue_password': 'pass', + 'hashed_rescue_password': '12'} conductor_utils.spawn_rescue_error_handler(exc, self.node) self.node.save.assert_called_once_with() self.assertIn('No free conductor workers', self.node.last_error) self.assertTrue(log_mock.warning.called) self.assertNotIn('rescue_password', self.node.instance_info) + self.assertNotIn('hashed_rescue_password', self.node.instance_info) @mock.patch.object(conductor_utils, 'LOG') def test_spawn_rescue_error_handler_other_error(self, log_mock): exc = Exception('foo') - self.node.instance_info = {'rescue_password': 'pass'} + self.node.instance_info = {'rescue_password': 'pass', + 'hashed_rescue_password': '12'} conductor_utils.spawn_rescue_error_handler(exc, self.node) self.assertFalse(self.node.save.called) self.assertFalse(log_mock.warning.called) @@ -1686,33 +1689,30 @@ @mock.patch.object(rpcapi.ConductorAPI, 'continue_node_deploy', autospec=True) @mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for', autospec=True) - def test__notify_conductor_resume_operation(self, mock_topic, - mock_rpc_call): + def test_notify_conductor_resume_operation(self, mock_topic, + mock_rpc_call): mock_topic.return_value = 'topic' with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - conductor_utils._notify_conductor_resume_operation( - task, 'deploying', 'continue_node_deploy') + conductor_utils.notify_conductor_resume_operation(task, 'deploy') mock_rpc_call.assert_called_once_with( mock.ANY, task.context, self.node.uuid, topic='topic') - @mock.patch.object(conductor_utils, '_notify_conductor_resume_operation', + @mock.patch.object(conductor_utils, 'notify_conductor_resume_operation', autospec=True) def test_notify_conductor_resume_clean(self, mock_resume): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: conductor_utils.notify_conductor_resume_clean(task) - mock_resume.assert_called_once_with( - task, 'cleaning', 'continue_node_clean') + mock_resume.assert_called_once_with(task, 'clean') - @mock.patch.object(conductor_utils, '_notify_conductor_resume_operation', + @mock.patch.object(conductor_utils, 'notify_conductor_resume_operation', autospec=True) def test_notify_conductor_resume_deploy(self, mock_resume): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: conductor_utils.notify_conductor_resume_deploy(task) - mock_resume.assert_called_once_with( - task, 'deploying', 'continue_node_deploy') + mock_resume.assert_called_once_with(task, 'deploy') @mock.patch.object(time, 'sleep', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) diff -Nru 
ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/sqlalchemy/test_migrations.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/sqlalchemy/test_migrations.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/sqlalchemy/test_migrations.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/sqlalchemy/test_migrations.py 2020-04-10 17:06:41.000000000 +0000 @@ -944,8 +944,8 @@ deploy_template_steps.insert().execute(step) # Query by deploy template ID. result = deploy_template_steps.select( - deploy_template_steps.c.deploy_template_id == - template_id).execute().first() + deploy_template_steps.c.deploy_template_id + == template_id).execute().first() self.assertEqual(template_id, result['deploy_template_id']) self.assertEqual(interface, result['interface']) self.assertEqual(step_name, result['step']) @@ -990,6 +990,11 @@ self.assertFalse(node['retired']) self.assertIsNone(node['retired_reason']) + def _check_b2ad35726bb0(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes.c] + self.assertIn('lessee', col_names) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/test_api.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/test_api.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/test_api.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/test_api.py 2020-04-10 17:06:41.000000000 +0000 @@ -195,10 +195,10 @@ self.dbapi.update_to_latest_versions(self.context, 1)) node = self.dbapi.get_node_by_uuid(orig_node.uuid) chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid) - self.assertTrue(node.version == self.node_old_ver or - chassis.version == self.chassis_old_ver) - self.assertTrue(node.version == self.node_ver or - chassis.version == self.chassis_ver) + self.assertTrue(node.version == self.node_old_ver + or chassis.version == self.chassis_old_ver) + self.assertTrue(node.version == self.node_ver + or chassis.version == self.chassis_ver) def _create_nodes(self, num_nodes): version = self.node_old_ver diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/test_nodes.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/test_nodes.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/test_nodes.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/test_nodes.py 2020-04-10 17:06:41.000000000 +0000 @@ -399,6 +399,35 @@ self.dbapi.get_node_list, filters=filters) + def test_get_node_list_filter_by_project(self): + utils.create_test_node(uuid=uuidutils.generate_uuid()) + node2 = utils.create_test_node( + uuid=uuidutils.generate_uuid(), + owner='project1', + lessee='project2', + ) + node3 = utils.create_test_node( + uuid=uuidutils.generate_uuid(), + owner='project2', + ) + node4 = utils.create_test_node( + uuid=uuidutils.generate_uuid(), + owner='project1', + lessee='project3', + ) + + res = self.dbapi.get_node_list(filters={'project': 'project1'}) + self.assertEqual([node2.id, node4.id], [r.id for r in res]) + + res = self.dbapi.get_node_list(filters={'project': 'project2'}) + self.assertEqual([node2.id, node3.id], [r.id for r in res]) + + res = self.dbapi.get_node_list(filters={'project': 'project3'}) + self.assertEqual([node4.id], [r.id for r in res]) + + res = 
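The new get_node_list 'project' filter exercised in the surrounding test matches a node when the value equals either its owner or its lessee. One plausible SQLAlchemy translation, with the ironic.db.sqlalchemy.models.Node columns assumed:

    # Hedged sketch: owner OR lessee, as the four assertions below require.
    from sqlalchemy import or_

    from ironic.db.sqlalchemy import models

    def _filter_by_project(query, project):
        return query.filter(or_(models.Node.owner == project,
                                models.Node.lessee == project))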
self.dbapi.get_node_list(filters={'project': 'flargle'}) + self.assertEqual([], [r.id for r in res]) + def test_get_node_list_description(self): node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(), description='Hello') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/db/utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/db/utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -226,7 +226,7 @@ 'description': kw.get('description'), 'retired': kw.get('retired', False), 'retired_reason': kw.get('retired_reason', None), - + 'lessee': kw.get('lessee', None), } for iface in drivers_base.ALL_INTERFACES: diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/dhcp/test_neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/dhcp/test_neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/dhcp/test_neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/dhcp/test_neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -48,8 +48,8 @@ dhcp_factory.DHCPFactory._dhcp_provider = None - @mock.patch('ironic.common.neutron.get_client', autospec=True) - def test_update_port_dhcp_opts(self, client_mock): + @mock.patch('ironic.common.neutron.update_neutron_port', autospec=True) + def test_update_port_dhcp_opts(self, update_mock): opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', @@ -63,14 +63,14 @@ with task_manager.acquire(self.context, self.node.uuid) as task: api.provider.update_port_dhcp_opts(port_id, opts, context=task.context) - client_mock.return_value.update_port.assert_called_once_with( - port_id, expected) + update_mock.assert_called_once_with( + task.context, port_id, expected) - @mock.patch('ironic.common.neutron.get_client', autospec=True) - def test_update_port_dhcp_opts_with_exception(self, client_mock): + @mock.patch('ironic.common.neutron.update_neutron_port', autospec=True) + def test_update_port_dhcp_opts_with_exception(self, update_mock): opts = [{}] port_id = 'fake-port-id' - client_mock.return_value.update_port.side_effect = ( + update_mock.side_effect = ( neutron_client_exc.NeutronClientException()) api = dhcp_factory.DHCPFactory() diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/drac/test_raid.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/drac/test_raid.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/drac/test_raid.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/drac/test_raid.py 2020-04-10 17:06:41.000000000 +0000 @@ -727,6 +727,74 @@ self.assertEqual(expected_physical_disk_ids, logical_disks[0]['physical_disks']) + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + def test__validate_volume_size_requested_more_than_actual_size( + self, mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + self.logical_disk = { + 'physical_disks': [ + 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 
'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + 'raid_level': '1+0', 'is_root_volume': True, + 'size_mb': 102400000, + 'controller': 'RAID.Integrated.1-1'} + + self.logical_disks = [self.logical_disk.copy()] + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + processed_logical_disks = drac_raid._validate_volume_size( + self.node, self.node.target_raid_config['logical_disks']) + + self.assertEqual(2287104, processed_logical_disks[0]['size_mb']) + + @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, + autospec=True) + @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) + def test__validate_volume_size_requested_less_than_actual_size( + self, mock_list_physical_disks, mock_get_drac_client): + mock_client = mock.Mock() + mock_get_drac_client.return_value = mock_client + self.logical_disk = { + 'physical_disks': [ + 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + 'raid_level': '1+0', 'is_root_volume': True, + 'size_mb': 204800, + 'controller': 'RAID.Integrated.1-1'} + + self.logical_disks = [self.logical_disk.copy()] + self.target_raid_configuration = {'logical_disks': self.logical_disks} + self.node.target_raid_config = self.target_raid_configuration + self.node.save() + + physical_disks = self._generate_physical_disks() + mock_list_physical_disks.return_value = physical_disks + + processed_logical_disks = drac_raid._validate_volume_size( + self.node, self.node.target_raid_config['logical_disks']) + + self.assertEqual(self.logical_disk, processed_logical_disks[0]) + class DracRaidInterfaceTestCase(test_utils.BaseDracTest): @@ -860,6 +928,9 @@ self.assertEqual(1, mock_change_physical_disk_state.call_count) self.node.refresh() + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) self.assertEqual(next_substep, task.node.driver_internal_info[ 'raid_config_substep']) @@ -938,6 +1009,9 @@ self.assertEqual(1, mock_client.create_virtual_disk.call_count) self.node.refresh() + self.assertEqual(False, + task.node.driver_internal_info[ + 'volume_validation']) self.assertEqual(next_substep, task.node.driver_internal_info[ 'raid_config_substep']) @@ -963,8 +1037,9 @@ mock_change_physical_disk_state.return_value = { 'is_reboot_required': constants.RebootRequired.optional, 'conversion_results': { - 'RAID.Integrated.1-1': {'is_reboot_required': 'optional', - 'is_commit_required': True}}, + 'RAID.Integrated.1-1': { + 'is_reboot_required': constants.RebootRequired.false, + 'is_commit_required': False}}, 'commit_required_ids': ['RAID.Integrated.1-1']} mock_commit_config.return_value = '42' @@ -974,6 +1049,9 @@ task, create_root_volume=False, create_nonroot_volumes=False, 
delete_existing=False) + self.assertEqual(False, + task.node.driver_internal_info[ + 'volume_validation']) self.assertEqual(0, mock_client.create_virtual_disk.call_count) self.assertEqual(0, mock_commit_config.call_count) @@ -1039,6 +1117,9 @@ task, create_root_volume=True, create_nonroot_volumes=False, delete_existing=True) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) mock_commit_config.assert_called_with( task.node, raid_controller='RAID.Integrated.1-1', realtime=True, reboot=False) @@ -1094,6 +1175,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, @@ -1150,6 +1234,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, @@ -1199,6 +1286,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) self.node.refresh() self.assertEqual(['42'], self.node.driver_internal_info['raid_config_job_ids']) @@ -1246,6 +1336,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', @@ -1296,6 +1389,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', @@ -1353,6 +1449,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, @@ -1433,6 +1532,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, @@ -1535,6 +1637,9 @@ task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False) + self.assertEqual(True, + task.node.driver_internal_info[ + 'volume_validation']) # Commits to the controller mock_commit_config.assert_called_with( mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/ibmc/test_power.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/ibmc/test_power.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/ibmc/test_power.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/ibmc/test_power.py 2020-04-10 17:06:41.000000000 +0000 @@ -84,8 +84,8 @@ # Mocks mock_system_get_results = ( - 
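The two _validate_volume_size cases above pin the clamping rule: a request larger than what the selected physical disks can deliver is reduced to the achievable maximum (102400000 becomes 2287104 MB here), while a request that fits passes through unchanged; the neighbouring hunks also show a volume_validation flag being recorded in driver_internal_info. The core rule, as a sketch:

    # Hedged sketch: the size rule implied by the two new drac tests.
    def clamp_volume_size(requested_mb, max_disk_capacity_mb):
        return min(requested_mb, max_disk_capacity_mb)

    assert clamp_volume_size(102400000, 2287104) == 2287104  # first test
    assert clamp_volume_size(204800, 2287104) == 204800      # second test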
[mock.Mock(power_state=transient)] * 3 + - [mock.Mock(power_state=final)]) + [mock.Mock(power_state=transient)] * 3 + + [mock.Mock(power_state=final)]) conn.system.get.side_effect = mock_system_get_results task.driver.power.set_power_state(task, expect_state) @@ -119,8 +119,8 @@ # Mocks mock_system_get_results = ( - [mock.Mock(power_state=transient)] * 5 + - [mock.Mock(power_state=final)]) + [mock.Mock(power_state=transient)] * 5 + + [mock.Mock(power_state=final)]) conn.system.get.side_effect = mock_system_get_results self.assertRaises(exception.PowerStateFailure, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/intel_ipmi/test_intel_ipmi.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/intel_ipmi/test_intel_ipmi.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/intel_ipmi/test_intel_ipmi.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/intel_ipmi/test_intel_ipmi.py 2020-04-10 17:06:41.000000000 +0000 @@ -36,33 +36,33 @@ enabled_vendor_interfaces=['ipmitool', 'no-vendor']) def _validate_interfaces(self, task, **kwargs): - self.assertIsInstance( - task.driver.management, - kwargs.get('management', intel_management.IntelIPMIManagement)) - self.assertIsInstance( - task.driver.power, - kwargs.get('power', ipmitool.IPMIPower)) - self.assertIsInstance( - task.driver.boot, - kwargs.get('boot', pxe.PXEBoot)) - self.assertIsInstance( - task.driver.deploy, - kwargs.get('deploy', iscsi_deploy.ISCSIDeploy)) - self.assertIsInstance( - task.driver.console, - kwargs.get('console', noop.NoConsole)) - self.assertIsInstance( - task.driver.raid, - kwargs.get('raid', noop.NoRAID)) - self.assertIsInstance( - task.driver.vendor, - kwargs.get('vendor', ipmitool.VendorPassthru)) - self.assertIsInstance( - task.driver.storage, - kwargs.get('storage', noop_storage.NoopStorage)) - self.assertIsInstance( - task.driver.rescue, - kwargs.get('rescue', noop.NoRescue)) + self.assertIsInstance( + task.driver.management, + kwargs.get('management', intel_management.IntelIPMIManagement)) + self.assertIsInstance( + task.driver.power, + kwargs.get('power', ipmitool.IPMIPower)) + self.assertIsInstance( + task.driver.boot, + kwargs.get('boot', pxe.PXEBoot)) + self.assertIsInstance( + task.driver.deploy, + kwargs.get('deploy', iscsi_deploy.ISCSIDeploy)) + self.assertIsInstance( + task.driver.console, + kwargs.get('console', noop.NoConsole)) + self.assertIsInstance( + task.driver.raid, + kwargs.get('raid', noop.NoRAID)) + self.assertIsInstance( + task.driver.vendor, + kwargs.get('vendor', ipmitool.VendorPassthru)) + self.assertIsInstance( + task.driver.storage, + kwargs.get('storage', noop_storage.NoopStorage)) + self.assertIsInstance( + task.driver.rescue, + kwargs.get('rescue', noop.NoRescue)) def test_default_interfaces(self): node = obj_utils.create_test_node(self.context, driver='intel-ipmi') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_common.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_common.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_common.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_common.py 2020-04-10 17:06:41.000000000 +0000 @@ -400,22 +400,26 @@ common.get_free_port_like_object, task, self.vif_id, {'physnet2'}) + 
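The iBMC power tests above simulate a slow power transition by handing mock's side_effect a list: three polls return the transient state, the fourth returns the final one. A self-contained illustration of the pattern:

    # Runnable illustration of the side_effect polling idiom used above.
    from unittest import mock

    poller = mock.Mock()
    poller.get.side_effect = (
        [mock.Mock(power_state='transient')] * 3
        + [mock.Mock(power_state='final')])

    states = [poller.get().power_state for _ in range(4)]
    assert states == ['transient', 'transient', 'transient', 'final']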
@mock.patch.object(neutron_common, 'update_neutron_port', autospec=True) @mock.patch.object(neutron_common, 'get_client', autospec=True) - def test_plug_port_to_tenant_network_client(self, mock_gc): + def test_plug_port_to_tenant_network_client(self, mock_gc, mock_update): self.port.internal_info = {common.TENANT_VIF_KEY: self.vif_id} self.port.save() with task_manager.acquire(self.context, self.node.id) as task: common.plug_port_to_tenant_network(task, self.port, client=mock.MagicMock()) self.assertFalse(mock_gc.called) + self.assertTrue(mock_update.called) + @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True) @mock.patch.object(neutron_common, 'get_client', autospec=True) - def test_plug_port_to_tenant_network_no_client(self, mock_gc): + def test_plug_port_to_tenant_network_no_client(self, mock_gc, mock_update): self.port.internal_info = {common.TENANT_VIF_KEY: self.vif_id} self.port.save() with task_manager.acquire(self.context, self.node.id) as task: common.plug_port_to_tenant_network(task, self.port) self.assertTrue(mock_gc.called) + self.assertTrue(mock_update.called) @mock.patch.object(neutron_common, 'get_client', autospec=True) def test_plug_port_to_tenant_network_no_tenant_vif(self, mock_gc): @@ -432,9 +436,10 @@ @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron_common, 'wait_for_port_status', autospec=True) + @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True) @mock.patch.object(neutron_common, 'get_client', autospec=True) def test_plug_port_to_tenant_network_smartnic_port( - self, mock_gc, wait_port_mock, wait_agent_mock): + self, mock_gc, mock_update, wait_port_mock, wait_agent_mock): nclient = mock.MagicMock() mock_gc.return_value = nclient local_link_connection = self.port.local_link_connection @@ -449,6 +454,7 @@ nclient, 'hostname') wait_port_mock.assert_called_once_with( nclient, self.vif_id, 'ACTIVE') + self.assertTrue(mock_update.called) class TestVifPortIDMixin(db_base.DbTestCase): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_flat.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_flat.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_flat.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_flat.py 2020-04-10 17:06:41.000000000 +0000 @@ -170,10 +170,8 @@ self.port.refresh() self.assertNotIn('cleaning_vif_port_id', self.port.internal_info) - @mock.patch.object(neutron, 'get_client') - def test__bind_flat_ports_set_binding_host_id(self, client_mock): - upd_mock = mock.Mock() - client_mock.return_value.update_port = upd_mock + @mock.patch.object(neutron, 'update_neutron_port') + def test__bind_flat_ports_set_binding_host_id(self, update_mock): extra = {'vif_port_id': 'foo'} utils.create_test_port(self.context, node_id=self.node.id, address='52:54:00:cf:2d:33', extra=extra, @@ -183,12 +181,10 @@ 'mac_address': '52:54:00:cf:2d:33'}} with task_manager.acquire(self.context, self.node.id) as task: self.interface._bind_flat_ports(task) - upd_mock.assert_called_once_with('foo', exp_body) + update_mock.assert_called_once_with(self.context, 'foo', exp_body) - @mock.patch.object(neutron, 'get_client') - def test__bind_flat_ports_set_binding_host_id_portgroup(self, client_mock): - upd_mock = mock.Mock() - client_mock.return_value.update_port = upd_mock + @mock.patch.object(neutron, 
'update_neutron_port') + def test__bind_flat_ports_set_binding_host_id_portgroup(self, update_mock): internal_info = {'tenant_vif_port_id': 'foo'} utils.create_test_portgroup( self.context, node_id=self.node.id, internal_info=internal_info, @@ -204,8 +200,9 @@ 'mac_address': '52:54:00:cf:2d:31'}} with task_manager.acquire(self.context, self.node.id) as task: self.interface._bind_flat_ports(task) - upd_mock.assert_has_calls([ - mock.call('bar', exp_body1), mock.call('foo', exp_body2)]) + update_mock.assert_has_calls([ + mock.call(self.context, 'bar', exp_body1), + mock.call(self.context, 'foo', exp_body2)]) @mock.patch.object(neutron, 'unbind_neutron_port') def test__unbind_flat_ports(self, unbind_neutron_port_mock): @@ -234,10 +231,9 @@ [mock.call('foo', context=self.context), mock.call('bar', context=self.context)]) - @mock.patch.object(neutron, 'get_client') - def test__bind_flat_ports_set_binding_host_id_raise(self, client_mock): - client_mock.return_value.update_port.side_effect = \ - (neutron_exceptions.ConnectionFailed()) + @mock.patch.object(neutron, 'update_neutron_port') + def test__bind_flat_ports_set_binding_host_id_raise(self, update_mock): + update_mock.side_effect = (neutron_exceptions.ConnectionFailed()) extra = {'vif_port_id': 'foo'} utils.create_test_port(self.context, node_id=self.node.id, address='52:54:00:cf:2d:33', extra=extra, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_neutron.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_neutron.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/network/test_neutron.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/network/test_neutron.py 2020-04-10 17:06:41.000000000 +0000 @@ -572,10 +572,11 @@ log_mock.error.call_args[0][0]) @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) + @mock.patch.object(neutron_common, 'update_neutron_port') @mock.patch.object(neutron_common, 'get_client') @mock.patch.object(neutron, 'LOG') def test_configure_tenant_networks_multiple_ports_one_vif_id( - self, log_mock, client_mock, wait_agent_mock): + self, log_mock, client_mock, update_mock, wait_agent_mock): expected_body = { 'port': { 'binding:vnic_type': 'baremetal', @@ -585,20 +586,20 @@ 'mac_address': '52:54:00:cf:2d:32' } } - upd_mock = mock.Mock() - client_mock.return_value.update_port = upd_mock with task_manager.acquire(self.context, self.node.id) as task: self.interface.configure_tenant_networks(task) client_mock.assert_called_once_with(context=task.context) - upd_mock.assert_called_once_with(self.port.extra['vif_port_id'], - expected_body) + update_mock.assert_called_once_with(self.context, + self.port.extra['vif_port_id'], + expected_body) @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) + @mock.patch.object(neutron_common, 'update_neutron_port') @mock.patch.object(neutron_common, 'get_client') def test_configure_tenant_networks_update_fail(self, client_mock, + update_mock, wait_agent_mock): - client = client_mock.return_value - client.update_port.side_effect = neutron_exceptions.ConnectionFailed( + update_mock.side_effect = neutron_exceptions.ConnectionFailed( reason='meow') with task_manager.acquire(self.context, self.node.id) as task: self.assertRaisesRegex( @@ -607,12 +608,12 @@ client_mock.assert_called_once_with(context=task.context) @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) + 
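The flat and neutron network-interface tests in this area all assert one consistent port-update shape: VIFs are bound as 'baremetal' VNICs and the body is pushed through update_neutron_port(context, vif_id, body). A sketch of the body builder, with field values and the profile key being illustrative assumptions:

    # Hedged sketch: mirrors the expected_body dicts asserted in these tests.
    def tenant_port_update_body(node_uuid, mac_address, local_link_info=None):
        port = {'binding:vnic_type': 'baremetal',
                'binding:host_id': node_uuid,
                'mac_address': mac_address}
        if local_link_info is not None:
            port['binding:profile'] = {
                'local_link_information': local_link_info}
        return {'port': port}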
@mock.patch.object(neutron_common, 'update_neutron_port') @mock.patch.object(neutron_common, 'get_client') - def _test_configure_tenant_networks(self, client_mock, wait_agent_mock, + def _test_configure_tenant_networks(self, client_mock, update_mock, + wait_agent_mock, is_client_id=False, vif_int_info=False): - upd_mock = mock.Mock() - client_mock.return_value.update_port = upd_mock if vif_int_info: kwargs = {'internal_info': { 'tenant_vif_port_id': uuidutils.generate_uuid()}} @@ -668,9 +669,9 @@ else: portid1 = self.port.extra['vif_port_id'] portid2 = second_port.extra['vif_port_id'] - upd_mock.assert_has_calls( - [mock.call(portid1, port1_body), - mock.call(portid2, port2_body)], + update_mock.assert_has_calls( + [mock.call(self.context, portid1, port1_body), + mock.call(self.context, portid2, port2_body)], any_order=True ) @@ -693,11 +694,12 @@ self._test_configure_tenant_networks(is_client_id=True) @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) + @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True) @mock.patch.object(neutron_common, 'get_client', autospec=True) @mock.patch.object(neutron_common, 'get_local_group_information', autospec=True) def test_configure_tenant_networks_with_portgroups( - self, glgi_mock, client_mock, wait_agent_mock): + self, glgi_mock, client_mock, update_mock, wait_agent_mock): pg = utils.create_test_portgroup( self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32', extra={'vif_port_id': uuidutils.generate_uuid()}) @@ -717,8 +719,6 @@ 'port_id': 'Ethernet1/2', 'switch_info': 'switch2'} ) - upd_mock = mock.Mock() - client_mock.return_value.update_port = upd_mock local_group_info = {'a': 'b'} glgi_mock.return_value = local_group_info expected_body = { @@ -747,9 +747,11 @@ self.interface.configure_tenant_networks(task) client_mock.assert_called_once_with(context=task.context) glgi_mock.assert_called_once_with(task, pg) - upd_mock.assert_has_calls( - [mock.call(self.port.extra['vif_port_id'], call1_body), - mock.call(pg.extra['vif_port_id'], call2_body)] + update_mock.assert_has_calls( + [mock.call(self.context, self.port.extra['vif_port_id'], + call1_body), + mock.call(self.context, pg.extra['vif_port_id'], + call2_body)] ) def test_need_power_on_true(self): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/redfish/test_boot.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/redfish/test_boot.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/redfish/test_boot.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/redfish/test_boot.py 2020-04-10 17:06:41.000000000 +0000 @@ -364,6 +364,7 @@ mock_create_boot_iso.assert_called_once_with( mock.ANY, mock.ANY, 'http://kernel/img', 'http://ramdisk/img', boot_mode='uefi', esp_image_href='http://bootloader/img', + configdrive_href=mock.ANY, kernel_params='nofb nomodeset vga=normal', root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123') @@ -393,6 +394,7 @@ mock_create_boot_iso.assert_called_once_with( mock.ANY, mock.ANY, 'http://kernel/img', 'http://ramdisk/img', boot_mode=None, esp_image_href=None, + configdrive_href=mock.ANY, kernel_params='nofb nomodeset vga=normal', root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123') @@ -416,6 +418,7 @@ mock_create_boot_iso.assert_called_once_with( mock.ANY, mock.ANY, 'http://kernel/img', 'http://ramdisk/img', boot_mode=None, esp_image_href=None, + configdrive_href=mock.ANY, 
kernel_params=kernel_params, root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/redfish/test_management.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/redfish/test_management.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/redfish/test_management.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/redfish/test_management.py 2020-04-10 17:06:41.000000000 +0000 @@ -96,12 +96,12 @@ task.driver.management.set_boot_device(task, target) # Asserts - fake_system.set_system_boot_source.assert_called_once_with( + fake_system.set_system_boot_options.assert_called_once_with( expected, enabled=sushy.BOOT_SOURCE_ENABLED_ONCE) mock_get_system.assert_called_once_with(task.node) # Reset mocks - fake_system.set_system_boot_source.reset_mock() + fake_system.set_system_boot_options.reset_mock() mock_get_system.reset_mock() @mock.patch.object(redfish_utils, 'get_system', autospec=True) @@ -119,19 +119,19 @@ task.driver.management.set_boot_device( task, boot_devices.PXE, persistent=target) - fake_system.set_system_boot_source.assert_called_once_with( + fake_system.set_system_boot_options.assert_called_once_with( sushy.BOOT_SOURCE_TARGET_PXE, enabled=expected) mock_get_system.assert_called_once_with(task.node) # Reset mocks - fake_system.set_system_boot_source.reset_mock() + fake_system.set_system_boot_options.reset_mock() mock_get_system.reset_mock() @mock.patch.object(sushy, 'Sushy', autospec=True) @mock.patch.object(redfish_utils, 'get_system', autospec=True) def test_set_boot_device_fail(self, mock_get_system, mock_sushy): fake_system = mock.Mock() - fake_system.set_system_boot_source.side_effect = ( + fake_system.set_system_boot_options.side_effect = ( sushy.exceptions.SushyError() ) mock_get_system.return_value = fake_system @@ -140,7 +140,7 @@ self.assertRaisesRegex( exception.RedfishError, 'Redfish set boot device', task.driver.management.set_boot_device, task, boot_devices.PXE) - fake_system.set_system_boot_source.assert_called_once_with( + fake_system.set_system_boot_options.assert_called_once_with( sushy.BOOT_SOURCE_TARGET_PXE, enabled=sushy.BOOT_SOURCE_ENABLED_ONCE) mock_get_system.assert_called_once_with(task.node) @@ -183,19 +183,19 @@ task.driver.management.set_boot_mode(task, mode=mode) # Asserts - fake_system.set_system_boot_source.assert_called_once_with( - mock.ANY, enabled=mock.ANY, mode=mode) + fake_system.set_system_boot_options.assert_called_once_with( + mode=mode) mock_get_system.assert_called_once_with(task.node) # Reset mocks - fake_system.set_system_boot_source.reset_mock() + fake_system.set_system_boot_options.reset_mock() mock_get_system.reset_mock() @mock.patch.object(sushy, 'Sushy', autospec=True) @mock.patch.object(redfish_utils, 'get_system', autospec=True) def test_set_boot_mode_fail(self, mock_get_system, mock_sushy): fake_system = mock.Mock() - fake_system.set_system_boot_source.side_effect = ( + fake_system.set_system_boot_options.side_effect = ( sushy.exceptions.SushyError) mock_get_system.return_value = fake_system with task_manager.acquire(self.context, self.node.uuid, @@ -203,8 +203,8 @@ self.assertRaisesRegex( exception.RedfishError, 'Setting boot mode', task.driver.management.set_boot_mode, task, boot_modes.UEFI) - fake_system.set_system_boot_source.assert_called_once_with( - mock.ANY, enabled=mock.ANY, mode=boot_modes.UEFI) + 
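This redfish section tracks two interface changes: create_boot_iso now receives a configdrive_href argument, and the management tests expect sushy's newer set_system_boot_options() in place of set_system_boot_source(). The two call shapes the tests assert, sketched under those assumptions:

    # Hedged sketch of the sushy call shapes the assertions expect.
    import sushy

    def set_boot(system, device=None, persistent=False, mode=None):
        if device is not None:
            # Boot device: positional target plus the enabled frequency.
            enabled = (sushy.BOOT_SOURCE_ENABLED_CONTINUOUS if persistent
                       else sushy.BOOT_SOURCE_ENABLED_ONCE)
            system.set_system_boot_options(device, enabled=enabled)
        if mode is not None:
            # Boot mode: keyword-only, no target argument.
            system.set_system_boot_options(mode=mode)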
fake_system.set_system_boot_options.assert_called_once_with( + mode=boot_modes.UEFI) mock_get_system.assert_called_once_with(task.node) @mock.patch.object(redfish_utils, 'get_system', autospec=True) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent_base.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent_base.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent_base.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent_base.py 2020-04-10 17:06:41.000000000 +0000 @@ -222,7 +222,7 @@ autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, 'reboot_to_instance', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_in_maintenance(self, ncrc_mock, rti_mock, cd_mock): # NOTE(pas-ha) checking only for states that are not noop @@ -253,7 +253,7 @@ autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, 'reboot_to_instance', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_in_maintenance_abort(self, ncrc_mock, rti_mock, cd_mock): @@ -289,7 +289,7 @@ autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, 'reboot_to_instance', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_with_reservation(self, ncrc_mock, rti_mock, cd_mock): # NOTE(pas-ha) checking only for states that are not noop @@ -316,7 +316,7 @@ autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, 'reboot_to_instance', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_noops_in_wrong_state(self, ncrc_mock, rti_mock, cd_mock, log_mock): @@ -343,7 +343,7 @@ autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, 'reboot_to_instance', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_noops_in_wrong_state2(self, ncrc_mock, rti_mock, cd_mock): @@ -426,10 +426,10 @@ @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, - 'refresh_clean_steps', autospec=True) + 'refresh_steps', autospec=True) @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps, mock_refresh, mock_touch): @@ -441,17 +441,17 @@ self.deploy.heartbeat(task, 'http://127.0.0.1:8080', '1.0.0') mock_touch.assert_called_once_with(mock.ANY) - mock_refresh.assert_called_once_with(mock.ANY, task) - mock_notify.assert_called_once_with(task) + mock_refresh.assert_called_once_with(mock.ANY, task, 'clean') + mock_notify.assert_called_once_with(task, 'clean') mock_set_steps.assert_called_once_with(task) @mock.patch.object(manager_utils, 'cleaning_error_handler') 
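From here on the agent-base tests encode the clean/deploy unification: refresh_clean_steps becomes refresh_steps(task, step_type), post-step hooks are looked up per step type via _get_post_step_hook(node, step_type), and the old resume helpers reduce to wrappers. The wrapper relationship the assertions imply:

    # Hedged sketch: the delegation the mock assertions above pin down.
    def notify_conductor_resume_operation(task, operation):
        # RPC kick to continue_node_<operation>; body elided here, see
        # ironic.conductor.utils for the real implementation.
        ...

    def notify_conductor_resume_clean(task):
        # Asserted as: mock_resume.assert_called_once_with(task, 'clean')
        notify_conductor_resume_operation(task, 'clean')

    def notify_conductor_resume_deploy(task):
        # Asserted as: mock_resume.assert_called_once_with(task, 'deploy')
        notify_conductor_resume_operation(task, 'deploy')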
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) @mock.patch.object(agent_base.HeartbeatMixin, - 'refresh_clean_steps', autospec=True) + 'refresh_steps', autospec=True) @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps, mock_refresh, mock_touch, @@ -789,7 +789,7 @@ self.assertEqual(2, get_power_state_mock.call_count) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) collect_mock.assert_called_once_with(task.node) resume_mock.assert_called_once_with(task) @@ -830,7 +830,7 @@ remove_provisioning_net_mock.assert_called_once_with(mock.ANY, task) configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) self.assertFalse(mock_collect.called) resume_mock.assert_called_once_with(task) @@ -862,7 +862,7 @@ remove_provisioning_net_mock.assert_called_once_with(mock.ANY, task) configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) self.assertFalse(mock_collect.called) @@ -900,7 +900,7 @@ remove_provisioning_net_mock.assert_called_once_with(mock.ANY, task) configure_tenant_net_mock.assert_called_once_with(mock.ANY, task) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) self.assertFalse(mock_collect.called) @@ -1036,7 +1036,7 @@ mock.call(task, states.POWER_OFF), mock.call(task, states.POWER_ON), ]) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) self.assertFalse(mock_collect.called) resume_mock.assert_called_once_with(task) @@ -1069,7 +1069,7 @@ mock.call(task, states.POWER_OFF), mock.call(task, states.POWER_ON), ]) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) log_error = ('The version of the IPA ramdisk used in the ' 'deployment do not support the command "sync"') @@ -1496,7 +1496,7 @@ self.assertFalse(prepare_mock.called) self.assertFalse(failed_state_mock.called) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1519,19 +1519,20 @@ with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: self.deploy.continue_cleaning(task) - notify_mock.assert_called_once_with(task) + notify_mock.assert_called_once_with(task, 'clean') @mock.patch.object(deploy_utils, 
'build_agent_options', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True, autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) - def test__cleaning_reboot(self, mock_reboot, mock_prepare, mock_build_opt): + def test__post_step_reboot(self, mock_reboot, mock_prepare, + mock_build_opt): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: i_info = task.node.driver_internal_info i_info['agent_secret_token'] = 'magicvalue01' task.node.driver_internal_info = i_info - agent_base._cleaning_reboot(task) + agent_base._post_step_reboot(task, 'clean') self.assertTrue(mock_build_opt.called) self.assertTrue(mock_prepare.called) mock_reboot.assert_called_once_with(task, states.REBOOT) @@ -1543,7 +1544,27 @@ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True, autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) - def test__cleaning_reboot_pregenerated_token( + def test__post_step_reboot_deploy(self, mock_reboot, mock_prepare, + mock_build_opt): + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + i_info = task.node.driver_internal_info + i_info['agent_secret_token'] = 'magicvalue01' + task.node.driver_internal_info = i_info + agent_base._post_step_reboot(task, 'deploy') + self.assertTrue(mock_build_opt.called) + self.assertTrue(mock_prepare.called) + mock_reboot.assert_called_once_with(task, states.REBOOT) + self.assertTrue( + task.node.driver_internal_info['deployment_reboot']) + self.assertNotIn('agent_secret_token', + task.node.driver_internal_info) + + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True, + autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test__post_step_reboot_pregenerated_token( self, mock_reboot, mock_prepare, mock_build_opt): with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: @@ -1551,7 +1572,7 @@ i_info['agent_secret_token'] = 'magicvalue01' i_info['agent_secret_token_pregenerated'] = True task.node.driver_internal_info = i_info - agent_base._cleaning_reboot(task) + agent_base._post_step_reboot(task, 'clean') self.assertTrue(mock_build_opt.called) self.assertTrue(mock_prepare.called) mock_reboot.assert_called_once_with(task, states.REBOOT) @@ -1563,13 +1584,13 @@ autospec=True) @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) - def test__cleaning_reboot_fail(self, mock_reboot, mock_handler, - mock_prepare, mock_build_opt): + def test__post_step_reboot_fail(self, mock_reboot, mock_handler, + mock_prepare, mock_build_opt): mock_reboot.side_effect = RuntimeError("broken") with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: - agent_base._cleaning_reboot(task) + agent_base._post_step_reboot(task, 'clean') mock_reboot.assert_called_once_with(task, states.REBOOT) mock_handler.assert_called_once_with(task, mock.ANY) self.assertNotIn('cleaning_reboot', @@ -1578,6 +1599,23 @@ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True, autospec=True) + @mock.patch.object(manager_utils, 'deploying_error_handler', autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test__post_step_reboot_fail_deploy(self, mock_reboot, 
mock_handler, + mock_prepare, mock_build_opt): + mock_reboot.side_effect = RuntimeError("broken") + + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + agent_base._post_step_reboot(task, 'deploy') + mock_reboot.assert_called_once_with(task, states.REBOOT) + mock_handler.assert_called_once_with(task, mock.ANY) + self.assertNotIn('deployment_reboot', + task.node.driver_internal_info) + + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', spec_set=True, + autospec=True) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1603,7 +1641,7 @@ self.deploy.continue_cleaning(task) reboot_mock.assert_called_once_with(task, states.REBOOT) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1621,16 +1659,17 @@ self.node.save() # Represents a freshly booted agent with no commands status_mock.return_value = [] + with task_manager.acquire(self.context, self.node['uuid'], shared=False) as task: self.deploy.continue_cleaning(task) - notify_mock.assert_called_once_with(task) + notify_mock.assert_called_once_with(task, 'clean') self.assertNotIn('cleaning_reboot', task.node.driver_internal_info) @mock.patch.object(agent_base, - '_get_post_clean_step_hook', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + '_get_post_step_hook', autospec=True) + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1653,14 +1692,14 @@ shared=False) as task: self.deploy.continue_cleaning(task) - get_hook_mock.assert_called_once_with(task.node) + get_hook_mock.assert_called_once_with(task.node, 'clean') hook_mock.assert_called_once_with(task, command_status) - notify_mock.assert_called_once_with(task) + notify_mock.assert_called_once_with(task, 'clean') - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_base, - '_get_post_clean_step_hook', autospec=True) + '_get_post_step_hook', autospec=True) @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1685,12 +1724,12 @@ shared=False) as task: self.deploy.continue_cleaning(task) - get_hook_mock.assert_called_once_with(task.node) + get_hook_mock.assert_called_once_with(task.node, 'clean') hook_mock.assert_called_once_with(task, command_status) error_handler_mock.assert_called_once_with(task, mock.ANY) self.assertFalse(notify_mock.called) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) @@ -1719,7 +1758,7 @@ self.deploy.continue_cleaning(task) self.assertFalse(notify_mock.called) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) 
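The test__post_step_reboot* cases above pin down the contract of the helper that replaces ``_cleaning_reboot``: reboot the ramdisk in the middle of a step, remember which operation is waiting on the reboot, and route failures to the matching error handler. What follows is a minimal sketch of that contract only, not the ironic implementation; the power-action and error-handler callables are stand-ins for the real ``manager_utils`` functions, and the ramdisk preparation the tests mock out (``build_agent_options``, ``prepare_ramdisk``) is reduced to a comment.

# Sketch of the behaviour asserted by the tests above; not the actual
# ironic helper. The callables are stand-ins for manager_utils members.
REBOOT = 'rebooting'  # stand-in for ironic.common.states.REBOOT

_REBOOT_FLAG = {'clean': 'cleaning_reboot', 'deploy': 'deployment_reboot'}


def _post_step_reboot(task, step_type, node_power_action,
                      cleaning_error_handler, deploying_error_handler):
    info = task.node.driver_internal_info
    try:
        # In ironic this is preceded by build_agent_options() and
        # prepare_ramdisk(), both mocked out in the tests above.
        node_power_action(task, REBOOT)
    except Exception as exc:
        handler = (cleaning_error_handler if step_type == 'clean'
                   else deploying_error_handler)
        # On failure the per-operation handler runs and no *_reboot
        # flag is left behind in driver_internal_info.
        handler(task, 'Reboot failed: %s' % exc)
        return
    # Record which operation is waiting on this reboot so the next
    # agent heartbeat resumes cleaning or deployment accordingly.
    info[_REBOOT_FLAG[step_type]] = True
    # Unless the token was pregenerated (e.g. delivered out of band),
    # drop it so a fresh agent_secret_token is issued after the reboot.
    if not info.get('agent_secret_token_pregenerated'):
        info.pop('agent_secret_token', None)
    task.node.driver_internal_info = info

Threading ``step_type`` through a single code path mirrors the other renames in this diff (``refresh_steps``, ``notify_conductor_resume_operation``, ``execute_step``): one implementation now serves both cleaning and deployment.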
@@ -1752,10 +1791,10 @@ @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_base.AgentDeployMixin, - 'refresh_clean_steps', autospec=True) + 'refresh_steps', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def _test_continue_cleaning_clean_version_mismatch( @@ -1772,8 +1811,8 @@ with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.deploy.continue_cleaning(task) - notify_mock.assert_called_once_with(task) - refresh_steps_mock.assert_called_once_with(mock.ANY, task) + notify_mock.assert_called_once_with(task, 'clean') + refresh_steps_mock.assert_called_once_with(mock.ANY, task, 'clean') if manual: self.assertFalse( task.node.driver_internal_info['skip_current_clean_step']) @@ -1792,10 +1831,10 @@ @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', + @mock.patch.object(manager_utils, 'notify_conductor_resume_operation', autospec=True) @mock.patch.object(agent_base.AgentDeployMixin, - 'refresh_clean_steps', autospec=True) + 'refresh_steps', autospec=True) @mock.patch.object(agent_client.AgentClient, 'get_commands_status', autospec=True) def test_continue_cleaning_clean_version_mismatch_fail( @@ -1816,7 +1855,7 @@ self.deploy.continue_cleaning(task) status_mock.assert_called_once_with(mock.ANY, task.node) - refresh_steps_mock.assert_called_once_with(mock.ANY, task) + refresh_steps_mock.assert_called_once_with(mock.ANY, task, 'clean') error_mock.assert_called_once_with(task, mock.ANY) self.assertFalse(notify_mock.called) self.assertFalse(steps_mock.called) @@ -1836,37 +1875,8 @@ self.deploy.continue_cleaning(task) error_mock.assert_called_once_with(task, mock.ANY) - def _test_clean_step_hook(self, hook_dict_mock): - """Helper method for unit tests related to clean step hooks. - - This is a helper method for other unit tests related to - clean step hooks. It acceps a mock 'hook_dict_mock' which is - a MagicMock and sets it up to function as a mock dictionary. - After that, it defines a dummy hook_method for two clean steps - raid.create_configuration and raid.delete_configuration. 
- - :param hook_dict_mock: An instance of mock.MagicMock() which - is the mocked value of agent_base.POST_CLEAN_STEP_HOOKS - :returns: a tuple, where the first item is the hook method created - by this method and second item is the backend dictionary for - the mocked hook_dict_mock - """ - hook_dict = {} - - def get(key, default): - return hook_dict.get(key, default) - - def getitem(self, key): - return hook_dict[key] - - def setdefault(key, default): - if key not in hook_dict: - hook_dict[key] = default - return hook_dict[key] - - hook_dict_mock.get = get - hook_dict_mock.__getitem__ = getitem - hook_dict_mock.setdefault = setdefault + def _test_clean_step_hook(self): + """Helper method for unit tests related to clean step hooks.""" some_function_mock = mock.MagicMock() @agent_base.post_clean_step_hook( @@ -1876,43 +1886,41 @@ def hook_method(): some_function_mock('some-arguments') - return hook_method, hook_dict + return hook_method - @mock.patch.object(agent_base, 'POST_CLEAN_STEP_HOOKS', - spec_set=dict) - def test_post_clean_step_hook(self, hook_dict_mock): + @mock.patch.object(agent_base, '_POST_STEP_HOOKS', + {'clean': {}, 'deploy': {}}) + def test_post_clean_step_hook(self): # This unit test makes sure that hook methods are registered # properly and entries are made in # agent_base.POST_CLEAN_STEP_HOOKS - hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock) - self.assertEqual(hook_method, - hook_dict['raid']['create_configuration']) - self.assertEqual(hook_method, - hook_dict['raid']['delete_configuration']) - - @mock.patch.object(agent_base, 'POST_CLEAN_STEP_HOOKS', - spec_set=dict) - def test__get_post_clean_step_hook(self, hook_dict_mock): - # Check if agent_base._get_post_clean_step_hook can get + hook_method = self._test_clean_step_hook() + hooks = agent_base._POST_STEP_HOOKS['clean'] + self.assertEqual(hook_method, hooks['raid']['create_configuration']) + self.assertEqual(hook_method, hooks['raid']['delete_configuration']) + + @mock.patch.object(agent_base, '_POST_STEP_HOOKS', + {'clean': {}, 'deploy': {}}) + def test__get_post_step_hook(self): + # Check if agent_base._get_post_step_hook can get # clean step for which hook is registered. - hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock) + hook_method = self._test_clean_step_hook() self.node.clean_step = {'step': 'create_configuration', 'interface': 'raid'} self.node.save() - hook_returned = agent_base._get_post_clean_step_hook(self.node) + hook_returned = agent_base._get_post_step_hook(self.node, 'clean') self.assertEqual(hook_method, hook_returned) - @mock.patch.object(agent_base, 'POST_CLEAN_STEP_HOOKS', - spec_set=dict) - def test__get_post_clean_step_hook_no_hook_registered( - self, hook_dict_mock): - # Make sure agent_base._get_post_clean_step_hook returns + @mock.patch.object(agent_base, '_POST_STEP_HOOKS', + {'clean': {}, 'deploy': {}}) + def test__get_post_step_hook_no_hook_registered(self): + # Make sure agent_base._get_post_step_hook returns # None when no clean step hook is registered for the clean step. 
- hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock) + self._test_clean_step_hook() self.node.clean_step = {'step': 'some-clean-step', 'interface': 'some-other-interface'} self.node.save() - hook_returned = agent_base._get_post_clean_step_hook(self.node) + hook_returned = agent_base._get_post_step_hook(self.node, 'clean') self.assertIsNone(hook_returned) @mock.patch.object(manager_utils, 'restore_power_state_if_needed', @@ -1947,7 +1955,7 @@ self.assertEqual(2, get_power_state_mock.call_count) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) collect_mock.assert_called_once_with(task.node) resume_mock.assert_called_once_with(task) @@ -1982,16 +1990,22 @@ ] } } + # NOTE(dtantsur): deploy steps are structurally identical to clean + # steps, reusing self.clean_steps for simplicity + self.deploy_steps = { + 'hardware_manager_version': '1', + 'deploy_steps': self.clean_steps['clean_steps'], + } @mock.patch.object(agent_client.AgentClient, 'get_clean_steps', autospec=True) - def test_refresh_clean_steps(self, client_mock): + def test_refresh_steps(self, client_mock): client_mock.return_value = { 'command_result': self.clean_steps} with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - self.deploy.refresh_clean_steps(task) + self.deploy.refresh_steps(task, 'clean') client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) @@ -2010,9 +2024,36 @@ self.assertEqual([self.clean_steps['clean_steps'][ 'SpecificHardwareManager'][1]], steps['raid']) + @mock.patch.object(agent_client.AgentClient, 'get_deploy_steps', + autospec=True) + def test_refresh_steps_deploy(self, client_mock): + client_mock.return_value = { + 'command_result': self.deploy_steps} + + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.refresh_steps(task, 'deploy') + + client_mock.assert_called_once_with(mock.ANY, task.node, + task.ports) + self.assertEqual('1', task.node.driver_internal_info[ + 'hardware_manager_version']) + self.assertIn('agent_cached_deploy_steps_refreshed', + task.node.driver_internal_info) + steps = task.node.driver_internal_info['agent_cached_deploy_steps'] + self.assertEqual({'deploy', 'raid'}, set(steps)) + # Since steps are returned in dicts, they have non-deterministic + # ordering + self.assertIn(self.clean_steps['clean_steps'][ + 'GenericHardwareManager'][0], steps['deploy']) + self.assertIn(self.clean_steps['clean_steps'][ + 'SpecificHardwareManager'][0], steps['deploy']) + self.assertEqual([self.clean_steps['clean_steps'][ + 'SpecificHardwareManager'][1]], steps['raid']) + @mock.patch.object(agent_client.AgentClient, 'get_clean_steps', autospec=True) - def test_refresh_clean_steps_missing_steps(self, client_mock): + def test_refresh_steps_missing_steps(self, client_mock): del self.clean_steps['clean_steps'] client_mock.return_value = { 'command_result': self.clean_steps} @@ -2021,14 +2062,14 @@ self.context, self.node.uuid, shared=False) as task: self.assertRaisesRegex(exception.NodeCleaningFailure, 'invalid result', - self.deploy.refresh_clean_steps, - task) + self.deploy.refresh_steps, + task, 'clean') client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) @mock.patch.object(agent_client.AgentClient, 'get_clean_steps', autospec=True) - def 
test_refresh_clean_steps_missing_interface(self, client_mock): + def test_refresh_steps_missing_interface(self, client_mock): step = self.clean_steps['clean_steps']['SpecificHardwareManager'][1] del step['interface'] client_mock.return_value = { @@ -2038,16 +2079,16 @@ self.context, self.node.uuid, shared=False) as task: self.assertRaisesRegex(exception.NodeCleaningFailure, 'invalid clean step', - self.deploy.refresh_clean_steps, - task) + self.deploy.refresh_steps, + task, 'clean') client_mock.assert_called_once_with(mock.ANY, task.node, task.ports) -class CleanStepMethodsTestCase(db_base.DbTestCase): +class StepMethodsTestCase(db_base.DbTestCase): def setUp(self): - super(CleanStepMethodsTestCase, self).setUp() + super(StepMethodsTestCase, self).setUp() self.clean_steps = { 'deploy': [ @@ -2072,10 +2113,25 @@ self.ports = [object_utils.create_test_port(self.context, node_id=self.node.id)] - def test_agent_get_clean_steps(self): + def test_agent_get_steps(self): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + response = agent_base.get_steps(task, 'clean') + + # Since steps are returned in dicts, they have non-deterministic + # ordering + self.assertThat(response, matchers.HasLength(3)) + self.assertIn(self.clean_steps['deploy'][0], response) + self.assertIn(self.clean_steps['deploy'][1], response) + self.assertIn(self.clean_steps['raid'][0], response) + + def test_agent_get_steps_deploy(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - response = agent_base.get_clean_steps(task) + task.node.driver_internal_info = { + 'agent_cached_deploy_steps': self.clean_steps + } + response = agent_base.get_steps(task, 'deploy') # Since steps are returned in dicts, they have non-deterministic # ordering @@ -2084,40 +2140,40 @@ self.assertIn(self.clean_steps['deploy'][1], response) self.assertIn(self.clean_steps['raid'][0], response) - def test_get_clean_steps_custom_interface(self): + def test_get_steps_custom_interface(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - response = agent_base.get_clean_steps(task, interface='raid') + response = agent_base.get_steps(task, 'clean', interface='raid') self.assertThat(response, matchers.HasLength(1)) self.assertEqual(self.clean_steps['raid'], response) - def test_get_clean_steps_override_priorities(self): + def test_get_steps_override_priorities(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: new_priorities = {'create_configuration': 42} - response = agent_base.get_clean_steps( - task, interface='raid', override_priorities=new_priorities) + response = agent_base.get_steps( + task, 'clean', interface='raid', + override_priorities=new_priorities) self.assertEqual(42, response[0]['priority']) - def test_get_clean_steps_override_priorities_none(self): + def test_get_steps_override_priorities_none(self): with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: # this is simulating the default value of a configuration option new_priorities = {'create_configuration': None} - response = agent_base.get_clean_steps( - task, interface='raid', override_priorities=new_priorities) + response = agent_base.get_steps( + task, 'clean', interface='raid', + override_priorities=new_priorities) self.assertEqual(10, response[0]['priority']) - def test_get_clean_steps_missing_steps(self): + def test_get_steps_missing_steps(self): info = self.node.driver_internal_info del info['agent_cached_clean_steps'] 
self.node.driver_internal_info = info self.node.save() with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - self.assertRaises(exception.NodeCleaningFailure, - agent_base.get_clean_steps, - task) + self.assertEqual([], agent_base.get_steps(task, 'clean')) @mock.patch('ironic.objects.Port.list_by_node_id', spec_set=types.FunctionType) @@ -2130,13 +2186,27 @@ with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - response = agent_base.execute_clean_step( - task, - self.clean_steps['deploy'][0]) + response = agent_base.execute_step( + task, self.clean_steps['deploy'][0], 'clean') self.assertEqual(states.CLEANWAIT, response) @mock.patch('ironic.objects.Port.list_by_node_id', spec_set=types.FunctionType) + @mock.patch.object(agent_client.AgentClient, 'execute_deploy_step', + autospec=True) + def test_execute_deploy_step(self, client_mock, list_ports_mock): + client_mock.return_value = { + 'command_status': 'SUCCEEDED'} + list_ports_mock.return_value = self.ports + + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + response = agent_base.execute_step( + task, self.clean_steps['deploy'][0], 'deploy') + self.assertEqual(states.DEPLOYWAIT, response) + + @mock.patch('ironic.objects.Port.list_by_node_id', + spec_set=types.FunctionType) @mock.patch.object(agent_client.AgentClient, 'execute_clean_step', autospec=True) def test_execute_clean_step_running(self, client_mock, list_ports_mock): @@ -2146,9 +2216,8 @@ with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - response = agent_base.execute_clean_step( - task, - self.clean_steps['deploy'][0]) + response = agent_base.execute_step( + task, self.clean_steps['deploy'][0], 'clean') self.assertEqual(states.CLEANWAIT, response) @mock.patch('ironic.objects.Port.list_by_node_id', @@ -2163,7 +2232,6 @@ with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: - response = agent_base.execute_clean_step( - task, - self.clean_steps['deploy'][0]) + response = agent_base.execute_step( + task, self.clean_steps['deploy'][0], 'clean') self.assertEqual(states.CLEANWAIT, response) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent_client.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent_client.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent_client.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent_client.py 2020-04-10 17:06:41.000000000 +0000 @@ -191,6 +191,17 @@ 'api_version': CONF.agent.agent_api_version}, timeout=CONF.agent.command_timeout) + def test_get_commands_status_retries(self): + with mock.patch.object(self.client.session, 'get', + autospec=True) as mock_get: + res = mock.MagicMock(spec_set=['json']) + res.json.return_value = {'commands': []} + mock_get.side_effect = [ + requests.ConnectionError('boom'), + res] + self.assertEqual([], self.client.get_commands_status(self.node)) + self.assertEqual(2, mock_get.call_count) + def test_prepare_image(self): self.client._command = mock.MagicMock(spec_set=[]) image_info = {'image_id': 'image'} @@ -271,7 +282,8 @@ self.node, root_uuid, efi_system_part_uuid=efi_system_part_uuid, prep_boot_part_uuid=prep_boot_part_uuid) self.client._command.assert_called_once_with( - node=self.node, method='image.install_bootloader', params=params, + command_timeout_factor=2, node=self.node, + 
method='image.install_bootloader', params=params, wait=True) def test_install_bootloader(self): @@ -330,8 +342,10 @@ def test_finalize_rescue(self): self.client._command = mock.MagicMock(spec_set=[]) self.node.instance_info['rescue_password'] = 'password' + self.node.instance_info['hashed_rescue_password'] = '1234' expected_params = { - 'rescue_password': 'password', + 'rescue_password': '1234', + 'hashed': True, } self.client.finalize_rescue(self.node) self.client._command.assert_called_once_with( @@ -346,6 +360,36 @@ self.node) self.assertFalse(self.client._command.called) + def test_finalize_rescue_fallback(self): + self.config(require_rescue_password_hashed=False, group="conductor") + self.client._command = mock.MagicMock(spec_set=[]) + self.node.instance_info['rescue_password'] = 'password' + self.node.instance_info['hashed_rescue_password'] = '1234' + self.client._command.side_effect = [ + exception.AgentAPIError('blah'), + ('', '')] + self.client.finalize_rescue(self.node) + self.client._command.assert_has_calls([ + mock.call(node=mock.ANY, method='rescue.finalize_rescue', + params={'rescue_password': '1234', + 'hashed': True}), + mock.call(node=mock.ANY, method='rescue.finalize_rescue', + params={'rescue_password': 'password'})]) + + def test_finalize_rescue_fallback_restricted(self): + self.config(require_rescue_password_hashed=True, group="conductor") + self.client._command = mock.MagicMock(spec_set=[]) + self.node.instance_info['rescue_password'] = 'password' + self.node.instance_info['hashed_rescue_password'] = '1234' + self.client._command.side_effect = exception.AgentAPIError('blah') + self.assertRaises(exception.InstanceRescueFailure, + self.client.finalize_rescue, + self.node) + self.client._command.assert_has_calls([ + mock.call(node=mock.ANY, method='rescue.finalize_rescue', + params={'rescue_password': '1234', + 'hashed': True})]) + def test__command_agent_client(self): response_data = {'status': 'ok'} response_text = json.dumps(response_data) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_agent.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_agent.py 2020-04-10 17:06:41.000000000 +0000 @@ -1042,33 +1042,33 @@ set_dhcp_provider_mock.assert_called_once_with() clean_dhcp_mock.assert_called_once_with(task) - @mock.patch.object(agent_base, 'get_clean_steps', autospec=True) - def test_get_clean_steps(self, mock_get_clean_steps): + @mock.patch.object(agent_base, 'get_steps', autospec=True) + def test_get_clean_steps(self, mock_get_steps): # Test getting clean steps mock_steps = [{'priority': 10, 'interface': 'deploy', 'step': 'erase_devices'}] - mock_get_clean_steps.return_value = mock_steps + mock_get_steps.return_value = mock_steps with task_manager.acquire(self.context, self.node.uuid) as task: steps = self.driver.get_clean_steps(task) - mock_get_clean_steps.assert_called_once_with( - task, interface='deploy', + mock_get_steps.assert_called_once_with( + task, 'clean', interface='deploy', override_priorities={'erase_devices': None, 'erase_devices_metadata': None}) self.assertEqual(mock_steps, steps) - @mock.patch.object(agent_base, 'get_clean_steps', autospec=True) - def test_get_clean_steps_config_priority(self, mock_get_clean_steps): + @mock.patch.object(agent_base, 'get_steps', 
autospec=True) + def test_get_clean_steps_config_priority(self, mock_get_steps): # Test that we can override the priority of get clean steps # Use 0 because it is an edge case (false-y) and used in devstack self.config(erase_devices_priority=0, group='deploy') self.config(erase_devices_metadata_priority=0, group='deploy') mock_steps = [{'priority': 10, 'interface': 'deploy', 'step': 'erase_devices'}] - mock_get_clean_steps.return_value = mock_steps + mock_get_steps.return_value = mock_steps with task_manager.acquire(self.context, self.node.uuid) as task: self.driver.get_clean_steps(task) - mock_get_clean_steps.assert_called_once_with( - task, interface='deploy', + mock_get_steps.assert_called_once_with( + task, 'clean', interface='deploy', override_priorities={'erase_devices': 0, 'erase_devices_metadata': 0}) @@ -1280,7 +1280,7 @@ get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) self.assertTrue(remove_symlink_mock.called) resume_mock.assert_called_once_with(task) @@ -1330,7 +1330,7 @@ get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) resume_mock.assert_called_once_with(task) @@ -1391,7 +1391,7 @@ get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) resume_mock.assert_called_once_with(task) @@ -1455,7 +1455,7 @@ get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @@ -1554,7 +1554,7 @@ get_power_state_mock.assert_called_once_with(task) node_power_action_mock.assert_called_once_with( task, states.POWER_ON) - self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) resume_mock.assert_called_once_with(task) @@ -1774,7 +1774,7 @@ } self.node = object_utils.create_test_node(self.context, **n) - @mock.patch.object(agent_base, 'get_clean_steps', autospec=True) + @mock.patch.object(agent_base, 'get_steps', autospec=True) def test_get_clean_steps(self, get_steps_mock): get_steps_mock.return_value = [ {'step': 'create_configuration', 'interface': 'raid', @@ -1789,7 +1789,7 @@ self.assertEqual(0, ret[1]['priority']) @mock.patch.object(raid, 'filter_target_raid_config') - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_create_configuration(self, execute_mock, filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: @@ 
-1802,10 +1802,11 @@ self.assertEqual( self.target_raid_config, task.node.driver_internal_info['target_raid_config']) - execute_mock.assert_called_once_with(task, self.clean_step) + execute_mock.assert_called_once_with(task, self.clean_step, + 'clean') @mock.patch.object(raid, 'filter_target_raid_config') - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_create_configuration_skip_root(self, execute_mock, filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: @@ -1819,13 +1820,14 @@ return_value = task.driver.raid.create_configuration( task, create_root_volume=False) self.assertEqual(states.CLEANWAIT, return_value) - execute_mock.assert_called_once_with(task, self.clean_step) + execute_mock.assert_called_once_with(task, self.clean_step, + 'clean') self.assertEqual( exp_target_raid_config, task.node.driver_internal_info['target_raid_config']) @mock.patch.object(raid, 'filter_target_raid_config') - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_create_configuration_skip_nonroot(self, execute_mock, filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: @@ -1839,13 +1841,14 @@ return_value = task.driver.raid.create_configuration( task, create_nonroot_volumes=False) self.assertEqual(states.CLEANWAIT, return_value) - execute_mock.assert_called_once_with(task, self.clean_step) + execute_mock.assert_called_once_with(task, self.clean_step, + 'clean') self.assertEqual( exp_target_raid_config, task.node.driver_internal_info['target_raid_config']) @mock.patch.object(raid, 'filter_target_raid_config') - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_create_configuration_no_target_raid_config_after_skipping( self, execute_mock, filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: @@ -1860,7 +1863,7 @@ self.assertFalse(execute_mock.called) @mock.patch.object(raid, 'filter_target_raid_config') - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_create_configuration_empty_target_raid_config( self, execute_mock, filter_target_raid_config_mock): execute_mock.return_value = states.CLEANING @@ -1890,7 +1893,7 @@ self.node.clean_step = {'interface': 'raid', 'step': 'create_configuration'} command = {'command_result': {'clean_result': 'foo'}} - create_hook = agent_base._get_post_clean_step_hook(self.node) + create_hook = agent_base._get_post_step_hook(self.node, 'clean') with task_manager.acquire(self.context, self.node.uuid) as task: create_hook(task, command) update_raid_info_mock.assert_called_once_with(task.node, 'foo') @@ -1906,13 +1909,14 @@ task, command) self.assertFalse(update_raid_info_mock.called) - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_delete_configuration(self, execute_mock): execute_mock.return_value = states.CLEANING with task_manager.acquire(self.context, self.node.uuid) as task: return_value = task.driver.raid.delete_configuration(task) - execute_mock.assert_called_once_with(task, self.clean_step) + execute_mock.assert_called_once_with(task, self.clean_step, + 'clean') self.assertEqual(states.CLEANING, 
return_value) def test__delete_configuration_final(self): @@ -1931,7 +1935,7 @@ 'step': 'delete_configuration'} self.node.raid_config = {'foo': 'bar'} command = {'command_result': {'clean_result': 'foo'}} - delete_hook = agent_base._get_post_clean_step_hook(self.node) + delete_hook = agent_base._get_post_step_hook(self.node, 'clean') with task_manager.acquire(self.context, self.node.uuid) as task: delete_hook(task, command) @@ -1954,7 +1958,8 @@ self.config(**config_kwarg) self.config(enabled_hardware_types=['fake-hardware']) instance_info = INSTANCE_INFO - instance_info.update({'rescue_password': 'password'}) + instance_info.update({'rescue_password': 'password', + 'hashed_rescue_password': '1234'}) driver_info = DRIVER_INFO driver_info.update({'rescue_ramdisk': 'my_ramdisk', 'rescue_kernel': 'my_kernel'}) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_deploy_utils.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_deploy_utils.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_deploy_utils.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_deploy_utils.py 2020-04-10 17:06:41.000000000 +0000 @@ -938,8 +938,8 @@ result = boot_mode_utils.get_boot_mode_for_deploy(self.node) self.assertEqual('bios', result) - instance_info = {'capabilities': {'trusted_boot': 'True'}, - 'capabilities': {'secure_boot': 'True'}} + instance_info = {'capabilities': {'trusted_boot': 'True', + 'secure_boot': 'True'}} self.node.instance_info = instance_info result = boot_mode_utils.get_boot_mode_for_deploy(self.node) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_ipxe.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_ipxe.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_ipxe.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_ipxe.py 2020-04-10 17:06:41.000000000 +0000 @@ -298,13 +298,13 @@ mock_instance_img_info.assert_called_once_with( task, ipxe_enabled=True) elif mode == 'deploy': - mock_cache_r_k.assert_called_once_with( - task, {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'}, - ipxe_enabled=True) + mock_cache_r_k.assert_called_once_with( + task, {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'}, + ipxe_enabled=True) elif mode == 'rescue': - mock_cache_r_k.assert_called_once_with( - task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'}, - ipxe_enabled=True) + mock_cache_r_k.assert_called_once_with( + task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'}, + ipxe_enabled=True) if uefi: mock_pxe_config.assert_called_once_with( task, {}, CONF.pxe.uefi_pxe_config_template, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py 2020-04-10 17:06:41.000000000 +0000 @@ -968,7 +968,7 @@ tear_down_cleaning_mock.assert_called_once_with( task, manage_boot=True) - @mock.patch.object(agent_base, 'get_clean_steps', autospec=True) + @mock.patch.object(agent_base, 'get_steps', autospec=True) def test_get_clean_steps(self, 
mock_get_clean_steps): # Test getting clean steps self.config(group='deploy', erase_devices_priority=10) @@ -981,19 +981,19 @@ with task_manager.acquire(self.context, self.node.uuid) as task: steps = task.driver.deploy.get_clean_steps(task) mock_get_clean_steps.assert_called_once_with( - task, interface='deploy', + task, 'clean', interface='deploy', override_priorities={ 'erase_devices': 10, 'erase_devices_metadata': 5}) self.assertEqual(mock_steps, steps) - @mock.patch.object(agent_base, 'execute_clean_step', autospec=True) + @mock.patch.object(agent_base, 'execute_step', autospec=True) def test_execute_clean_step(self, agent_execute_clean_step_mock): with task_manager.acquire(self.context, self.node.uuid) as task: task.driver.deploy.execute_clean_step( task, {'some-step': 'step-info'}) agent_execute_clean_step_mock.assert_called_once_with( - task, {'some-step': 'step-info'}) + task, {'some-step': 'step-info'}, 'clean') @mock.patch.object(agent_base.AgentDeployMixin, 'reboot_and_finish_deploy', autospec=True) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_pxe.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_pxe.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/modules/test_pxe.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/modules/test_pxe.py 2020-04-10 17:06:41.000000000 +0000 @@ -296,15 +296,15 @@ mock_instance_img_info.assert_called_once_with( task, ipxe_enabled=False) elif mode == 'deploy': - mock_cache_r_k.assert_called_once_with( - task, - {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'}, - ipxe_enabled=False) + mock_cache_r_k.assert_called_once_with( + task, + {'deploy_kernel': 'a', 'deploy_ramdisk': 'r'}, + ipxe_enabled=False) elif mode == 'rescue': - mock_cache_r_k.assert_called_once_with( - task, - {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'}, - ipxe_enabled=False) + mock_cache_r_k.assert_called_once_with( + task, + {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'}, + ipxe_enabled=False) if uefi: mock_pxe_config.assert_called_once_with( task, {}, CONF.pxe.uefi_pxe_config_template, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/test_fake_hardware.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/test_fake_hardware.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/test_fake_hardware.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/test_fake_hardware.py 2020-04-10 17:06:41.000000000 +0000 @@ -96,7 +96,7 @@ self.driver.management.validate(self.task) def test_management_interface_set_boot_device_good(self): - self.driver.management.set_boot_device(self.task, boot_devices.PXE) + self.driver.management.set_boot_device(self.task, boot_devices.PXE) def test_management_interface_set_boot_device_fail(self): self.assertRaises(exception.InvalidParameterValue, diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/test_ipmi.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/test_ipmi.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/drivers/test_ipmi.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/drivers/test_ipmi.py 2020-04-10 17:06:41.000000000 +0000 @@ -35,33 +35,33 @@ enabled_vendor_interfaces=['ipmitool', 'no-vendor']) def _validate_interfaces(self, task, **kwargs): - self.assertIsInstance( - task.driver.management, - 
kwargs.get('management', ipmitool.IPMIManagement)) - self.assertIsInstance( - task.driver.power, - kwargs.get('power', ipmitool.IPMIPower)) - self.assertIsInstance( - task.driver.boot, - kwargs.get('boot', pxe.PXEBoot)) - self.assertIsInstance( - task.driver.deploy, - kwargs.get('deploy', iscsi_deploy.ISCSIDeploy)) - self.assertIsInstance( - task.driver.console, - kwargs.get('console', noop.NoConsole)) - self.assertIsInstance( - task.driver.raid, - kwargs.get('raid', noop.NoRAID)) - self.assertIsInstance( - task.driver.vendor, - kwargs.get('vendor', ipmitool.VendorPassthru)) - self.assertIsInstance( - task.driver.storage, - kwargs.get('storage', noop_storage.NoopStorage)) - self.assertIsInstance( - task.driver.rescue, - kwargs.get('rescue', noop.NoRescue)) + self.assertIsInstance( + task.driver.management, + kwargs.get('management', ipmitool.IPMIManagement)) + self.assertIsInstance( + task.driver.power, + kwargs.get('power', ipmitool.IPMIPower)) + self.assertIsInstance( + task.driver.boot, + kwargs.get('boot', pxe.PXEBoot)) + self.assertIsInstance( + task.driver.deploy, + kwargs.get('deploy', iscsi_deploy.ISCSIDeploy)) + self.assertIsInstance( + task.driver.console, + kwargs.get('console', noop.NoConsole)) + self.assertIsInstance( + task.driver.raid, + kwargs.get('raid', noop.NoRAID)) + self.assertIsInstance( + task.driver.vendor, + kwargs.get('vendor', ipmitool.VendorPassthru)) + self.assertIsInstance( + task.driver.storage, + kwargs.get('storage', noop_storage.NoopStorage)) + self.assertIsInstance( + task.driver.rescue, + kwargs.get('rescue', noop.NoRescue)) def test_default_interfaces(self): node = obj_utils.create_test_node(self.context, driver='ipmi') diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/objects/test_node.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/objects/test_node.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/objects/test_node.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/objects/test_node.py 2020-04-10 17:06:41.000000000 +0000 @@ -1134,6 +1134,68 @@ self.assertIsNone(node.description) self.assertEqual({}, node.obj_get_changes()) + def test_lessee_supported_missing(self): + # lessee not set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + delattr(node, 'lessee') + node.obj_reset_changes() + node._convert_to_version("1.34") + self.assertIsNone(node.lessee) + self.assertEqual({'lessee': None}, + node.obj_get_changes()) + + def test_lessee_supported_set(self): + # lessee set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.lessee = "some-lucky-project" + node.obj_reset_changes() + node._convert_to_version("1.34") + self.assertEqual("some-lucky-project", + node.lessee) + self.assertEqual({}, node.obj_get_changes()) + + def test_lessee_unsupported_missing(self): + # lessee not set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + delattr(node, 'lessee') + node.obj_reset_changes() + node._convert_to_version("1.33") + self.assertNotIn('lessee', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_lessee_unsupported_set_remove(self): + # lessee set, should be removed. 
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.lessee = "some-lucky-project" + node.obj_reset_changes() + node._convert_to_version("1.33") + self.assertNotIn('lessee', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_lessee_unsupported_set_no_remove_non_default(self): + # lessee set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.lessee = "some-lucky-project" + node.obj_reset_changes() + node._convert_to_version("1.33", False) + self.assertIsNone(node.lessee) + self.assertEqual({'lessee': None}, + node.obj_get_changes()) + + def test_lessee_unsupported_set_no_remove_default(self): + # lessee set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.lessee = None + node.obj_reset_changes() + node._convert_to_version("1.33", False) + self.assertIsNone(node.lessee) + self.assertEqual({}, node.obj_get_changes()) + class TestNodePayloads(db_base.DbTestCase): diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/objects/test_objects.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/objects/test_objects.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/objects/test_objects.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/objects/test_objects.py 2020-04-10 17:06:41.000000000 +0000 @@ -676,7 +676,7 @@ # version bump. It is an MD5 hash of the object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. expected_object_fingerprints = { - 'Node': '1.33-d6a8ba8dd3be3b2bbad0e0a5b9887aa8', + 'Node': '1.34-ae873e627cf30bf28fe9f98a807b6200', 'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', 'Port': '1.9-0cb9202a4ec442e8c0d87a324155eaaf', @@ -684,21 +684,21 @@ 'Conductor': '1.3-d3f53e853b4d58cae5bfbd9a8341af4a', 'EventType': '1.1-aa2ba1afd38553e3880c267404e8d370', 'NotificationPublisher': '1.0-51a09397d6c0687771fb5be9a999605d', - 'NodePayload': '1.14-8b2dfc37d800f268d29a580ac034e2c6', + 'NodePayload': '1.15-86ee30dbf374be4cf17c5b501d9e2e7b', 'NodeSetPowerStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeSetPowerStatePayload': '1.14-dcd4d7911717ba323ab4c3297b92c31c', + 'NodeSetPowerStatePayload': '1.15-3c64b07a2b96c2661e7743b47ed43705', 'NodeCorrectedPowerStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeCorrectedPowerStatePayload': '1.14-c7d20e953bbb9a1a4ce31ce22068e4bf', + 'NodeCorrectedPowerStatePayload': '1.15-59a224a9191cdc9f1acc2e0dcd2d3adb', 'NodeSetProvisionStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeSetProvisionStatePayload': '1.14-6d4145044a98c5cc80a40d69bbd98f61', + 'NodeSetProvisionStatePayload': '1.15-488a3d62a0643d17e288ecf89ed5bbb4', 'VolumeConnector': '1.0-3e0252c0ab6e6b9d158d09238a577d97', 'VolumeTarget': '1.0-0b10d663d8dae675900b2c7548f76f5e', 'ChassisCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', 'ChassisCRUDPayload': '1.0-dce63895d8186279a7dd577cffccb202', 'NodeCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeCRUDPayload': '1.12-3f63cdace5159785535049025ddf6a5c', + 'NodeCRUDPayload': '1.13-8f673253ff8d7389897a6a80d224ac33', 'PortCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', 'PortCRUDPayload': '1.3-21235916ed54a91b2a122f59571194e7', 'NodeMaintenanceNotification': '1.0-59acc533c11d306f149846f922739c15', diff -Nru 
ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/raid_constants.py ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/raid_constants.py --- ironic-14.0.1~git2020032415.de2d907fc/ironic/tests/unit/raid_constants.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic/tests/unit/raid_constants.py 2020-04-10 17:06:41.000000000 +0000 @@ -36,6 +36,19 @@ } ''' +RAID_SW_CONFIG_OKAY = ''' +{ + "logical_disks": [ + { + "raid_level": "1", + "size_gb": 100, + "controller": "software", + "physical_disks": [{"size": ">= 50"}, {"name": "/dev/sdc"}] + } + ] +} +''' + RAID_CONFIG_NO_LOGICAL_DISKS = ''' { "logical_disks": [] @@ -194,6 +207,19 @@ } ] } +''' + +RAID_CONFIG_TOO_FEW_PHY_DISKS = ''' +{ + "logical_disks": [ + { + "raid_level": "1", + "size_gb": 100, + "controller": "Smart Array P822 in Slot 2", + "physical_disks": [{"size": ">= 50"}] + } + ] +} ''' RAID_CONFIG_ADDITIONAL_PROP = ''' diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/pbr.json ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/pbr.json --- ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/pbr.json 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/pbr.json 2020-04-10 17:06:46.000000000 +0000 @@ -1 +1 @@ -{"git_version": "de2d907fc", "is_release": false} \ No newline at end of file +{"git_version": "af9e6ba90", "is_release": false} \ No newline at end of file diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/PKG-INFO ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/PKG-INFO --- ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/PKG-INFO 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/PKG-INFO 2020-04-10 17:06:45.000000000 +0000 @@ -1,6 +1,6 @@ -Metadata-Version: 1.1 +Metadata-Version: 2.1 Name: ironic -Version: 14.0.1.dev87 +Version: 14.0.1.dev163 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack @@ -55,6 +55,11 @@ Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=3.6 +Provides-Extra: guru_meditation_reports +Provides-Extra: i18n +Provides-Extra: test diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/requires.txt ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/requires.txt --- ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/requires.txt 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/requires.txt 2020-04-10 17:06:45.000000000 +0000 @@ -17,12 +17,10 @@ oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.db>=4.40.0 -oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=1.30.0 -oslo.reports>=1.18.0 oslo.rootwrap>=5.8.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 @@ -44,3 +42,31 @@ rfc3986>=0.3.1 stevedore>=1.20.0 tooz>=1.58.0 + +[guru_meditation_reports] +oslo.reports>=1.18.0 + +[i18n] +oslo.i18n>=3.15.3 + +[test] +Babel!=2.4.0,>=2.3.4 +PyMySQL>=0.7.6 +Pygments>=2.2.0 +WebTest>=2.0.27 +bandit!=1.6.0,<2.0.0,>=1.1.0 +bashate>=0.5.1 +coverage!=4.4,>=4.0 +ddt>=1.0.1 +doc8>=0.6.0 +fixtures>=3.0.0 +flake8-import-order>=0.13 +hacking<3.1.0,>=3.0.0 
+iso8601>=0.1.11 +mock>=3.0.0 +oslotest>=3.2.0 +psycopg2>=2.7.3 +stestr>=1.0.0 +testresources>=2.0.0 +testscenarios>=0.4 +testtools>=2.2.0 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/SOURCES.txt ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/SOURCES.txt --- ironic-14.0.1~git2020032415.de2d907fc/ironic.egg-info/SOURCES.txt 2020-03-24 19:12:20.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/ironic.egg-info/SOURCES.txt 2020-04-10 17:06:46.000000000 +0000 @@ -164,6 +164,7 @@ doc/source/index.rst doc/source/_exts/automated_steps.py doc/source/admin/adoption.rst +doc/source/admin/agent-token.rst doc/source/admin/api-audit-support.rst doc/source/admin/bios.rst doc/source/admin/boot-from-volume.rst @@ -179,6 +180,7 @@ doc/source/admin/metrics.rst doc/source/admin/multitenancy.rst doc/source/admin/node-deployment.rst +doc/source/admin/node-multitenancy.rst doc/source/admin/notifications.rst doc/source/admin/portgroups.rst doc/source/admin/power-sync.rst @@ -186,6 +188,7 @@ doc/source/admin/raid.rst doc/source/admin/report.txt doc/source/admin/rescue.rst +doc/source/admin/retirement.rst doc/source/admin/security.rst doc/source/admin/troubleshooting.rst doc/source/admin/upgrade-guide.rst @@ -252,6 +255,7 @@ doc/source/install/configure-identity.rst doc/source/install/configure-integration.rst doc/source/install/configure-ipmi.rst +doc/source/install/configure-ipv6-networking.rst doc/source/install/configure-iscsi.rst doc/source/install/configure-networking.rst doc/source/install/configure-nova-flavors.rst @@ -477,6 +481,7 @@ ironic/db/sqlalchemy/alembic/versions/868cb606a74a_add_version_field_in_base_class.py ironic/db/sqlalchemy/alembic/versions/93706939026c_add_node_protected_field.py ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py +ironic/db/sqlalchemy/alembic/versions/b2ad35726bb0_add_node_lessee.py ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py ironic/db/sqlalchemy/alembic/versions/b9117ac17882_add_node_deploy_step.py ironic/db/sqlalchemy/alembic/versions/bb59b63f55a_add_node_driver_internal_info.py @@ -977,8 +982,10 @@ releasenotes/notes/adoption-feature-update-d2160954a2c36b0a.yaml releasenotes/notes/agent-api-bf9f18d8d38075e4.yaml releasenotes/notes/agent-can-request-reboot-6238e13e2e898f68.yaml +releasenotes/notes/agent-command-status-retry-f9b6f53a823c6b01.yaml releasenotes/notes/agent-http-provisioning-d116b3ff36669d16.yaml releasenotes/notes/agent-takeover-60f27cef21ebfb48.yaml +releasenotes/notes/agent-token-support-0a5b5aa1585dfbb5.yaml releasenotes/notes/agent-wol-driver-4116f64907d0db9c.yaml releasenotes/notes/agent_partition_image-48a03700f41a3980.yaml releasenotes/notes/allocation-added-owner-policy-c650074e68d03289.yaml @@ -1078,6 +1085,7 @@ releasenotes/notes/configure-notifications-72824356e7d8832a.yaml releasenotes/notes/console-port-allocation-bb07c43e3890c54c.yaml releasenotes/notes/context-domain-id-name-deprecation-ae6e40718273be8d.yaml +releasenotes/notes/continue-node-deploy-state-63d9dc9cdcf8e37a.yaml releasenotes/notes/correct-api-version-check-conditional-for-nodename-439bebc02fb5493d.yaml releasenotes/notes/create-on-conductor-c1c52a1f022c4048.yaml releasenotes/notes/create-port-on-conductor-b921738b4b2a5def.yaml @@ -1085,11 +1093,13 @@ releasenotes/notes/dbsync-online_data_migration-edcf0b1cc3667582.yaml releasenotes/notes/debug-no-api-tracebacks-a8a0caddc9676b06.yaml releasenotes/notes/debug-sensor-data-fix-for-ipmitool-eb13e80ccdd984db.yaml 
+releasenotes/notes/decouple-boot-params-2b05806435ad21e5.yaml releasenotes/notes/default-resource-class-e11bacfb01d6841b.yaml releasenotes/notes/default-swift_account-b008d08e85bdf154.yaml releasenotes/notes/default_boot_option-f22c01f976bc2de7.yaml releasenotes/notes/dell-boss-raid1-ec33e5b9c59d4021.yaml releasenotes/notes/deny-too-long-chassis-description-0690d6f67ed002d5.yaml +releasenotes/notes/deploy-step-error-d343e8cb7d1b2305.yaml releasenotes/notes/deploy-steps-required-aa72cdf1c0ec0e84.yaml releasenotes/notes/deploy-templates-5df3368df862631c.yaml releasenotes/notes/deploy_steps-243b341cf742f7cc.yaml @@ -1116,6 +1126,7 @@ releasenotes/notes/deprecated-neutron-ops-79abab5b013b7939.yaml releasenotes/notes/deprecated-neutron-opts-2e1d9e65f00301d3.yaml releasenotes/notes/dhcp-provider-clean-dhcp-9352717903d6047e.yaml +releasenotes/notes/dhcpv6-stateful-address-count-0f94ac6a55bd9e51.yaml releasenotes/notes/disable-clean-step-reset-ilo-1869a6e08f39901c.yaml releasenotes/notes/disable_periodic_tasks-0ea39fa7a8a108c6.yaml releasenotes/notes/disk-label-capability-d36d126e0ad36dca.yaml @@ -1145,6 +1156,7 @@ releasenotes/notes/erase-devices-metadata-config-f39b6ca415a87757.yaml releasenotes/notes/error-resilient-enabled_drivers-4e9c864ed6eaddd1.yaml releasenotes/notes/expose-conductor-d13c9c4ef9d9de86.yaml +releasenotes/notes/extends-install-bootloader-timeout-8fce9590bf405cdf.yaml releasenotes/notes/fail-when-vif-port-id-is-missing-7640669f9d9e705d.yaml releasenotes/notes/fake-noop-bebc43983eb801d1.yaml releasenotes/notes/fake_soft_power-32683a848a989fc2.yaml @@ -1173,6 +1185,7 @@ releasenotes/notes/fix-disk-identifier-overwrite-42b33a5a0f7742d8.yaml releasenotes/notes/fix-do-not-tear-down-nodes-upon-cleaning-failure-a9cda6ae71ed2540.yaml releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml +releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml releasenotes/notes/fix-esp-grub-path-9e5532993dccc07a.yaml releasenotes/notes/fix-fast-track-entry-path-467c20f97aeb2f4b.yaml releasenotes/notes/fix-fields-missing-from-next-url-fd9fddf8e70b65ea.yaml @@ -1369,6 +1382,7 @@ releasenotes/notes/net-names-b8a36aa30659ce2f.yaml releasenotes/notes/network-flat-use-node-uuid-for-binding-hostid-afb43097e7204b99.yaml releasenotes/notes/neutron-port-timeout-cbd82e1d09c6a46c.yaml +releasenotes/notes/neutron-port-update-598183909d44396c.yaml releasenotes/notes/new_capabilities-5241619c4b46a460.yaml releasenotes/notes/newton-driver-deprecations-e40369be37203057.yaml releasenotes/notes/next-link-for-instance-uuid-f46eafe5b575f3de.yaml @@ -1393,6 +1407,8 @@ releasenotes/notes/node-credentials-cleaning-b1903f49ffeba029.yaml releasenotes/notes/node-deletion-update-resources-53862e48ab658f77.yaml releasenotes/notes/node-fault-8c59c0ecb94ba562.yaml +releasenotes/notes/node-in-maintenance-fail-afd0eace24fa28be.yaml +releasenotes/notes/node-lessee-4fb320a597192742.yaml releasenotes/notes/node-name-remove-720aa8007f2f8b75.yaml releasenotes/notes/node-owner-policy-d7168976bba70566.yaml releasenotes/notes/node-owner-policy-ports-1d3193fd897feaa6.yaml @@ -1421,7 +1437,9 @@ releasenotes/notes/opentack-baremetal-request-id-daa72b785eaaaa8d.yaml releasenotes/notes/optional-redfish-system-id-3f6e8b0ac989cb9b.yaml releasenotes/notes/orphan-nodes-389cb6d90c2917ec.yaml +releasenotes/notes/oslo-i18n-optional-76bab4d2697c6f94.yaml releasenotes/notes/oslo-proxy-headers-middleware-22188a2976f8f460.yaml +releasenotes/notes/oslo-reports-optional-59469955eaffdf1d.yaml 
releasenotes/notes/oslopolicy-scripts-bdcaeaf7dd9ce2ac.yaml releasenotes/notes/osprofiler-61a330800abe4ee6.yaml releasenotes/notes/parallel-erasure-1943da9b53a2095d.yaml @@ -1434,6 +1452,7 @@ releasenotes/notes/pin-api-version-029748f7d3be68d1.yaml releasenotes/notes/port-0-is-valid-d7188af3be6f3ecb.yaml releasenotes/notes/port-list-bad-request-078512862c22118e.yaml +releasenotes/notes/port-local-link-connection-network-type-71103d919e27fc5d.yaml releasenotes/notes/port-physical-network-a7009dc514353796.yaml releasenotes/notes/port_delete-6628b736a1b556f6.yaml releasenotes/notes/portgroup-crud-notifications-91204635528972b2.yaml @@ -1451,6 +1470,7 @@ releasenotes/notes/queens-prelude-61fb897e96ed64c5.yaml releasenotes/notes/radosgw-temp-url-b04aac50698b4461.yaml releasenotes/notes/raid-dell-boss-e9c5da9ddceedd67.yaml +releasenotes/notes/raid-hints-c27097ded0137f7c.yaml releasenotes/notes/raid-to-support-jbod-568f88207b9216e2.yaml releasenotes/notes/raise-bad-request-exception-on-validating-inspection-failure-57d7fd2999cf4ecf.yaml releasenotes/notes/ramdisk-boot-fails-4e8286e6a4e0dfb6.yaml @@ -1565,6 +1585,7 @@ releasenotes/notes/streaming-partition-images-d58fe619658b066e.yaml releasenotes/notes/sum-based-update-firmware-manual-clean-step-e69ade488060cf27.yaml releasenotes/notes/support-root-device-hints-with-operators-96cf34fa37b5b2e8.yaml +releasenotes/notes/support_to_hash_rescue_password-0915927e41e6d845.yaml releasenotes/notes/tempest_plugin_removal-009f9ce8456b16fe.yaml releasenotes/notes/train-release-59ff1643ec92c10a.yaml releasenotes/notes/transmit-all-ports-b570009d1a008067.yaml diff -Nru ironic-14.0.1~git2020032415.de2d907fc/lower-constraints.txt ironic-14.0.1~git2020041013.af9e6ba90/lower-constraints.txt --- ironic-14.0.1~git2020032415.de2d907fc/lower-constraints.txt 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/lower-constraints.txt 2020-04-10 17:06:41.000000000 +0000 @@ -10,7 +10,7 @@ fixtures==3.0.0 flake8-import-order==0.13 futurist==1.2.0 -hacking==1.0.0 +hacking==3.0.0 ironic-lib==2.17.1 iso8601==0.1.11 Jinja2==2.10 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/PKG-INFO ironic-14.0.1~git2020041013.af9e6ba90/PKG-INFO --- ironic-14.0.1~git2020032415.de2d907fc/PKG-INFO 2020-03-24 19:12:21.280131300 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/PKG-INFO 2020-04-10 17:06:46.382400500 +0000 @@ -1,6 +1,6 @@ -Metadata-Version: 1.1 +Metadata-Version: 2.1 Name: ironic -Version: 14.0.1.dev87 +Version: 14.0.1.dev163 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack @@ -55,6 +55,11 @@ Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Requires-Python: >=3.6 +Provides-Extra: guru_meditation_reports +Provides-Extra: i18n +Provides-Extra: test diff -Nru ironic-14.0.1~git2020032415.de2d907fc/playbooks/legacy/grenade-dsvm-ironic/run.yaml ironic-14.0.1~git2020041013.af9e6ba90/playbooks/legacy/grenade-dsvm-ironic/run.yaml --- ironic-14.0.1~git2020032415.de2d907fc/playbooks/legacy/grenade-dsvm-ironic/run.yaml 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/playbooks/legacy/grenade-dsvm-ironic/run.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -97,6 +97,8 @@ export 
DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=7" + export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_REQUIRE_AGENT_TOKEN=False" + # Ensure the ironic-vars-EARLY file exists touch ironic-vars-early # Pull in the EARLY variables injected by the optional builders diff -Nru ironic-14.0.1~git2020032415.de2d907fc/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml ironic-14.0.1~git2020041013.af9e6ba90/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml --- ironic-14.0.1~git2020032415.de2d907fc/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -151,6 +151,8 @@ export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=7" + export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_REQUIRE_AGENT_TOKEN=False" + # Ensure the ironic-vars-EARLY file exists touch ironic-vars-early # Pull in the EARLY variables injected by the optional builders diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/agent-command-status-retry-f9b6f53a823c6b01.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/agent-command-status-retry-f9b6f53a823c6b01.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/agent-command-status-retry-f9b6f53a823c6b01.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/agent-command-status-retry-f9b6f53a823c6b01.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue with the agent client code where checks of the agent + command status had no logic to prevent an intermittent or transient + connection failure from causing the entire operation to fail. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/agent-token-support-0a5b5aa1585dfbb5.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/agent-token-support-0a5b5aa1585dfbb5.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/agent-token-support-0a5b5aa1585dfbb5.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/agent-token-support-0a5b5aa1585dfbb5.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,19 @@ +--- +features: + - | + Adds support for ``agent token``, which serves as a mechanism to secure + the normally unauthenticated API endpoints in ironic that are used in + the mechanics of baremetal provisioning. This feature is optional; however, + operators may require it by changing the + ``[DEFAULT]require_agent_token`` setting to ``True``. +upgrade: + - | + In order to use the new agent token support, the ramdisk settings should + be updated for all nodes in ironic. If token use is required by ironic's + configuration, and the ramdisks have not been updated, then all + deployment, cleaning, and rescue operations will fail until the version of + the ironic-python-agent ramdisk has been updated. +issues: + - | + The ``ansible`` deployment interface does not support use of an + ``agent token`` at this time.
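For illustration only: based on the option named in the agent token note above, a deployment that wants to enforce token use could carry a snippet like the following in its ironic.conf (the option name and value come from the note; the comment is an assumption, not part of this change)::

    [DEFAULT]
    # Reject agent lookups and heartbeats that do not present a valid token.
    require_agent_token = True

The grenade playbooks changed above export ``IRONIC_REQUIRE_AGENT_TOKEN=False``, presumably the devstack-side switch for the same behaviour, since the pre-upgrade ramdisk in those jobs cannot present a token.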
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/continue-node-deploy-state-63d9dc9cdcf8e37a.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/continue-node-deploy-state-63d9dc9cdcf8e37a.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/continue-node-deploy-state-63d9dc9cdcf8e37a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/continue-node-deploy-state-63d9dc9cdcf8e37a.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,9 @@ +--- +deprecations: + - | + Some deploy interfaces use the ``continue_node_deploy`` RPC call to notify + the conductor when they're ready to leave the ``deploy`` core deploy step. + Currently ironic allows a node to be in either the ``wait call-back`` or + ``deploying`` state when entering this call. This is deprecated, and in + the next release a node will have to be in the ``wait call-back`` + (``DEPLOYWAIT``) state for this call. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/decouple-boot-params-2b05806435ad21e5.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/decouple-boot-params-2b05806435ad21e5.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/decouple-boot-params-2b05806435ad21e5.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/decouple-boot-params-2b05806435ad21e5.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,10 @@ +--- +fixes: + - | + Improves interoperability with Redfish BMCs by untying node boot + mode changes from other boot parameter changes (such as boot device + or boot frequency). +upgrade: + - | + The required minimum version of the ``sushy`` python Redfish API client + library is now version ``3.2.0``. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/deploy-step-error-d343e8cb7d1b2305.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/deploy-step-error-d343e8cb7d1b2305.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/deploy-step-error-d343e8cb7d1b2305.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/deploy-step-error-d343e8cb7d1b2305.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes vague node ``last_error`` field reporting upon deploy step + failure by providing the exception error message in addition + to the step that failed. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/dhcpv6-stateful-address-count-0f94ac6a55bd9e51.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/dhcpv6-stateful-address-count-0f94ac6a55bd9e51.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/dhcpv6-stateful-address-count-0f94ac6a55bd9e51.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/dhcpv6-stateful-address-count-0f94ac6a55bd9e51.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,14 @@ +--- +features: + - | + For baremetal operations on DHCPv6-stateful networks, multiple IPv6 + addresses can now be allocated for neutron ports created for provisioning, + cleaning, rescue, or inspection. The new parameter + ``[neutron]/dhcpv6_stateful_address_count`` controls the number of addresses + to allocate (default: 4). +fixes: + - | + The 'no address available' problem seen when network booting on + DHCPv6-stateful networks is fixed with the support for allocating multiple + IPv6 addresses. See `bug: 1861032 + `_.
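As a sketch of how the new knob from the DHCPv6 note above would be tuned (section and option name exactly as given in the note; the value shown is simply the documented default)::

    [neutron]
    # Number of IPv6 addresses to allocate for each neutron port created
    # for provisioning, cleaning, rescue or inspection.
    dhcpv6_stateful_address_count = 4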
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/extends-install-bootloader-timeout-8fce9590bf405cdf.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/extends-install-bootloader-timeout-8fce9590bf405cdf.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/extends-install-bootloader-timeout-8fce9590bf405cdf.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/extends-install-bootloader-timeout-8fce9590bf405cdf.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixes an agent command issue in the bootloader installation process that + can present itself as a connection timeout under heavy IO load conditions. + Installation commands now have an internal timeout that is double the + conductor-wide ``[agent]command_timeout``. For more information, see + bug `2007483 `_. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,22 @@ +fixes: + - | + Certain RAID controllers (PERC H730P) require physical disks + to be switched from non-RAID (JBOD) mode to RAID mode to be + included in a virtual disk. When this conversion happens, + the available free space on the physical disk is reduced due + to some space being allocated to RAID mode housekeeping. + If the user requests a virtual disk (a RAID 1 for example) + with a size close to the max size of the physical disks when + they are in JBOD mode, then creation of the virtual disk + following conversion of the physical disks from JBOD to RAID + mode will fail since there is not enough space due to the + space used by RAID mode housekeeping. + This patch works around this issue by recalculating the RAID + volume size after physical disk conversion has completed and + the free space on the converted drives is known. Note that + this may result in a virtual disk that is slightly smaller + than the requested size, but still the max size that the + drives can support. + See + `bug 2007359 `_ + for more details. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/neutron-port-update-598183909d44396c.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/neutron-port-update-598183909d44396c.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/neutron-port-update-598183909d44396c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/neutron-port-update-598183909d44396c.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Changes neutron port updates to use auth values from Ironic's neutron + conf, preventing issues that can arise when a non-admin user manages + Ironic nodes. A check is added to the port update function to verify that + the user can actually see the port. This adds an additional Neutron + request call to all port updates.
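For context on the neutron port update note above: the auth values in question live in ironic's ``[neutron]`` configuration section, which accepts the standard keystoneauth options. A hedged illustration only; every value below is a placeholder and none of it is taken from this change::

    [neutron]
    auth_type = password
    auth_url = http://controller:5000/v3
    username = ironic
    password = IRONIC_SERVICE_PASSWORD
    project_name = service
    user_domain_name = Default
    project_domain_name = Default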
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/node-in-maintenance-fail-afd0eace24fa28be.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/node-in-maintenance-fail-afd0eace24fa28be.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/node-in-maintenance-fail-afd0eace24fa28be.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/node-in-maintenance-fail-afd0eace24fa28be.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + If a node is mid-deployment or mid-cleaning and its conductor dies, ironic + will move that node into a failed state. However, this was not being done + if the node was also in maintenance. This has been fixed. See + `story 2007098 `_ for + more details. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/node-lessee-4fb320a597192742.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/node-lessee-4fb320a597192742.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/node-lessee-4fb320a597192742.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/node-lessee-4fb320a597192742.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,5 @@ +--- +features: + - | + Adds a ``lessee`` field to nodes. This field is exposed to policy, so if + a policy file permits, a lessee will have access to specified node APIs. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/oslo-i18n-optional-76bab4d2697c6f94.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/oslo-i18n-optional-76bab4d2697c6f94.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/oslo-i18n-optional-76bab4d2697c6f94.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/oslo-i18n-optional-76bab4d2697c6f94.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The dependency on ``oslo.i18n`` is now optional. If you would like messages + from ironic to be translated, you need to install it explicitly. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/oslo-reports-optional-59469955eaffdf1d.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/oslo-reports-optional-59469955eaffdf1d.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/oslo-reports-optional-59469955eaffdf1d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/oslo-reports-optional-59469955eaffdf1d.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The guru meditation reporting functionality is now optional and the + ``oslo.reports`` package is no longer part of the requirements. Install it + manually if you need this feature.
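Assuming a pip-based installation, operators who still want translations or guru meditation reports after this change can pull the now-optional dependencies back in through the ``[extras]`` section added to ``setup.cfg`` later in this diff (illustrative command, not part of the change itself)::

    pip install "ironic[i18n,guru_meditation_reports]"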
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/port-local-link-connection-network-type-71103d919e27fc5d.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/port-local-link-connection-network-type-71103d919e27fc5d.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/port-local-link-connection-network-type-71103d919e27fc5d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/port-local-link-connection-network-type-71103d919e27fc5d.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,7 @@ +--- +features: + - | + To allow use of the ``neutron`` network interface in combination with + ``flat`` provider networks where no actual switch management is done, the + ``local_link_connection`` field on ports is extended to support the + ``network_type`` field. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/raid-hints-c27097ded0137f7c.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/raid-hints-c27097ded0137f7c.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/raid-hints-c27097ded0137f7c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/raid-hints-c27097ded0137f7c.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,7 @@ +--- +features: + - | + Target devices for software RAID can now be specified in the form of + device hints (same as for root devices) in the ``physical_disks`` + parameter of a logical disk configuration. This requires + ironic-python-agent from the Ussuri release series. diff -Nru ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/support_to_hash_rescue_password-0915927e41e6d845.yaml ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/support_to_hash_rescue_password-0915927e41e6d845.yaml --- ironic-14.0.1~git2020032415.de2d907fc/releasenotes/notes/support_to_hash_rescue_password-0915927e41e6d845.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/releasenotes/notes/support_to_hash_rescue_password-0915927e41e6d845.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -0,0 +1,23 @@ +--- +features: + - | + Passwords for ``rescue`` operation are now hashed for + transmission to the ``ironic-python-agent``. This functionality + requires ``ironic-python-agent`` version ``6.0.0``. + + The setting ``[conductor]rescue_password_hash_algorithm`` + now defaults to ``sha256``, and may be set to + either ``sha256`` or ``sha512``. +upgrade: + - | + The version of ``ironic-python-agent`` should be upgraded to + at least version ``6.0.0`` for rescue passwords to be hashed + for transmission. +security: + - | + Operators wishing to enforce hashing of all rescue passwords + should set the ``[conductor]require_rescue_password_hashed`` + option to ``True``. + + This setting will be changed to a default of ``True`` in the + Victoria development cycle.
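Tying the rescue password notes above together, a conductor that opts into the strictest behaviour might carry a snippet like this (option names and permitted values are from the notes; picking ``sha512`` here is an arbitrary example)::

    [conductor]
    # Hash rescue passwords with SHA-512 before sending them to the agent.
    rescue_password_hash_algorithm = sha512
    # Refuse to send unhashed rescue passwords at all.
    require_rescue_password_hashed = True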
diff -Nru ironic-14.0.1~git2020032415.de2d907fc/requirements.txt ironic-14.0.1~git2020041013.af9e6ba90/requirements.txt --- ironic-14.0.1~git2020032415.de2d907fc/requirements.txt 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/requirements.txt 2020-04-10 17:06:41.000000000 +0000 @@ -21,11 +21,9 @@ oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.40.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 -oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 -oslo.reports>=1.18.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/setup.cfg ironic-14.0.1~git2020041013.af9e6ba90/setup.cfg --- ironic-14.0.1~git2020032415.de2d907fc/setup.cfg 2020-03-24 19:12:21.284131500 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/setup.cfg 2020-04-10 17:06:46.382400500 +0000 @@ -6,6 +6,7 @@ author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/ironic/latest/ +python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology @@ -13,6 +14,7 @@ License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python + Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 @@ -174,6 +176,9 @@ mapping_file = babel.cfg output_file = ironic/locale/ironic.pot -[wheel] -universal = 1 +[extras] +guru_meditation_reports = + oslo.reports>=1.18.0 # Apache-2.0 +i18n = + oslo.i18n>=3.15.3 # Apache-2.0 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/setup.py ironic-14.0.1~git2020041013.af9e6ba90/setup.py --- ironic-14.0.1~git2020032415.de2d907fc/setup.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/setup.py 2020-04-10 17:06:41.000000000 +0000 @@ -16,14 +16,6 @@ # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) diff -Nru ironic-14.0.1~git2020032415.de2d907fc/test-requirements.txt ironic-14.0.1~git2020041013.af9e6ba90/test-requirements.txt --- ironic-14.0.1~git2020032415.de2d907fc/test-requirements.txt 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/test-requirements.txt 2020-04-10 17:06:41.000000000 +0000 @@ -1,7 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
-hacking>=1.0.0,<1.1.0 # Apache-2.0 +hacking>=3.0.0,<3.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 ddt>=1.0.1 # MIT doc8>=0.6.0 # Apache-2.0 diff -Nru ironic-14.0.1~git2020032415.de2d907fc/tools/check-releasenotes.py ironic-14.0.1~git2020041013.af9e6ba90/tools/check-releasenotes.py --- ironic-14.0.1~git2020032415.de2d907fc/tools/check-releasenotes.py 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/tools/check-releasenotes.py 2020-04-10 17:06:41.000000000 +0000 @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import os import re import sys diff -Nru ironic-14.0.1~git2020032415.de2d907fc/tox.ini ironic-14.0.1~git2020041013.af9e6ba90/tox.ini --- ironic-14.0.1~git2020032415.de2d907fc/tox.ini 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/tox.ini 2020-04-10 17:06:41.000000000 +0000 @@ -22,7 +22,7 @@ stestr run {posargs} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY -[testenv:unit-with-driver-libs-python3] +[testenv:unit-with-driver-libs] deps = {[testenv]deps} -r{toxinidir}/driver-requirements.txt @@ -111,27 +111,29 @@ [flake8] # [W503] Line break before binary operator. -# NOTE(TheJulia): Adding W606 to the ignore list -# until we are able to remove them due to deprecation -# period passing. Patch merged in March 2018 to rename -# method arguments from reserved keywords. -ignore = E129,W503,W606 +ignore = E129,W503 filename = *.py,app.wsgi exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build import-order-style = pep8 application-import-names = ironic -max-complexity=18 +max-complexity=20 # [H106] Don't put vim configuration in source files. # [H203] Use assertIs(Not)None to check for None. # [H204] Use assert(Not)Equal to check for equality. # [H205] Use assert(Greater|Less)(Equal) for comparison. +# TODO(dtantsur): [H210] Require ‘autospec’, ‘spec’, or ‘spec_set’ in mock.patch/mock.patch.object calls # [H904] Delay string interpolations at logging calls. enable-extensions=H106,H203,H204,H205,H904 [hacking] -local-check-factory = ironic.hacking.checks.factory import_exceptions = testtools.matchers, ironic.common.i18n +[flake8:local-plugins] # [N323] Found use of _() without explicit import of _! extension = + N323 = checks:check_explicit_underscore_import +paths = ./ironic/hacking/ + [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt diff -Nru ironic-14.0.1~git2020032415.de2d907fc/zuul.d/ironic-jobs.yaml ironic-14.0.1~git2020041013.af9e6ba90/zuul.d/ironic-jobs.yaml --- ironic-14.0.1~git2020032415.de2d907fc/zuul.d/ironic-jobs.yaml 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/zuul.d/ironic-jobs.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -39,13 +39,12 @@ IRONIC_CALLBACK_TIMEOUT: 600 IRONIC_DEPLOY_DRIVER: ipmi IRONIC_INSPECTOR_BUILD_RAMDISK: False - IRONIC_RAMDISK_TYPE: tinyipa IRONIC_TEMPEST_BUILD_TIMEOUT: 720 IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False IRONIC_VM_COUNT: 1 IRONIC_VM_EPHEMERAL_DISK: 1 + IRONIC_VM_SPECS_RAM: 2048 IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs' - IRONIC_VM_SPECS_RAM: 384 # NOTE(dtantsur): in some jobs we end up with 12 disks total, so reduce # each of them. We don't need all 10 GiB for CirrOS anyway.
IRONIC_VM_SPECS_DISK: 4 @@ -59,6 +58,7 @@ ironic: https://opendev.org/openstack/ironic zuul_copy_output: '{{ devstack_base_dir }}/ironic-bm-logs': 'logs' + '{{ devstack_base_dir }}/data/networking-generic-switch/netmiko_session.log': 'logs' devstack_services: q-agt: false q-dhcp: false @@ -110,7 +110,9 @@ IRONIC_DEFAULT_RESCUE_INTERFACE: agent IRONIC_ENABLED_DEPLOY_INTERFACES: "iscsi,direct,ansible" IRONIC_ENABLED_RESCUE_INTERFACES: "fake,agent,no-rescue" + IRONIC_RAMDISK_TYPE: tinyipa IRONIC_RPC_TRANSPORT: json-rpc + IRONIC_VM_SPECS_RAM: 384 IRONIC_VM_COUNT: 6 IRONIC_VM_VOLUME_COUNT: 2 # We're using a lot of disk space in this job. Some testing nodes have @@ -158,7 +160,6 @@ devstack_localrc: IRONIC_BOOT_MODE: uefi IRONIC_ENABLED_BOOT_INTERFACES: redfish-virtual-media - IRONIC_VM_SPECS_RAM: 512 SWIFT_ENABLE_TEMPURLS: True SWIFT_TEMPURL_KEY: secretkey IRONIC_AUTOMATED_CLEAN_ENABLED: False @@ -201,11 +202,13 @@ mysql: False postgresql: True +# NOTE(rpittau): converted job but not running for now as there +# could be an issue with the lookup in ironic-python-agent - job: - name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa - description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa + name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool + description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool parent: ironic-base - timeout: 5400 + timeout: 9600 vars: devstack_localrc: IRONIC_DEFAULT_DEPLOY_INTERFACE: direct @@ -213,6 +216,7 @@ IRONIC_ENABLED_RESCUE_INTERFACES: "fake,agent,no-rescue" IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True IRONIC_VM_EPHEMERAL_DISK: 0 + IRONIC_VM_SPECS_RAM: 3096 SWIFT_ENABLE_TEMPURLS: True SWIFT_TEMPURL_KEY: secretkey devstack_services: @@ -222,8 +226,8 @@ s-proxy: True - job: - name: ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa - description: ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa + name: ironic-tempest-ipa-wholedisk-bios-pxe_snmp + description: ironic-tempest-ipa-wholedisk-bios-pxe_snmp parent: ironic-base timeout: 5400 vars: @@ -235,19 +239,19 @@ IRONIC_AUTOMATED_CLEAN_ENABLED: False - job: - name: ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa - description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa + name: ironic-tempest-ipa-partition-uefi-pxe_ipmitool + description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool parent: ironic-base timeout: 5400 vars: devstack_localrc: IRONIC_BOOT_MODE: uefi - IRONIC_VM_SPECS_RAM: 512 + IRONIC_VM_SPECS_RAM: 3096 IRONIC_AUTOMATED_CLEAN_ENABLED: False - job: - name: ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa - description: ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa + name: ironic-tempest-ipa-partition-pxe_ipmitool + description: ironic-tempest-ipa-partition-pxe_ipmitool parent: ironic-base timeout: 5400 @@ -302,9 +306,9 @@ s-proxy: True - job: - name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect - description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect - parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa + name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect + description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect + parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool timeout: 5400 vars: devstack_localrc: @@ -314,9 +318,9 @@ IRONIC_ENABLED_RESCUE_INTERFACES: "fake,no-rescue" - job: - name: ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect - description: ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect - parent: 
ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa + name: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect + description: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect + parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool timeout: 5400 vars: devstack_localrc: @@ -455,6 +459,7 @@ networking-generic-switch: https://opendev.org/openstack/networking-generic-switch zuul_copy_output: '{{ devstack_base_dir }}/ironic-bm-logs': 'logs' + '{{ devstack_base_dir }}/data/networking-generic-switch/netmiko_session.log': 'logs' devstack_services: c-api: False c-bak: False @@ -527,12 +532,12 @@ n-cpu: True - job: - name: ironic-tox-unit-with-driver-libs-python3 + name: ironic-tox-unit-with-driver-libs parent: tox description: | Run python 3 unit tests with driver dependencies installed. vars: - tox_envlist: unit-with-driver-libs-python3 + tox_envlist: unit-with-driver-libs - job: name: ironic-inspector-tempest-discovery-fast-track @@ -553,7 +558,6 @@ devstack_localrc: IRONIC_ENABLED_HARDWARE_TYPES: ipmi IRONIC_ENABLED_BOOT_INTERFACES: pxe - IRONIC_VM_SPECS_RAM: 512 IRONIC_IPXE_ENABLED: False IRONIC_BOOT_MODE: uefi IRONIC_AUTOMATED_CLEAN_ENABLED: False @@ -593,13 +597,38 @@ s-proxy: True devstack_localrc: IRONIC_DEFAULT_DEPLOY_INTERFACE: direct - IRONIC_RAMDISK_TYPE: dib IRONIC_DIB_RAMDISK_OS: centos8 IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True IRONIC_TEMPEST_BUILD_TIMEOUT: 900 IRONIC_VM_EPHEMERAL_DISK: 0 IRONIC_VM_INTERFACE_COUNT: 1 - IRONIC_VM_SPECS_RAM: 2048 IRONIC_AUTOMATED_CLEAN_ENABLED: False SWIFT_ENABLE_TEMPURLS: True SWIFT_TEMPURL_KEY: secretkey + +# NOTE(rpittau): OLD TINYIPA JOBS +# Those jobs are used by other projects, we leave them here until +# we can convert them to dib. + +# Used by devstack/ironic/nova/neutron +- job: + name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa + description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa + parent: ironic-base + timeout: 5400 + vars: + devstack_localrc: + IRONIC_DEFAULT_DEPLOY_INTERFACE: direct + IRONIC_DEFAULT_RESCUE_INTERFACE: agent + IRONIC_ENABLED_RESCUE_INTERFACES: "fake,agent,no-rescue" + IRONIC_RAMDISK_TYPE: tinyipa + IRONIC_VM_SPECS_RAM: 384 + IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True + IRONIC_VM_EPHEMERAL_DISK: 0 + SWIFT_ENABLE_TEMPURLS: True + SWIFT_TEMPURL_KEY: secretkey + devstack_services: + s-account: True + s-container: True + s-object: True + s-proxy: True diff -Nru ironic-14.0.1~git2020032415.de2d907fc/zuul.d/project.yaml ironic-14.0.1~git2020041013.af9e6ba90/zuul.d/project.yaml --- ironic-14.0.1~git2020032415.de2d907fc/zuul.d/project.yaml 2020-03-24 19:12:17.000000000 +0000 +++ ironic-14.0.1~git2020041013.af9e6ba90/zuul.d/project.yaml 2020-04-10 17:06:41.000000000 +0000 @@ -9,7 +9,7 @@ - release-notes-jobs-python3 check: jobs: - - ironic-tox-unit-with-driver-libs-python3 + - ironic-tox-unit-with-driver-libs - ironic-standalone - ironic-tempest-functional-python3 - ironic-grenade-dsvm @@ -18,20 +18,19 @@ voting: false - ironic-tempest-partition-bios-redfish-pxe - ironic-tempest-partition-uefi-redfish-vmedia - - ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa - - ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa - - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode: - voting: false + - ironic-tempest-ipa-partition-pxe_ipmitool + - ironic-tempest-ipa-partition-uefi-pxe_ipmitool + - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa - - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect - - 
ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect + - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect + - ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect - ironic-tempest-bfv - ironic-tempest-ipa-partition-uefi-pxe-grub2 - metalsmith-integration-glance-localboot-centos7 # Non-voting jobs - ironic-tox-bandit: voting: false - - ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa: + - ironic-tempest-ipa-wholedisk-bios-pxe_snmp: voting: false - ironic-inspector-tempest: voting: false @@ -48,7 +47,7 @@ gate: queue: ironic jobs: - - ironic-tox-unit-with-driver-libs-python3 + - ironic-tox-unit-with-driver-libs - ironic-standalone - ironic-tempest-functional-python3 - ironic-grenade-dsvm @@ -56,13 +55,12 @@ # - ironic-grenade-dsvm-multinode-multitenant - ironic-tempest-partition-bios-redfish-pxe - ironic-tempest-partition-uefi-redfish-vmedia - - ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa - - ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa - # removing from voting due to end of cycle gate instability. - # - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode + - ironic-tempest-ipa-partition-pxe_ipmitool + - ironic-tempest-ipa-partition-uefi-pxe_ipmitool + - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa - - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect - - ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect + - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect + - ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect - ironic-tempest-bfv - ironic-tempest-ipa-partition-uefi-pxe-grub2 - metalsmith-integration-glance-localboot-centos7