diff -Nru ironic-12.0.0/api-ref/regenerate-samples.sh ironic-12.1.0/api-ref/regenerate-samples.sh --- ironic-12.0.0/api-ref/regenerate-samples.sh 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/regenerate-samples.sh 2019-03-21 20:07:40.000000000 +0000 @@ -11,7 +11,7 @@ OS_AUTH_TOKEN=$(openstack token issue | grep ' id ' | awk '{print $4}') IRONIC_URL="http://127.0.0.1:6385" -IRONIC_API_VERSION="1.37" +IRONIC_API_VERSION="1.55" export OS_AUTH_TOKEN IRONIC_URL @@ -27,6 +27,8 @@ DOC_CREATED_AT="2016-08-18T22:28:48.643434+11:11" DOC_UPDATED_AT="2016-08-18T22:28:49.653974+00:00" DOC_IRONIC_CONDUCTOR_HOSTNAME="897ab1dad809" +DOC_ALLOCATION_UUID="3bf138ba-6d71-44e7-b6a1-ca9cac17103e" +DOC_DEPLOY_TEMPLATE_UUID="bbb45f41-d4bc-4307-8d1d-32f95ce1e920" function GET { # GET $RESOURCE @@ -167,14 +169,38 @@ GET v1/nodes/detail > nodes-list-details-response.json GET v1/nodes/$NID > node-show-response.json -# Put the Node in maintenance mode, then continue doing everything else -PUT v1/nodes/$NID/maintenance node-maintenance-request.json - # Node traits PUT v1/nodes/$NID/traits node-set-traits-request.json GET v1/nodes/$NID/traits > node-traits-list-response.json ############ +# ALLOCATIONS + +POST v1/allocations allocation-create-request.json > allocation-create-response.json +AID=$(cat allocation-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') +if [ "$AID" == "" ]; then + exit 1 +else + echo "Allocation created. UUID: $AID" +fi + +# Create a failed allocation for listing +POST v1/allocations allocation-create-request-2.json + +# Poor man's wait_for_allocation +sleep 1 + +GET v1/allocations > allocations-list-response.json +GET v1/allocations/$AID > allocation-show-response.json +GET v1/nodes/$NID/allocation > node-allocation-show-response.json + +############ +# NODES - MAINTENANCE + +# Do this after allocation API to be able to create successful allocations +PUT v1/nodes/$NID/maintenance node-maintenance-request.json + +############ # PORTGROUPS # Before we can create a portgroup, we must @@ -301,6 +327,21 @@ GET v1/nodes/$NID/volume/targets > node-volume-target-list-response.json GET v1/nodes/$NID/volume/targets?detail=True > node-volume-target-detail-response.json +################## +# DEPLOY TEMPLATES + +POST v1/deploy_templates deploy-template-create-request.json > deploy-template-create-response.json +DTID=$(cat deploy-template-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') +if [ "$DTID" == "" ]; then + exit 1 +else + echo "Deploy template created. 
UUID: $DTID" +fi + +GET v1/deploy_templates > deploy-template-list-response.json +GET v1/deploy_templates?detail=True > deploy-template-detail-response.json +GET v1/deploy_templates/$DTID > deploy-template-show-response.json +PATCH v1/deploy_templates/$DTID deploy-template-update-request.json > deploy-template-update-response.json ##################### # Replace automatically generated UUIDs by the ones already used in documentation @@ -312,6 +353,8 @@ sed -i "s/$PGID/$DOC_PORTGROUP_UUID/" *.json sed -i "s/$VCID/$DOC_VOL_CONNECTOR_UUID/" *.json sed -i "s/$VTID/$DOC_VOL_TARGET_UUID/" *.json +sed -i "s/$AID/$DOC_ALLOCATION_UUID/" *.json +sed -i "s/$DTID/$DOC_DEPLOY_TEMPLATE_UUID/" *.json sed -i "s/$(hostname)/$DOC_IRONIC_CONDUCTOR_HOSTNAME/" *.json sed -i "s/created_at\": \".*\"/created_at\": \"$DOC_CREATED_AT\"/" *.json sed -i "s/updated_at\": \".*\"/updated_at\": \"$DOC_UPDATED_AT\"/" *.json diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-allocation.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-allocation.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-allocation.inc 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-allocation.inc 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,210 @@ +.. -*- rst -*- + +========================= +Allocations (allocations) +========================= + +The Allocation resource represents a request to find and allocate a Node for +deployment. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Create Allocation +================= + +.. rest_method:: POST /v1/allocations + +Creates an allocation. + +A Node can be requested by its resource class and traits. Additionally, Nodes +can be pre-filtered on the client side, and the resulting list of UUIDs and/or +names can be submitted as ``candidate_nodes``. Otherwise, all nodes are +considered. + +A Node is suitable for an Allocation if all of the following hold: + +* ``provision_state`` is ``available`` +* ``power_state`` is not ``null`` +* ``maintenance`` is ``false`` +* ``instance_uuid`` is ``null`` +* ``resource_class`` matches the requested one +* ``traits`` list contains all of the requested ones + +The allocation process is asynchronous. The new Allocation is returned in +the ``allocating`` state, and the process continues in the background. If it +succeeds, the ``node_uuid`` field is populated with the Node's UUID, and the +Node's ``instance_uuid`` field is set to the Allocation's UUID. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 201 + +Error response codes: 400, 401, 403, 409, 503 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - resource_class: req_allocation_resource_class + - candidate_nodes: req_candidate_nodes + - name: req_allocation_name + - traits: req_allocation_traits + - uuid: req_uuid + - extra: req_extra + +Request Example +--------------- + +.. literalinclude:: samples/allocation-create-request.json + :language: javascript + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - candidate_nodes: candidate_nodes + - last_error: allocation_last_error + - name: allocation_name + - node_uuid: allocation_node + - resource_class: allocation_resource_class + - state: allocation_state + - traits: allocation_traits + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +..
literalinclude:: samples/allocation-create-response.json + :language: javascript + +List Allocations +================ + +.. rest_method:: GET /v1/allocations + +Lists all Allocations. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 200 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - node: r_allocation_node + - resource_class: r_resource_class + - state: r_allocation_state + - fields: fields + - limit: limit + - marker: marker + - sort_dir: sort_dir + - sort_key: sort_key + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - candidate_nodes: candidate_nodes + - last_error: allocation_last_error + - name: allocation_name + - node_uuid: allocation_node + - resource_class: allocation_resource_class + - state: allocation_state + - traits: allocation_traits + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +.. literalinclude:: samples/allocations-list-response.json + :language: javascript + +Show Allocation Details +======================= + +.. rest_method:: GET /v1/allocations/{allocation_id} + +Shows details for an Allocation. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 200 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - fields: fields + - allocation_id: allocation_ident + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - candidate_nodes: candidate_nodes + - last_error: allocation_last_error + - name: allocation_name + - node_uuid: allocation_node + - resource_class: allocation_resource_class + - state: allocation_state + - traits: allocation_traits + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +.. literalinclude:: samples/allocation-show-response.json + :language: javascript + +Delete Allocation +================= + +.. rest_method:: DELETE /v1/allocations/{allocation_id} + +Deletes an Allocation. + +If the Allocation has a Node associated, the Node's ``instance_uuid`` is reset. + +The deletion will fail if the Allocation has a Node assigned and the Node is +``active`` and not in the maintenance mode. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 204 + +Error response codes: 400, 401, 403, 404, 409, 503 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - allocation_id: allocation_ident diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-conductors.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-conductors.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-conductors.inc 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-conductors.inc 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,97 @@ +.. -*- rst -*- + +======================= +Conductors (conductors) +======================= + +.. versionadded:: 1.49 + +Listing Conductor resources is done through the ``conductors`` resource. + +Conductor resources are read-only, they can not be created, updated, or +removed. + + +List Conductors +=============== + +.. rest_method:: GET /v1/conductors + +Return a list of conductors known by the Bare Metal service. + +By default, this query will return the hostname, conductor group, and alive +status for each Conductor. 
When ``detail`` is set to True in the query string, +it will return the full representation of the resource. + +Normal response code: 200 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - fields: fields_for_conductor + - limit: limit + - marker: marker + - sort_dir: sort_dir + - sort_key: sort_key + - detail: detail + +Response +-------- + +.. rest_parameters:: parameters.yaml + + - hostname: hostname + - conductor_group: conductor_group + - alive: alive + - drivers: drivers + - links: links + +**Example Conductor list response:** + +.. literalinclude:: samples/conductor-list-response.json + :language: javascript + +**Example detailed Conductor list response:** + +.. literalinclude:: samples/conductor-list-details-response.json + :language: javascript + + +Show Conductor Details +====================== + +.. rest_method:: GET /v1/conductors/{hostname} + +Shows details for a conductor. By default, this will return the full +representation of the resource; an optional ``fields`` parameter can be +supplied to return only the specified set. + +Normal response codes: 200 + +Error codes: 400,403,404,406 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - hostname: hostname_ident + - fields: fields_for_conductor + +Response +-------- + +.. rest_parameters:: parameters.yaml + + - hostname: hostname + - conductor_group: conductor_group + - alive: alive + - drivers: drivers + - links: links + +**Example JSON representation of a Conductor:** + +.. literalinclude:: samples/conductor-show-response.json + :language: javascript diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-deploy-templates.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-deploy-templates.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-deploy-templates.inc 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-deploy-templates.inc 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,222 @@ +.. -*- rst -*- + +=================================== +Deploy Templates (deploy_templates) +=================================== + +The Deploy Template resource represents a collection of Deploy Steps that may +be executed during deployment of a node. A deploy template is matched for a +node if, at the time of deployment, the template's name matches a trait in the +node's ``instance_info.traits``. + +.. versionadded:: 1.55 + Deploy Template API was introduced. + +Create Deploy Template +====================== + +.. rest_method:: POST /v1/deploy_templates + +Creates a deploy template. + +.. versionadded:: 1.55 + Deploy Template API was introduced. + +Normal response codes: 201 + +Error response codes: 400, 401, 403, 409 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - name: deploy_template_name + - steps: deploy_template_steps + - uuid: req_uuid + - extra: req_extra + +Request Example +--------------- + +.. literalinclude:: samples/deploy-template-create-request.json + :language: javascript + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - name: deploy_template_name + - steps: deploy_template_steps + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +.. literalinclude:: samples/deploy-template-create-response.json + :language: javascript + +List Deploy Templates +===================== + +.. rest_method:: GET /v1/deploy_templates + +Lists all deploy templates. + +.. versionadded:: 1.55 + Deploy Template API was introduced.
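For example, a listing request made by hand with curl might look like the following. This is a minimal sketch, assuming the IRONIC_URL and OS_AUTH_TOKEN environment set up by regenerate-samples.sh above; the X-OpenStack-Ironic-API-Version microversion header must be at least 1.55 for the deploy template endpoints to exist:

# List all deploy templates, including their steps, via the detail flag.
curl -s "$IRONIC_URL/v1/deploy_templates?detail=True" \
    -H "X-Auth-Token: $OS_AUTH_TOKEN" \
    -H "X-OpenStack-Ironic-API-Version: 1.55"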
+ +Normal response codes: 200 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - fields: fields + - limit: limit + - marker: marker + - sort_dir: sort_dir + - sort_key: sort_key + - detail: detail + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - name: deploy_template_name + - steps: deploy_template_steps + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +**Example deploy template list response:** + +.. literalinclude:: samples/deploy-template-list-response.json + :language: javascript + +**Example detailed deploy template list response:** + +.. literalinclude:: samples/deploy-template-detail-response.json + :language: javascript + +Show Deploy Template Details +============================ + +.. rest_method:: GET /v1/deploy_templates/{deploy_template_id} + +Shows details for a deploy template. + +.. versionadded:: 1.55 + Deploy Template API was introduced. + +Normal response codes: 200 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - fields: fields + - deploy_template_id: deploy_template_ident + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - name: deploy_template_name + - steps: deploy_template_steps + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +.. literalinclude:: samples/deploy-template-show-response.json + :language: javascript + +Update a Deploy Template +======================== + +.. rest_method:: PATCH /v1/deploy_templates/{deploy_template_id} + +Updates a deploy template. + +.. versionadded:: 1.55 + Deploy Template API was introduced. + +Normal response code: 200 + +Error response codes: 400, 401, 403, 404, 409 + +Request +------- + +The BODY of the PATCH request must be a JSON PATCH document, adhering to +`RFC 6902 <https://tools.ietf.org/html/rfc6902>`_. + +.. rest_parameters:: parameters.yaml + + - deploy_template_id: deploy_template_ident + +.. literalinclude:: samples/deploy-template-update-request.json + :language: javascript + +Response +-------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - name: deploy_template_name + - steps: deploy_template_steps + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +.. literalinclude:: samples/deploy-template-update-response.json + :language: javascript + +Delete Deploy Template +====================== + +.. rest_method:: DELETE /v1/deploy_templates/{deploy_template_id} + +Deletes a deploy template. + +.. versionadded:: 1.55 + Deploy Template API was introduced. + +Normal response codes: 204 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - deploy_template_id: deploy_template_ident diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-drivers.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-drivers.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-drivers.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-drivers.inc 2019-03-21 20:07:40.000000000 +0000 @@ -80,6 +80,7 @@ ..
rest_parameters:: parameters.yaml + + - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface @@ -91,6 +92,7 @@ - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface + - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces @@ -140,6 +142,7 @@ - name: driver_name - hosts: hosts - type: response_driver_type + - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface @@ -151,6 +154,7 @@ - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface + - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-node-allocation.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-node-allocation.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-node-allocation.inc 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-node-allocation.inc 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,81 @@ +.. -*- rst -*- + +==================================== +Node Allocation (allocations, nodes) +==================================== + +Given a Node identifier (``uuid`` or ``name``), the API allows getting and +deleting the associated allocation. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Show Allocation by Node +======================= + +.. rest_method:: GET /v1/nodes/{node_ident}/allocation + +Shows details for an allocation. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 200 + +Error response codes: 400, 401, 403, 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - node_ident: node_ident + - fields: fields + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - uuid: uuid + - candidate_nodes: candidate_nodes + - last_error: allocation_last_error + - name: allocation_name + - node_uuid: allocation_node + - resource_class: allocation_resource_class + - state: allocation_state + - traits: allocation_traits + - extra: extra + - created_at: created_at + - updated_at: updated_at + - links: links + +Response Example +---------------- + +.. literalinclude:: samples/allocation-show-response.json + :language: javascript + +Delete Allocation by Node +========================= + +.. rest_method:: DELETE /v1/nodes/{node_ident}/allocation + +Deletes the allocation of this node and resets its ``instance_uuid``. + +The deletion will fail if the node associated with the allocation is ``active`` and not in +maintenance mode. + +.. versionadded:: 1.52 + Allocation API was introduced. + +Normal response codes: 204 + +Error response codes: 400, 401, 403, 404, 409, 503 + +Request +------- + +..
rest_parameters:: parameters.yaml + + - node_ident: node_ident diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-node-management.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-node-management.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-node-management.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-node-management.inc 2019-03-21 20:07:40.000000000 +0000 @@ -352,6 +352,10 @@ A node can be rescued or unrescued by setting the node's provision target state to ``rescue`` or ``unrescue`` respectively. +.. versionadded:: 1.56 + A ``configdrive`` can be a JSON object with ``meta_data``, ``network_data`` + and ``user_data``. + Normal response code: 202 Error codes: diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-nodes.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-nodes.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-nodes.inc 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-nodes.inc 2019-03-21 20:07:40.000000000 +0000 @@ -92,6 +92,15 @@ .. versionadded:: 1.46 Introduced the ``conductor_group`` field. +.. versionadded:: 1.50 + Introduced the ``owner`` field. + +.. versionadded:: 1.51 + Introduced the ``description`` field. + +.. versionadded:: 1.52 + Introduced the ``allocation_uuid`` field. + Normal response codes: 201 Error codes: 400,403,406 @@ -120,6 +129,8 @@ - storage_interface: req_storage_interface - uuid: req_uuid - vendor_interface: req_vendor_interface + - owner: owner + - description: n_description **Example Node creation request with a dynamic driver:** @@ -188,6 +199,10 @@ - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason + - conductor: conductor + - owner: owner + - description: n_description + - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** @@ -235,6 +250,16 @@ Introduced the ``conductor_group`` request parameter, to allow filtering the list of returned nodes by conductor group. +.. versionadded:: 1.49 + Introduced the ``conductor`` request parameter, to allow filtering the + list of returned nodes by conductor. + +.. versionadded:: 1.50 + Introduced the ``owner`` field. + +.. versionadded:: 1.51 + Introduced the ``description`` field. + Normal response codes: 200 Error codes: 400,403,406 @@ -251,7 +276,10 @@ - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group + - conductor: r_conductor - fault: r_fault + - owner: owner + - description_contains: r_description_contains - fields: fields - limit: limit - marker: marker @@ -307,6 +335,18 @@ .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. +.. versionadded:: 1.49 + Introduced the ``conductor`` request parameter and ``conductor`` field. + +.. versionadded:: 1.50 + Introduced the ``owner`` field. + +.. versionadded:: 1.51 + Introduced the ``description`` field. + +.. versionadded:: 1.52 + Introduced the ``allocation_uuid`` field. 
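To illustrate how the new query parameters combine, a curl sketch follows; it assumes the same IRONIC_URL and OS_AUTH_TOKEN environment as regenerate-samples.sh, borrows the filter values compute1.localdomain and john doe from the sample responses, and uses microversion 1.52, which covers every field introduced above:

# Detailed node listing filtered by conductor and owner; each returned node
# also carries the new conductor, owner, description and allocation_uuid fields.
curl -s "$IRONIC_URL/v1/nodes/detail?conductor=compute1.localdomain&owner=john%20doe" \
    -H "X-Auth-Token: $OS_AUTH_TOKEN" \
    -H "X-OpenStack-Ironic-API-Version: 1.52"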
+ Normal response codes: 200 Error codes: 400,403,406 @@ -324,6 +364,9 @@ - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group + - conductor: r_conductor + - owner: owner + - description_contains: r_description_contains - limit: limit - marker: marker - sort_dir: sort_dir @@ -379,6 +422,10 @@ - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason + - owner: owner + - description: n_description + - conductor: conductor + - allocation_uuid: allocation_uuid **Example detailed list of Nodes:** @@ -410,6 +457,18 @@ .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. +.. versionadded:: 1.49 + Introduced the ``conductor`` field + +.. versionadded:: 1.50 + Introduced the ``owner`` field. + +.. versionadded:: 1.51 + Introduced the ``description`` field. + +.. versionadded:: 1.52 + Introduced the ``allocation_uuid`` field. + Normal response codes: 200 Error codes: 400,403,404,406 @@ -472,6 +531,10 @@ - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason + - owner: owner + - description: n_description + - conductor: conductor + - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** @@ -492,6 +555,9 @@ .. versionadded:: 1.25 Introduced the ability to unset a node's chassis UUID. +.. versionadded:: 1.51 + Introduced the ability to set/unset a node's description. + Normal response codes: 200 Error codes: 400,403,404,406,409 @@ -560,6 +626,10 @@ - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason + - owner: owner + - description: n_description + - conductor: conductor + - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-nodes-ports.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-nodes-ports.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-nodes-ports.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-nodes-ports.inc 2019-03-21 20:07:40.000000000 +0000 @@ -32,6 +32,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. + Normal response code: 200 Error codes: TBD @@ -79,6 +82,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. + Normal response code: 200 Error codes: TBD @@ -112,6 +118,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example details of a Node's Ports:** diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-portgroups-ports.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-portgroups-ports.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-portgroups-ports.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-portgroups-ports.inc 2019-03-21 20:07:40.000000000 +0000 @@ -25,6 +25,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. + Normal response code: 200 Error codes: 400,401,403,404 @@ -66,6 +69,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. 
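As a sketch of requesting this detailed listing with curl (same environment assumptions as regenerate-samples.sh; <portgroup> is a placeholder for any portgroup UUID or name, and microversion 1.53 or newer is needed for ``is_smartnic`` to appear in the response):

# Detailed ports of a single portgroup; each port includes the is_smartnic flag.
curl -s "$IRONIC_URL/v1/portgroups/<portgroup>/ports/detail" \
    -H "X-Auth-Token: $OS_AUTH_TOKEN" \
    -H "X-OpenStack-Ironic-API-Version: 1.53"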
+ Normal response code: 200 Error codes: 400,401,403,404 @@ -99,6 +105,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example details of a Portgroup's Ports:** diff -Nru ironic-12.0.0/api-ref/source/baremetal-api-v1-ports.inc ironic-12.1.0/api-ref/source/baremetal-api-v1-ports.inc --- ironic-12.0.0/api-ref/source/baremetal-api-v1-ports.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/baremetal-api-v1-ports.inc 2019-03-21 20:07:40.000000000 +0000 @@ -46,6 +46,9 @@ Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each port. +.. versionadded:: 1.53 + Added the ``is_smartnic`` field. + Normal response code: 200 Request @@ -100,6 +103,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` request and response fields. +.. versionadded:: 1.53 + Added the ``is_smartnic`` request and response fields. + Normal response code: 201 Request @@ -114,6 +120,7 @@ - pxe_enabled: req_pxe_enabled - physical_network: req_physical_network - extra: req_extra + - is_smartnic: req_is_smartnic **Example Port creation request:** @@ -137,6 +144,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example Port creation response:** @@ -165,6 +173,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` response field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. + Normal response code: 200 Request @@ -199,6 +210,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example detailed Port list response:** @@ -227,6 +239,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` response field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` response fields. + Normal response code: 200 Request @@ -254,6 +269,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example Port details:** @@ -277,6 +293,9 @@ .. versionadded:: 1.34 Added the ``physical_network`` field. +.. versionadded:: 1.53 + Added the ``is_smartnic`` fields. + Normal response code: 200 Request @@ -311,6 +330,7 @@ - created_at: created_at - updated_at: updated_at - links: links + - is_smartnic: is_smartnic **Example Port update response:** diff -Nru ironic-12.0.0/api-ref/source/index.rst ironic-12.1.0/api-ref/source/index.rst --- ironic-12.0.0/api-ref/source/index.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/index.rst 2019-03-21 20:07:40.000000000 +0000 @@ -21,7 +21,12 @@ .. include:: baremetal-api-v1-nodes-volume.inc .. include:: baremetal-api-v1-drivers.inc .. include:: baremetal-api-v1-driver-passthru.inc +.. include:: baremetal-api-v1-nodes-bios.inc +.. include:: baremetal-api-v1-conductors.inc +.. include:: baremetal-api-v1-allocation.inc +.. include:: baremetal-api-v1-node-allocation.inc +.. include:: baremetal-api-v1-deploy-templates.inc +.. NOTE(dtantsur): keep chassis close to the end since it's semi-deprecated .. include:: baremetal-api-v1-chassis.inc +.. NOTE(dtantsur): keep misc last, since it covers internal API .. include:: baremetal-api-v1-misc.inc -.. 
include:: baremetal-api-v1-nodes-bios.inc - diff -Nru ironic-12.0.0/api-ref/source/parameters.yaml ironic-12.1.0/api-ref/source/parameters.yaml --- ironic-12.0.0/api-ref/source/parameters.yaml 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/parameters.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -38,6 +38,12 @@ type: string # variables in path +allocation_ident: + description: | + The UUID or name of the allocation. + in: path + required: true + type: string bios_setting: description: | The name of the Bios setting. @@ -50,12 +56,24 @@ in: path required: true type: string +deploy_template_ident: + description: | + The UUID or name of the deploy template. + in: path + required: true + type: string driver_ident: description: | The name of the driver. in: path required: true type: string +hostname_ident: + description: | + The hostname of the conductor. + in: path + required: true + type: string node_id: description: | The UUID of the node. @@ -151,6 +169,19 @@ in: query required: false type: array +fields_for_conductor: + description: | + One or more fields to be returned in the response. + + For example, the following request returns only the ``hostname`` + and ``alive`` fields for each conductor: + + :: + + GET /v1/conductors?fields=hostname,alive + in: query + required: false + type: array limit: description: | Requests a page size of items. Returns a number of items up to a limit @@ -189,7 +220,20 @@ required: false type: array -# variables in the node query string +# variables in the query string +r_allocation_node: + description: | + Filter the list of allocations by the node UUID or name. + in: query + required: false + type: string +r_allocation_state: + description: | + Filter the list of allocations by the allocation state, one of ``active``, + ``allocating`` or ``error``. + in: query + required: false + type: string r_associated: description: | Filter the list of returned nodes and only return those which are, or are @@ -197,6 +241,13 @@ in: query required: false type: boolean +r_conductor: + description: | + Filter the list of returned nodes, and only return those with the + specified ``conductor``. + in: query + required: false + type: string r_conductor_group: description: | Filter the list of returned nodes, and only return those with the @@ -205,6 +256,13 @@ in: query required: false type: string +r_description_contains: + description: | + Filter the list of returned nodes, and only return those containing + substring specified by ``description_contains``. + in: query + requred: false + type: string r_driver: description: | Filter the list of returned nodes, and only return those with the specified @@ -339,7 +397,6 @@ type: string - # variable returned from /lookup agent_config: description: | @@ -357,6 +414,64 @@ type: JSON # variables in the API body +alive: + description: | + The conductor status indicates whether a conductor is considered alive + or not. + in: body + required: true + type: boolean +allocation_last_error: + description: | + The error message for the allocation if it is in the ``error`` state, + ``null`` otherwise. + in: body + required: true + type: string +allocation_name: + description: | + The unique name of the allocation. + in: body + required: true + type: string +allocation_node: + description: | + The UUID of the node assigned to the allocation. Will be ``null`` if a node + is not yet assigned. 
+ in: body + required: true + type: string +allocation_resource_class: + description: | + The resource class requested for the allocation. + in: body + required: true + type: string +allocation_state: + description: | + The current state of the allocation. One of: + + * ``allocating`` - allocation is in progress. + * ``active`` - allocation is finished and ``node_uuid`` is assigned. + * ``error`` - allocation has failed, see ``last_error`` for details. + in: body + required: true + type: string +allocation_traits: + description: | + The list of the traits requested for the allocation. + in: body + required: true + type: array +allocation_uuid: + description: | + The UUID of the allocation associated with the node. If not ``null``, will + be the same as ``instance_uuid`` (the opposite is not always true). + Unlike ``instance_uuid``, this field is read-only. Please use the + Allocation API to remove allocations. + in: body + required: true + type: string bios_setting_name: description: | The name of a Bios setting for a Node, eg. "virtualization". @@ -388,6 +503,12 @@ in: body required: true type: string +candidate_nodes: + description: | + A list of UUIDs of the nodes that are candidates for this allocation. + in: body + required: true + type: array chassis: description: | A ``chassis`` object. @@ -415,21 +536,35 @@ in: body required: false type: array +conductor: + description: | + The conductor currently servicing a node. This field is read-only. + in: body + required: false + type: string conductor_group: description: | The conductor group for a node. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: body - required: false + required: true type: string configdrive: description: | - A gzip'ed and base-64 encoded config drive, to be written to a partition - on the Node's boot disk. This parameter is only accepted when setting the - state to "active" or "rebuild". + A config drive to be written to a partition on the Node's boot disk. Can be + a full gzip'ed and base-64 encoded image or a JSON object with the keys: + + * ``meta_data`` (optional) - JSON object with the standard meta data. + Ironic will provide the defaults for the ``uuid`` and ``name`` fields. + * ``network_data`` (optional) - JSON object with networking configuration. + * ``user_data`` (optional) - user data. May be a string (which will be + UTF-8 encoded); a JSON object, or a JSON array. + + This parameter is only accepted when setting the state to "active" or + "rebuild". in: body required: false - type: string or gzip+b64 blob + type: string or object console_enabled: description: | Indicates whether console access is enabled or disabled on this node. @@ -456,6 +591,13 @@ in: body required: true type: dictionary +default_bios_interface: + description: | + The default bios interface used for a node with a dynamic driver, if no + bios interface is specified for the node. + in: body + required: true + type: string default_boot_interface: description: | The default boot interface used for a node with a dynamic driver, if no @@ -545,6 +687,27 @@ in: body required: false type: string +deploy_template_name: + description: | + The unique name of the deploy template. + in: body + required: true + type: string +deploy_template_steps: + description: | + The deploy steps of the deploy template. Must be a list containing at least + one deploy step. + + A deploy step is a dictionary with required keys ``interface``, ``step``, + ``args``, and ``priority``. 
The value for ``interface`` is the name of the + driver interface. The value for ``step`` is the name of the deploy step + method on the driver interface. The value for ``args`` is a dictionary of + arguments that are passed to the deploy step method. The value for + ``priority`` is a non-negative integer priority for the step. A value of + ``0`` for ``priority`` will disable that step. + in: body + required: true + type: array description: description: | Descriptive text about the Ironic service. @@ -582,6 +745,12 @@ in: body required: true type: array +enabled_bios_interfaces: + description: | + The enabled bios interfaces for this driver. + in: body + required: true + type: list enabled_boot_interfaces: description: | The enabled boot interfaces for this driver. @@ -665,6 +834,12 @@ in: body required: false type: string +hostname: + description: | + The hostname of this conductor. + in: body + required: true + type: string hosts: description: | A list of active hosts that support this driver. @@ -720,6 +895,12 @@ in: body required: true type: JSON +is_smartnic: + description: | + Indicates whether the Port is a Smart NIC port. + in: body + required: false + type: boolean last_error: description: | Any error from the most recent (last) transaction that started but failed to finish. @@ -766,6 +947,12 @@ in: body required: true type: string +n_description: + description: | + Informational text about this node. + in: body + required: true + type: string n_portgroups: description: | Links to the collection of portgroups on this node. @@ -856,6 +1043,12 @@ in: body required: true type: array +owner: + description: | + A string or UUID of the tenant who owns the baremetal node. + in: body + required: false + type: string passthru_async: description: | If True the passthru function is invoked asynchronously; if False, @@ -1038,6 +1231,24 @@ in: body required: false type: string +req_allocation_name: + description: | + The unique name of the Allocation. + in: body + required: false + type: string +req_allocation_resource_class: + description: | + The requested resource class for the allocation. + in: body + required: true + type: string +req_allocation_traits: + description: | + The list of requested traits for the allocation. + in: body + required: false + type: array req_boot_device: description: | The boot device for a Node, eg. "pxe" or "disk". @@ -1050,6 +1261,13 @@ in: body required: false type: string +req_candidate_nodes: + description: | + The list of nodes (names or UUIDs) that should be considered for this + allocation. If not provided, all available nodes will be considered. + in: body + required: false + type: array req_chassis: description: | A ``chassis`` object. @@ -1114,6 +1332,12 @@ in: body required: false type: string +req_is_smartnic: + description: | + Indicates whether the Port is a Smart NIC port. + in: body + required: false + type: boolean req_local_link_connection: description: | The Port binding profile.
If specified, must contain ``switch_id`` (only diff -Nru ironic-12.0.0/api-ref/source/samples/allocation-create-request-2.json ironic-12.1.0/api-ref/source/samples/allocation-create-request-2.json --- ironic-12.0.0/api-ref/source/samples/allocation-create-request-2.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/allocation-create-request-2.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +{ + "name": "allocation-2", + "resource_class": "bm-large", + "traits": ["CUSTOM_GOLD"] +} + diff -Nru ironic-12.0.0/api-ref/source/samples/allocation-create-request.json ironic-12.1.0/api-ref/source/samples/allocation-create-request.json --- ironic-12.0.0/api-ref/source/samples/allocation-create-request.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/allocation-create-request.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,4 @@ +{ + "name": "allocation-1", + "resource_class": "bm-large" +} diff -Nru ironic-12.0.0/api-ref/source/samples/allocation-create-response.json ironic-12.1.0/api-ref/source/samples/allocation-create-response.json --- ironic-12.0.0/api-ref/source/samples/allocation-create-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/allocation-create-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,23 @@ +{ + "candidate_nodes": [], + "created_at": "2019-02-20T09:43:58+00:00", + "extra": {}, + "last_error": null, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "bookmark" + } + ], + "name": "allocation-1", + "node_uuid": null, + "resource_class": "bm-large", + "state": "allocating", + "traits": [], + "updated_at": null, + "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" +} diff -Nru ironic-12.0.0/api-ref/source/samples/allocation-show-response.json ironic-12.1.0/api-ref/source/samples/allocation-show-response.json --- ironic-12.0.0/api-ref/source/samples/allocation-show-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/allocation-show-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,23 @@ +{ + "candidate_nodes": [], + "created_at": "2019-02-20T09:43:58+00:00", + "extra": {}, + "last_error": null, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "bookmark" + } + ], + "name": "allocation-1", + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "resource_class": "bm-large", + "state": "active", + "traits": [], + "updated_at": "2019-02-20T09:43:58+00:00", + "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" +} diff -Nru ironic-12.0.0/api-ref/source/samples/allocations-list-response.json ironic-12.1.0/api-ref/source/samples/allocations-list-response.json --- ironic-12.0.0/api-ref/source/samples/allocations-list-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/allocations-list-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,52 @@ +{ + "allocations": [ + { + "candidate_nodes": [], + "created_at": "2019-02-20T09:43:58+00:00", + "extra": {}, + "last_error": null, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "self" + }, + { + "href": 
"http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", + "rel": "bookmark" + } + ], + "name": "allocation-1", + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "resource_class": "bm-large", + "state": "active", + "traits": [], + "updated_at": "2019-02-20T09:43:58+00:00", + "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" + }, + { + "candidate_nodes": [], + "created_at": "2019-02-20T09:43:58+00:00", + "extra": {}, + "last_error": "Failed to process allocation eff80f47-75f0-4d41-b1aa-cf07c201adac: no available nodes match the resource class bm-large.", + "links": [ + { + "href": "http://127.0.0.1:6385/v1/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", + "rel": "bookmark" + } + ], + "name": "allocation-2", + "node_uuid": null, + "resource_class": "bm-large", + "state": "error", + "traits": [ + "CUSTOM_GOLD" + ], + "updated_at": "2019-02-20T09:43:58+00:00", + "uuid": "eff80f47-75f0-4d41-b1aa-cf07c201adac" + } + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/conductor-list-details-response.json ironic-12.1.0/api-ref/source/samples/conductor-list-details-response.json --- ironic-12.0.0/api-ref/source/samples/conductor-list-details-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/conductor-list-details-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,44 @@ +{ + "conductors": [ + { + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", + "rel": "bookmark" + } + ], + "created_at": "2018-08-07T08:39:21+00:00", + "hostname": "compute1.localdomain", + "conductor_group": "", + "updated_at": "2018-11-30T07:07:23+00:00", + "alive": false, + "drivers": [ + "ipmi" + ] + }, + { + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", + "rel": "bookmark" + } + ], + "created_at": "2018-12-05T07:03:19+00:00", + "hostname": "compute2.localdomain", + "conductor_group": "", + "updated_at": "2018-12-05T07:03:21+00:00", + "alive": true, + "drivers": [ + "ipmi" + ] + } + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/conductor-list-response.json ironic-12.1.0/api-ref/source/samples/conductor-list-response.json --- ironic-12.0.0/api-ref/source/samples/conductor-list-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/conductor-list-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,34 @@ +{ + "conductors": [ + { + "hostname": "compute1.localdomain", + "conductor_group": "", + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", + "rel": "bookmark" + } + ], + "alive": false + }, + { + "hostname": "compute2.localdomain", + "conductor_group": "", + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", + "rel": "bookmark" + } + ], + "alive": true + } + ] +} \ No newline at end of file diff -Nru ironic-12.0.0/api-ref/source/samples/conductor-show-response.json ironic-12.1.0/api-ref/source/samples/conductor-show-response.json --- ironic-12.0.0/api-ref/source/samples/conductor-show-response.json 
1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/conductor-show-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,20 @@ +{ + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", + "rel": "self" + }, + { + "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", + "rel": "bookmark" + } + ], + "created_at": "2018-12-05T07:03:19+00:00", + "hostname": "compute2.localdomain", + "conductor_group": "", + "updated_at": "2018-12-05T07:03:21+00:00", + "alive": true, + "drivers": [ + "ipmi" + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-create-request.json ironic-12.1.0/api-ref/source/samples/deploy-template-create-request.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-create-request.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-create-request.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,19 @@ +{ + "extra": {}, + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "priority": 150 + } + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-create-response.json ironic-12.1.0/api-ref/source/samples/deploy-template-create-response.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-create-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-create-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "self" + }, + { + "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "bookmark" + } + ], + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "interface": "bios", + "priority": 150, + "step": "apply_configuration" + } + ], + "updated_at": null, + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-detail-response.json ironic-12.1.0/api-ref/source/samples/deploy-template-detail-response.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-detail-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-detail-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,36 @@ +{ + "deploy_templates": [ + { + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "self" + }, + { + "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "bookmark" + } + ], + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "interface": "bios", + "priority": 150, + "step": "apply_configuration" + } + ], + "updated_at": null, + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" + } + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-list-response.json ironic-12.1.0/api-ref/source/samples/deploy-template-list-response.json --- 
ironic-12.0.0/api-ref/source/samples/deploy-template-list-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-list-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,18 @@ +{ + "deploy_templates": [ + { + "links": [ + { + "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "self" + }, + { + "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "bookmark" + } + ], + "name": "CUSTOM_HYPERTHREADING_ON", + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" + } + ] +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-show-response.json ironic-12.1.0/api-ref/source/samples/deploy-template-show-response.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-show-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-show-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "self" + }, + { + "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "bookmark" + } + ], + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "interface": "bios", + "priority": 150, + "step": "apply_configuration" + } + ], + "updated_at": null, + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" +} diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-update-request.json ironic-12.1.0/api-ref/source/samples/deploy-template-update-request.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-update-request.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-update-request.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +[ + { + "path" : "/name", + "value" : "CUSTOM_HT_ON", + "op" : "replace" + } +] diff -Nru ironic-12.0.0/api-ref/source/samples/deploy-template-update-response.json ironic-12.1.0/api-ref/source/samples/deploy-template-update-response.json --- ironic-12.0.0/api-ref/source/samples/deploy-template-update-response.json 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/deploy-template-update-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +{ + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "self" + }, + { + "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", + "rel": "bookmark" + } + ], + "name": "CUSTOM_HT_ON", + "steps": [ + { + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "interface": "bios", + "priority": 150, + "step": "apply_configuration" + } + ], + "updated_at": "2016-08-18T22:28:49.653974+00:00", + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" +} diff -Nru ironic-12.0.0/api-ref/source/samples/driver-get-response.json ironic-12.1.0/api-ref/source/samples/driver-get-response.json --- ironic-12.0.0/api-ref/source/samples/driver-get-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/driver-get-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,4 +1,5 @@ { + 
"default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "iscsi", @@ -10,6 +11,9 @@ "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", + "enabled_bios_interfaces": [ + "no-bios" + ], "enabled_boot_interfaces": [ "pxe" ], diff -Nru ironic-12.0.0/api-ref/source/samples/drivers-list-detail-response.json ironic-12.1.0/api-ref/source/samples/drivers-list-detail-response.json --- ironic-12.0.0/api-ref/source/samples/drivers-list-detail-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/drivers-list-detail-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,6 +1,7 @@ { "drivers": [ { + "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, @@ -12,6 +13,7 @@ "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, + "enabled_bios_interfaces": null, "enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, @@ -50,6 +52,7 @@ "type": "classic" }, { + "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, @@ -61,6 +64,7 @@ "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, + "enabled_bios_interfaces": null, "enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, @@ -99,6 +103,7 @@ "type": "classic" }, { + "default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "iscsi", @@ -110,6 +115,9 @@ "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", + "enabled_bios_interfaces": [ + "no-bios" + ], "enabled_boot_interfaces": [ "pxe" ], diff -Nru ironic-12.0.0/api-ref/source/samples/node-create-request-classic.json ironic-12.1.0/api-ref/source/samples/node-create-request-classic.json --- ironic-12.0.0/api-ref/source/samples/node-create-request-classic.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-create-request-classic.json 2019-03-21 20:07:40.000000000 +0000 @@ -4,5 +4,6 @@ "driver_info": { "ipmi_username": "ADMIN", "ipmi_password": "password" - } + }, + "resource_class": "bm-large" } diff -Nru ironic-12.0.0/api-ref/source/samples/node-create-request-dynamic.json ironic-12.1.0/api-ref/source/samples/node-create-request-dynamic.json --- ironic-12.0.0/api-ref/source/samples/node-create-request-dynamic.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-create-request-dynamic.json 2019-03-21 20:07:40.000000000 +0000 @@ -5,5 +5,6 @@ "ipmi_username": "ADMIN", "ipmi_password": "password" }, - "power_interface": "ipmitool" + "power_interface": "ipmitool", + "resource_class": "bm-large" } diff -Nru ironic-12.0.0/api-ref/source/samples/node-create-response.json ironic-12.1.0/api-ref/source/samples/node-create-response.json --- ironic-12.0.0/api-ref/source/samples/node-create-response.json 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-create-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,4 +1,5 @@ { + "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, @@ -8,6 +9,7 @@ "created_at": 
"2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, + "description": null, "driver": "agent_ipmitool", "driver_info": { "ipmi_password": "******", @@ -36,6 +38,7 @@ "management_interface": null, "name": "test_node_classic", "network_interface": "flat", + "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", @@ -67,7 +70,7 @@ "raid_interface": null, "rescue_interface": null, "reservation": null, - "resource_class": null, + "resource_class": "bm-large", "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", diff -Nru ironic-12.0.0/api-ref/source/samples/node-port-detail-response.json ironic-12.1.0/api-ref/source/samples/node-port-detail-response.json --- ironic-12.0.0/api-ref/source/samples/node-port-detail-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-port-detail-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -5,6 +5,7 @@ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, + "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", diff -Nru ironic-12.0.0/api-ref/source/samples/node-show-response.json ironic-12.1.0/api-ref/source/samples/node-show-response.json --- ironic-12.0.0/api-ref/source/samples/node-show-response.json 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-show-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,13 +1,16 @@ { + "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, + "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, + "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", @@ -38,6 +41,7 @@ "management_interface": null, "name": "test_node_classic", "network_interface": "flat", + "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", @@ -69,7 +73,7 @@ "raid_interface": null, "rescue_interface": null, "reservation": null, - "resource_class": null, + "resource_class": "bm-large", "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", diff -Nru ironic-12.0.0/api-ref/source/samples/nodes-list-details-response.json ironic-12.1.0/api-ref/source/samples/nodes-list-details-response.json --- ironic-12.0.0/api-ref/source/samples/nodes-list-details-response.json 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/nodes-list-details-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,15 +1,18 @@ { "nodes": [ { + "allocation_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "boot_interface": null, "chassis_uuid": null, "clean_step": {}, + "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, + "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", @@ -23,7 +26,7 @@ "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, - "instance_uuid": null, + "instance_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "last_error": null, "links": [ { @@ -40,6 +43,7 @@ "management_interface": null, 
"name": "test_node_classic", "network_interface": "flat", + "owner": "john doe", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", @@ -102,9 +106,11 @@ ] }, { + "allocation_uuid": null, "boot_interface": "pxe", "chassis_uuid": null, "clean_step": {}, + "conductor": "compute1.localdomain", "conductor_group": "", "console_enabled": false, "console_interface": "no-console", @@ -139,6 +145,7 @@ "management_interface": "ipmitool", "name": "test_node_dynamic", "network_interface": "flat", + "owner": "43e61ec9-8e42-4dcb-bc45-30d66aa93e5b", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/portgroups", diff -Nru ironic-12.0.0/api-ref/source/samples/node-update-driver-info-response.json ironic-12.1.0/api-ref/source/samples/node-update-driver-info-response.json --- ironic-12.0.0/api-ref/source/samples/node-update-driver-info-response.json 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/node-update-driver-info-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -1,7 +1,9 @@ { + "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, + "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, @@ -40,6 +42,7 @@ "management_interface": null, "name": "test_node_classic", "network_interface": "flat", + "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", diff -Nru ironic-12.0.0/api-ref/source/samples/port-create-request.json ironic-12.1.0/api-ref/source/samples/port-create-request.json --- ironic-12.0.0/api-ref/source/samples/port-create-request.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/port-create-request.json 2019-03-21 20:07:40.000000000 +0000 @@ -2,6 +2,7 @@ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "address": "11:11:11:11:11:11", + "is_smartnic": true, "local_link_connection": { "switch_id": "0a:1b:2c:3d:4e:5f", "port_id": "Ethernet3/1", diff -Nru ironic-12.0.0/api-ref/source/samples/port-create-response.json ironic-12.1.0/api-ref/source/samples/port-create-response.json --- ironic-12.0.0/api-ref/source/samples/port-create-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/port-create-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -3,6 +3,7 @@ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, + "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", diff -Nru ironic-12.0.0/api-ref/source/samples/portgroup-port-detail-response.json ironic-12.1.0/api-ref/source/samples/portgroup-port-detail-response.json --- ironic-12.0.0/api-ref/source/samples/portgroup-port-detail-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/portgroup-port-detail-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -5,6 +5,7 @@ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, + "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", diff -Nru ironic-12.0.0/api-ref/source/samples/port-list-detail-response.json ironic-12.1.0/api-ref/source/samples/port-list-detail-response.json --- ironic-12.0.0/api-ref/source/samples/port-list-detail-response.json 2018-12-19 
10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/port-list-detail-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -5,6 +5,7 @@ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, + "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", diff -Nru ironic-12.0.0/api-ref/source/samples/port-update-response.json ironic-12.1.0/api-ref/source/samples/port-update-response.json --- ironic-12.0.0/api-ref/source/samples/port-update-response.json 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/api-ref/source/samples/port-update-response.json 2019-03-21 20:07:40.000000000 +0000 @@ -3,6 +3,7 @@ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, + "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", diff -Nru ironic-12.0.0/AUTHORS ironic-12.1.0/AUTHORS --- ironic-12.0.0/AUTHORS 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/AUTHORS 2019-03-21 20:09:08.000000000 +0000 @@ -25,6 +25,7 @@ Ankit Kumar Anne Gentle Annie Lezil +Anshul Jain Anson Y.W Anton Arefiev Anup Navare @@ -34,6 +35,7 @@ Arata Notsu Armando Migliaccio Arne Wiebalck +Arne Wiebalck Artem Rozumenko Atsushi SAKAI Bernard Van De Walle @@ -81,6 +83,7 @@ Dan Prince Dan Smith Dan Smith +Daniel Abad Dao Cong Tien Daryl Walleck Davanum Srinivas @@ -133,6 +136,7 @@ Gábor Antal Ha Van Tu Hadi Bannazadeh +Hamdy Khader Hans Lindgren Haomeng, Wang Harald Jensas @@ -155,12 +159,14 @@ Iury Gregory Melo Ferreira Iury Gregory Melo Ferreira Jacek Tomasiak +Jakub Libosvar James E. Blair James E. Blair James Slagle Jason Kölker Javier Pena Jay Faulkner +Jens Harbott Jeremy Stanley Jesse Andrews Jesse Pretorius @@ -195,6 +201,7 @@ Kyle Stevenson Kyrylo Romanenko Lance Bragstad +Lars Kellogg-Stedman Laura Moore Lenny Verkhovsky LiYucai @@ -210,6 +217,7 @@ Madhuri Kumari MaoyangLiu Marc Methot +Marcin Juszkiewicz Marco Morais Marcus Rafael Mario Villaplana @@ -263,6 +271,7 @@ Nguyen Phuong An Nguyen Van Duc Nguyen Van Trung +Nikolay Fedotov Nisha Agarwal Noam Angel OctopusZhang @@ -283,7 +292,9 @@ PollyZ Pádraig Brady Qian Min Chen +Qianbiao NG R-Vaishnavi +Rachit7194 Rafi Khardalian Rakesh H S Ramakrishnan G @@ -303,6 +314,7 @@ Roman Prykhodchenko Ruby Loo Ruby Loo +Ruby Loo Ruby Loo Ruby Loo Rushil Chugh @@ -349,6 +361,7 @@ Stig Telfer Sukhdev Kapur Sukhdev Kapur +Takashi NATSUME Tan Lin Tang Chen Tao Li @@ -356,6 +369,7 @@ Thierry Carrez Thomas Goirand Thomas Herve +TienDC Tim Burke Tom Fifield Tony Breeds @@ -364,6 +378,7 @@ TuanLAF Tushar Kalra Vadim Hmyrov +Varsha Varun Gadiraju Vasyl Saienko Vic Howard @@ -453,6 +468,8 @@ klyang lei-zhang-99cloud licanwei +lijunjie +lin shengrong linggao liumk liusheng @@ -461,6 +478,8 @@ mallikarjuna.kolagatla max_lobur melissaml +mkumari +mvpnitesh paresh-sao pawnesh.kumar poojajadhav @@ -482,6 +501,7 @@ takanorimiyagishi tanlin tianhui +tiendc vishal mahajan vmud213 vsaienko diff -Nru ironic-12.0.0/bindep.txt ironic-12.1.0/bindep.txt --- ironic-12.0.0/bindep.txt 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/bindep.txt 2019-03-21 20:07:40.000000000 +0000 @@ -28,8 +28,6 @@ libvirt [platform:rpm devstack] libvirt-dev [platform:dpkg devstack] libvirt-devel [platform:rpm devstack] -python-libvirt [platform:dpkg devstack] -libvirt-python [platform:rpm devstack] qemu [platform:dpkg devstack build-image-dib] qemu-kvm [platform:dpkg devstack] qemu-utils [platform:dpkg devstack build-image-dib] @@ -49,6 +47,7 
@@ build-essential [platform:dpkg test] libssl-dev [platform:dpkg test] # these are needed by infra for python-* jobs +libpq-dev [platform:dpkg test] postgresql postgresql-client [platform:dpkg] # postgresql-devel [platform:rpm] diff -Nru ironic-12.0.0/ChangeLog ironic-12.1.0/ChangeLog --- ironic-12.0.0/ChangeLog 2018-12-19 10:03:55.000000000 +0000 +++ ironic-12.1.0/ChangeLog 2019-03-21 20:09:07.000000000 +0000 @@ -1,9 +1,154 @@ CHANGES ======= +12.1.0 +------ + +* Add systemd unit for vbmcd in devstack +* Workaround for postgres job with ubuntu bionic +* Add release note on conntrack issue on bionic +* Update release-mappings and api version data for Stein release +* Pass kwargs to exception to get better formatted error message +* Advance python-dracclient version requirement +* Add prelude and update release notes for 12.1.0 +* Optimize: HUAWEI iBMC driver utils +* Set boot\_mode in node properties during OOB Introspection +* Fix idrac driver unit test backwards compat issue +* Deploy Templates: factor out ironic.conductor.steps +* Make metrics usable +* Kg key for IPMIv2 authentication +* fast tracked deployment support +* Update doc for UEFI first +* Fix lower-constraints job +* Fix idrac Job.state renamed to Job.status +* Deprecates \`hash\_distribution\_replicas\` config option +* Add Huawei iBMC driver support +* Fix misuse of assertTrue +* Allow methods to be both deploy and clean steps +* Adding ansible python interpreter as driver\_info +* Return 405 for old versions in allocation and deploy template APIs +* honor ipmi\_port in serial console drivers +* Follow up to available node protection +* Deploy templates: conductor and API nits +* Deploy Templates: documentation +* Fixing a bash test in devstack ironic lib +* Deploy Templates: API reference +* Fix formatting issue in doc +* Update dist filter for devstack ubuntu +* Add a non-voting metalsmith job for local boot coverage +* Document building configdrive on the server side +* Check microversions before validations for allocations and deploy templates +* Add python3 unit test with drivers installed +* Fix missing print format error +* Fix typo and docstring in pxe/ipxe +* Stop requiring root\_gb for whole-disk images +* driver-requirements: mark UcsSdk as Python 2 only +* Set boot\_mode in node properties during Redfish introspection +* Add option to set python interpreter for ansible +* Document using a URL for image\_checksum +* [docs] IPv6 support for iLO +* Temporary marking ironic-standalone non-voting +* Allow building configdrive from JSON in the API +* Allocation API: optimize check on candidate nodes +* Fix TypeError: \_\_str\_\_ returned non-string (type ImageRefValidationFailed) +* Deploy templates: API & notifications +* Deploy templates: conductor +* Drop installing python-libvirt system package +* Test API max version is in RELEASE\_MAPPINGS +* Update the log message for ilo drivers +* Deploy templates: fix updating steps in Python 3 +* Fix pysendfile requirement marker +* Add option to protect available nodes from accidental deletion +* Deploy Templates: add 'extra' field to DB & object +* Trivial: Fix error message when waiting for power state +* Allocation API: fix minor issues in the API reference +* Allocation API: reference documentation +* Adding bios\_interface reference to api docs +* Set available\_nodes in tempest conf +* Update the proliantutils version in documentation +* [trivial] Removing python 3.5 template jobs +* Deploy Templates: Fix DB & object nits +* Add check for object versions +* 
[Trivial] Fix incorrect logging in destroy\_allocation +* Allocation API: taking over allocations of offline conductors +* Allocation API: resume allocations on conductor restart +* Devstack - run vbmc as sudo +* Documentation update for iLO Drivers +* Follow up - API - Implement /events endpoint +* Follow up to node description +* ensure that socat serial proxy keeps running +* Deprecate Cisco drivers +* Follow up to ISO image build patch +* API - Implement /events endpoint +* Add a requisite for metadata with BFV +* [Follow Up] Add support for Smart NICs +* Support using JSON-RPC instead of oslo.messaging +* Deploy templates: data model, DB API & objects +* [Follow Up] Expose is\_smartnic in port API +* Prioritize sloppy nodes for power sync +* Expose conductors: api-ref +* Remove duplicated jobs and refactor jobs +* Allocation API: fix a small inconsistency +* Expose is\_smartnic in port API +* [Trivial] Allocation API: correct syntax in API version history docs +* Allocation API: REST API implementation +* Make power sync unit test operational +* Allow case-insensitivity when setting conductor\_group via API +* Optionally preserve original system boot order upon instance deployment +* Add support for Smart NICs +* Add a voting CI job running unit tests with driver-requirements +* [Refactor] Make caching BIOS settings explicit +* [docs] OOB RAID implementation for ilo5 based HPE Proliant servers +* Make iLO BIOS interface clean steps asynchronous +* Provides mount point as cinder requires it to attach volume +* Add description field to node: api-ref +* Add description field to node +* Fix test for 'force\_persistent\_boot\_device' (i)PXE driver\_info option +* Fix iPXE boot interface with ipxe\_enabled=False +* Allocation API: conductor API (without HA and take over) +* Removing deprecated drac\_host property +* Add is\_smartnic to Port data model +* Remove uses of logger name "oslo\_messaging" +* [Trivial] Fix typo in noop interface comment +* Remove duplicated fault code +* Fix listing nodes with conductor could raise +* Parallelize periodic power sync calls follow up +* Build ISO out of EFI system partition image +* Make versioned notifications topics configurable +* Build UEFI-only ISO for UEFI boot +* Parallelize periodic power sync calls +* Limit the timeout value of heartbeat\_timeout +* Replace use of Q\_USE\_PROVIDERNET\_FOR\_PUBLIC +* Make ipmi\_force\_boot\_device more user friendly +* Follow-up logging change +* Remove dsvm from zuulv3 jobs +* Allocation API: allow picking random conductor for RPC topic +* Fix updating nodes with removed or broken drivers +* Fix ironic port creation after Redfish inspection +* Allocation API: minor fixes to DB and RPC +* Allocation API: allow skipping retries in TaskManager +* Allocation API: database and RPC +* Allow missing \`\`local\_gb\`\` property +* Fix typo in release note +* Fix IPv6 iPXE support +* OOB RAID implementation for ilo5 based HPE Proliant servers +* Fix SushyError namespacing in Redfish inspection +* Allow disabling TFTP image cache +* Add pxe template per node +* Fix the misspelling of "configuration" +* Switch to cirros 0.4.0 +* Update tox version to 2.0 +* Disable metadata\_csum when creating ext4 filesystems +* Switch the default NIC driver to e1000 +* Change openstack-dev to openstack-discuss +* Fix XClarity driver management defect +* Ignore newly introduced tables in pre-upgrade versions check +* Switch CI back to xenial + 12.0.0 ------ +* Add "owner" information field * Introduce configuration option 
[ipmi]ipmi\_disable\_timeout * Enroll XClarity machines in Ironic's devstack setting * spelling error @@ -64,6 +209,7 @@ * Fix DHCPv6 support * Revert "Add openstack/placement as a required project for ironic-grenade\*" * Add api-ref for conductor group +* Follow-up patch for I71feefa3d0593fd185a286bec4ce38607203641d * Fix ironic developer quickstart document * Add note to pxe configuration doc * Create base pxe class @@ -78,6 +224,7 @@ * Remove oneview drivers * Completely remove support for deprecated Glance V1 * Avoid race with nova on power sync and rescue +* Log a warning for Gen8 Inspection * Doc: Adds cinder as a service requires creds * Fix unit test run on OS X * Fixes a race condition in the hash ring code diff -Nru ironic-12.0.0/debian/changelog ironic-12.1.0/debian/changelog --- ironic-12.0.0/debian/changelog 2019-03-13 16:39:10.000000000 +0000 +++ ironic-12.1.0/debian/changelog 2019-03-25 12:21:15.000000000 +0000 @@ -1,8 +1,15 @@ -ironic (1:12.0.0-0ubuntu1~cloud0) bionic-stein; urgency=medium +ironic (1:12.1.0-0ubuntu1~cloud0) bionic-stein; urgency=medium * New upstream release for the Ubuntu Cloud Archive. - -- Openstack Ubuntu Testing Bot Wed, 13 Mar 2019 16:39:10 +0000 + -- Openstack Ubuntu Testing Bot Mon, 25 Mar 2019 12:21:15 +0000 + +ironic (1:12.1.0-0ubuntu1) disco; urgency=medium + + * New upstream point release for OpenStack Stein. + * d/control: Align (Build-)Depends with upstream. + + -- Corey Bryant Fri, 22 Mar 2019 09:15:20 -0400 ironic (1:12.0.0-0ubuntu1) disco; urgency=medium diff -Nru ironic-12.0.0/debian/control ironic-12.1.0/debian/control --- ironic-12.0.0/debian/control 2019-03-13 15:39:20.000000000 +0000 +++ ironic-12.1.0/debian/control 2019-03-22 13:15:20.000000000 +0000 @@ -22,6 +22,7 @@ python3-bashate (>= 0.5.1), python3-cinderclient (>= 1:3.3.0), python3-coverage (>= 4.0), + python3-doc8 (>= 0.6.0), python3-eventlet (>= 0.18.2), python3-fixtures (>= 3.0.0), python3-futurist (>= 1.2.0), @@ -38,6 +39,7 @@ python3-mock (>= 2.0.0), python3-neutronclient (>= 1:6.7.0), python3-openstackdocstheme (>= 1.18.1), + python3-openstacksdk (>= 0.25.0), python3-os-api-ref (>= 1.4.0), python3-os-testr (>= 1.0.0), python3-os-traits (>= 0.4.0), @@ -63,7 +65,7 @@ python3-paramiko (>= 2.0), python3-pecan (>= 1.0.0), python3-psutil (>= 3.2.2), - python3-psycopg2 (>= 2.6.2), + python3-psycopg2 (>= 2.7.3), python3-pygments (>= 2.2.0), python3-pymysql (>= 0.7.6), python3-reno (>= 2.5.0), @@ -87,7 +89,7 @@ python3-tz (>= 2013.6), python3-webob (>= 1:1.7.1), python3-webtest (>= 2.0.27), - python3-wsme (>= 0.8.0), + python3-wsme (>= 0.9.3), Standards-Version: 4.1.2 Vcs-Browser: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ironic Vcs-Git: git://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ironic @@ -146,6 +148,7 @@ python3-keystoneauth1 (>= 3.4.0), python3-keystonemiddleware (>= 4.17.0), python3-neutronclient (>= 1:6.7.0), + python3-openstacksdk (>= 0.25.0), python3-os-traits (>= 0.4.0), python3-oslo.concurrency (>= 3.26.0), python3-oslo.config (>= 1:5.2.0), @@ -182,7 +185,7 @@ python3-tooz (>= 1.58.0), python3-tz (>= 2013.6), python3-webob (>= 1:1.7.1), - python3-wsme (>= 0.8.0), + python3-wsme (>= 0.9.3), ${misc:Depends}, ${python3:Depends}, Description: Openstack bare metal provisioning service - Python 3 library diff -Nru ironic-12.0.0/devstack/common_settings ironic-12.1.0/devstack/common_settings --- ironic-12.0.0/devstack/common_settings 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/devstack/common_settings 2019-03-21 20:07:40.000000000 +0000 
@@ -4,7 +4,7 @@ source <(cat $TOP_DIR/../../old/devstack/.localrc.auto | grep -v 'enable_plugin') fi -CIRROS_VERSION=0.3.5 +CIRROS_VERSION=0.4.0 # Whether configure the nodes to boot in Legacy BIOS or UEFI mode. Accepted # values are: "bios" or "uefi", defaults to "bios". diff -Nru ironic-12.0.0/devstack/files/debs/ironic ironic-12.1.0/devstack/files/debs/ironic --- ironic-12.0.0/devstack/files/debs/ironic 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/devstack/files/debs/ironic 2019-03-21 20:07:44.000000000 +0000 @@ -18,19 +18,18 @@ gnupg libguestfs0 libguestfs-tools -libvirt-bin # dist:xenial NOPRIME +libvirt-bin # dist:xenial,bionic NOPRIME open-iscsi openssh-client -pxelinux # dist:jessie,xenial +pxelinux # dist:xenial,bionic python-libguestfs -python-libvirt qemu qemu-kvm qemu-utils sgabios shellinabox -syslinux # dist:wheezy,trusty -syslinux-common # dist:jessie,xenial +syslinux # dist:xenial,bionic +syslinux-common # dist:xenial,bionic tftpd-hpa xinetd squashfs-tools diff -Nru ironic-12.0.0/devstack/lib/ironic ironic-12.1.0/devstack/lib/ironic --- ironic-12.0.0/devstack/lib/ironic 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/devstack/lib/ironic 2019-03-21 20:07:44.000000000 +0000 @@ -322,6 +322,10 @@ IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot} IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-3928} +# Allow using JSON RPC instead of oslo.messaging +IRONIC_RPC_TRANSPORT=${IRONIC_RPC_TRANSPORT:-oslo} +IRONIC_JSON_RPC_PORT=${IRONIC_JSON_RPC_PORT:-8089} + # Whether DevStack will be setup for bare metal or VMs IRONIC_IS_HARDWARE=$(trueorfalse False IRONIC_IS_HARDWARE) @@ -332,6 +336,7 @@ IRONIC_VBMC_PORT_RANGE_START=${IRONIC_VBMC_PORT_RANGE_START:-6230} IRONIC_VBMC_CONFIG_FILE=${IRONIC_VBMC_CONFIG_FILE:-$HOME/.vbmc/virtualbmc.conf} IRONIC_VBMC_LOGFILE=${IRONIC_VBMC_LOGFILE:-$IRONIC_VM_LOG_DIR/virtualbmc.log} +IRONIC_VBMC_SYSTEMD_SERVICE=devstack@virtualbmc.service # Virtual PDU configs IRONIC_VPDU_CONFIG_FILE=${IRONIC_VPDU_CONFIG_FILE:-$HOME/.vpdu/virtualpdu.conf} @@ -445,6 +450,8 @@ # The path to the directory where Ironic should put the logs when IRONIC_DEPLOY_LOGS_STORAGE_BACKEND is set to "local" IRONIC_DEPLOY_LOGS_LOCAL_PATH=${IRONIC_DEPLOY_LOGS_LOCAL_PATH:-$IRONIC_VM_LOG_DIR/deploy_logs} +# Fast track option +IRONIC_DEPLOY_FAST_TRACK=${IRONIC_DEPLOY_FAST_TRACK:-False} # Define baremetal min_microversion in tempest config. Default value None is picked from tempest. 
TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-} @@ -516,7 +523,7 @@ # TODO(pas-ha) find a way to (cross-)sign the custom CA bundle used by tls-proxy # with default iPXE cert - for reference see http://ipxe.org/crypto -if [ $IRONIC_IPXE_USE_SWIFT == 'True' && is_service_enabled tls-proxy ]; then +if is_service_enabled tls-proxy && [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then die $LINENO "Ironic in DevStack does not yet support booting iPXE from HTTPS URLs" fi @@ -717,6 +724,25 @@ fi } +function start_virtualbmc { + $SYSTEMCTL start $IRONIC_VBMC_SYSTEMD_SERVICE +} + +function stop_virtualbmc { + $SYSTEMCTL stop $IRONIC_VBMC_SYSTEMD_SERVICE +} + +function cleanup_virtualbmc { + stop_virtualbmc + + $SYSTEMCTL disable $IRONIC_VBMC_SYSTEMD_SERVICE + + local unitfile="$SYSTEMD_DIR/$IRONIC_VBMC_SYSTEMD_SERVICE" + sudo rm -f $unitfile + + $SYSTEMCTL daemon-reload +} + function setup_virtualbmc { # Install pyghmi from source, if requested, otherwise it will be # downloaded as part of the virtualbmc installation @@ -741,8 +767,18 @@ iniset $IRONIC_VBMC_CONFIG_FILE log debug True iniset $IRONIC_VBMC_CONFIG_FILE log logfile $IRONIC_VBMC_LOGFILE -} + local cmd + + cmd=$(which vbmcd) + cmd+=" --foreground" + + write_user_unit_file $IRONIC_VBMC_SYSTEMD_SERVICE "$cmd" "" "root" + + $SYSTEMCTL enable $IRONIC_VBMC_SYSTEMD_SERVICE + + start_virtualbmc +} function setup_virtualpdu { if use_library_from_git "virtualpdu"; then @@ -1107,6 +1143,12 @@ iniset $IRONIC_CONF_FILE agent deploy_logs_local_path $IRONIC_DEPLOY_LOGS_LOCAL_PATH # Set image_download_source for direct interface iniset $IRONIC_CONF_FILE agent image_download_source $IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE + # Configure JSON RPC backend + iniset $IRONIC_CONF_FILE DEFAULT rpc_transport $IRONIC_RPC_TRANSPORT + iniset $IRONIC_CONF_FILE json_rpc port $IRONIC_JSON_RPC_PORT + + # Set fast track options + iniset $IRONIC_CONF_FILE deploy fast_track $IRONIC_DEPLOY_FAST_TRACK # Configure Ironic conductor, if it was enabled. if is_service_enabled ir-cond; then @@ -1171,7 +1213,9 @@ iniset $IRONIC_CONF_FILE DEFAULT auth_strategy $IRONIC_AUTH_STRATEGY configure_auth_token_middleware $IRONIC_CONF_FILE ironic $IRONIC_AUTH_CACHE_DIR/api - iniset_rpc_backend ironic $IRONIC_CONF_FILE + if is_service_enabled rabbit; then + iniset_rpc_backend ironic $IRONIC_CONF_FILE + fi iniset $IRONIC_CONF_FILE conductor automated_clean $IRONIC_AUTOMATED_CLEAN_ENABLED @@ -1212,7 +1256,7 @@ # NOTE(pas-ha) service_catalog section is used to discover # ironic API endpoint from keystone catalog - local client_sections="neutron swift glance inspector cinder service_catalog" + local client_sections="neutron swift glance inspector cinder service_catalog json_rpc" for conf_section in $client_sections; do configure_client_for $conf_section done @@ -1270,6 +1314,9 @@ # we definitely know the default username to use for TinyIPA image IRONIC_ANSIBLE_SSH_USER='tc' fi + # (rpittau) most recent tinyipa uses python3 natively so we need to change + # the default ansible python interpreter. + iniset $IRONIC_CONF_FILE ansible default_python_interpreter /usr/bin/python3 fi iniset $IRONIC_CONF_FILE ansible default_key_file $IRONIC_ANSIBLE_SSH_KEY if [[ -n $IRONIC_ANSIBLE_SSH_USER ]]; then @@ -1417,6 +1464,10 @@ $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE fi create_ironic_cache_dir + + # NOTE(rloo): We're not upgrading but want to make sure this command works, + # even though we're not parsing the output of this command. 
+ $IRONIC_BIN_DIR/ironic-status upgrade check } # _ironic_bm_vm_names() - Generates list of names for baremetal VMs. @@ -1616,6 +1667,9 @@ if [[ -n "$LIBVIRT_NIC_DRIVER" ]]; then vm_opts+=" -D $LIBVIRT_NIC_DRIVER" + elif [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then + # Note(derekh) UEFI for the moment doesn't work with the e1000 net driver + vm_opts+=" -D virtio" fi local bridge_mac @@ -1629,7 +1683,7 @@ sudo -E su -p $STACK_USER -c "PATH=$PATH $IRONIC_SCRIPTS_DIR/create-node.sh -n $vm_name \ -c $IRONIC_VM_SPECS_CPU -m $IRONIC_VM_SPECS_RAM -d $IRONIC_VM_SPECS_DISK \ -a $IRONIC_VM_SPECS_CPU_ARCH -b $IRONIC_VM_NETWORK_BRIDGE $vm_opts -p $vbmc_port -o $pdu_outlet \ - -i $IRONIC_VM_INTERFACE_COUNT -f $IRONIC_VM_SPECS_DISK_FORMAT -M $PUBLIC_BRIDGE_MTU $log_arg" >> $IRONIC_VM_MACS_CSV_FILE + -i $IRONIC_VM_INTERFACE_COUNT -f $IRONIC_VM_SPECS_DISK_FORMAT -M $PUBLIC_BRIDGE_MTU $log_arg" >> $IRONIC_VM_MACS_CSV_FILE echo " ${bridge_mac} $IRONIC_VM_NETWORK_BRIDGE" >> $IRONIC_VM_MACS_CSV_FILE vbmc_port=$((vbmc_port+1)) @@ -1942,7 +1996,7 @@ node_options+=" --driver-info ilo_deploy_iso=$IRONIC_DEPLOY_ISO_ID" fi elif is_deployed_by_drac; then - node_options+=" --driver-info drac_host=$bmc_address \ + node_options+=" --driver-info drac_address=$bmc_address \ --driver-info drac_password=$bmc_passwd \ --driver-info drac_username=$bmc_username" elif is_deployed_by_redfish; then @@ -2148,6 +2202,13 @@ if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $ISCSI_SERVICE_PORT -s $FLOATING_RANGE -j ACCEPT || true fi + + # (rpittau) workaround to allow TFTP traffic on ubuntu bionic with conntrack helper disabled + local qrouter + qrouter=$(sudo ip netns list | grep qrouter | awk '{print $1;}') + if [[ ! -z "$qrouter" ]]; then + sudo ip netns exec $qrouter /sbin/iptables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp + fi } function configure_tftpd { @@ -2426,6 +2487,8 @@ done done + cleanup_virtualbmc + sudo ovs-vsctl --if-exists del-br $IRONIC_VM_NETWORK_BRIDGE sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override @@ -2451,6 +2514,9 @@ if [[ -n "$TEMPEST_BAREMETAL_MAX_MICROVERSION" ]]; then iniset $TEMPEST_CONFIG baremetal max_microversion $TEMPEST_BAREMETAL_MAX_MICROVERSION fi + if [[ -n "$IRONIC_VM_COUNT" ]]; then + iniset $TEMPEST_CONFIG baremetal available_nodes $IRONIC_VM_COUNT + fi if [[ -n "$IRONIC_PING_TIMEOUT" ]]; then iniset $TEMPEST_CONFIG validation ping_timeout $IRONIC_PING_TIMEOUT fi @@ -2529,6 +2595,8 @@ # Enabled features iniset $TEMPEST_CONFIG baremetal_feature_enabled ipxe_enabled $IRONIC_IPXE_ENABLED + iniset $TEMPEST_CONFIG baremetal_feature_enabled fast_track_discovery $IRONIC_DEPLOY_FAST_TRACK + } function get_ironic_node_prefix { diff -Nru ironic-12.0.0/devstack/plugin.sh ironic-12.1.0/devstack/plugin.sh --- ironic-12.0.0/devstack/plugin.sh 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/devstack/plugin.sh 2019-03-21 20:07:40.000000000 +0000 @@ -39,6 +39,11 @@ create_ironic_accounts fi + if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" && "$IRONIC_IS_HARDWARE" == "False" ]]; then + echo_summary "Precreating bridge: $IRONIC_VM_NETWORK_BRIDGE" + sudo ovs-vsctl -- --may-exist add-br $IRONIC_VM_NETWORK_BRIDGE + fi + elif [[ "$2" == "extra" ]]; then # stack/extra - Called near the end after layer 1 and 2 services have # been started. 
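The JSON-RPC plumbing configured above (``rpc_transport`` and the ``json_rpc`` port) replaces oslo.messaging with plain HTTP requests carrying JSON-RPC 2.0 envelopes. A minimal sketch of such a request follows; the host, the ``json-rpc`` transport value and the method name ``example_method`` are illustrative assumptions only, since the conductor's RPC methods are ironic-internal and not a public API:

.. code-block:: console

    # Sketch of a JSON-RPC 2.0 envelope sent to the conductor's listener
    # when rpc_transport is set to json-rpc. "example_method" is a
    # hypothetical placeholder, not a documented ironic RPC method;
    # 8089 matches the IRONIC_JSON_RPC_PORT default above.
    curl -s http://127.0.0.1:8089 \
        -H 'Content-Type: application/json' \
        -d '{"jsonrpc": "2.0", "method": "example_method", "params": {}, "id": 1}'
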
diff -Nru ironic-12.0.0/devstack/tools/ironic/scripts/create-node.sh ironic-12.1.0/devstack/tools/ironic/scripts/create-node.sh --- ironic-12.0.0/devstack/tools/ironic/scripts/create-node.sh 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/devstack/tools/ironic/scripts/create-node.sh 2019-03-21 20:07:44.000000000 +0000 @@ -44,7 +44,7 @@ exit 1 fi -LIBVIRT_NIC_DRIVER=${NIC_DRIVER:-"virtio"} +LIBVIRT_NIC_DRIVER=${NIC_DRIVER:-"e1000"} LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} @@ -129,7 +129,7 @@ --disk-format $DISK_FORMAT $VM_LOGGING --engine $ENGINE $UEFI_OPTS $vm_opts \ --interface-count $INTERFACE_COUNT $MAC_ADDRESS >&2 - # Createa Virtual BMC for the node if IPMI is used + # Create Virtual BMC for the node if IPMI is used if [[ $(type -P vbmc) != "" ]]; then vbmc add $NAME --port $VBMC_PORT vbmc start $NAME diff -Nru ironic-12.0.0/devstack/upgrade/upgrade.sh ironic-12.1.0/devstack/upgrade/upgrade.sh --- ironic-12.0.0/devstack/upgrade/upgrade.sh 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/devstack/upgrade/upgrade.sh 2019-03-21 20:07:40.000000000 +0000 @@ -71,6 +71,11 @@ # calls upgrade-ironic for specific release upgrade_project ironic $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH +# NOTE(rloo): make sure it is OK to do an upgrade. Except that we aren't +# parsing/checking the output of this command because the output could change +# based on the checks it makes. +$IRONIC_BIN_DIR/ironic-status upgrade check + $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE # NOTE(vsaienko) pin_release only on multinode job, for cold upgrade (single node) diff -Nru ironic-12.0.0/doc/source/admin/boot-from-volume.rst ironic-12.1.0/doc/source/admin/boot-from-volume.rst --- ironic-12.0.0/doc/source/admin/boot-from-volume.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/boot-from-volume.rst 2019-03-21 20:07:40.000000000 +0000 @@ -25,6 +25,9 @@ utilize PXE boot mechanisms when iPXE is enabled. - iPXE is an explicit requirement, as it provides the mechanism that attaches and initiates booting from an iSCSI volume. +- Metadata services need to be configured and available for the instance images + to obtain configuration such as keys. Configuration drives are not supported + due to minimum disk extension sizes. Conductor Configuration ======================= diff -Nru ironic-12.0.0/doc/source/admin/deploy-steps.rst ironic-12.1.0/doc/source/admin/deploy-steps.rst --- ironic-12.0.0/doc/source/admin/deploy-steps.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/deploy-steps.rst 2019-03-21 20:07:40.000000000 +0000 @@ -1,65 +1,5 @@ ============ -Deploy steps +Deploy Steps ============ -Overview -======== - -Node deployment is performed by the Bare Metal service to prepare a node for -use by a workload. The exact work flow used depends on a number of factors, -including the hardware type and interfaces assigned to a node. - -Customizing deployment -====================== - -The Bare Metal service implements deployment by collecting a list of deploy -steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID -interfaces of the driver assigned to the node. These steps are then ordered by -priority and executed on the node when the node is moved to the ``deploying`` -state. - -Nodes move to the ``deploying`` state when attempting to move to the ``active`` -state (when the hardware is prepared for use by a workload). 
For a full -understanding of all state transitions into deployment, please see -:ref:`states`. - -The Bare Metal service added support for deploy steps in the Rocky release. - -Deploy steps ------------- - -Deploy steps are ordered from higher to lower priority, where a larger integer -is a higher priority. If the same priority is used by deploy steps on different -interfaces, the following resolution order is used: Power, Management, Deploy, -BIOS, and RAID interfaces. - -Writing a Deploy Step ---------------------- - -Please refer to :doc:`/contributor/deploy-steps`. - -FAQ -=== - -What deploy step is running? ----------------------------- -To check what deploy step the node is performing or attempted to perform and -failed, run the following command; it will return the value in the node's -``driver_internal_info`` field:: - - openstack baremetal node show $node_ident -f value -c driver_internal_info - -The ``deploy_steps`` field will contain a list of all remaining steps with -their priorities, and the first one listed is the step currently in progress or -that the node failed before going into ``deploy failed`` state. - -Troubleshooting -=============== -If deployment fails on a node, the node will be put into the ``deploy failed`` -state until the node is deprovisioned. A deprovisioned node is moved to the -``available`` state after the cleaning process has been performed successfully. - -Strategies for determining why a deploy step failed include checking the ironic -conductor logs, checking logs from the ironic-python-agent that have been -stored on the ironic conductor, or performing general hardware troubleshooting -on the node. +The deploy steps section has moved to :ref:`node-deployment-deploy-steps`. diff -Nru ironic-12.0.0/doc/source/admin/drivers/ansible.rst ironic-12.1.0/doc/source/admin/drivers/ansible.rst --- ironic-12.0.0/doc/source/admin/drivers/ansible.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers/ansible.rst 2019-03-21 20:07:40.000000000 +0000 @@ -231,6 +231,13 @@ Default is taken from ``[ansible]/default_clean_steps_config`` option of the ironic configuration file (defaults to ``clean_steps.yaml``). +ansible_python_interpreter + Absolute path to the python interpreter on the managed machine. + Default is taken from the ``[ansible]/default_python_interpreter`` option of + the ironic configuration file. + Ansible uses ``/usr/bin/python`` by default. + + Customizing the deployment logic ================================ diff -Nru ironic-12.0.0/doc/source/admin/drivers/ibmc.rst ironic-12.1.0/doc/source/admin/drivers/ibmc.rst --- ironic-12.0.0/doc/source/admin/drivers/ibmc.rst 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers/ibmc.rst 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,119 @@ +=============== +iBMC driver +=============== + +Overview +======== + +The ``ibmc`` driver targets the Huawei rack servers 2288H V5 and CH121 V5. +The iBMC hardware type enables the user to take advantage of features of +`Huawei iBMC`_ to control Huawei servers. + +Prerequisites +============= + +The `HUAWEI iBMC Client library`_ should be installed on the ironic conductor +node(s). + +For example, it can be installed with ``pip``:: + + sudo pip install python-ibmcclient + +Enabling the iBMC driver ============================ + +#. Add ``ibmc`` to the list of ``enabled_hardware_types``, + ``enabled_power_interfaces``, ``enabled_vendor_interfaces`` + and ``enabled_management_interfaces`` in ``/etc/ironic/ironic.conf``.
For example:: + + [DEFAULT] + ... + enabled_hardware_types = ibmc,ipmi + enabled_power_interfaces = ibmc,ipmitool + enabled_management_interfaces = ibmc,ipmitool + enabled_vendor_interfaces = ibmc + +#. Restart the ironic conductor service:: + + sudo service ironic-conductor restart + + # Or, for RDO: + sudo systemctl restart openstack-ironic-conductor + +Registering a node with the iBMC driver +=========================================== + +Nodes configured to use the driver should have the ``driver`` property +set to ``ibmc``. + +The following properties are specified in the node's ``driver_info`` +field: + +- ``ibmc_address``: + + The URL address to the ibmc controller. It must + include the authority portion of the URL, and can + optionally include the scheme. If the scheme is + missing, https is assumed. + For example: https://ibmc.example.com. This is required. + +- ``ibmc_username``: + + User account with admin/server-profile access + privilege. This is required. + +- ``ibmc_password``: + + User account password. This is required. + +- ``ibmc_verify_ca``: + + If ibmc_address has the **https** scheme, the + driver will use a secure (TLS_) connection when + talking to the ibmc controller. By default + (if this is set to True), the driver will try to + verify the host certificates. This can be set to + the path of a certificate file or directory with + trusted certificates that the driver will use for + verification. To disable verifying TLS_, set this + to False. This is optional. + +The ``openstack baremetal node create`` command can be used to enroll +a node with the ``ibmc`` driver. For example: + +.. code-block:: bash + + openstack baremetal node create --driver ibmc \ + --driver-info ibmc_address=https://example.com \ + --driver-info ibmc_username=admin \ + --driver-info ibmc_password=password + +For more information about enrolling nodes see :ref:`enrollment` +in the install guide. + +Features of the ``ibmc`` hardware type +========================================= + +Query boot up sequence +^^^^^^^^^^^^^^^^^^^^^^ + +The ``ibmc`` hardware type can query the current boot up sequence from the +bare metal node: + +.. code-block:: bash + + openstack baremetal node passthru call --http-method GET \ + <node> boot_up_seq + + +PXE Boot and iSCSI Deploy Process with Ironic Standalone Environment +==================================================================== + +.. figure:: ../../images/ironic_standalone_with_ibmc_driver.svg + :width: 960px + :align: left + :alt: Ironic standalone with iBMC driver node + +.. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc +.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security +..
_HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/ diff -Nru ironic-12.0.0/doc/source/admin/drivers/idrac.rst ironic-12.1.0/doc/source/admin/drivers/idrac.rst --- ironic-12.0.0/doc/source/admin/drivers/idrac.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers/idrac.rst 2019-03-21 20:07:40.000000000 +0000 @@ -17,7 +17,7 @@ The ``idrac`` hardware type requires the ``python-dracclient`` library to be installed, for example:: - sudo pip install 'python-dracclient>=1.3.0' + sudo pip install 'python-dracclient>=1.5.0' To enable the ``idrac`` hardware type, add the following to your ``/etc/ironic/ironic.conf``: diff -Nru ironic-12.0.0/doc/source/admin/drivers/ilo.rst ironic-12.1.0/doc/source/admin/drivers/ilo.rst --- ironic-12.0.0/doc/source/admin/drivers/ilo.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers/ilo.rst 2019-03-21 20:07:40.000000000 +0000 @@ -23,10 +23,12 @@ Hardware type ============= -ProLiant hardware is primarily supported by the ``ilo`` hardware type. This +ProLiant hardware is primarily supported by the ``ilo`` hardware type. The +``ilo5`` hardware type is only supported on ProLiant Gen10 and later systems. +Both hardware types can be used with the reference hardware types ``ipmi`` (see :doc:`ipmitool`) and ``redfish`` (see :doc:`redfish`). For information on how -to enable the ``ilo`` hardware type, see :ref:`enable-hardware-types`. +to enable the ``ilo`` and ``ilo5`` hardware types, see +:ref:`enable-hardware-types`. .. note:: Only HPE ProLiant Gen10 servers support the hardware type ``redfish``. @@ -50,6 +52,13 @@ * `Rescue mode support`_ * `Inject NMI support`_ * `Soft power operation support`_ +* `BIOS configuration support`_ +* `IPv6 support`_ + +Apart from the above features, the ``ilo5`` hardware type also supports the +following features: + +* `Out of Band RAID Support`_ Hardware interfaces ^^^^^^^^^^^^^^^^^^^ @@ -177,8 +186,25 @@ enabled_hardware_types = ilo enabled_rescue_interfaces = agent,no-rescue -``ilo`` hardware type supports all standard ``deploy`` and ``network`` -interface implementations, see :ref:`enable-hardware-interfaces` for details. + +The ``ilo5`` hardware type supports all the ``ilo`` interfaces described above, +except for the ``raid`` interface. The details of the ``raid`` interface are +as follows: + +* raid + Supports ``ilo5`` and ``no-raid``. The default is ``ilo5``. + These can be enabled by using the ``[DEFAULT]enabled_raid_interfaces`` + option in ``ironic.conf`` as given below: + + .. code-block:: ini + + [DEFAULT] + enabled_hardware_types = ilo5 + enabled_raid_interfaces = ilo5,no-raid + + +The ``ilo`` and ``ilo5`` hardware types support all standard ``deploy`` and +``network`` interface implementations; see :ref:`enable-hardware-interfaces` +for details. The following command can be used to enroll a ProLiant node with ``ilo`` hardware type: @@ -196,14 +222,30 @@ --driver-info ilo_deploy_iso=<glance-uuid-of-deploy-iso> \ --driver-info ilo_rescue_iso=<glance-uuid-of-rescue-iso> +The following command can be used to enroll a ProLiant node with +``ilo5`` hardware type: + +.. code-block:: console + + openstack baremetal node create \ + --driver ilo5 \ + --deploy-interface direct \ + --raid-interface ilo5 \ + --rescue-interface agent \ + --driver-info ilo_address=<ilo-address> \ + --driver-info ilo_username=<ilo-username> \ + --driver-info ilo_password=<ilo-password> \ + --driver-info ilo_deploy_iso=<glance-uuid-of-deploy-iso> \ + --driver-info ilo_rescue_iso=<glance-uuid-of-rescue-iso> + Please refer to :doc:`/install/enabling-drivers` for a detailed explanation of hardware types.
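After enabling the hardware types and restarting the conductor, it can be worth confirming that the interfaces registered as expected. A quick check with the standard client (assuming a working ``openstack baremetal`` command-line setup):

.. code-block:: console

    # List loaded hardware types; ilo and/or ilo5 should appear here.
    openstack baremetal driver list

    # Show the default and enabled interfaces for a single hardware type.
    openstack baremetal driver show ilo5
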
Node configuration ^^^^^^^^^^^^^^^^^^ -* Each node is configured for ``ilo`` hardware type by setting the following - ironic node object's properties in ``driver_info``: +* Each node is configured for the ``ilo`` and ``ilo5`` hardware types by setting + the following ironic node object's properties in ``driver_info``: - ``ilo_address``: IP address or hostname of the iLO. - ``ilo_username``: Username for the iLO with administrator privileges. @@ -276,9 +318,9 @@ which contains a set of modules for managing HPE ProLiant hardware. Install ``proliantutils`` module on the ironic conductor node. Minimum - version required is 2.5.0:: + version required is 2.8.0:: - $ pip install "proliantutils>=2.5.0" + $ pip install "proliantutils>=2.8.0" * ``ipmitool`` command must be present on the service node(s) where ``ironic-conductor`` is running. On most distros, this is provided as part @@ -785,6 +827,12 @@ `iLO4 `_ and `iLO5 `_ management engine. + * proliantutils returns only active NICs for Gen10 ProLiant HPE servers. + For Gen8 and Gen9 servers it returns all discovered NICs (active and + otherwise), and ironic ports are created for all of them; the user would + need to delete the ironic ports corresponding to inactive NICs. Inspection + logs a warning if the node under inspection is Gen8 or Gen9. The operator can specify these capabilities in the nova flavor for the node to be selected for scheduling:: @@ -1529,13 +1577,19 @@ BIOS configuration support ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``ilo`` hardware type supports ``ilo`` BIOS interface. The support includes -providing manual clean steps *apply_configuration* and *factory_reset* to -manage supported BIOS settings on the node. See :ref:`bios` for more details -and examples. +The ``ilo`` and ``ilo5`` hardware types support the ``ilo`` BIOS interface. +The support includes providing manual clean steps *apply_configuration* and +*factory_reset* to manage supported BIOS settings on the node. +See :ref:`bios` for more details and examples. .. note:: - The change in the settings will take into effect after next power cycle. + Prior to the Stein release, the user was required to reboot the node + manually for the settings to take effect. Starting with the Stein + release, iLO drivers reboot the node after running clean steps related to + the BIOS configuration. The BIOS settings are cached and the clean step is + marked as successful only if all the requested settings are applied without + any failure. If application of any of the settings fails, the clean step is + marked as failed and the settings are not cached. Configuration ~~~~~~~~~~~~~ @@ -1762,6 +1816,56 @@ Server POST state is used to track the power status of HPE ProLiant Gen9 servers and beyond. +Out of Band RAID Support +^^^^^^^^^^^^^^^^^^^^^^^^ +On Gen10 and later HPE ProLiant servers, the ``ilo5`` hardware type supports +firmware-based RAID configuration as a clean step. This feature requires the +node to be configured with the ``ilo5`` hardware type and its ``raid`` +interface set to ``ilo5``. See :ref:`raid` for more information; a CLI sketch +follows the list below. + +After a successful RAID configuration, the Bare Metal service will update the +node with the following information: + +* Node ``properties/local_gb`` is set to the size of the root volume. +* Node ``properties/root_device`` is filled with ``wwn`` details of the root + volume. It is used by the iLO driver as a root device hint during provisioning.
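The sketch referenced above shows how an operator might drive the ``ilo5`` RAID interface through ironic's generic target RAID configuration workflow; the node name, the disk layout and the choice of clean steps are examples rather than a prescribed procedure:

.. code-block:: console

    # Sketch: request a RAID 10 root volume via the standard
    # target_raid_config workflow. "test_node" and the layout are examples;
    # manual cleaning requires the node to be in the manageable state.
    openstack baremetal node set test_node --target-raid-config \
        '{"logical_disks": [{"size_gb": "MAX", "raid_level": "10", "is_root_volume": true}]}'

    openstack baremetal node clean test_node --clean-steps \
        '[{"interface": "raid", "step": "delete_configuration"}, {"interface": "raid", "step": "create_configuration"}]'
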
+ +The RAID level of the root volume can later be reflected in a custom resource +class, for example ``baremetal-with-RAID10`` for RAID level 10. The flavor then +needs to be updated to request that resource class so that the server is +created using the selected node:: + + openstack baremetal node set test_node --resource-class \ + baremetal-with-RAID10 + + openstack flavor set --property \ + resources:CUSTOM_BAREMETAL_WITH_RAID10=1 test-flavor + + openstack server create --flavor test-flavor --image test-image instance-1 + + +.. note:: + Supported RAID levels for the ``ilo5`` hardware type are: 0, 1, 5, 6, 10, 50 and 60 + +IPv6 support +^^^^^^^^^^^^ +With the IPv6 support in ``proliantutils>=2.8.0``, nodes can be enrolled +into the baremetal service using their iLO IPv6 addresses. + +.. code-block:: console + + openstack baremetal node create --driver ilo --deploy-interface direct \ + --driver-info ilo_address=2001:0db8:85a3:0000:0000:8a2e:0370:7334 \ + --driver-info ilo_username=test-user \ + --driver-info ilo_password=test-password \ + --driver-info ilo_deploy_iso=test-iso \ + --driver-info ilo_rescue_iso=test-iso + + +.. note:: + No configuration changes (e.g. in ironic.conf) are required in order to + support IPv6. + .. _`ssacli documentation`: https://support.hpe.com/hpsc/doc/public/display?docId=c03909334 .. _`proliant-tools`: https://docs.openstack.org/diskimage-builder/latest/elements/proliant-tools/README.html .. _`HPE iLO4 User Guide`: https://h20566.www2.hpe.com/hpsc/doc/public/display?docId=c03334051 diff -Nru ironic-12.0.0/doc/source/admin/drivers/redfish.rst ironic-12.1.0/doc/source/admin/drivers/redfish.rst --- ironic-12.0.0/doc/source/admin/drivers/redfish.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers/redfish.rst 2019-03-21 20:07:40.000000000 +0000 @@ -128,6 +128,12 @@ Redfish specification. Not all Redfish-compliant BMCs might serve the required information, in which case bare metal node inspection will fail. +.. note:: + + The ``local_gb`` property cannot always be discovered, for example, when a + node does not have local storage or the Redfish implementation does not + support the required schema. In this case the property will be set to 0. + .. _Redfish: http://redfish.dmtf.org/ .. _Sushy: https://git.openstack.org/cgit/openstack/sushy .. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security diff -Nru ironic-12.0.0/doc/source/admin/drivers.rst ironic-12.1.0/doc/source/admin/drivers.rst --- ironic-12.0.0/doc/source/admin/drivers.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/drivers.rst 2019-03-21 20:07:40.000000000 +0000 @@ -18,6 +18,7 @@ :maxdepth: 1 drivers/cimc + drivers/ibmc drivers/idrac drivers/ilo drivers/ipmitool diff -Nru ironic-12.0.0/doc/source/admin/index.rst ironic-12.1.0/doc/source/admin/index.rst --- ironic-12.0.0/doc/source/admin/index.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/index.rst 2019-03-21 20:07:40.000000000 +0000 @@ -11,7 +11,7 @@ Drivers, Hardware Types and Hardware Interfaces Ironic Python Agent Node Hardware Inspection - Deploy steps + Node Deployment Node Cleaning Node Adoption RAID Configuration @@ -32,6 +32,11 @@ Windows Images Troubleshooting FAQ +..
toctree:: :hidden: + + deploy-steps + Dashboard Integration --------------------- diff -Nru ironic-12.0.0/doc/source/admin/multitenancy.rst ironic-12.1.0/doc/source/admin/multitenancy.rst --- ironic-12.0.0/doc/source/admin/multitenancy.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/multitenancy.rst 2019-03-21 20:07:40.000000000 +0000 @@ -58,16 +58,22 @@ - Required. Identifies a switch and can be a MAC address or an OpenFlow-based ``datapath_id``. * - ``port_id`` - - Required. Port ID on the switch, for example, Gig0/1. + - Required. Port ID on the switch/Smart NIC, for example, Gig0/1, rep0-0. * - ``switch_info`` - Optional. Used to distinguish different switch models or other vendor-specific identifier. Some ML2 plugins may require this field. - + * - ``hostname`` + - Required in case of a Smart NIC port. + Hostname of the Smart NIC device. .. note:: This isn't applicable to Infiniband ports because the network topology is discoverable by the Infiniband Subnet Manager. If specified, local_link_connection information will be ignored. + If the port is a Smart NIC port, then: + + 1. ``port_id`` is the representor port name on the Smart NIC. + 2. ``switch_id`` is not mandatory. .. _multitenancy-physnets: @@ -113,8 +119,11 @@ * Physical network support for ironic ports was added in API version 1.34, and is supported by python-ironicclient version 1.15.0 or higher. + * Smart NIC support for ironic ports was added in API version 1.53, + and is supported by python-ironicclient version 2.7.0 or higher. + The following examples assume you are using python-ironicclient version - 1.15.0 or higher. + 2.7.0 or higher. Export the following variable:: @@ -165,6 +174,17 @@ --extra client-id=$CLIENT_ID \ --physical-network physnet1 +#. Create a Smart NIC port as follows:: + + openstack baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ + --local-link-connection hostname=$HOSTNAME \ + --local-link-connection port_id=$REP_NAME \ + --pxe-enabled true \ + --physical-network physnet1 \ + --is-smartnic + + A Smart NIC port requires ``hostname``, which is the hostname of the Smart NIC, + and ``port_id``, which is the representor port name within the Smart NIC. #. Check the port configuration:: diff -Nru ironic-12.0.0/doc/source/admin/node-deployment.rst ironic-12.1.0/doc/source/admin/node-deployment.rst --- ironic-12.0.0/doc/source/admin/node-deployment.rst 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/doc/source/admin/node-deployment.rst 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,313 @@ +=============== +Node Deployment +=============== + +Overview +======== + +Node deployment is performed by the Bare Metal service to prepare a node for +use by a workload. The exact workflow used depends on a number of factors, +including the hardware type and interfaces assigned to a node. + +.. contents:: + :depth: 2 + +.. _node-deployment-deploy-steps: + +Deploy Steps +============ + +The Bare Metal service implements deployment by collecting a list of deploy +steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID +interfaces of the driver assigned to the node. These steps are then ordered by +priority and executed on the node when the node is moved to the ``deploying`` +state. + +Nodes move to the ``deploying`` state when attempting to move to the ``active`` +state (when the hardware is prepared for use by a workload). For a full +understanding of all state transitions into deployment, please see +:doc:`../contributor/states`.
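Since deployment is asynchronous, one simple way to follow a node through the states described above is to poll its ``provision_state``; this is a sketch, and ``node-0`` is an example node name:

.. code-block:: console

    # Prints e.g. "deploying" while deploy steps run, then "active".
    openstack baremetal node show node-0 -f value -c provision_state
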
+ +The Bare Metal service added support for deploy steps in the Rocky release. + +Order of execution +------------------ + +Deploy steps are ordered from higher to lower priority, where a larger integer +is a higher priority. If the same priority is used by deploy steps on different +interfaces, the following resolution order is used: Power, Management, Deploy, +BIOS, and RAID interfaces. + +.. _node-deployment-core-steps: + +Core steps +---------- + +Certain default deploy steps are designated as 'core' deploy steps. The +following deploy steps are core: + +``deploy.deploy`` + In this step the node is booted using a provisioning image, and the user + image is written to the node's disk. It has a priority of 100. + +Writing a Deploy Step +--------------------- + +Please refer to :doc:`/contributor/deploy-steps`. + +FAQ +--- + +What deploy step is running? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +To check what deploy step the node is performing or attempted to perform and +failed, run the following command; it will return the value in the node's +``driver_internal_info`` field:: + + openstack baremetal node show $node_ident -f value -c driver_internal_info + +The ``deploy_steps`` field will contain a list of all remaining steps with +their priorities, and the first one listed is the step currently in progress or +that the node failed before going into ``deploy failed`` state. + +Troubleshooting +--------------- +If deployment fails on a node, the node will be put into the ``deploy failed`` +state until the node is deprovisioned. A deprovisioned node is moved to the +``available`` state after the cleaning process has been performed successfully. + +Strategies for determining why a deploy step failed include checking the ironic +conductor logs, checking logs from the ironic-python-agent that have been +stored on the ironic conductor, or performing general hardware troubleshooting +on the node. + +Deploy Templates +================ + +Starting with the Stein release, with Bare Metal API version 1.55, deploy +templates offer a way to define a set of one or more deploy steps to be +executed with particular sets of arguments and priorities. + +Each deploy template has a name, which must be a valid trait. Traits can be +either standard or custom. Standard traits are listed in the `os_traits +library <https://docs.openstack.org/os-traits/latest/>`_. Custom traits must +meet the following requirements: + +* prefixed with ``CUSTOM_`` +* contain only upper case characters A to Z, digits 0 to 9, or underscores +* no longer than 255 characters in length + +Deploy step format +------------------ + +An invocation of a deploy step is defined in a deploy template as follows:: + + { + "interface": "<interface>", + "step": "<step name>", + "args": { + "<arg1>": "<value1>", + "<arg2>": "<value2>" + }, + "priority": <priority> + } + +A deploy template contains a list of one or more such steps. Each combination +of ``interface`` and ``step`` may only be specified once in a deploy template. + +Matching deploy templates +------------------------- + +During deployment, if any of the traits in a node's ``instance_info.traits`` +field match the name of a deploy template, then the steps from that deploy +template will be added to the list of steps to be executed by the node. + +When using the Compute service, any traits in the instance's flavor properties +or image properties are stored in ``instance_info.traits`` during deployment. +See :ref:`scheduling-traits` for further information on how traits are used for +scheduling when the Bare Metal service is used with the Compute service.
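As a sketch of how a template becomes matchable, the trait naming the template can be attached both to the node and, when the Compute service is used, to a flavor; the node and flavor names here are examples:

.. code-block:: console

    # Tag the node with the trait that names the deploy template.
    openstack baremetal node add trait node-0 CUSTOM_HYPERTHREADING_ON

    # Require the same trait in a Compute flavor so that scheduling and
    # deploy template matching line up.
    openstack flavor set --property trait:CUSTOM_HYPERTHREADING_ON=required \
        bm-hyperthreading-on
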
+
+Note that there is no ongoing relationship between a node and any templates
+that are matched during deployment. The set of matching deploy templates is
+checked at deployment time. Any subsequent updates to or deletion of those
+templates will not be reflected in the node's configuration unless it is
+redeployed or rebuilt. Similarly, if a node is rebuilt and the set of matching
+deploy templates has changed since the initial deployment, then the resulting
+configuration of the node may be different from the initial deployment.
+
+Overriding default deploy steps
+-------------------------------
+
+A deploy step is enabled by default if it has a non-zero default priority.
+A default deploy step may be overridden in a deploy template. If the step's
+priority is a positive integer it will be executed with the specified priority
+and arguments. If the step's priority is zero, the step will not be executed.
+
+If a `core deploy step `_ is included in a
+deploy template, it can only be assigned a priority of zero to disable it.
+
+Creating a deploy template via API
+----------------------------------
+
+A deploy template can be created using the Bare Metal API::
+
+    POST /v1/deploy_templates
+
+Here is an example of the body of a request to create a deploy template with
+a single step:
+
+.. code-block:: json
+
+    {
+        "name": "CUSTOM_HYPERTHREADING_ON",
+        "steps": [
+            {
+                "interface": "bios",
+                "step": "apply_configuration",
+                "args": {
+                    "settings": [
+                        {
+                            "name": "LogicalProc",
+                            "value": "Enabled"
+                        }
+                    ]
+                },
+                "priority": 150
+            }
+        ]
+    }
+
+Further information on this API is available `here
+`__.
+
+Creating a deploy template via "openstack baremetal" client
+-----------------------------------------------------------
+
+A deploy template can be created via the ``openstack baremetal deploy template
+create`` command, starting with ``python-ironicclient`` 2.7.0.
+
+The argument ``--steps`` must be specified. Its value is one of:
+
+- a JSON string
+- path to a JSON file whose contents are passed to the API
+- '-', to read from stdin. This allows piping in the deploy steps.
+
+Example of creating a deploy template with a single step using a JSON string:
+
+.. code-block:: console
+
+    openstack baremetal deploy template create \
+        CUSTOM_HYPERTHREADING_ON \
+        --steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "LogicalProc", "value": "Enabled"}]}, "priority": 150}]'
+
+Or with a file:
+
+.. code-block:: console
+
+    openstack baremetal deploy template create \
+        CUSTOM_HYPERTHREADING_ON \
+        --steps my-deploy-steps.txt
+
+Or with stdin:
+
+.. code-block:: console
+
+    cat my-deploy-steps.txt | openstack baremetal deploy template create \
+        CUSTOM_HYPERTHREADING_ON \
+        --steps -
+
+Example of use with the Compute service
+---------------------------------------
+
+.. note:: The deploy steps used in this example are for demonstration
+   purposes only.
+
+In the following example, we have a node with the following node traits:
+
+.. code-block:: json
+
+    [
+        "CUSTOM_HYPERTHREADING_ON"
+    ]
+
+We also have a flavor, ``bm-hyperthreading-on``, in the Compute service with
+the following property::
+
+    trait:CUSTOM_HYPERTHREADING_ON:required
+
+Creating a Compute instance with this flavor will ensure that the instance is
+scheduled only to Bare Metal nodes with the ``CUSTOM_HYPERTHREADING_ON``
+trait.
+
+We could then create a Bare Metal deploy template with the name
+``CUSTOM_HYPERTHREADING_ON`` and a deploy step that enables Hyperthreading:
+
+.. code-block:: json
+
+    {
+        "name": "CUSTOM_HYPERTHREADING_ON",
+        "steps": [
+            {
+                "interface": "bios",
+                "step": "apply_configuration",
+                "args": {
+                    "settings": [
+                        {
+                            "name": "LogicalProc",
+                            "value": "Enabled"
+                        }
+                    ]
+                },
+                "priority": 150
+            }
+        ]
+    }
+
+When an instance is created using the ``bm-hyperthreading-on`` flavor, the
+deploy steps of the deploy template ``CUSTOM_HYPERTHREADING_ON`` will be
+executed during the deployment of the scheduled node, causing Hyperthreading
+to be enabled in the node's BIOS configuration.
+
+To make this example more dynamic, consider adding a second trait to the node:
+
+.. code-block:: json
+
+    [
+        "CUSTOM_HYPERTHREADING_ON",
+        "CUSTOM_HYPERTHREADING_OFF"
+    ]
+
+We could also create a second flavor, ``bm-hyperthreading-off``, with the
+following property::
+
+    trait:CUSTOM_HYPERTHREADING_OFF:required
+
+Finally, we create a deploy template with the name
+``CUSTOM_HYPERTHREADING_OFF`` and a different set of deploy steps:
+
+.. code-block:: json
+
+    {
+        "name": "CUSTOM_HYPERTHREADING_OFF",
+        "steps": [
+            {
+                "interface": "bios",
+                "step": "apply_configuration",
+                "args": {
+                    "settings": [
+                        {
+                            "name": "LogicalProc",
+                            "value": "Disabled"
+                        }
+                    ]
+                },
+                "priority": 150
+            }
+        ]
+    }
+
+Creating a Compute instance with the ``bm-hyperthreading-off`` flavor will
+cause the scheduled node to have Hyperthreading disabled in the BIOS during
+deployment.
+
+We now have a way to create Compute instances with different configurations
+by choosing between different Compute flavors, supported by a single Bare
+Metal node that is dynamically configured during deployment.
diff -Nru ironic-12.0.0/doc/source/admin/notifications.rst ironic-12.1.0/doc/source/admin/notifications.rst
--- ironic-12.0.0/doc/source/admin/notifications.rst	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/doc/source/admin/notifications.rst	2019-03-21 20:07:40.000000000 +0000
@@ -60,6 +60,11 @@
 new fields, while macroversion bumps are backwards-incompatible and may have
 fields removed.
 
+Versioned notifications are emitted by default to the
+``ironic_versioned_notifications`` topic. This can be changed with the
+``versioned_notifications_topics`` option in ironic.conf.
+
 Available notifications
 =======================
 
 ..
TODO(mariojv) Add some form of tabular formatting below @@ -113,6 +118,53 @@ "publisher_id":"ironic-api.hostname02" } +List of CRUD notifications for deploy template: + +* ``baremetal.deploy_template.create.start`` +* ``baremetal.deploy_template.create.end`` +* ``baremetal.deploy_template.create.error`` +* ``baremetal.deploy_template.update.start`` +* ``baremetal.deploy_template.update.end`` +* ``baremetal.deploy_template.update.error`` +* ``baremetal.deploy_template.delete.start`` +* ``baremetal.deploy_template.delete.end`` +* ``baremetal.deploy_template.delete.error`` + +Example of deploy template CRUD notification:: + + { + "priority": "info", + "payload":{ + "ironic_object.namespace":"ironic", + "ironic_object.name":"DeployTemplateCRUDPayload", + "ironic_object.version":"1.0", + "ironic_object.data":{ + "created_at": "2019-02-10T10:13:03+00:00", + "extra": {}, + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [ + { + "name": "LogicalProc", + "value": "Enabled" + } + ] + }, + "priority": 150 + } + ], + "updated_at": "2019-02-27T21:11:03+00:00", + "uuid": "1910f669-ce8b-43c2-b1d8-cf3d65be815e" + } + }, + "event_type":"baremetal.deploy_template.update.end", + "publisher_id":"ironic-api.hostname02" + } + List of CRUD notifications for node: * ``baremetal.node.create.start`` @@ -206,12 +258,13 @@ "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"PortCRUDPayload", - "ironic_object.version":"1.2", + "ironic_object.version":"1.3", "ironic_object.data":{ "address": "77:66:23:34:11:b7", "created_at": "2016-02-11T15:23:03+00:00", "node_uuid": "5b236cab-ad4e-4220-b57c-e827e858745a", "extra": {}, + "is_smartnic": True, "local_link_connection": {}, "physical_network": "physnet1", "portgroup_uuid": "bd2f385e-c51c-4752-82d1-7a9ec2c25f24", diff -Nru ironic-12.0.0/doc/source/cli/ironic-status.rst ironic-12.1.0/doc/source/cli/ironic-status.rst --- ironic-12.0.0/doc/source/cli/ironic-status.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/cli/ironic-status.rst 2019-03-21 20:07:40.000000000 +0000 @@ -75,4 +75,5 @@ **12.0.0 (Stein)** - * Placeholder to be filled in with checks as they are added in Stein. + * Adds a check for compatibility of the object versions with the release + of ironic. diff -Nru ironic-12.0.0/doc/source/contributor/code-contribution-guide.rst ironic-12.1.0/doc/source/contributor/code-contribution-guide.rst --- ironic-12.0.0/doc/source/contributor/code-contribution-guide.rst 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/code-contribution-guide.rst 2019-03-21 20:07:40.000000000 +0000 @@ -62,7 +62,7 @@ https://storyboard.openstack.org/#!/project/943 Mailing list (prefix Subject line with ``[ironic]``) - http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev + http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss Wiki https://wiki.openstack.org/Ironic diff -Nru ironic-12.0.0/doc/source/contributor/dev-quickstart.rst ironic-12.1.0/doc/source/contributor/dev-quickstart.rst --- ironic-12.0.0/doc/source/contributor/dev-quickstart.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/dev-quickstart.rst 2019-03-21 20:07:40.000000000 +0000 @@ -98,7 +98,7 @@ All unit tests should be run using tox. 
To run Ironic's entire test suite:: - # to run the py27, py35 unit tests, and the style tests + # to run the py27, py3 unit tests, and the style tests tox To run a specific test or tests, use the "-e" option followed by the tox target diff -Nru ironic-12.0.0/doc/source/contributor/faq.rst ironic-12.1.0/doc/source/contributor/faq.rst --- ironic-12.0.0/doc/source/contributor/faq.rst 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/faq.rst 2019-03-21 20:07:40.000000000 +0000 @@ -131,7 +131,7 @@ .. _Ironic wiki: https://wiki.openstack.org/wiki/Ironic .. _weekly Ironic meeting: https://wiki.openstack.org/wiki/Meetings/Ironic .. _IRC: https://wiki.openstack.org/wiki/Ironic#IRC -.. _mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev +.. _mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _process for making decisions: http://lists.openstack.org/pipermail/openstack-dev/2016-May/095460.html ...add support for GMRs to new executables and extending the GMR? diff -Nru ironic-12.0.0/doc/source/contributor/ironic-multitenant-networking.rst ironic-12.1.0/doc/source/contributor/ironic-multitenant-networking.rst --- ironic-12.0.0/doc/source/contributor/ironic-multitenant-networking.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/ironic-multitenant-networking.rst 2019-03-21 20:07:40.000000000 +0000 @@ -47,9 +47,6 @@ ENABLE_TENANT_VLANS=True Q_ML2_TENANT_NETWORK_TYPE=vlan TENANT_VLAN_RANGE=100:150 - # Neutron public network type was changed to flat by default - # in neutron commit 1554adef26bd3bd184ddab668660428bdf392232 - Q_USE_PROVIDERNET_FOR_PUBLIC=False # Credentials ADMIN_PASSWORD=password diff -Nru ironic-12.0.0/doc/source/contributor/releasing.rst ironic-12.1.0/doc/source/contributor/releasing.rst --- ironic-12.0.0/doc/source/contributor/releasing.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/releasing.rst 2019-03-21 20:07:40.000000000 +0000 @@ -127,6 +127,8 @@ are used to migrate from an old release to this latest release; they shouldn't be needed after that.) + * remove any model class names from ``ironic.cmd.dbsync.NEW_MODELS``. + As **ironic-tempest-plugin** is branchless, we need to submit a patch adding stable jobs to its master branch. `Example for Queens `_. diff -Nru ironic-12.0.0/doc/source/contributor/rolling-upgrades.rst ironic-12.1.0/doc/source/contributor/rolling-upgrades.rst --- ironic-12.0.0/doc/source/contributor/rolling-upgrades.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/contributor/rolling-upgrades.rst 2019-03-21 20:07:40.000000000 +0000 @@ -372,7 +372,9 @@ - Any change of fields or change in signature of remotable methods needs a bump of the object version. The object versions are also maintained in ``ironic/common/release_mappings.py``. -- New objects must be added to ``ironic/common/release_mappings.py``. +- New objects must be added to ``ironic/common/release_mappings.py``. Also for + the first releases they should be excluded from the version check by adding + their class names to the ``NEW_MODELS`` list in ``ironic/cmd/dbsync.py``. - The arguments of remotable methods (methods which are remoted to the conductor via RPC) can only be added as optional. They cannot be removed or changed in an incompatible way (to the previous release). @@ -500,3 +502,6 @@ with the expected (or supported) versions of these objects. 
The supported versions are the versions specified in
``ironic.common.release_mappings.RELEASE_MAPPING``.
+The newly created tables cannot pass this check and thus have to be excluded
+by adding their object class names (e.g. ``Node``) to
+``ironic.cmd.dbsync.NEW_MODELS``.
diff -Nru ironic-12.0.0/doc/source/contributor/webapi-version-history.rst ironic-12.1.0/doc/source/contributor/webapi-version-history.rst
--- ironic-12.0.0/doc/source/contributor/webapi-version-history.rst	2018-12-19 10:02:37.000000000 +0000
+++ ironic-12.1.0/doc/source/contributor/webapi-version-history.rst	2019-03-21 20:07:40.000000000 +0000
@@ -2,20 +2,89 @@
 REST API Version History
 ========================
 
-1.49 (Stein, master)
+1.56 (Stein, 12.1.0)
+--------------------
+
+Added the ability for the ``configdrive`` parameter submitted with the
+deployment of a node to include ``meta_data``, ``network_data`` and
+``user_data`` dictionary fields. Ironic will now use the supplied data to
+create a configuration drive for the user. Prior uses of the ``configdrive``
+field are unaffected.
+
+1.55 (Stein, 12.1.0)
+--------------------
+
+Added the following new endpoints for deploy templates:
+
+* ``GET /v1/deploy_templates`` to list all deploy templates.
+* ``GET /v1/deploy_templates/<deploy template ident>`` to retrieve details
+  of a deploy template.
+* ``POST /v1/deploy_templates`` to create a deploy template.
+* ``PATCH /v1/deploy_templates/<deploy template ident>`` to update a
+  deploy template.
+* ``DELETE /v1/deploy_templates/<deploy template ident>`` to delete a
+  deploy template.
+
+1.54 (Stein, 12.1.0)
+--------------------
+
+Added new endpoints for external ``events``:
+
+* ``POST /v1/events`` for creating events. (This endpoint is only intended
+  for internal consumption.)
+
+1.53 (Stein, 12.1.0)
+--------------------
+
+Added ``is_smartnic`` field to the port object to enable Smart NIC port
+creation, in addition to the local link connection attributes ``port_id``
+and ``hostname``.
+
+1.52 (Stein, 12.1.0)
+--------------------
+
+Added the allocation API, allowing a node to be reserved for deployment
+based on resource class and traits. The new endpoints are:
+
+* ``POST /v1/allocations`` to request an allocation.
+* ``GET /v1/allocations`` to list all allocations.
+* ``GET /v1/allocations/<allocation ident>`` to retrieve the allocation
+  details.
+* ``GET /v1/nodes/<node ident>/allocation`` to retrieve an allocation
+  associated with the node.
+* ``DELETE /v1/allocations/<allocation ident>`` to remove the allocation.
+* ``DELETE /v1/nodes/<node ident>/allocation`` to remove an allocation
+  associated with the node.
+
+Also added a new field ``allocation_uuid`` to the node resource.
+
+1.51 (Stein, 12.1.0)
+--------------------
+
+Added ``description`` field to the node object to enable operators to store
+any information related to the node. The field is limited to 4096 characters.
+
+1.50 (Stein, 12.1.0)
+--------------------
+
+Added ``owner`` field to the node object to enable operators to store
+information about the owner of a node. The field is up to 255 characters
+and MAY be used at a later point in time to allow designation and delegation
+of permissions.
+
+1.49 (Stein, 12.0.0)
 --------------------
 
 Added new endpoints for retrieving conductors information, and added a
 ``conductor`` field to node object.
 
-1.48 (Stein, master)
+1.48 (Stein, 12.0.0)
 --------------------
 
 Added ``protected`` field to the node object to allow protecting deployed
 nodes from undeploying, rebuilding or deletion. Also added
 ``protected_reason`` to specify the reason of making the node protected.
-1.47 (Stein, master)
+1.47 (Stein, 12.0.0)
 --------------------
 
 Added ``automated_clean`` field to the node object, enabling cleaning per
 node.
diff -Nru ironic-12.0.0/doc/source/images/ironic_standalone_with_ibmc_driver.svg ironic-12.1.0/doc/source/images/ironic_standalone_with_ibmc_driver.svg
--- ironic-12.0.0/doc/source/images/ironic_standalone_with_ibmc_driver.svg	1970-01-01 00:00:00.000000000 +0000
+++ ironic-12.1.0/doc/source/images/ironic_standalone_with_ibmc_driver.svg	2019-03-21 20:07:40.000000000 +0000
@@ -0,0 +1,1309 @@
+[1309 lines of SVG markup omitted: a sequence diagram of a standalone ironic
+deployment with the iBMC driver. Participants: User, API, Conductor, DHCP,
+TFTP, Node. Flow: the user creates an ibmc driver node, sets driver_info
+(ibmc_address, ibmc_username, ibmc_password, etc.) and instance_info
+(image_source, root_gb, etc.), validates the power, management and vendor
+interfaces, creates a bare metal node network port and sets provision_state,
+optionally passing a configdrive. The conductor sets the PXE boot device and
+reboots the node through the iBMC management and power interfaces; the node
+PXE-boots the agent ramdisk via DHCP and TFTP; IPA exposes the disks via
+iSCSI; the conductor attaches, copies the user image and configdrive (if
+present), detaches, installs a boot loader if requested, sets the boot device
+either to PXE or to disk, collects ramdisk logs, powers the node off and on,
+and marks the node as ACTIVE.]
\ No newline at end of file
diff -Nru ironic-12.0.0/doc/source/install/configdrive.rst ironic-12.1.0/doc/source/install/configdrive.rst
--- ironic-12.0.0/doc/source/install/configdrive.rst	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/doc/source/install/configdrive.rst	2019-03-21 20:07:40.000000000 +0000
@@ -55,6 +55,14 @@
 
     openstack baremetal node deploy $node_identifier --config-drive /dir/configdrive_files
 
+Starting with the Stein release and `ironicclient` 2.7.0, you can request
+building a configdrive on the server side by providing a JSON object with the
+keys ``meta_data``, ``user_data`` and ``network_data`` (all optional), e.g.:
+
+.. 
code-block:: bash + + openstack baremetal node deploy $node_identifier \ + --config-drive '{"meta_data": {"hostname": "server1.cluster"}}' Configuration drive storage in an object store ---------------------------------------------- diff -Nru ironic-12.0.0/doc/source/install/configure-pxe.rst ironic-12.1.0/doc/source/install/configure-pxe.rst --- ironic-12.0.0/doc/source/install/configure-pxe.rst 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/install/configure-pxe.rst 2019-03-21 20:07:40.000000000 +0000 @@ -1,11 +1,11 @@ Configuring PXE and iPXE ======================== -PXE setup ---------- +TFTP server setup +----------------- -If you will be using PXE, it needs to be set up on the Bare Metal service -node(s) where ``ironic-conductor`` is running. +In order to deploy instances via PXE, a TFTP server needs to be +set up on the Bare Metal service nodes which run the ``ironic-conductor``. #. Make sure the tftp root directory exist and can be written to by the user the ``ironic-conductor`` is running as. For example:: @@ -13,27 +13,23 @@ sudo mkdir -p /tftpboot sudo chown -R ironic /tftpboot -#. Install tftp server and the syslinux package with the PXE boot images: +#. Install tftp server: - Ubuntu (Up to and including 14.04):: - - sudo apt-get install xinetd tftpd-hpa syslinux-common syslinux - - Ubuntu (14.10 and after):: + Ubuntu:: - sudo apt-get install xinetd tftpd-hpa syslinux-common pxelinux + sudo apt-get install xinetd tftpd-hpa RHEL7/CentOS7:: - sudo yum install tftp-server syslinux-tftpboot xinetd + sudo yum install tftp-server xinetd Fedora:: - sudo dnf install tftp-server syslinux-tftpboot xinetd + sudo dnf install tftp-server xinetd SUSE:: - sudo zypper install tftp syslinux xinetd + sudo zypper install tftp xinetd #. Using xinetd to provide a tftp server setup to serve ``/tftpboot``. Create or edit ``/etc/xinetd.d/tftp`` as below:: @@ -63,55 +59,16 @@ sudo systemctl restart xinetd -.. note:: - - In certain environments the network's MTU may cause TFTP UDP packets to get - fragmented. Certain PXE firmwares struggle to reconstruct the fragmented - packets which can cause significant slow down or even prevent the server from - PXE booting. In order to avoid this, TFTPd provides an option to limit the - packet size so that it they do not get fragmented. To set this additional - option in the server_args above:: - - --blocksize - -#. Copy the PXE image to ``/tftpboot``. The PXE image might be found at [1]_: - - Ubuntu (Up to and including 14.04):: - - sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot - - Ubuntu (14.10 and after):: - - sudo cp /usr/lib/PXELINUX/pxelinux.0 /tftpboot - - RHEL7/CentOS7/SUSE:: - - sudo cp /usr/share/syslinux/pxelinux.0 /tftpboot - -#. If whole disk images need to be deployed via PXE-netboot, copy the - chain.c32 image to ``/tftpboot`` to support it: - - Ubuntu (Up to and including 14.04):: - - sudo cp /usr/lib/syslinux/chain.c32 /tftpboot - - Ubuntu (14.10 and after):: - - sudo cp /usr/lib/syslinux/modules/bios/chain.c32 /tftpboot - - Fedora:: - - sudo cp /boot/extlinux/chain.c32 /tftpboot - - RHEL7/CentOS7/SUSE:: + .. note:: - sudo cp /usr/share/syslinux/chain.c32 /tftpboot/ + In certain environments the network's MTU may cause TFTP UDP packets to get + fragmented. Certain PXE firmwares struggle to reconstruct the fragmented + packets which can cause significant slow down or even prevent the server + from PXE booting. In order to avoid this, TFTPd provides an option to limit + the packet size so that it they do not get fragmented. 
To set this + additional option in the server_args above:: -#. If the version of syslinux is **greater than** 4 we also need to make sure - that we copy the library modules into the ``/tftpboot`` directory [2]_ - [1]_. For example, for Ubuntu run:: - - sudo cp /usr/lib/syslinux/modules/*/ldlinux.* /tftpboot + --blocksize #. Create a map file in the tftp boot directory (``/tftpboot``):: @@ -120,22 +77,17 @@ echo 're ^(^/) /tftpboot/\1' >> /tftpboot/map-file echo 're ^([^/]) /tftpboot/\1' >> /tftpboot/map-file -.. [1] On **Fedora/RHEL** the ``syslinux-tftpboot`` package already install - the library modules and PXE image at ``/tftpboot``. If the TFTP server - is configured to listen to a different directory you should copy the - contents of ``/tftpboot`` to the configured directory -.. [2] http://www.syslinux.org/wiki/index.php/Library_modules - -PXE UEFI setup --------------- +UEFI PXE - Grub setup +--------------------- -If you want to deploy on a UEFI supported bare metal, perform these additional -steps on the ironic conductor node to configure the PXE UEFI environment. +In order to deploy instances with PXE on bare metal nodes which support +UEFI, perform these additional steps on the ironic conductor node to configure +the PXE UEFI environment. #. Install Grub2 and shim packages: - Ubuntu (14.04LTS and later):: + Ubuntu (16.04LTS and later):: sudo apt-get install grub-efi-amd64-signed shim-signed @@ -153,7 +105,7 @@ #. Copy grub and shim boot loader images to ``/tftpboot`` directory: - Ubuntu (14.04LTS and later):: + Ubuntu (16.04LTS and later):: sudo cp /usr/lib/shim/shim.efi.signed /tftpboot/bootx64.efi sudo cp /usr/lib/grub/x86_64-efi-signed/grubnetx64.efi.signed /tftpboot/grubx64.efi @@ -205,10 +157,8 @@ sudo chmod 644 $GRUB_DIR/grub.cfg -#. Update the bare metal node with ``boot_mode`` capability in node's properties - field:: - - openstack baremetal node set --property capabilities='boot_mode:uefi' +#. Update the bare metal node with ``boot_mode:uefi`` capability in + node's properties field. See :ref:`boot_mode_support` for details. #. Make sure that bare metal node is configured to boot in UEFI boot mode and boot device is set to network/pxe. @@ -219,8 +169,73 @@ for them. Please check :doc:`../admin/drivers` for information on whether your driver requires manual UEFI configuration. -.. note:: - For more information on configuring boot modes, see :ref:`boot_mode_support`. + +Legacy BIOS - Syslinux setup +---------------------------- + +In order to deploy instances with PXE on bare metal using Legacy BIOS boot +mode, perform these additional steps on the ironic conductor node. + +#. Install the syslinux package with the PXE boot images: + + Ubuntu (16.04LTS and later):: + + sudo apt-get install syslinux-common pxelinux + + RHEL7/CentOS7:: + + sudo yum install syslinux-tftpboot + + Fedora:: + + sudo dnf install syslinux-tftpboot + + SUSE:: + + sudo zypper install syslinux + +#. Copy the PXE image to ``/tftpboot``. The PXE image might be found at [1]_: + + Ubuntu (16.04LTS and later):: + + sudo cp /usr/lib/PXELINUX/pxelinux.0 /tftpboot + + RHEL7/CentOS7/SUSE:: + + sudo cp /usr/share/syslinux/pxelinux.0 /tftpboot + +#. If whole disk images need to be deployed via PXE-netboot, copy the + chain.c32 image to ``/tftpboot`` to support it: + + Ubuntu (16.04LTS and later):: + + sudo cp /usr/lib/syslinux/modules/bios/chain.c32 /tftpboot + + Fedora:: + + sudo cp /boot/extlinux/chain.c32 /tftpboot + + RHEL7/CentOS7/SUSE:: + + sudo cp /usr/share/syslinux/chain.c32 /tftpboot/ + +#. 
If the version of syslinux is **greater than** 4 we also need to make sure + that we copy the library modules into the ``/tftpboot`` directory [2]_ + [1]_. For example, for Ubuntu run:: + + sudo cp /usr/lib/syslinux/modules/*/ldlinux.* /tftpboot + +#. Update the bare metal node with ``boot_mode:bios`` capability in + node's properties field. See :ref:`boot_mode_support` for details. + +#. Make sure that bare metal node is configured to boot in Legacy BIOS boot mode + and boot device is set to network/pxe. + +.. [1] On **Fedora/RHEL** the ``syslinux-tftpboot`` package already installs + the library modules and PXE image at ``/tftpboot``. If the TFTP server + is configured to listen to a different directory you should copy the + contents of ``/tftpboot`` to the configured directory +.. [2] http://www.syslinux.org/wiki/index.php/Library_modules iPXE setup diff -Nru ironic-12.0.0/doc/source/install/include/configure-ironic-api.inc ironic-12.1.0/doc/source/install/include/configure-ironic-api.inc --- ironic-12.0.0/doc/source/install/include/configure-ironic-api.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/install/include/configure-ironic-api.inc 2019-03-21 20:07:40.000000000 +0000 @@ -29,6 +29,47 @@ # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ + Alternatively, you can use JSON RPC for interactions between + ironic-conductor and ironic-api. Enable it in the configuration and provide + the keystone credentials to use for authentication: + + .. code-block:: ini + + [DEFAULT] + + rpc_transport = json-rpc + + [json_rpc] + + # Authentication type to load (string value) + auth_type = password + + # Authentication URL (string value) + auth_url=https://IDENTITY_IP:5000/ + + # Username (string value) + username=ironic + + # User's password (string value) + password=IRONIC_PASSWORD + + # Project name to scope to (string value) + project_name=service + + # Domain ID containing project (string value) + project_domain_id=default + + # User's domain id (string value) + user_domain_id=default + + If you use port other than the default 8089 for JSON RPC, you have to + configure it, for example: + + .. code-block:: ini + + [json_rpc] + port = 9999 + #. Configure the ironic-api service to use these credentials with the Identity service. Replace ``PUBLIC_IDENTITY_IP`` with the public IP of the Identity server, ``PRIVATE_IDENTITY_IP`` with the private IP of the Identity server diff -Nru ironic-12.0.0/doc/source/install/include/configure-ironic-conductor.inc ironic-12.1.0/doc/source/install/include/configure-ironic-conductor.inc --- ironic-12.0.0/doc/source/install/include/configure-ironic-conductor.inc 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/doc/source/install/include/configure-ironic-conductor.inc 2019-03-21 20:07:40.000000000 +0000 @@ -42,6 +42,56 @@ # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ + Alternatively, you can use JSON RPC for interactions between + ironic-conductor and ironic-api. Enable it in the configuration and provide + the keystone credentials to use for authenticating incoming requests (can + be the same as for the API): + + .. code-block:: ini + + [DEFAULT] + + rpc_transport = json-rpc + + [keystone_authtoken] + + # Authentication type to load (string value) + auth_type=password + + # Complete public Identity API endpoint (string value) + www_authenticate_uri=http://PUBLIC_IDENTITY_IP:5000 + + # Complete admin Identity API endpoint. 
(string value)
+      auth_url=http://PRIVATE_IDENTITY_IP:5000
+
+      # Service username. (string value)
+      username=ironic
+
+      # Service account password. (string value)
+      password=IRONIC_PASSWORD
+
+      # Service tenant name. (string value)
+      project_name=service
+
+      # Domain name containing project (string value)
+      project_domain_name=Default
+
+      # User's domain name (string value)
+      user_domain_name=Default
+
+   You can optionally change the host and the port the JSON RPC service will
+   bind to, for example:
+
+   .. code-block:: ini
+
+      [json_rpc]
+      host_ip = 192.168.0.10
+      port = 9999
+
+   .. warning::
+      Hostnames of ironic-conductor machines must be resolvable by ironic-api
+      services when JSON RPC is used.
+
 #. Configure credentials for accessing other OpenStack services.
 
    In order to communicate with other OpenStack services, the Bare Metal
diff -Nru ironic-12.0.0/doc/source/install/standalone.rst ironic-12.1.0/doc/source/install/standalone.rst
--- ironic-12.0.0/doc/source/install/standalone.rst	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/doc/source/install/standalone.rst	2019-03-21 20:07:40.000000000 +0000
@@ -30,6 +30,14 @@
    Networking since it will do all the dynamically changing configurations for
    you.
 
+#. If you want to disable using a messaging broker between conductor and API
+   processes, switch to JSON RPC instead:
+
+   .. code-block:: ini
+
+      [DEFAULT]
+      rpc_transport = json-rpc
+
 If you don't use Image service, it's possible to provide images to Bare Metal
 service via a URL.
 
@@ -109,14 +117,24 @@
    while :ref:`iscsi-deploy` also accepts links to local files (prefixed
    with ``file://``).
 
-  * ``root_gb`` - size of the root partition, mandatory.
+  * ``root_gb`` - size of the root partition, required for partition images.
 
-    .. TODO(dtantsur): root_gb should not be mandatory for whole disk images,
-       but it seems to be.
+    .. note::
+       Older versions of the Bare Metal service required a positive integer
+       for ``root_gb`` even for whole-disk images. You may want to set it
+       for compatibility.
 
   * ``image_checksum`` - MD5 checksum of the image specified by
     ``image_source``, only required for :ref:`direct-deploy`.
 
+    Starting with the Stein release of ironic-python-agent, this can also be
+    a URL to a checksums file, e.g. one generated with:
+
+    .. code-block:: shell
+
+       cd /path/to/http/root
+       md5sum *.img > checksums
+
   * ``kernel``, ``ramdisk`` - HTTP(s) or file URLs of the kernel and
     initramfs of the target OS, only required for partition images.
diff -Nru ironic-12.0.0/driver-requirements.txt ironic-12.1.0/driver-requirements.txt
--- ironic-12.0.0/driver-requirements.txt	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/driver-requirements.txt	2019-03-21 20:07:40.000000000 +0000
@@ -4,12 +4,12 @@
 # python projects they should package as optional dependencies for Ironic.
# These are available on pypi -proliantutils>=2.6.0 +proliantutils>=2.7.0 pysnmp>=4.3.0,<5.0.0 python-ironic-inspector-client>=1.5.0 python-scciclient>=0.8.0 -UcsSdk==0.8.2.2 -python-dracclient>=1.3.0 +UcsSdk==0.8.2.2;python_version<'3' +python-dracclient>=1.5.0 python-xclarityclient>=0.1.6 # The CIMC drivers use the Cisco IMC SDK version 0.7.2 or greater @@ -20,3 +20,6 @@ # Ansible-deploy interface ansible>=2.4 + +# HUAWEI iBMC hardware type uses the python-ibmcclient library +python-ibmcclient>=0.1.0 diff -Nru ironic-12.0.0/ironic/api/controllers/base.py ironic-12.1.0/ironic/api/controllers/base.py --- ironic-12.0.0/ironic/api/controllers/base.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/base.py 2019-03-21 20:07:40.000000000 +0000 @@ -22,21 +22,35 @@ from ironic.common.i18n import _ -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" +class AsDictMixin(object): + """Mixin class adding an as_dict() method.""" def as_dict(self): """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) + def _attr_as_pod(attr): + """Return an attribute as a Plain Old Data (POD) type.""" + if isinstance(attr, list): + return [_attr_as_pod(item) for item in attr] + # Recursively evaluate objects that support as_dict(). + try: + return attr.as_dict() + except AttributeError: + return attr + + return dict((k, _attr_as_pod(getattr(self, k))) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) + +class APIBase(wtypes.Base, AsDictMixin): + + created_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is created""" + + updated_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is updated""" + def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. diff -Nru ironic-12.0.0/ironic/api/controllers/v1/allocation.py ironic-12.1.0/ironic/api/controllers/v1/allocation.py --- ironic-12.0.0/ironic/api/controllers/v1/allocation.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/allocation.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,471 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
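+
+"""API controllers for the allocation resource, introduced in Bare Metal API
+version 1.52."""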
+ +import datetime + +from ironic_lib import metrics_utils +from oslo_utils import uuidutils +import pecan +from six.moves import http_client +from webob import exc as webob_exc +import wsme +from wsme import types as wtypes + +from ironic.api.controllers import base +from ironic.api.controllers import link +from ironic.api.controllers.v1 import collection +from ironic.api.controllers.v1 import notification_utils as notify +from ironic.api.controllers.v1 import types +from ironic.api.controllers.v1 import utils as api_utils +from ironic.api import expose +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import policy +from ironic.common import states as ir_states +from ironic import objects + +METRICS = metrics_utils.get_metrics_logger(__name__) + + +class Allocation(base.APIBase): + """API representation of an allocation. + + This class enforces type checking and value constraints, and converts + between the internal object model and the API representation of a + allocation. + """ + + uuid = types.uuid + """Unique UUID for this allocation""" + + extra = {wtypes.text: types.jsontype} + """This allocation's meta data""" + + node_uuid = wsme.wsattr(types.uuid, readonly=True) + """The UUID of the node this allocation belongs to""" + + name = wsme.wsattr(wtypes.text) + """The logical name for this allocation""" + + links = wsme.wsattr([link.Link], readonly=True) + """A list containing a self link and associated allocation links""" + + state = wsme.wsattr(wtypes.text, readonly=True) + """The current state of the allocation""" + + last_error = wsme.wsattr(wtypes.text, readonly=True) + """Last error that happened to this allocation""" + + resource_class = wsme.wsattr(wtypes.StringType(max_length=80), + mandatory=True) + """Requested resource class for this allocation""" + + # NOTE(dtantsur): candidate_nodes is a list of UUIDs on the database level, + # but the API level also accept names, converting them on fly. + candidate_nodes = wsme.wsattr([wtypes.text]) + """Candidate nodes for this allocation""" + + traits = wsme.wsattr([wtypes.text]) + """Requested traits for the allocation""" + + def __init__(self, **kwargs): + self.fields = [] + fields = list(objects.Allocation.fields) + # NOTE: node_uuid is not part of objects.Allocation.fields + # because it's an API-only attribute + fields.append('node_uuid') + for field in fields: + # Skip fields we do not expose. 
+ if not hasattr(self, field): + continue + self.fields.append(field) + setattr(self, field, kwargs.get(field, wtypes.Unset)) + + @staticmethod + def _convert_with_links(allocation, url): + """Add links to the allocation.""" + allocation.links = [ + link.Link.make_link('self', url, 'allocations', allocation.uuid), + link.Link.make_link('bookmark', url, 'allocations', + allocation.uuid, bookmark=True) + ] + return allocation + + @classmethod + def convert_with_links(cls, rpc_allocation, fields=None, sanitize=True): + """Add links to the allocation.""" + allocation = Allocation(**rpc_allocation.as_dict()) + + if rpc_allocation.node_id: + try: + allocation.node_uuid = objects.Node.get_by_id( + pecan.request.context, + rpc_allocation.node_id).uuid + except exception.NodeNotFound: + allocation.node_uuid = None + else: + allocation.node_uuid = None + + if fields is not None: + api_utils.check_for_invalid_fields(fields, allocation.fields) + + # Make the default values consistent between POST and GET API + if allocation.candidate_nodes is None: + allocation.candidate_nodes = [] + if allocation.traits is None: + allocation.traits = [] + + allocation = cls._convert_with_links(allocation, + pecan.request.host_url) + + if not sanitize: + return allocation + + allocation.sanitize(fields) + + return allocation + + def sanitize(self, fields=None): + """Removes sensitive and unrequested data. + + Will only keep the fields specified in the ``fields`` parameter. + + :param fields: + list of fields to preserve, or ``None`` to preserve them all + :type fields: list of str + """ + + if fields is not None: + self.unset_fields_except(fields) + + @classmethod + def sample(cls): + """Return a sample of the allocation.""" + sample = cls(uuid='a594544a-2daf-420c-8775-17a8c3e0852f', + node_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', + name='node1-allocation-01', + state=ir_states.ALLOCATING, + last_error=None, + resource_class='baremetal', + traits=['CUSTOM_GPU'], + candidate_nodes=[], + extra={'foo': 'bar'}, + created_at=datetime.datetime(2000, 1, 1, 12, 0, 0), + updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0)) + return cls._convert_with_links(sample, 'http://localhost:6385') + + +class AllocationCollection(collection.Collection): + """API representation of a collection of allocations.""" + + allocations = [Allocation] + """A list containing allocation objects""" + + def __init__(self, **kwargs): + self._type = 'allocations' + + @staticmethod + def convert_with_links(rpc_allocations, limit, url=None, fields=None, + **kwargs): + collection = AllocationCollection() + collection.allocations = [ + Allocation.convert_with_links(p, fields=fields, sanitize=False) + for p in rpc_allocations + ] + collection.next = collection.get_next(limit, url=url, **kwargs) + + for item in collection.allocations: + item.sanitize(fields=fields) + + return collection + + @classmethod + def sample(cls): + """Return a sample of the allocation.""" + sample = cls() + sample.allocations = [Allocation.sample()] + return sample + + +class AllocationsController(pecan.rest.RestController): + """REST controller for allocations.""" + + invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] + + @pecan.expose() + def _route(self, args, request=None): + if not api_utils.allow_allocations(): + msg = _("The API version does not allow allocations") + if pecan.request.method == "GET": + raise webob_exc.HTTPNotFound(msg) + else: + raise webob_exc.HTTPMethodNotAllowed(msg) + return super(AllocationsController, self)._route(args, request) + + def 
_get_allocations_collection(self, node_ident=None, resource_class=None, + state=None, marker=None, limit=None, + sort_key='id', sort_dir='asc', + resource_url=None, fields=None): + """Return allocations collection. + + :param node_ident: UUID or name of a node. + :param marker: Pagination marker for large data sets. + :param limit: Maximum number of resources to return in a single result. + :param sort_key: Column to sort results by. Default: id. + :param sort_dir: Direction to sort. "asc" or "desc". Default: asc. + :param resource_url: Optional, URL to the allocation resource. + :param fields: Optional, a list with a specified set of fields + of the resource to be returned. + """ + limit = api_utils.validate_limit(limit) + sort_dir = api_utils.validate_sort_dir(sort_dir) + + if sort_key in self.invalid_sort_key_list: + raise exception.InvalidParameterValue( + _("The sort_key value %(key)s is an invalid field for " + "sorting") % {'key': sort_key}) + + marker_obj = None + if marker: + marker_obj = objects.Allocation.get_by_uuid(pecan.request.context, + marker) + + if node_ident: + try: + node_uuid = api_utils.get_rpc_node(node_ident).uuid + except exception.NodeNotFound as exc: + exc.code = http_client.BAD_REQUEST + raise + else: + node_uuid = None + + possible_filters = { + 'node_uuid': node_uuid, + 'resource_class': resource_class, + 'state': state + } + + filters = {} + for key, value in possible_filters.items(): + if value is not None: + filters[key] = value + + allocations = objects.Allocation.list(pecan.request.context, + limit=limit, + marker=marker_obj, + sort_key=sort_key, + sort_dir=sort_dir, + filters=filters) + return AllocationCollection.convert_with_links(allocations, limit, + url=resource_url, + fields=fields, + sort_key=sort_key, + sort_dir=sort_dir) + + @METRICS.timer('AllocationsController.get_all') + @expose.expose(AllocationCollection, types.uuid_or_name, wtypes.text, + wtypes.text, types.uuid, int, wtypes.text, wtypes.text, + types.listtype) + def get_all(self, node=None, resource_class=None, state=None, marker=None, + limit=None, sort_key='id', sort_dir='asc', fields=None): + """Retrieve a list of allocations. + + :param node: UUID or name of a node, to get only allocations for that + node. + :param resource_class: Filter by requested resource class. + :param state: Filter by allocation state. + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. + This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. + :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + :param fields: Optional, a list with a specified set of fields + of the resource to be returned. + """ + cdict = pecan.request.context.to_policy_values() + policy.authorize('baremetal:allocation:get', cdict, cdict) + + return self._get_allocations_collection(node, resource_class, state, + marker, limit, + sort_key, sort_dir, + fields=fields) + + @METRICS.timer('AllocationsController.get_one') + @expose.expose(Allocation, types.uuid_or_name, types.listtype) + def get_one(self, allocation_ident, fields=None): + """Retrieve information about the given allocation. + + :param allocation_ident: UUID or logical name of an allocation. + :param fields: Optional, a list with a specified set of fields + of the resource to be returned. 
+ """ + cdict = pecan.request.context.to_policy_values() + policy.authorize('baremetal:allocation:get', cdict, cdict) + + rpc_allocation = api_utils.get_rpc_allocation_with_suffix( + allocation_ident) + return Allocation.convert_with_links(rpc_allocation, fields=fields) + + @METRICS.timer('AllocationsController.post') + @expose.expose(Allocation, body=Allocation, + status_code=http_client.CREATED) + def post(self, allocation): + """Create a new allocation. + + :param allocation: an allocation within the request body. + """ + context = pecan.request.context + cdict = context.to_policy_values() + policy.authorize('baremetal:allocation:create', cdict, cdict) + + if allocation.node_uuid is not wtypes.Unset: + msg = _("Cannot set node_uuid when creating an allocation") + raise exception.Invalid(msg) + + if (allocation.name + and not api_utils.is_valid_logical_name(allocation.name)): + msg = _("Cannot create allocation with invalid name " + "'%(name)s'") % {'name': allocation.name} + raise exception.Invalid(msg) + + if allocation.traits: + for trait in allocation.traits: + api_utils.validate_trait(trait) + + if allocation.candidate_nodes: + # Convert nodes from names to UUIDs and check their validity + try: + converted = pecan.request.dbapi.check_node_list( + allocation.candidate_nodes) + except exception.NodeNotFound as exc: + exc.code = http_client.BAD_REQUEST + raise + else: + # Make sure we keep the ordering of candidate nodes. + allocation.candidate_nodes = [ + converted[ident] for ident in allocation.candidate_nodes] + + all_dict = allocation.as_dict() + + # NOTE(yuriyz): UUID is mandatory for notifications payload + if not all_dict.get('uuid'): + all_dict['uuid'] = uuidutils.generate_uuid() + + new_allocation = objects.Allocation(context, **all_dict) + topic = pecan.request.rpcapi.get_random_topic() + + notify.emit_start_notification(context, new_allocation, 'create') + with notify.handle_error_notification(context, new_allocation, + 'create'): + new_allocation = pecan.request.rpcapi.create_allocation( + context, new_allocation, topic) + notify.emit_end_notification(context, new_allocation, 'create') + + # Set the HTTP Location Header + pecan.response.location = link.build_url('allocations', + new_allocation.uuid) + return Allocation.convert_with_links(new_allocation) + + @METRICS.timer('AllocationsController.delete') + @expose.expose(None, types.uuid_or_name, + status_code=http_client.NO_CONTENT) + def delete(self, allocation_ident): + """Delete an allocation. + + :param allocation_ident: UUID or logical name of an allocation. 
+ """ + context = pecan.request.context + cdict = context.to_policy_values() + policy.authorize('baremetal:allocation:delete', cdict, cdict) + + rpc_allocation = api_utils.get_rpc_allocation_with_suffix( + allocation_ident) + if rpc_allocation.node_id: + node_uuid = objects.Node.get_by_id(pecan.request.context, + rpc_allocation.node_id).uuid + else: + node_uuid = None + + notify.emit_start_notification(context, rpc_allocation, 'delete', + node_uuid=node_uuid) + with notify.handle_error_notification(context, rpc_allocation, + 'delete', node_uuid=node_uuid): + topic = pecan.request.rpcapi.get_random_topic() + pecan.request.rpcapi.destroy_allocation(context, rpc_allocation, + topic) + notify.emit_end_notification(context, rpc_allocation, 'delete', + node_uuid=node_uuid) + + +class NodeAllocationController(pecan.rest.RestController): + """REST controller for allocations.""" + + invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] + + @pecan.expose() + def _route(self, args, request=None): + if not api_utils.allow_allocations(): + raise webob_exc.HTTPNotFound(_( + "The API version does not allow allocations")) + return super(NodeAllocationController, self)._route(args, request) + + def __init__(self, node_ident): + super(NodeAllocationController, self).__init__() + self.parent_node_ident = node_ident + self.inner = AllocationsController() + + @METRICS.timer('NodeAllocationController.get_all') + @expose.expose(Allocation, types.listtype) + def get_all(self, fields=None): + cdict = pecan.request.context.to_policy_values() + policy.authorize('baremetal:allocation:get', cdict, cdict) + + result = self.inner._get_allocations_collection(self.parent_node_ident, + fields=fields) + try: + return result.allocations[0] + except IndexError: + raise exception.AllocationNotFound( + _("Allocation for node %s was not found") % + self.parent_node_ident) + + @METRICS.timer('NodeAllocationController.delete') + @expose.expose(None, status_code=http_client.NO_CONTENT) + def delete(self): + context = pecan.request.context + cdict = context.to_policy_values() + policy.authorize('baremetal:allocation:delete', cdict, cdict) + + rpc_node = api_utils.get_rpc_node_with_suffix(self.parent_node_ident) + allocations = objects.Allocation.list( + pecan.request.context, + filters={'node_uuid': rpc_node.uuid}) + + try: + rpc_allocation = allocations[0] + except IndexError: + raise exception.AllocationNotFound( + _("Allocation for node %s was not found") % + self.parent_node_ident) + + notify.emit_start_notification(context, rpc_allocation, 'delete', + node_uuid=rpc_node.uuid) + with notify.handle_error_notification(context, rpc_allocation, + 'delete', + node_uuid=rpc_node.uuid): + topic = pecan.request.rpcapi.get_random_topic() + pecan.request.rpcapi.destroy_allocation(context, rpc_allocation, + topic) + notify.emit_end_notification(context, rpc_allocation, 'delete', + node_uuid=rpc_node.uuid) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/chassis.py ironic-12.1.0/ironic/api/controllers/v1/chassis.py --- ironic-12.0.0/ironic/api/controllers/v1/chassis.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/chassis.py 2019-03-21 20:07:40.000000000 +0000 @@ -325,12 +325,8 @@ policy.authorize('baremetal:chassis:update', cdict, cdict) rpc_chassis = objects.Chassis.get_by_uuid(context, chassis_uuid) - try: - chassis = Chassis( - **api_utils.apply_jsonpatch(rpc_chassis.as_dict(), patch)) - - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + chassis 
= Chassis( + **api_utils.apply_jsonpatch(rpc_chassis.as_dict(), patch)) # Update only the fields that have changed for field in objects.Chassis.fields: diff -Nru ironic-12.0.0/ironic/api/controllers/v1/deploy_template.py ironic-12.1.0/ironic/api/controllers/v1/deploy_template.py --- ironic-12.0.0/ironic/api/controllers/v1/deploy_template.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/deploy_template.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,445 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import datetime + +from ironic_lib import metrics_utils +from oslo_log import log +from oslo_utils import strutils +from oslo_utils import uuidutils +import pecan +from pecan import rest +from six.moves import http_client +from webob import exc as webob_exc +import wsme +from wsme import types as wtypes + +from ironic.api.controllers import base +from ironic.api.controllers import link +from ironic.api.controllers.v1 import collection +from ironic.api.controllers.v1 import notification_utils as notify +from ironic.api.controllers.v1 import types +from ironic.api.controllers.v1 import utils as api_utils +from ironic.api import expose +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.conductor import steps as conductor_steps +import ironic.conf +from ironic import objects + +CONF = ironic.conf.CONF +LOG = log.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + +_DEFAULT_RETURN_FIELDS = ('uuid', 'name') + +_DEPLOY_INTERFACE_TYPE = wtypes.Enum( + wtypes.text, *conductor_steps.DEPLOYING_INTERFACE_PRIORITY) + + +class DeployStepType(wtypes.Base, base.AsDictMixin): + """A type describing a deployment step.""" + + interface = wsme.wsattr(_DEPLOY_INTERFACE_TYPE, mandatory=True) + + step = wsme.wsattr(wtypes.text, mandatory=True) + + args = wsme.wsattr({wtypes.text: types.jsontype}, mandatory=True) + + priority = wsme.wsattr(wtypes.IntegerType(0), mandatory=True) + + def __init__(self, **kwargs): + self.fields = ['interface', 'step', 'args', 'priority'] + for field in self.fields: + value = kwargs.get(field, wtypes.Unset) + setattr(self, field, value) + + def sanitize(self): + """Removes sensitive data.""" + if self.args != wtypes.Unset: + self.args = strutils.mask_dict_password(self.args, "******") + + +class DeployTemplate(base.APIBase): + """API representation of a deploy template.""" + + uuid = types.uuid + """Unique UUID for this deploy template.""" + + name = wsme.wsattr(wtypes.text, mandatory=True) + """The logical name for this deploy template.""" + + steps = wsme.wsattr([DeployStepType], mandatory=True) + """The deploy steps of this deploy template.""" + + links = wsme.wsattr([link.Link]) + """A list containing a self link and associated deploy template links.""" + + extra = {wtypes.text: types.jsontype} + """This deploy template's meta data""" + + def __init__(self, **kwargs): + self.fields = [] + fields = list(objects.DeployTemplate.fields) + + for field in fields: + # Skip fields 
we do not expose. + if not hasattr(self, field): + continue + + value = kwargs.get(field, wtypes.Unset) + if field == 'steps' and value != wtypes.Unset: + value = [DeployStepType(**step) for step in value] + self.fields.append(field) + setattr(self, field, value) + + @staticmethod + def validate(value): + if value is None: + return + + # The name is mandatory, but the 'mandatory' attribute support in + # wtypes.wsattr allows None. + if value.name is None: + err = _("Deploy template name cannot be None") + raise exception.InvalidDeployTemplate(err=err) + + # The name must also be a valid trait. + api_utils.validate_trait( + value.name, + error_prefix=_("Deploy template name must be a valid trait")) + + # There must be at least one step. + if not value.steps: + err = _("No deploy steps specified. A deploy template must have " + "at least one deploy step.") + raise exception.InvalidDeployTemplate(err=err) + + # TODO(mgoddard): Determine the consequences of allowing duplicate + # steps. + # * What if one step has zero priority and another non-zero? + # * What if a step that is enabled by default is included in a + # template? Do we override the default or add a second invocation? + + # Check for duplicate steps. Each interface/step combination can be + # specified at most once. + counter = collections.Counter((step.interface, step.step) + for step in value.steps) + duplicates = {key for key, count in counter.items() if count > 1} + if duplicates: + duplicates = {"interface: %s, step: %s" % (interface, step) + for interface, step in duplicates} + err = _("Duplicate deploy steps. A deploy template cannot have " + "multiple deploy steps with the same interface and step. " + "Duplicates: %s") % "; ".join(duplicates) + raise exception.InvalidDeployTemplate(err=err) + return value + + @staticmethod + def _convert_with_links(template, url, fields=None): + template.links = [ + link.Link.make_link('self', url, 'deploy_templates', + template.uuid), + link.Link.make_link('bookmark', url, 'deploy_templates', + template.uuid, + bookmark=True) + ] + return template + + @classmethod + def convert_with_links(cls, rpc_template, fields=None, sanitize=True): + """Add links to the deploy template.""" + template = DeployTemplate(**rpc_template.as_dict()) + + if fields is not None: + api_utils.check_for_invalid_fields(fields, template.as_dict()) + + template = cls._convert_with_links(template, + pecan.request.public_url, + fields=fields) + if sanitize: + template.sanitize(fields) + + return template + + def sanitize(self, fields): + """Removes sensitive and unrequested data. + + Will only keep the fields specified in the ``fields`` parameter. 
+ + :param fields: + list of fields to preserve, or ``None`` to preserve them all + :type fields: list of str + """ + if self.steps != wtypes.Unset: + for step in self.steps: + step.sanitize() + + if fields is not None: + self.unset_fields_except(fields) + + @classmethod + def sample(cls, expand=True): + time = datetime.datetime(2000, 1, 1, 12, 0, 0) + template_uuid = '534e73fa-1014-4e58-969a-814cc0cb9d43' + template_name = 'CUSTOM_RAID1' + template_steps = [{ + "interface": "raid", + "step": "create_configuration", + "args": { + "logical_disks": [{ + "size_gb": "MAX", + "raid_level": "1", + "is_root_volume": True + }], + "delete_configuration": True + }, + "priority": 10 + }] + template_extra = {'foo': 'bar'} + sample = cls(uuid=template_uuid, + name=template_name, + steps=template_steps, + extra=template_extra, + created_at=time, + updated_at=time) + fields = None if expand else _DEFAULT_RETURN_FIELDS + return cls._convert_with_links(sample, 'http://localhost:6385', + fields=fields) + + +class DeployTemplatePatchType(types.JsonPatchType): + + _api_base = DeployTemplate + + +class DeployTemplateCollection(collection.Collection): + """API representation of a collection of deploy templates.""" + + _type = 'deploy_templates' + + deploy_templates = [DeployTemplate] + """A list containing deploy template objects""" + + @staticmethod + def convert_with_links(templates, limit, fields=None, **kwargs): + collection = DeployTemplateCollection() + collection.deploy_templates = [ + DeployTemplate.convert_with_links(t, fields=fields, sanitize=False) + for t in templates] + collection.next = collection.get_next(limit, **kwargs) + + for template in collection.deploy_templates: + template.sanitize(fields) + + return collection + + @classmethod + def sample(cls): + sample = cls() + template = DeployTemplate.sample(expand=False) + sample.deploy_templates = [template] + return sample + + +class DeployTemplatesController(rest.RestController): + """REST controller for deploy templates.""" + + invalid_sort_key_list = ['extra', 'steps'] + + @pecan.expose() + def _route(self, args, request=None): + if not api_utils.allow_deploy_templates(): + msg = _("The API version does not allow deploy templates") + if pecan.request.method == "GET": + raise webob_exc.HTTPNotFound(msg) + else: + raise webob_exc.HTTPMethodNotAllowed(msg) + return super(DeployTemplatesController, self)._route(args, request) + + def _update_changed_fields(self, template, rpc_template): + """Update rpc_template based on changed fields in a template.""" + for field in objects.DeployTemplate.fields: + try: + patch_val = getattr(template, field) + except AttributeError: + # Ignore fields that aren't exposed in the API. + continue + if patch_val == wtypes.Unset: + patch_val = None + if rpc_template[field] != patch_val: + if field == 'steps' and patch_val is not None: + # Convert from DeployStepType to dict. + patch_val = [s.as_dict() for s in patch_val] + rpc_template[field] = patch_val + + @METRICS.timer('DeployTemplatesController.get_all') + @expose.expose(DeployTemplateCollection, types.name, int, wtypes.text, + wtypes.text, types.listtype, types.boolean) + def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', + fields=None, detail=None): + """Retrieve a list of deploy templates. + + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. 
This value cannot be larger than the value of max_limit + in the [api] section of the ironic configuration, or only + max_limit resources will be returned. + :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + :param fields: Optional, a list with a specified set of fields + of the resource to be returned. + :param detail: Optional, boolean to indicate whether to retrieve a list + of deploy templates with detail. + """ + api_utils.check_policy('baremetal:deploy_template:get') + + api_utils.check_allowed_fields(fields) + api_utils.check_allowed_fields([sort_key]) + + fields = api_utils.get_request_return_fields(fields, detail, + _DEFAULT_RETURN_FIELDS) + + limit = api_utils.validate_limit(limit) + sort_dir = api_utils.validate_sort_dir(sort_dir) + + if sort_key in self.invalid_sort_key_list: + raise exception.InvalidParameterValue( + _("The sort_key value %(key)s is an invalid field for " + "sorting") % {'key': sort_key}) + + marker_obj = None + if marker: + marker_obj = objects.DeployTemplate.get_by_uuid( + pecan.request.context, marker) + + templates = objects.DeployTemplate.list( + pecan.request.context, limit=limit, marker=marker_obj, + sort_key=sort_key, sort_dir=sort_dir) + + parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} + + if detail is not None: + parameters['detail'] = detail + + return DeployTemplateCollection.convert_with_links( + templates, limit, fields=fields, **parameters) + + @METRICS.timer('DeployTemplatesController.get_one') + @expose.expose(DeployTemplate, types.uuid_or_name, types.listtype) + def get_one(self, template_ident, fields=None): + """Retrieve information about the given deploy template. + + :param template_ident: UUID or logical name of a deploy template. + :param fields: Optional, a list with a specified set of fields + of the resource to be returned. + """ + api_utils.check_policy('baremetal:deploy_template:get') + + api_utils.check_allowed_fields(fields) + + rpc_template = api_utils.get_rpc_deploy_template_with_suffix( + template_ident) + + return DeployTemplate.convert_with_links(rpc_template, fields=fields) + + @METRICS.timer('DeployTemplatesController.post') + @expose.expose(DeployTemplate, body=DeployTemplate, + status_code=http_client.CREATED) + def post(self, template): + """Create a new deploy template. + + :param template: a deploy template within the request body. + """ + api_utils.check_policy('baremetal:deploy_template:create') + + context = pecan.request.context + tdict = template.as_dict() + # NOTE(mgoddard): UUID is mandatory for notifications payload + if not tdict.get('uuid'): + tdict['uuid'] = uuidutils.generate_uuid() + + new_template = objects.DeployTemplate(context, **tdict) + + notify.emit_start_notification(context, new_template, 'create') + with notify.handle_error_notification(context, new_template, 'create'): + new_template.create() + # Set the HTTP Location Header + pecan.response.location = link.build_url('deploy_templates', + new_template.uuid) + api_template = DeployTemplate.convert_with_links(new_template) + notify.emit_end_notification(context, new_template, 'create') + return api_template + + @METRICS.timer('DeployTemplatesController.patch') + @wsme.validate(types.uuid, types.boolean, [DeployTemplatePatchType]) + @expose.expose(DeployTemplate, types.uuid_or_name, types.boolean, + body=[DeployTemplatePatchType]) + def patch(self, template_ident, patch=None): + """Update an existing deploy template.
+ + :param template_ident: UUID or logical name of a deploy template. + :param patch: a json PATCH document to apply to this deploy template. + """ + api_utils.check_policy('baremetal:deploy_template:update') + + context = pecan.request.context + rpc_template = api_utils.get_rpc_deploy_template_with_suffix( + template_ident) + + template_dict = rpc_template.as_dict() + template = DeployTemplate( + **api_utils.apply_jsonpatch(template_dict, patch)) + template.validate(template) + self._update_changed_fields(template, rpc_template) + + # NOTE(mgoddard): There could be issues with concurrent updates of a + # template. This is particularly true for the complex 'steps' field, + # where operations such as modifying a single step could result in + # changes being lost, e.g. two requests concurrently appending a step + # to the same template could result in only one of the steps being + # added, due to the read/modify/write nature of this patch operation. + # This issue should not be present for 'simple' string fields, or + # complete replacement of the steps (the only operation supported by + # the openstack baremetal CLI). It's likely that this is an issue for + # other resources, even those modified in the conductor under a lock. + # This is due to the fact that the patch operation is always applied in + # the API. Ways to avoid this include passing the patch to the + # conductor to apply while holding a lock, or a collision detection + # & retry mechanism using e.g. the updated_at field. + notify.emit_start_notification(context, rpc_template, 'update') + with notify.handle_error_notification(context, rpc_template, 'update'): + rpc_template.save() + + api_template = DeployTemplate.convert_with_links(rpc_template) + notify.emit_end_notification(context, rpc_template, 'update') + + return api_template + + @METRICS.timer('DeployTemplatesController.delete') + @expose.expose(None, types.uuid_or_name, + status_code=http_client.NO_CONTENT) + def delete(self, template_ident): + """Delete a deploy template. + + :param template_ident: UUID or logical name of a deploy template. + """ + api_utils.check_policy('baremetal:deploy_template:delete') + + context = pecan.request.context + rpc_template = api_utils.get_rpc_deploy_template_with_suffix( + template_ident) + notify.emit_start_notification(context, rpc_template, 'delete') + with notify.handle_error_notification(context, rpc_template, 'delete'): + rpc_template.destroy() + notify.emit_end_notification(context, rpc_template, 'delete') diff -Nru ironic-12.0.0/ironic/api/controllers/v1/event.py ironic-12.1.0/ironic/api/controllers/v1/event.py --- ironic-12.0.0/ironic/api/controllers/v1/event.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/event.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
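(For context on the new event.py controller below: request bodies are validated by the EventType type added to types.py later in this diff, which requires an 'event' key and, for network port events, 'port_id', 'mac_address' and 'status'. A minimal, hypothetical client payload for POST /v1/events, available from API version 1.54, might look like the following Python sketch; every value here is illustrative rather than taken from the diff.)

    # Hypothetical body for POST /v1/events (API >= 1.54). The 'event'
    # key selects the validator; network port events additionally
    # require 'port_id' (a UUID), 'mac_address' and 'status'.
    payload = {
        "events": [{
            "event": "network.bind_port",
            "port_id": "94b90f8c-a5a6-4d34-8bfc-48c18d540e84",  # example only
            "mac_address": "52:54:00:12:34:56",                 # example only
            "status": "ACTIVE",
        }]
    }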
+ +from ironic_lib import metrics_utils +from oslo_log import log +import pecan +from six.moves import http_client + +from ironic.api.controllers.v1 import collection +from ironic.api.controllers.v1 import types +from ironic.api.controllers.v1 import utils as api_utils +from ironic.api import expose +from ironic.common import exception +from ironic.common import policy + +METRICS = metrics_utils.get_metrics_logger(__name__) + +LOG = log.getLogger(__name__) + + +class EvtCollection(collection.Collection): + """API representation of a collection of events.""" + + events = [types.eventtype] + """A list containing event dict objects""" + + +class EventsController(pecan.rest.RestController): + """REST controller for Events.""" + + @pecan.expose() + def _lookup(self): + if not api_utils.allow_expose_events(): + pecan.abort(http_client.NOT_FOUND) + + @METRICS.timer('EventsController.post') + @expose.expose(None, body=EvtCollection, + status_code=http_client.NO_CONTENT) + def post(self, evts): + if not api_utils.allow_expose_events(): + raise exception.NotFound() + cdict = pecan.request.context.to_policy_values() + policy.authorize('baremetal:events:post', cdict, cdict) + for e in evts.events: + LOG.debug("Received external event: %s", e) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/__init__.py ironic-12.1.0/ironic/api/controllers/v1/__init__.py --- ironic-12.0.0/ironic/api/controllers/v1/__init__.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/__init__.py 2019-03-21 20:07:40.000000000 +0000 @@ -25,9 +25,12 @@ from ironic.api.controllers import base from ironic.api.controllers import link +from ironic.api.controllers.v1 import allocation from ironic.api.controllers.v1 import chassis from ironic.api.controllers.v1 import conductor +from ironic.api.controllers.v1 import deploy_template from ironic.api.controllers.v1 import driver +from ironic.api.controllers.v1 import event from ironic.api.controllers.v1 import node from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import portgroup @@ -104,9 +107,18 @@ conductors = [link.Link] """Links to the conductors resource""" + allocations = [link.Link] + """Links to the allocations resource""" + + deploy_templates = [link.Link] + """Links to the deploy_templates resource""" + version = version.Version """Version discovery information.""" + events = [link.Link] + """Links to the events resource""" + @staticmethod def convert(): v1 = V1() @@ -191,6 +203,33 @@ 'conductors', '', bookmark=True) ] + if utils.allow_allocations(): + v1.allocations = [link.Link.make_link('self', + pecan.request.public_url, + 'allocations', ''), + link.Link.make_link('bookmark', + pecan.request.public_url, + 'allocations', '', + bookmark=True) + ] + if utils.allow_expose_events(): + v1.events = [link.Link.make_link('self', pecan.request.public_url, + 'events', ''), + link.Link.make_link('bookmark', + pecan.request.public_url, + 'events', '', + bookmark=True) + ] + if utils.allow_deploy_templates(): + v1.deploy_templates = [ + link.Link.make_link('self', + pecan.request.public_url, + 'deploy_templates', ''), + link.Link.make_link('bookmark', + pecan.request.public_url, + 'deploy_templates', '', + bookmark=True) + ] v1.version = version.default_version() return v1 @@ -207,6 +246,9 @@ lookup = ramdisk.LookupController() heartbeat = ramdisk.HeartbeatController() conductors = conductor.ConductorsController() + allocations = allocation.AllocationsController() + events = event.EventsController() + deploy_templates = 
deploy_template.DeployTemplatesController() @expose.expose(V1) def get(self): diff -Nru ironic-12.0.0/ironic/api/controllers/v1/node.py ironic-12.1.0/ironic/api/controllers/v1/node.py --- ironic-12.0.0/ironic/api/controllers/v1/node.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/node.py 2019-03-21 20:07:40.000000000 +0000 @@ -28,6 +28,7 @@ from ironic.api.controllers import base from ironic.api.controllers import link +from ironic.api.controllers.v1 import allocation from ironic.api.controllers.v1 import bios from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify @@ -42,7 +43,7 @@ from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states as ir_states -from ironic.conductor import utils as conductor_utils +from ironic.conductor import steps as conductor_steps import ironic.conf from ironic import objects @@ -62,7 +63,7 @@ "properties": { "interface": { "description": "driver interface", - "enum": list(conductor_utils.CLEANING_INTERFACE_PRIORITY) + "enum": list(conductor_steps.CLEANING_INTERFACE_PRIORITY) # interface value must be one of the valid interfaces }, "step": { @@ -110,6 +111,8 @@ ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF) +_NODE_DESCRIPTION_MAX_LENGTH = 4096 + def get_nodes_controller_reserved_names(): global _NODES_CONTROLLER_RESERVED_WORDS @@ -597,7 +600,7 @@ @METRICS.timer('NodeStatesController.provision') @expose.expose(None, types.uuid_or_name, wtypes.text, - wtypes.text, types.jsontype, wtypes.text, + types.jsontype, types.jsontype, wtypes.text, status_code=http_client.ACCEPTED) def provision(self, node_ident, target, configdrive=None, clean_steps=None, rescue_password=None): @@ -613,8 +616,8 @@ :param node_ident: UUID or logical name of a node. :param target: The desired provision state of the node or verb. :param configdrive: Optional. A gzipped and base64 encoded - configdrive. Only valid when setting provision state - to "active" or "rebuild". + configdrive or a dict to build a configdrive from. Only valid when + setting provision state to "active" or "rebuild". :param clean_steps: An ordered list of cleaning steps that will be performed on the node. A cleaning step is a dictionary with required keys 'interface' and 'step', and optional key 'args'. If @@ -678,8 +681,7 @@ action=target, node=rpc_node.uuid, state=rpc_node.provision_state) - if configdrive: - api_utils.check_allow_configdrive(target) + api_utils.check_allow_configdrive(target, configdrive) if clean_steps and target != ir_states.VERBS['clean']: msg = (_('"clean_steps" is only valid when setting target ' @@ -1075,6 +1077,15 @@ conductor = wsme.wsattr(wtypes.text, readonly=True) """Represent the conductor currently serving the node""" + owner = wsme.wsattr(wtypes.text) + """Field for storage of physical node owner""" + + description = wsme.wsattr(wtypes.text) + """Field for node description""" + + allocation_uuid = wsme.wsattr(types.uuid, readonly=True) + """The UUID of the allocation this node belongs to""" + # NOTE(deva): "conductor_affinity" shouldn't be presented on the # API because it's an internal value. Don't add it here.
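(For context: the three fields added above surface in node representations starting with API version 1.50 (owner), 1.51 (description) and 1.52 (allocation_uuid). A trimmed, hypothetical GET /v1/nodes/<uuid> response fragment follows; only the allocation UUID reuses the sample value that appears later in this diff, the other values are made up.)

    {
        "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123",
        "owner": "infra-team",
        "description": "rack 4, slot 12",
        "allocation_uuid": "982ddb5b-bce5-4d23-8fb8-7f710f648cd5"
    }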
@@ -1166,8 +1177,21 @@ '%(node)s.', {'node': rpc_node.uuid}) node.conductor = None + if (api_utils.allow_allocations() + and (fields is None or 'allocation_uuid' in fields)): + node.allocation_uuid = None + if rpc_node.allocation_id: + try: + allocation = objects.Allocation.get_by_id( + pecan.request.context, + rpc_node.allocation_id) + node.allocation_uuid = allocation.uuid + except exception.AllocationNotFound: + pass + if fields is not None: - api_utils.check_for_invalid_fields(fields, node.as_dict()) + api_utils.check_for_invalid_fields( + fields, set(node.as_dict()) | {'allocation_uuid'}) show_states_links = ( api_utils.allow_links_node_states_and_driver_properties()) @@ -1277,7 +1301,8 @@ storage_interface=None, traits=[], rescue_interface=None, bios_interface=None, conductor_group="", automated_clean=None, protected=False, - protected_reason=None) + protected_reason=None, owner=None, + allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5') # NOTE(matty_dubs): The chassis_uuid getter() is based on the # _chassis_uuid variable: sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12' @@ -1303,7 +1328,7 @@ '/inspection_started_at', '/clean_step', '/deploy_step', '/raid_config', '/target_raid_config', - '/fault', '/conductor'] + '/fault', '/conductor', '/allocation_uuid'] class NodeCollection(collection.Collection): @@ -1555,6 +1580,7 @@ 'volume': volume.VolumeController, 'traits': NodeTraitsController, 'bios': bios.NodeBiosController, + 'allocation': allocation.NodeAllocationController, } @pecan.expose() @@ -1570,7 +1596,9 @@ or (remainder[0] == 'vifs' and not api_utils.allow_vifs_subcontroller()) or (remainder[0] == 'bios' and - not api_utils.allow_bios_interface())): + not api_utils.allow_bios_interface()) + or (remainder[0] == 'allocation' + and not api_utils.allow_allocations())): pecan.abort(http_client.NOT_FOUND) if remainder[0] == 'traits' and not api_utils.allow_traits(): # NOTE(mgoddard): Returning here will ensure we exhibit the @@ -1584,40 +1612,24 @@ def _filter_by_conductor(self, nodes, conductor): filtered_nodes = [] for n in nodes: - host = pecan.request.rpcapi.get_conductor_for(n) - if host == conductor: - filtered_nodes.append(n) - return filtered_nodes + try: + host = pecan.request.rpcapi.get_conductor_for(n) + if host == conductor: + filtered_nodes.append(n) + except (exception.NoValidHost, exception.TemporaryFailure): + # NOTE(kaifeng) Node gets orphaned in case some conductor is + # offline or all conductors are offline.
+ pass - def _create_node_filters(self, chassis_uuid=None, associated=None, - maintenance=None, provision_state=None, - driver=None, resource_class=None, fault=None, - conductor_group=None): - filters = {} - if chassis_uuid: - filters['chassis_uuid'] = chassis_uuid - if associated is not None: - filters['associated'] = associated - if maintenance is not None: - filters['maintenance'] = maintenance - if provision_state: - filters['provision_state'] = provision_state - if driver: - filters['driver'] = driver - if resource_class is not None: - filters['resource_class'] = resource_class - if fault is not None: - filters['fault'] = fault - if conductor_group is not None: - filters['conductor_group'] = conductor_group - return filters + return filtered_nodes def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated, maintenance, provision_state, marker, limit, sort_key, sort_dir, driver=None, resource_class=None, resource_url=None, fields=None, fault=None, conductor_group=None, - detail=None, conductor=None): + detail=None, conductor=None, owner=None, + description_contains=None): if self.from_chassis and not chassis_uuid: raise exception.MissingParameterValue( _("Chassis id not specified.")) @@ -1650,10 +1662,23 @@ # be generated, which we don't want. limit = 0 else: - filters = self._create_node_filters(chassis_uuid, associated, - maintenance, provision_state, - driver, resource_class, fault, - conductor_group) + possible_filters = { + 'maintenance': maintenance, + 'chassis_uuid': chassis_uuid, + 'associated': associated, + 'provision_state': provision_state, + 'driver': driver, + 'resource_class': resource_class, + 'fault': fault, + 'conductor_group': conductor_group, + 'owner': owner, + 'description_contains': description_contains, + } + filters = {} + for key, value in possible_filters.items(): + if value is not None: + filters[key] = value + nodes = objects.Node.list(pecan.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) @@ -1740,6 +1765,11 @@ continue if patch_val == wtypes.Unset: patch_val = None + # conductor_group is case-insensitive, and we use it to calculate + # the conductor to send an update to. Lowercase it here instead + # of just before saving so we calculate correctly. + if field == 'conductor_group': + patch_val = patch_val.lower() if rpc_node[field] != patch_val: rpc_node[field] = patch_val @@ -1764,12 +1794,14 @@ @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, types.listtype, wtypes.text, - wtypes.text, wtypes.text, types.boolean, wtypes.text) + wtypes.text, wtypes.text, types.boolean, wtypes.text, + wtypes.text, wtypes.text) def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, fields=None, resource_class=None, fault=None, - conductor_group=None, detail=None, conductor=None): + conductor_group=None, detail=None, conductor=None, + owner=None, description_contains=None): """Retrieve a list of nodes. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -1799,9 +1831,14 @@ that conductor_group. :param conductor: Optional string value to get only nodes managed by that conductor. + :param owner: Optional string value that sets the owner whose nodes + are to be returned. :param fields: Optional, a list with a specified set of fields + of the resource to be returned.
:param fault: Optional string value to get only nodes with that fault. + :param description_contains: Optional string value to get only nodes + whose description field contains the + matching value. """ cdict = pecan.request.context.to_policy_values() policy.authorize('baremetal:node:get', cdict, cdict) @@ -1815,10 +1852,12 @@ api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_conductor(conductor) + api_utils.check_allow_filter_by_owner(owner) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) + extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, provision_state, marker, @@ -1828,18 +1867,20 @@ fields=fields, fault=fault, conductor_group=conductor_group, detail=detail, - conductor=conductor) + conductor=conductor, + owner=owner, + **extra_args) @METRICS.timer('NodesController.detail') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, wtypes.text, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, - wtypes.text, wtypes.text) + wtypes.text, wtypes.text, wtypes.text, wtypes.text) def detail(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, resource_class=None, fault=None, conductor_group=None, - conductor=None): + conductor=None, owner=None, description_contains=None): """Retrieve a list of nodes with detail. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for @@ -1868,6 +1909,11 @@ :param fault: Optional string value to get only nodes with that fault. :param conductor_group: Optional string value to get only nodes with that conductor_group. + :param owner: Optional string value that sets the owner whose nodes + are to be returned. + :param description_contains: Optional string value to get only nodes + whose description field contains the + matching value. """ cdict = pecan.request.context.to_policy_values() policy.authorize('baremetal:node:get', cdict, cdict) @@ -1877,6 +1923,7 @@ api_utils.check_allow_specify_resource_class(resource_class) api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) + api_utils.check_allow_filter_by_owner(owner) api_utils.check_allowed_fields([sort_key]) # /detail should only work against collections parent = pecan.request.path.split('/')[:-1][-1] @@ -1886,6 +1933,7 @@ api_utils.check_allow_filter_by_conductor(conductor) resource_url = '/'.join(['nodes', 'detail']) + extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, provision_state, marker, @@ -1895,7 +1943,9 @@ resource_url=resource_url, fault=fault, conductor_group=conductor_group, - conductor=conductor) + conductor=conductor, + owner=owner, + **extra_args) @METRICS.timer('NodesController.validate') @expose.expose(wtypes.text, types.uuid_or_name, types.uuid) @@ -1976,6 +2026,16 @@ "creation.
These fields can only be set for active nodes") raise exception.Invalid(msg) + if (node.description is not wtypes.Unset and + len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH): + msg = _("Cannot create node with description exceeding %s " + "characters") % _NODE_DESCRIPTION_MAX_LENGTH + raise exception.Invalid(msg) + + if node.allocation_uuid is not wtypes.Unset: + msg = _("Allocation UUID cannot be specified, use allocations API") + raise exception.Invalid(msg) + # NOTE(deva): get_topic_for checks if node.driver is in the hash ring # and raises NoValidHost if it is not. # We need to ensure that node has a UUID before it can @@ -2032,6 +2092,12 @@ "changing the node's driver.") raise exception.Invalid(msg) + description = api_utils.get_patch_values(patch, '/description') + if description and len(description[0]) > _NODE_DESCRIPTION_MAX_LENGTH: + msg = _("Cannot update node with description exceeding %s " + "characters") % _NODE_DESCRIPTION_MAX_LENGTH + raise exception.Invalid(msg) + @METRICS.timer('NodesController.patch') @wsme.validate(types.uuid, types.boolean, [NodePatchType]) @expose.expose(Node, types.uuid_or_name, types.boolean, @@ -2083,16 +2149,14 @@ % node_ident) error_msg += "'%(name)s'" self._check_names_acceptable(names, error_msg) - try: - node_dict = rpc_node.as_dict() - # NOTE(lucasagomes): - # 1) Remove chassis_id because it's an internal value and - # not present in the API object - # 2) Add chassis_uuid - node_dict['chassis_uuid'] = node_dict.pop('chassis_id', None) - node = Node(**api_utils.apply_jsonpatch(node_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + + node_dict = rpc_node.as_dict() + # NOTE(lucasagomes): + # 1) Remove chassis_id because it's an internal value and + # not present in the API object + # 2) Add chassis_uuid + node_dict['chassis_uuid'] = node_dict.pop('chassis_id', None) + node = Node(**api_utils.apply_jsonpatch(node_dict, patch)) self._update_changed_fields(node, rpc_node) # NOTE(deva): we calculate the rpc topic here in case node.driver # has changed, so that update is sent to the diff -Nru ironic-12.0.0/ironic/api/controllers/v1/notification_utils.py ironic-12.1.0/ironic/api/controllers/v1/notification_utils.py --- ironic-12.0.0/ironic/api/controllers/v1/notification_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/notification_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -21,7 +21,9 @@ from ironic.common import exception from ironic.common.i18n import _ +from ironic.objects import allocation as allocation_objects from ironic.objects import chassis as chassis_objects +from ironic.objects import deploy_template as deploy_template_objects from ironic.objects import fields from ironic.objects import node as node_objects from ironic.objects import notification @@ -35,8 +37,12 @@ CRUD_NOTIFY_OBJ = { + 'allocation': (allocation_objects.AllocationCRUDNotification, + allocation_objects.AllocationCRUDPayload), 'chassis': (chassis_objects.ChassisCRUDNotification, chassis_objects.ChassisCRUDPayload), + 'deploytemplate': (deploy_template_objects.DeployTemplateCRUDNotification, + deploy_template_objects.DeployTemplateCRUDPayload), 'node': (node_objects.NodeCRUDNotification, node_objects.NodeCRUDPayload), 'port': (port_objects.PortCRUDNotification, diff -Nru ironic-12.0.0/ironic/api/controllers/v1/portgroup.py ironic-12.1.0/ironic/api/controllers/v1/portgroup.py --- ironic-12.0.0/ironic/api/controllers/v1/portgroup.py 2018-12-19 10:02:33.000000000 
+0000 +++ ironic-12.1.0/ironic/api/controllers/v1/portgroup.py 2019-03-21 20:07:40.000000000 +0000 @@ -545,17 +545,14 @@ raise wsme.exc.ClientSideError( error_msg, status_code=http_client.BAD_REQUEST) - try: - portgroup_dict = rpc_portgroup.as_dict() - # NOTE: - # 1) Remove node_id because it's an internal value and - # not present in the API object - # 2) Add node_uuid - portgroup_dict['node_uuid'] = portgroup_dict.pop('node_id', None) - portgroup = Portgroup(**api_utils.apply_jsonpatch(portgroup_dict, - patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + portgroup_dict = rpc_portgroup.as_dict() + # NOTE: + # 1) Remove node_id because it's an internal value and + # not present in the API object + # 2) Add node_uuid + portgroup_dict['node_uuid'] = portgroup_dict.pop('node_id', None) + portgroup = Portgroup(**api_utils.apply_jsonpatch(portgroup_dict, + patch)) api_utils.handle_patch_port_like_extra_vif(rpc_portgroup, portgroup, patch) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/port.py ironic-12.1.0/ironic/api/controllers/v1/port.py --- ironic-12.0.0/ironic/api/controllers/v1/port.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/port.py 2019-03-21 20:07:40.000000000 +0000 @@ -59,6 +59,9 @@ # if requested version is < 1.34, hide physical_network field. if not api_utils.allow_port_physical_network(): obj.physical_network = wsme.Unset + # if requested version is < 1.53, hide is_smartnic field. + if not api_utils.allow_port_is_smartnic(): + obj.is_smartnic = wsme.Unset class Port(base.APIBase): @@ -156,6 +159,9 @@ links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated port links""" + is_smartnic = types.boolean + """Indicates whether this port is a Smart NIC port.""" + def __init__(self, **kwargs): self.fields = [] fields = list(objects.Port.fields) @@ -245,7 +251,8 @@ local_link_connection={ 'switch_info': 'host', 'port_id': 'Gig0/1', 'switch_id': 'aa:bb:cc:dd:ee:ff'}, - physical_network='physnet1') + physical_network='physnet1', + is_smartnic=False) # NOTE(lucasagomes): node_uuid getter() method look at the # _node_uuid variable sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' @@ -425,6 +432,9 @@ if ('physical_network' in fields and not api_utils.allow_port_physical_network()): raise exception.NotAcceptable() + if ('is_smartnic' in fields + and not api_utils.allow_port_is_smartnic()): + raise exception.NotAcceptable() @METRICS.timer('PortsController.get_all') @expose.expose(PortCollection, types.uuid_or_name, types.uuid, @@ -577,6 +587,12 @@ pdict = port.as_dict() self._check_allowed_port_fields(pdict) + if (port.is_smartnic and not types.locallinkconnectiontype + .validate_for_smart_nic(port.local_link_connection)): + raise exception.Invalid( + "Smart NIC port must have port_id " + "and hostname in local_link_connection") + create_remotely = pecan.request.rpcapi.can_send_create_port() if (not create_remotely and pdict.get('portgroup_uuid')): # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the @@ -652,7 +668,8 @@ fields_to_check = set() for field in (self.advanced_net_fields - + ['portgroup_uuid', 'physical_network']): + + ['portgroup_uuid', 'physical_network', + 'is_smartnic']): field_path = '/%s' % field if (api_utils.get_patch_values(patch, field_path) or api_utils.is_path_removed(patch, field_path)): @@ -660,21 +677,18 @@ self._check_allowed_port_fields(fields_to_check) rpc_port = objects.Port.get_by_uuid(context, port_uuid) - 
try: - port_dict = rpc_port.as_dict() - # NOTE(lucasagomes): - # 1) Remove node_id because it's an internal value and - # not present in the API object - # 2) Add node_uuid - port_dict['node_uuid'] = port_dict.pop('node_id', None) - # NOTE(vsaienko): - # 1) Remove portgroup_id because it's an internal value and - # not present in the API object - # 2) Add portgroup_uuid - port_dict['portgroup_uuid'] = port_dict.pop('portgroup_id', None) - port = Port(**api_utils.apply_jsonpatch(port_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + port_dict = rpc_port.as_dict() + # NOTE(lucasagomes): + # 1) Remove node_id because it's an internal value and + # not present in the API object + # 2) Add node_uuid + port_dict['node_uuid'] = port_dict.pop('node_id', None) + # NOTE(vsaienko): + # 1) Remove portgroup_id because it's an internal value and + # not present in the API object + # 2) Add portgroup_uuid + port_dict['portgroup_uuid'] = port_dict.pop('portgroup_id', None) + port = Port(**api_utils.apply_jsonpatch(port_dict, patch)) api_utils.handle_patch_port_like_extra_vif(rpc_port, port, patch) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/ramdisk.py ironic-12.1.0/ironic/api/controllers/v1/ramdisk.py --- ironic-12.0.0/ironic/api/controllers/v1/ramdisk.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/ramdisk.py 2019-03-21 20:07:40.000000000 +0000 @@ -37,10 +37,6 @@ _LOOKUP_RETURN_FIELDS = ('uuid', 'properties', 'instance_info', 'driver_internal_info') -_LOOKUP_ALLOWED_STATES = {states.DEPLOYING, states.DEPLOYWAIT, - states.CLEANING, states.CLEANWAIT, - states.INSPECTING, - states.RESCUING, states.RESCUEWAIT} def config(): @@ -83,6 +79,12 @@ class LookupController(rest.RestController): """Controller handling node lookup for a deploy ramdisk.""" + @property + def lookup_allowed_states(self): + if CONF.deploy.fast_track: + return states.FASTTRACK_LOOKUP_ALLOWED_STATES + return states.LOOKUP_ALLOWED_STATES + @expose.expose(LookupResult, types.listtype, types.uuid) def get_all(self, addresses=None, node_uuid=None): """Look up a node by its MAC addresses and optionally UUID. 
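(For context: the lookup_allowed_states property added above replaces the module-level _LOOKUP_ALLOWED_STATES constant so that the permitted provision states can depend on configuration. A standalone Python sketch of the same pattern; the state names are illustrative stand-ins for the sets defined in ironic.common.states, and the extra fast-track state is an assumption.)

    # Sketch of a configuration-dependent allowed-state lookup.
    LOOKUP_ALLOWED_STATES = frozenset(
        ['deploying', 'deploy wait', 'cleaning', 'clean wait',
         'inspecting', 'rescuing', 'rescue wait'])
    # Assumption: fast track also permits lookup while the node is idle,
    # since the ramdisk stays alive between operations.
    FASTTRACK_LOOKUP_ALLOWED_STATES = LOOKUP_ALLOWED_STATES | {'available'}

    class LookupController(object):
        def __init__(self, fast_track_enabled):
            self.fast_track_enabled = fast_track_enabled

        @property
        def lookup_allowed_states(self):
            # Pick the wider state set when fast track is configured.
            if self.fast_track_enabled:
                return FASTTRACK_LOOKUP_ALLOWED_STATES
            return LOOKUP_ALLOWED_STATES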
@@ -144,7 +146,7 @@ raise exception.NotFound() if (CONF.api.restrict_lookup - and node.provision_state not in _LOOKUP_ALLOWED_STATES): + and node.provision_state not in self.lookup_allowed_states): raise exception.NotFound() return LookupResult.convert_with_links(node) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/types.py ironic-12.1.0/ironic/api/controllers/v1/types.py --- ironic-12.0.0/ironic/api/controllers/v1/types.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/types.py 2019-03-21 20:07:40.000000000 +0000 @@ -18,6 +18,7 @@ import inspect import json +from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import six @@ -30,6 +31,9 @@ from ironic.common import utils +LOG = log.getLogger(__name__) + + class MacAddressType(wtypes.UserType): """A simple MAC address type.""" @@ -266,9 +270,12 @@ basetype = wtypes.DictType name = 'locallinkconnection' - mandatory_fields = {'switch_id', - 'port_id'} - valid_fields = mandatory_fields.union({'switch_info'}) + local_link_mandatory_fields = {'port_id', 'switch_id'} + smart_nic_mandatory_fields = {'port_id', 'hostname'} + mandatory_fields_list = [local_link_mandatory_fields, + smart_nic_mandatory_fields] + optional_field = {'switch_info'} + valid_fields = set.union(optional_field, *mandatory_fields_list) @staticmethod def validate(value): @@ -276,7 +283,7 @@ :param value: A dictionary of values to validate, switch_id is a MAC address or an OpenFlow based datapath_id, switch_info is an - optional field. + optional field. Required Smart NIC fields are port_id and hostname. For example:: @@ -286,6 +293,13 @@ 'switch_info': 'switch1' } + Or for Smart NIC:: + + { + 'port_id': 'rep0-0', + 'hostname': 'host1-bf' + } + :returns: A dictionary. :raises: Invalid if some of the keys in the dictionary being validated are unknown, invalid, or some required ones are missing. @@ -304,10 +318,20 @@ if invalid: raise exception.Invalid(_('%s are invalid keys') % (invalid)) - # Check all mandatory fields are present - missing = LocalLinkConnectionType.mandatory_fields - keys - if missing: - msg = _('Missing mandatory keys: %s') % missing + # Check that at least one of the mandatory field sets is present + for mandatory_set in LocalLinkConnectionType.mandatory_fields_list: + if mandatory_set <= keys: + break + else: + msg = _('Missing mandatory keys. Required keys are ' + '%(required_fields)s. Or in case of Smart NIC ' + '%(smart_nic_required_fields)s. ' + 'Submitted keys are %(keys)s.') % { + 'required_fields': + LocalLinkConnectionType.local_link_mandatory_fields, + 'smart_nic_required_fields': + LocalLinkConnectionType.smart_nic_mandatory_fields, + 'keys': keys} raise exception.Invalid(msg) # Check switch_id is either a valid mac address or @@ -321,6 +345,9 @@ value['switch_id']) except exception.InvalidDatapathID: raise exception.InvalidSwitchID(switch_id=value['switch_id']) + except KeyError: + # In the Smart NIC case 'switch_id' is optional. + pass return value @@ -330,6 +357,21 @@ return None return LocalLinkConnectionType.validate(value) + @staticmethod + def validate_for_smart_nic(value): + """Validate that the Smart NIC fields 'port_id' and 'hostname' are present. + + :param value: local link information of type Dictionary. + :return: True if both fields 'port_id' and 'hostname' are present + in 'value', False otherwise.
+ """ + wtypes.DictType(wtypes.text, wtypes.text).validate(value) + keys = set(value) + + if LocalLinkConnectionType.smart_nic_mandatory_fields <= keys: + return True + return False + locallinkconnectiontype = LocalLinkConnectionType() @@ -362,3 +404,93 @@ viftype = VifType() + + +class EventType(wtypes.UserType): + """A simple Event type.""" + + basetype = wtypes.DictType + name = 'event' + + def _validate_network_port_event(value): + """Validate network port event fields. + + :param value: A event dict + :returns: value + :raises: Invalid if network port event not in proper format + """ + + validators = { + 'port_id': UuidType.validate, + 'mac_address': MacAddressType.validate, + 'status': wtypes.text, + 'device_id': UuidType.validate, + 'binding:host_id': UuidType.validate, + 'binding:vnic_type': wtypes.text + } + + keys = set(value) + net_keys = set(validators) + net_mandatory_fields = {'port_id', 'mac_address', 'status'} + + # Check all keys are valid for network port event + invalid = keys.difference(EventType.mandatory_fields.union(net_keys)) + if invalid: + raise exception.Invalid(_('%s are invalid keys') % + ', '.join(invalid)) + + # Check all mandatory fields for network port event is present + missing = net_mandatory_fields.difference(keys) + if missing: + raise exception.Invalid(_('Missing mandatory keys: %s') + % ', '.join(missing)) + + # Check all values are of expected type + for key in net_keys: + if key in value: + try: + validators[key](value[key]) + except Exception as e: + msg = (_('Event validation failure for %(key)s. ' + '%(message)s') % {'key': key, 'message': e}) + raise exception.Invalid(msg) + + return value + + mandatory_fields = {'event'} + event_validators = { + 'network.bind_port': _validate_network_port_event, + 'network.unbind_port': _validate_network_port_event, + 'network.delete_port': _validate_network_port_event, + } + valid_events = set(event_validators) + + @staticmethod + def validate(value): + """Validate the input + + :param value: A event dict + :returns: value + :raises: Invalid if event not in proper format + """ + + wtypes.DictType(wtypes.text, wtypes.text).validate(value) + keys = set(value) + + # Check all mandatory fields are present + missing = EventType.mandatory_fields.difference(keys) + if missing: + raise exception.Invalid(_('Missing mandatory keys: %s') % + ', '.join(missing)) + + # Check event is a supported event + if value['event'] not in EventType.valid_events: + raise exception.Invalid( + _('%(event)s is not one of valid events: %(valid_events)s.') % + {'event': value['event'], + 'valid_events': ', '.join(EventType.valid_events)}) + + return EventType.event_validators[value['event']](value) + + +eventtype = EventType() diff -Nru ironic-12.0.0/ironic/api/controllers/v1/utils.py ironic-12.1.0/ironic/api/controllers/v1/utils.py --- ironic-12.0.0/ironic/api/controllers/v1/utils.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -17,6 +17,8 @@ import re import jsonpatch +import jsonschema +from jsonschema import exceptions as json_schema_exc import os_traits from oslo_config import cfg from oslo_utils import uuidutils @@ -31,6 +33,7 @@ from ironic.common import exception from ironic.common import faults from ironic.common.i18n import _ +from ironic.common import policy from ironic.common import states from ironic.common import utils from ironic import objects @@ -39,9 +42,10 @@ CONF = cfg.CONF -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - 
jsonpatch.JsonPointerException, - KeyError) +_JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, + jsonpatch.JsonPointerException, + KeyError, + IndexError) # Minimum API version to use for certain verbs @@ -92,12 +96,16 @@ return sort_dir -def validate_trait(trait): +def validate_trait(trait, error_prefix=_('Invalid trait')): error = wsme.exc.ClientSideError( - _('Invalid trait. A valid trait must be no longer than 255 ' + _('%(error_prefix)s. A valid trait must be no longer than 255 ' 'characters. Standard traits are defined in the os_traits library. ' 'A custom trait must start with the prefix CUSTOM_ and use ' - 'the following characters: A-Z, 0-9 and _')) + 'the following characters: A-Z, 0-9 and _') % + {'error_prefix': error_prefix}) + if not isinstance(trait, six.string_types): + raise error + if len(trait) > 255 or len(trait) < 1: raise error @@ -109,13 +117,32 @@ def apply_jsonpatch(doc, patch): + """Apply a JSON patch, one operation at a time. + + If the patch fails to apply, this allows us to determine which operation + failed, making the error message a little less cryptic. + + :param doc: The JSON document to patch. + :param patch: The JSON patch to apply. + :returns: The result of the patch operation. + :raises: PatchError if the patch fails to apply. + :raises: wsme.exc.ClientSideError if the patch adds a new root attribute. + """ + # Prevent removal of root attributes. for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: if p['path'].lstrip('/') not in doc: msg = _('Adding a new attribute (%s) to the root of ' 'the resource is not allowed') raise wsme.exc.ClientSideError(msg % p['path']) - return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) + + # Apply operations one at a time, to improve error reporting. + for patch_op in patch: + try: + doc = jsonpatch.apply_patch(doc, jsonpatch.JsonPatch([patch_op])) + except _JSONPATCH_EXCEPTIONS as e: + raise exception.PatchError(patch=patch_op, reason=e) + return doc def get_patch_values(patch, path): @@ -260,6 +287,84 @@ exception.PortgroupNotFound) +def get_rpc_allocation(allocation_ident): + """Get the RPC allocation from the allocation UUID or logical name. + + :param allocation_ident: the UUID or logical name of an allocation. + + :returns: The RPC allocation. + :raises: InvalidUuidOrName if the name or uuid provided is not valid. + :raises: AllocationNotFound if the allocation is not found. + """ + # Check to see if the allocation_ident is a valid UUID. If it is, treat it + # as a UUID. + if uuidutils.is_uuid_like(allocation_ident): + return objects.Allocation.get_by_uuid(pecan.request.context, + allocation_ident) + + # We can refer to allocations by their name + if utils.is_valid_logical_name(allocation_ident): + return objects.Allocation.get_by_name(pecan.request.context, + allocation_ident) + raise exception.InvalidUuidOrName(name=allocation_ident) + + +def get_rpc_allocation_with_suffix(allocation_ident): + """Get the RPC allocation from the allocation UUID or logical name. + + If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking + for allocation_ident with '.json' suffix. Otherwise identical + to get_rpc_allocation. + + :param allocation_ident: the UUID or logical name of an allocation. + + :returns: The RPC allocation. + :raises: InvalidUuidOrName if the name or uuid provided is not valid. + :raises: AllocationNotFound if the allocation is not found. 
+ """ + return _get_with_suffix(get_rpc_allocation, allocation_ident, + exception.AllocationNotFound) + + +def get_rpc_deploy_template(template_ident): + """Get the RPC deploy template from the UUID or logical name. + + :param template_ident: the UUID or logical name of a deploy template. + + :returns: The RPC deploy template. + :raises: InvalidUuidOrName if the name or uuid provided is not valid. + :raises: DeployTemplateNotFound if the deploy template is not found. + """ + # Check to see if the template_ident is a valid UUID. If it is, treat it + # as a UUID. + if uuidutils.is_uuid_like(template_ident): + return objects.DeployTemplate.get_by_uuid(pecan.request.context, + template_ident) + + # We can refer to templates by their name + if utils.is_valid_logical_name(template_ident): + return objects.DeployTemplate.get_by_name(pecan.request.context, + template_ident) + raise exception.InvalidUuidOrName(name=template_ident) + + +def get_rpc_deploy_template_with_suffix(template_ident): + """Get the RPC deploy template from the UUID or logical name. + + If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking + for template_ident with '.json' suffix. Otherwise identical + to get_rpc_deploy_template. + + :param template_ident: the UUID or logical name of a deploy template. + + :returns: The RPC deploy template. + :raises: InvalidUuidOrName if the name or uuid provided is not valid. + :raises: DeployTemplateNotFound if the deploy template is not found. + """ + return _get_with_suffix(get_rpc_deploy_template, template_ident, + exception.DeployTemplateNotFound) + + def is_valid_node_name(name): """Determine if the provided name is a valid node name. @@ -379,6 +484,10 @@ 'protected': versions.MINOR_48_NODE_PROTECTED, 'protected_reason': versions.MINOR_48_NODE_PROTECTED, 'conductor': versions.MINOR_49_CONDUCTORS, + 'owner': versions.MINOR_50_NODE_OWNER, + 'description': versions.MINOR_51_NODE_DESCRIPTION, + 'allocation_uuid': versions.MINOR_52_ALLOCATION, + 'events': versions.MINOR_54_EVENTS, } for field in V31_FIELDS: @@ -498,7 +607,30 @@ 'opr': versions.MINOR_30_DYNAMIC_DRIVERS}) -def check_allow_configdrive(target): +_CONFIG_DRIVE_SCHEMA = { + 'anyOf': [ + { + 'type': 'object', + 'properties': { + 'meta_data': {'type': 'object'}, + 'network_data': {'type': 'object'}, + 'user_data': { + 'type': ['object', 'array', 'string', 'null'] + } + }, + 'additionalProperties': False + }, + { + 'type': ['string', 'null'] + } + ] +} + + +def check_allow_configdrive(target, configdrive=None): + if not configdrive: + return + allowed_targets = [states.ACTIVE] if allow_node_rebuild_with_configdrive(): allowed_targets.append(states.REBUILD) @@ -509,6 +641,21 @@ raise wsme.exc.ClientSideError( msg, status_code=http_client.BAD_REQUEST) + try: + jsonschema.validate(configdrive, _CONFIG_DRIVE_SCHEMA) + except json_schema_exc.ValidationError as e: + msg = _('Invalid configdrive format: %s') % e + raise wsme.exc.ClientSideError( + msg, status_code=http_client.BAD_REQUEST) + + if isinstance(configdrive, dict) and not allow_build_configdrive(): + msg = _('Providing a JSON object for configdrive is only supported' + ' starting with API version %(base)s.%(opr)s') % { + 'base': versions.BASE_VERSION, + 'opr': versions.MINOR_56_BUILD_CONFIGDRIVE} + raise wsme.exc.ClientSideError( + msg, status_code=http_client.BAD_REQUEST) + def check_allow_filter_by_fault(fault): """Check if filtering nodes by fault is allowed. 
@@ -544,6 +691,20 @@ 'opr': versions.MINOR_46_NODE_CONDUCTOR_GROUP}) +def check_allow_filter_by_owner(owner): + """Check if filtering nodes by owner is allowed. + + Version 1.50 of the API allows filtering nodes by owner. + """ + if (owner is not None and pecan.request.version.minor + < versions.MINOR_50_NODE_OWNER): + raise exception.NotAcceptable(_( + "Request not acceptable. The minimal required API version " + "should be %(base)s.%(opr)s") % + {'base': versions.BASE_VERSION, + 'opr': versions.MINOR_50_NODE_OWNER}) + + def initial_node_provision_state(): """Return node state to use by default when creating new nodes. @@ -930,7 +1091,7 @@ def allow_expose_conductors(): """Check if accessing conductor endpoints is allowed. - Version 1.48 of the API exposed conductor endpoints and conductor field + Version 1.49 of the API exposed conductor endpoints and conductor field for the node. """ return pecan.request.version.minor >= versions.MINOR_49_CONDUCTORS @@ -939,7 +1100,7 @@ def check_allow_filter_by_conductor(conductor): """Check if filtering nodes by conductor is allowed. - Version 1.48 of the API allows filtering nodes by conductor. + Version 1.49 of the API allows filtering nodes by conductor. """ if conductor is not None and not allow_expose_conductors(): raise exception.NotAcceptable(_( @@ -947,3 +1108,56 @@ "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_49_CONDUCTORS}) + + +def allow_allocations(): + """Check if accessing allocation endpoints is allowed. + + Version 1.52 of the API exposed allocation endpoints and allocation_uuid + field for the node. + """ + return pecan.request.version.minor >= versions.MINOR_52_ALLOCATION + + +def allow_port_is_smartnic(): + """Check if port is_smartnic field is allowed. + + Version 1.53 of the API added is_smartnic field to the port object. + """ + return ((pecan.request.version.minor + >= versions.MINOR_53_PORT_SMARTNIC) + and objects.Port.supports_is_smartnic()) + + +def allow_expose_events(): + """Check if accessing events endpoint is allowed. + + Version 1.54 of the API added the events endpoint. + """ + return pecan.request.version.minor >= versions.MINOR_54_EVENTS + + +def allow_deploy_templates(): + """Check if accessing deploy template endpoints is allowed. + + Version 1.55 of the API exposed deploy template endpoints. + """ + return pecan.request.version.minor >= versions.MINOR_55_DEPLOY_TEMPLATES + + +def check_policy(policy_name): + """Check if the specified policy is authorised for this request. + + :param policy_name: Name of the policy to check. + :raises: HTTPForbidden if the policy forbids access. + """ + cdict = pecan.request.context.to_policy_values() + policy.authorize(policy_name, cdict, cdict) + + +def allow_build_configdrive(): + """Check if building configdrive is allowed. + + Version 1.56 of the API added support for building configdrive. + """ + return pecan.request.version.minor >= versions.MINOR_56_BUILD_CONFIGDRIVE diff -Nru ironic-12.0.0/ironic/api/controllers/v1/versions.py ironic-12.1.0/ironic/api/controllers/v1/versions.py --- ironic-12.0.0/ironic/api/controllers/v1/versions.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/versions.py 2019-03-21 20:07:40.000000000 +0000 @@ -86,6 +86,14 @@ # v1.46: Add conductor_group to the node object. # v1.47: Add automated_clean to the node object. # v1.48: Add protected to the node object. +# v1.49: Add conductor to the node object and /v1/conductors. +# v1.50: Add owner to the node object.
+# v1.51: Add description to the node object. +# v1.52: Add allocation API. +# v1.53: Add support for Smart NIC port +# v1.54: Add events support. +# v1.55: Add deploy templates API. +# v1.56: Add support for building configdrives. MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 @@ -137,6 +145,13 @@ MINOR_47_NODE_AUTOMATED_CLEAN = 47 MINOR_48_NODE_PROTECTED = 48 MINOR_49_CONDUCTORS = 49 +MINOR_50_NODE_OWNER = 50 +MINOR_51_NODE_DESCRIPTION = 51 +MINOR_52_ALLOCATION = 52 +MINOR_53_PORT_SMARTNIC = 53 +MINOR_54_EVENTS = 54 +MINOR_55_DEPLOY_TEMPLATES = 55 +MINOR_56_BUILD_CONFIGDRIVE = 56 # When adding another version, update: # - MINOR_MAX_VERSION @@ -144,7 +159,7 @@ # explanation of what changed in the new version # - common/release_mappings.py, RELEASE_MAPPING['master']['api'] -MINOR_MAX_VERSION = MINOR_49_CONDUCTORS +MINOR_MAX_VERSION = MINOR_56_BUILD_CONFIGDRIVE # String representations of the minor and maximum versions _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) diff -Nru ironic-12.0.0/ironic/api/controllers/v1/volume_connector.py ironic-12.1.0/ironic/api/controllers/v1/volume_connector.py --- ironic-12.0.0/ironic/api/controllers/v1/volume_connector.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/volume_connector.py 2019-03-21 20:07:40.000000000 +0000 @@ -421,17 +421,14 @@ rpc_connector = objects.VolumeConnector.get_by_uuid(context, connector_uuid) - try: - connector_dict = rpc_connector.as_dict() - # NOTE(smoriya): - # 1) Remove node_id because it's an internal value and - # not present in the API object - # 2) Add node_uuid - connector_dict['node_uuid'] = connector_dict.pop('node_id', None) - connector = VolumeConnector( - **api_utils.apply_jsonpatch(connector_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + connector_dict = rpc_connector.as_dict() + # NOTE(smoriya): + # 1) Remove node_id because it's an internal value and + # not present in the API object + # 2) Add node_uuid + connector_dict['node_uuid'] = connector_dict.pop('node_id', None) + connector = VolumeConnector( + **api_utils.apply_jsonpatch(connector_dict, patch)) # Update only the fields that have changed. for field in objects.VolumeConnector.fields: diff -Nru ironic-12.0.0/ironic/api/controllers/v1/volume_target.py ironic-12.1.0/ironic/api/controllers/v1/volume_target.py --- ironic-12.0.0/ironic/api/controllers/v1/volume_target.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/api/controllers/v1/volume_target.py 2019-03-21 20:07:40.000000000 +0000 @@ -432,17 +432,14 @@ raise exception.InvalidUUID(message=message) rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid) - try: - target_dict = rpc_target.as_dict() - # NOTE(smoriya): - # 1) Remove node_id because it's an internal value and - # not present in the API object - # 2) Add node_uuid - target_dict['node_uuid'] = target_dict.pop('node_id', None) - target = VolumeTarget( - **api_utils.apply_jsonpatch(target_dict, patch)) - except api_utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) + target_dict = rpc_target.as_dict() + # NOTE(smoriya): + # 1) Remove node_id because it's an internal value and + # not present in the API object + # 2) Add node_uuid + target_dict['node_uuid'] = target_dict.pop('node_id', None) + target = VolumeTarget( + **api_utils.apply_jsonpatch(target_dict, patch)) # Update only the fields that have changed. 
for field in objects.VolumeTarget.fields: diff -Nru ironic-12.0.0/ironic/cmd/dbsync.py ironic-12.1.0/ironic/cmd/dbsync.py --- ironic-12.0.0/ironic/cmd/dbsync.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/cmd/dbsync.py 2019-03-21 20:07:40.000000000 +0000 @@ -78,48 +78,70 @@ (dbapi, 'update_to_latest_versions'), ) +# These are the models added in supported releases. We skip the version check +# for them since the tables do not exist when it happens. +NEW_MODELS = [ + # TODO(dtantsur): remove in Train + 'Allocation', + # TODO(mgoddard): remove in Train + 'DeployTemplate', + 'DeployTemplateStep', +] + class DBCommand(object): - def _check_versions(self): + def check_obj_versions(self, ignore_missing_tables=False): """Check the versions of objects. Check that the object versions are compatible with this release of ironic. It does this by comparing the objects' .version field in the database, with the expected versions of these objects. - If it isn't compatible, we exit the program, returning 2. + Returns None if compatible; a string describing the issue otherwise. """ if migration.version() is None: # no tables, nothing to check return + if ignore_missing_tables: + ignore_models = NEW_MODELS + else: + ignore_models = () + + msg = None try: - if not dbapi.check_versions(): - sys.stderr.write( - _('The database is not compatible with this ' - 'release of ironic (%s). Please run ' - '"ironic-dbsync online_data_migrations" using ' - 'the previous release.\n') - % version.version_info.release_string()) - # NOTE(rloo): We return 1 in online_data_migrations() to - # indicate that there are more objects to migrate, - # so don't use 1 here. - sys.exit(2) + if not dbapi.check_versions(ignore_models=ignore_models): + msg = (_('The database is not compatible with this ' + 'release of ironic (%s). Please run ' + '"ironic-dbsync online_data_migrations" using ' + 'the previous release.\n') + % version.version_info.release_string()) except exception.DatabaseVersionTooOld: - sys.stderr.write( - _('The database version is not compatible with this ' - 'release of ironic (%s). This can happen if you are ' - 'attempting to upgrade from a version older than ' - 'the previous release (skip versions upgrade). ' - 'This is an unsupported upgrade method. ' - 'Please run "ironic-dbsync upgrade" using the previous ' - 'releases for a fast-forward upgrade.\n') - % version.version_info.release_string()) + msg = (_('The database version is not compatible with this ' + 'release of ironic (%s). This can happen if you are ' + 'attempting to upgrade from a version older than ' + 'the previous release (skip versions upgrade). ' + 'This is an unsupported upgrade method. ' + 'Please run "ironic-dbsync upgrade" using the previous ' + 'releases for a fast-forward upgrade.\n') + % version.version_info.release_string()) + + return msg + + def _check_versions(self, ignore_missing_tables=False): + msg = self.check_obj_versions( + ignore_missing_tables=ignore_missing_tables) + if not msg: + return + else: + sys.stderr.write(msg) + # NOTE(rloo): We return 1 in online_data_migrations() to indicate + # that there are more objects to migrate, so don't use 1 here. 
sys.exit(2) def upgrade(self): - self._check_versions() + self._check_versions(ignore_missing_tables=True)
migration.upgrade(CONF.command.revision) def revision(self): diff -Nru ironic-12.0.0/ironic/cmd/status.py
ironic-12.1.0/ironic/cmd/status.py --- ironic-12.0.0/ironic/cmd/status.py 2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/cmd/status.py 2019-03-21 20:07:40.000000000 +0000 @@ -17,6 +17,7 @@ from oslo_config
import cfg from oslo_upgradecheck import upgradecheck +from ironic.cmd import dbsync from ironic.common.i18n
import _ @@ -28,21 +29,31 @@ and added to _upgrade_checks tuple. """ - def _check_placeholder(self): - # This
is just a placeholder for upgrade checks, it should be - # removed when the actual checks are added - return
upgradecheck.Result(upgradecheck.Code.SUCCESS) - - # The format of the check functions is to return an -
# oslo_upgradecheck.upgradecheck.Result - # object with the appropriate - # oslo_upgradecheck.upgradecheck.Code
and details set. - # If the check hits warnings or failures then those should be stored + def
_check_obj_versions(self): + """Check that the DB versions of objects are compatible. + + Checks that the object
versions are compatible with this + release of ironic. It does this by comparing the objects' + .version field
in the database, with the expected versions + of these objects. + """ + msg =
dbsync.DBCommand().check_obj_versions(ignore_missing_tables=True) + + if not msg: + return
upgradecheck.Result(upgradecheck.Code.SUCCESS) + else: + return upgradecheck.Result(upgradecheck.Code.FAILURE,
details=msg) + + # A tuple of check tuples of (<name of check>, <check function>). + # The name of the check
will be used in the output of this command. + # The check function takes no arguments and returns an +
# oslo_upgradecheck.upgradecheck.Result object with the appropriate + # oslo_upgradecheck.upgradecheck.Code and
details set. If the + # check function hits warnings or failures then those should be stored # in the returned
Result's "details" attribute. The # summary will be rolled up at the end of the check() method. _upgrade_checks = (
- # In the future there should be some real checks added here - (_('Placeholder'), _check_placeholder),
+ (_('Object versions'), _check_obj_versions), ) diff -Nru ironic-12.0.0/ironic/common/cinder.py
ironic-12.1.0/ironic/common/cinder.py --- ironic-12.0.0/ironic/common/cinder.py 2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/common/cinder.py 2019-03-21 20:07:40.000000000 +0000 @@ -307,10 +307,10 @@ # database record
to indicate that the attachment has # been completed, which moves the volume to the # 'attached' state. This action
also sets a mountpoint - # for the volume, if known. In our use case, there is - # no way for us to know what the
mountpoint is inside of - # the operating system, thus we send None. - client.volumes.attach(volume_id,
instance_uuid, None) + # for the volume, as cinder requires a mountpoint to + # attach the volume, thus we send
'ironic_mountpoint'.
+ client.volumes.attach(volume_id, instance_uuid, + 'ironic_mountpoint') except cinder_exceptions.ClientException as e: msg = (_('Failed to inform cinder that the attachment for volume ' diff -Nru ironic-12.0.0/ironic/common/exception.py ironic-12.1.0/ironic/common/exception.py --- ironic-12.0.0/ironic/common/exception.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/exception.py 2019-03-21 20:07:40.000000000 +0000 @@ -128,10 +128,12 @@ def __str__(self): """Encode to utf-8 then wsme api can consume it as well.""" - if not six.PY3: - return six.text_type(self.args[0]).encode('utf-8') - - return self.args[0] + value = self.__unicode__() + if six.PY3: + # On Python 3 unicode is the same as str + return value + else: + return value.encode('utf-8') def __unicode__(self): """Return a unicode representation of the exception message.""" @@ -791,3 +793,43 @@ class NodeProtected(HTTPForbidden): _msg_fmt = _("Node %(node)s is protected and cannot be undeployed, " "rebuilt or deleted") + + +class AllocationNotFound(NotFound): + _msg_fmt = _("Allocation %(allocation)s could not be found.") + + +class AllocationDuplicateName(Conflict): + _msg_fmt = _("An allocation with name %(name)s already exists.") + + +class AllocationAlreadyExists(Conflict): + _msg_fmt = _("An allocation with UUID %(uuid)s already exists.") + + +class AllocationFailed(IronicException): + _msg_fmt = _("Failed to process allocation %(uuid)s: %(error)s.") + + +class DeployTemplateDuplicateName(Conflict): + _msg_fmt = _("A deploy template with name %(name)s already exists.") + + +class DeployTemplateAlreadyExists(Conflict): + _msg_fmt = _("A deploy template with UUID %(uuid)s already exists.") + + +class DeployTemplateNotFound(NotFound): + _msg_fmt = _("Deploy template %(template)s could not be found.") + + +class InvalidDeployTemplate(Invalid): + _msg_fmt = _("Deploy template invalid: %(err)s.") + + +class IBMCError(DriverOperationError): + _msg_fmt = _("IBMC exception occurred on node %(node)s. Error: %(error)s") + + +class IBMCConnectionError(IBMCError): + _msg_fmt = _("IBMC connection failed for node %(node)s: %(error)s") diff -Nru ironic-12.0.0/ironic/common/images.py ironic-12.1.0/ironic/common/images.py --- ironic-12.0.0/ironic/common/images.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/images.py 2019-03-21 20:07:40.000000000 +0000 @@ -222,78 +222,92 @@ raise exception.ImageCreationFailed(image_type='iso', error=e) -def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk, +def create_isolinux_image_for_uefi(output_file, kernel, ramdisk, + deploy_iso=None, esp_image=None, kernel_params=None): """Creates an isolinux image on the specified file. - Copies the provided kernel, ramdisk, efiboot.img to a directory, creates - the path for grub config file, generates the isolinux configuration file - using the kernel parameters provided, generates the grub configuration - file using kernel parameters and then generates a bootable ISO image - for uefi. + Copies the provided kernel, ramdisk and EFI system partition image to + a directory, generates the grub configuration file using kernel parameters + and then generates a bootable ISO image for UEFI. :param output_file: the path to the file where the iso image needs to be created. - :param deploy_iso: deploy iso used to initiate the deploy. :param kernel: the kernel to use. :param ramdisk: the ramdisk to use. + :param deploy_iso: deploy ISO image to extract EFI system partition image + from. 
If not specified, the `esp_image` option is required. + :param esp_image: FAT12/16/32-formatted EFI system partition
image + containing the EFI boot loader (e.g. GRUB2) for each hardware + architecture to boot. This image will be
embedded into the ISO image. + If not specified, the `deploy_iso` option is required. :param kernel_params: a list of
strings(each element being a string like 'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added as the
kernel cmdline. :raises: ImageCreationFailed, if image creation failed while copying files or while running command
to generate iso. """ - ISOLINUX_BIN = 'isolinux/isolinux.bin' - ISOLINUX_CFG = 'isolinux/isolinux.cfg'
+ EFIBOOT_LOCATION = 'boot/grub/efiboot.img' - isolinux_options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
grub_options = {'linux': '/vmlinuz', 'initrd': '/initrd'} with utils.tempdir() as tmpdir: files_info = { kernel:
'vmlinuz', ramdisk: 'initrd', - CONF.isolinux_bin: ISOLINUX_BIN, } - # Open the deploy iso used to initiate deploy
and copy the - # efiboot.img i.e. boot loader to the current temporary - # directory. with utils.tempdir() as
mountdir: + # Open the deploy iso used to initiate deploy and copy the + # efiboot.img i.e. boot loader to the
current temporary + # directory. + if deploy_iso and not esp_image: + uefi_path_info, e_img_rel_path, grub_rel_path
= ( + _mount_deploy_iso(deploy_iso, mountdir)) + + grub_cfg = os.path.join(tmpdir, grub_rel_path) + + # Use the EFI
boot loader provided + elif esp_image and not deploy_iso: + e_img_rel_path = EFIBOOT_LOCATION + grub_rel_path =
CONF.grub_config_path.lstrip(' ' + os.sep) + grub_cfg = os.path.join(tmpdir, grub_rel_path) + + uefi_path_info = {
+ esp_image: e_img_rel_path, + grub_cfg: grub_rel_path + } + + else: + msg = _('Neither deploy ISO nor ESP image is
configured, ' + 'or both of them are configured') + raise exception.ImageCreationFailed( + image_type='iso',
error=msg) - # if either of these variables are not initialized then the - # uefi efiboot.img cannot be created.
files_info.update(uefi_path_info) + try: _create_root_fs(tmpdir, files_info) + except (OSError, IOError) as e:
LOG.exception("Creating the filesystem root failed.") raise exception.ImageCreationFailed(image_type='iso', error=e)
- finally: - _umount_without_raise(mountdir) - cfg = _generate_cfg(kernel_params, - CONF.isolinux_config_template,
isolinux_options) - - isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG) - utils.write_to_file(isolinux_cfg, cfg)
+ finally: + if deploy_iso: + _umount_without_raise(mountdir) # Generate and copy grub config file. - grub_cfg =
os.path.join(tmpdir, grub_rel_path) grub_conf = _generate_cfg(kernel_params, CONF.grub_config_template, grub_options)
utils.write_to_file(grub_cfg, grub_conf) # Create the boot_iso.
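# For illustration only (hypothetical paths): with the new signature, a
# caller that already has an ESP image can build a UEFI boot ISO without
# a deploy ISO:
#
#     create_isolinux_image_for_uefi('/tmp/boot.iso', '/tmp/vmlinuz',
#                                    '/tmp/initrd', esp_image='/tmp/esp.img',
#                                    kernel_params=['nofb', 'nomodeset'])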
try: - utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", - '-cache-inodes', '-J', '-l', '-no-emul-boot',
- '-boot-load-size', '4', '-boot-info-table', - '-b', ISOLINUX_BIN, '-eltorito-alt-boot', + utils.execute('mkisofs',
'-r', '-V', "VMEDIA_BOOT_ISO", '-l', '-e', e_img_rel_path, '-no-emul-boot', '-o', output_file, tmpdir) + except
processutils.ProcessExecutionError as e: LOG.exception("Creating ISO image failed.") raise
exception.ImageCreationFailed(image_type='iso', error=e) @@ -414,8 +428,8 @@ def create_boot_iso(context,
output_filename, kernel_href, - ramdisk_href, deploy_iso_href, root_uuid=None, - kernel_params=None, boot_mode=None):
+ ramdisk_href, deploy_iso_href=None, esp_image_href=None, + root_uuid=None, kernel_params=None, boot_mode=None):
"""Creates a bootable ISO image for a node. Given the hrefs for kernel, ramdisk, root partition's UUID and
@@ -427,8 +441,15 @@ :param output_filename: the absolute path of the output ISO file :param kernel_href: URL or
glance uuid of the kernel to use :param ramdisk_href: URL or glance uuid of the ramdisk to use - :param
deploy_iso_href: URL or glance uuid of the deploy iso used - :param root_uuid: uuid of the root filesystem (optional)
+ :param deploy_iso_href: URL or glance UUID of the deploy ISO image + to extract EFI system partition image. If not
specified, + the `esp_image_href` option must be present if UEFI-bootable + ISO is desired. + :param esp_image_href:
URL or glance UUID of FAT12/16/32-formatted EFI + system partition image containing the EFI boot loader (e.g. GRUB2)
+ for each hardware architecture to boot. This image will be embedded + into the ISO image. If not specified, the
`deploy_iso_href` option + is required for building a UEFI-bootable ISO. :param kernel_params: a string containing
whitespace separated values kernel cmdline arguments of the form K=V or K (optional). :param boot_mode: the boot mode
in which the deploy is to happen. @@ -437,6 +458,7 @@ with utils.tempdir() as tmpdir: kernel_path =
os.path.join(tmpdir, kernel_href.split('/')[-1]) ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
+ fetch(context, kernel_href, kernel_path) fetch(context, ramdisk_href, ramdisk_path) @@ -447,13 +469,28 @@
params.append(kernel_params) if boot_mode == 'uefi': - deploy_iso = os.path.join(tmpdir,
deploy_iso_href.split('/')[-1]) - fetch(context, deploy_iso_href, deploy_iso) + + deploy_iso_path = esp_image_path =
None + + if deploy_iso_href: + deploy_iso_path = os.path.join( + tmpdir, deploy_iso_href.split('/')[-1])
+ fetch(context, deploy_iso_href, deploy_iso_path) + + elif esp_image_href: + esp_image_path = os.path.join(
+ tmpdir, esp_image_href.split('/')[-1]) + fetch(context, esp_image_href, esp_image_path) + + elif CONF.esp_image:
+ esp_image_path = CONF.esp_image + create_isolinux_image_for_uefi(output_filename, - deploy_iso, kernel_path,
ramdisk_path, - params) + deploy_iso=deploy_iso_path, + esp_image=esp_image_path, + kernel_params=params) else:
create_isolinux_image_for_bios(output_filename, kernel_path, diff -Nru ironic-12.0.0/ironic/common/json_rpc/client.py
ironic-12.1.0/ironic/common/json_rpc/client.py --- ironic-12.0.0/ironic/common/json_rpc/client.py 1970-01-01
00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/common/json_rpc/client.py 2019-03-21 20:07:40.000000000 +0000
@@ -0,0 +1,185 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file
except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A simple JSON RPC client. + +This client is compatible with any JSON RPC 2.0 implementation, including ours. +""" + +from oslo_config import cfg +from oslo_log import log +from oslo_utils import importutils +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import json_rpc +from ironic.common import keystone + + +CONF = cfg.CONF +LOG = log.getLogger(__name__) +_SESSION = None + + +def _get_session(): + global _SESSION + + if _SESSION is None: + if json_rpc.require_authentication(): + auth = keystone.get_auth('json_rpc') + else: + auth = None + + _SESSION = keystone.get_session('json_rpc', auth=auth) + _SESSION.headers = { + 'Content-Type': 'application/json' + } + + return _SESSION + + +class Client(object): + """JSON RPC client with ironic exception handling.""" + + def __init__(self, serializer, version_cap=None): + self.serializer = serializer + self.version_cap = version_cap + + def can_send_version(self, version): + return _can_send_version(version, self.version_cap) + + def prepare(self, topic, version=None): + host = topic.split('.', 1)[1] + return _CallContext(host, self.serializer, version=version, + version_cap=self.version_cap) + + +class _CallContext(object): + """Wrapper object for compatibility with oslo.messaging API.""" + + def __init__(self, host, serializer, version=None, version_cap=None): + self.host = host + self.serializer = serializer + self.version = version + self.version_cap = version_cap + + def _handle_error(self, error): + if not error: + return + + message = error['message'] + try: + cls = error['data']['class'] + except KeyError: + LOG.error("Unexpected error from RPC: %s", error) + raise exception.IronicException( + _("Unexpected error raised by RPC")) + else: + if not cls.startswith('ironic.common.exception.'): + # NOTE(dtantsur): protect against arbitrary code execution + LOG.error("Unexpected error from RPC: %s", error) + raise exception.IronicException( + _("Unexpected error raised by RPC")) + raise importutils.import_object(cls, message, + code=error.get('code', 500)) + + def call(self, context, method, version=None, **kwargs): + """Call conductor RPC. + + Versioned objects are automatically serialized and deserialized. + + :param context: Security context. + :param method: Method name. + :param version: RPC API version to use. + :param kwargs: Keyword arguments to pass. + :return: RPC result (if any). + """ + return self._request(context, method, cast=False, version=version, + **kwargs) + + def cast(self, context, method, version=None, **kwargs): + """Call conductor RPC asynchronously. + + Versioned objects are automatically serialized and deserialized. + + :param context: Security context. + :param method: Method name. + :param version: RPC API version to use. + :param kwargs: Keyword arguments to pass. + :return: None + """ + return self._request(context, method, cast=True, version=version, + **kwargs) + + def _request(self, context, method, cast=False, version=None, **kwargs): + """Call conductor RPC. 
+ + Versioned objects are automatically serialized and deserialized. + + :param context: Security context. + :param method: Method name. + :param cast: If true, use a JSON RPC notification. + :param version: RPC API version to use. + :param kwargs: Keyword arguments to pass. + :return: RPC result (if any). + """ + params = {key: self.serializer.serialize_entity(context, value) + for key, value in kwargs.items()} + params['context'] = context.to_dict() + + if version is None: + version = self.version + if version is not None: + _check_version(version, self.version_cap) + params['rpc.version'] = version + + body = { + "jsonrpc": "2.0", + "method": method, + "params": params, + } + if not cast: + body['id'] = context.request_id or uuidutils.generate_uuid() + + LOG.debug("RPC %s with %s", method, body) + url = 'http://%s:%d' % (self.host, CONF.json_rpc.port) + result = _get_session().post(url, json=body) + LOG.debug('RPC %s returned %s', method, result.text or '') + + if not cast: + result = result.json() + self._handle_error(result.get('error')) + result = self.serializer.deserialize_entity(context, + result['result']) + return result + + +def _can_send_version(requested, version_cap): + if requested is None or version_cap is None: + return True + + requested_parts = [int(item) for item in requested.split('.', 1)] + version_cap_parts = [int(item) for item in version_cap.split('.', 1)] + + if requested_parts[0] != version_cap_parts[0]: + return False # major version mismatch + else: + return requested_parts[1] <= version_cap_parts[1] + + +def _check_version(requested, version_cap): + if not _can_send_version(requested, version_cap): + raise RuntimeError(_("Cannot send RPC request: requested version " + "%(requested)s, maximum allowed version is " + "%(version_cap)s") % {'requested': requested, + 'version_cap': version_cap}) diff -Nru ironic-12.0.0/ironic/common/json_rpc/__init__.py ironic-12.1.0/ironic/common/json_rpc/__init__.py --- ironic-12.0.0/ironic/common/json_rpc/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/common/json_rpc/__init__.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + + +CONF = cfg.CONF + + +def require_authentication(): + return (CONF.json_rpc.auth_strategy or CONF.auth_strategy) == 'keystone' diff -Nru ironic-12.0.0/ironic/common/json_rpc/server.py ironic-12.1.0/ironic/common/json_rpc/server.py --- ironic-12.0.0/ironic/common/json_rpc/server.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/common/json_rpc/server.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,283 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by
applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific
language governing permissions and limitations +# under the License. + +"""Implementation of JSON RPC for
communication between API and conductors. + +This module implements a subset of JSON RPC 2.0 as defined in
+https://www.jsonrpc.org/specification. Main differences: +* No support for batched requests. +* No support for
positional argument passing. +* No JSON RPC 1.0 fallback. +""" + +import json + +from keystonemiddleware import
auth_token +from oslo_config import cfg +from oslo_log import log +import oslo_messaging +from oslo_service import
service +from oslo_service import wsgi +import webob + +from ironic.common import context as ir_context +from
ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import json_rpc + + +CONF =
cfg.CONF +LOG = log.getLogger(__name__) +_BLACK_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'} + +
+def _build_method_map(manager): + """Build mapping from method names to their bodies. + + :param manager: A conductor
manager. + :return: dict with mapping + """ + result = {} + for method in dir(manager): + if method.startswith('_')
or method in _BLACK_LIST: + continue + func = getattr(manager, method) + if not callable(func): + continue
+ LOG.debug('Adding RPC method %s', method) + result[method] = func + return result + + +class
JsonRpcError(exception.IronicException): + pass + + +class ParseError(JsonRpcError): + code = -32700 + _msg_fmt =
_("Invalid JSON received by RPC server") + + +class InvalidRequest(JsonRpcError): + code = -32600 + _msg_fmt =
_("Invalid request object received by RPC server") + + +class MethodNotFound(JsonRpcError): + code = -32601
+ _msg_fmt = _("Method %(name)s was not found") + + +class InvalidParams(JsonRpcError): + code = -32602 + _msg_fmt =
_("Params %(params)s are invalid for %(method)s: %(error)s") + + +class WSGIService(service.Service): + """Provides
ability to launch JSON RPC as a WSGI application.""" + + def __init__(self, manager, serializer): + self.manager =
manager + self.serializer = serializer + self._method_map = _build_method_map(manager) + if
json_rpc.require_authentication(): + conf = dict(CONF.keystone_authtoken) + app =
auth_token.AuthProtocol(self._application, conf) + else: + app = self._application + self.server = wsgi.Server(CONF,
'ironic-json-rpc', app, + host=CONF.json_rpc.host_ip, + port=CONF.json_rpc.port, + use_ssl=CONF.json_rpc.use_ssl) +
+ def _application(self, environment, start_response): + """WSGI application for conductor JSON RPC.""" + request =
webob.Request(environment) + if request.method != 'POST': + body = {'error': {'code': 405, + 'message': _('Only POST
method can be used')}} + return webob.Response(status_code=405, json_body=body)( + environment, start_response) + + if
json_rpc.require_authentication(): + roles = (request.headers.get('X-Roles') or '').split(',') + if 'admin' not in
roles: + LOG.debug('Roles %s do not contain "admin", rejecting ' + 'request', roles) + body = {'error': {'code': 403,
'message': _('Forbidden')}} + return webob.Response(status_code=403, json_body=body)( + environment, start_response)
+ + result = self._call(request) + if result is not None: + response =
webob.Response(content_type='application/json', + charset='UTF-8', + json_body=result) + else: + response = webob.Response(status_code=204) + return response(environment, start_response) + + def _handle_error(self, exc, request_id=None): + """Generate a JSON RPC 2.0 error body. + + :param exc: Exception object. + :param request_id: ID of the request (if any). + :return: dict with response body + """ + if isinstance(exc, oslo_messaging.ExpectedException): + exc = exc.exc_info[1] + + expected = isinstance(exc, exception.IronicException) + cls = exc.__class__ + if expected: + LOG.debug('RPC error %s: %s', cls.__name__, exc) + else: + LOG.exception('Unexpected RPC exception %s', cls.__name__) + + response = { + "jsonrpc": "2.0", + "id": request_id, + "error": { + "code": getattr(exc, 'code', 500), + "message": str(exc), + } + } + if expected and not isinstance(exc, JsonRpcError): + # Allow de-serializing the correct class for expected errors. + response['error']['data'] = { + 'class': '%s.%s' % (cls.__module__, cls.__name__) + } + return response + + def _call(self, request): + """Process a JSON RPC request. + + :param request: ``webob.Request`` object. + :return: dict with response body. + """ + request_id = None + try: + try: + body = json.loads(request.text) + except ValueError: + LOG.error('Cannot parse JSON RPC request as JSON') + raise ParseError() + + if not isinstance(body, dict): + LOG.error('JSON RPC request %s is not an object (batched ' + 'requests are not supported)', body) + raise InvalidRequest() + + request_id = body.get('id') + params = body.get('params', {}) + + if (body.get('jsonrpc') != '2.0' + or not body.get('method') + or not isinstance(params, dict)): + LOG.error('JSON RPC request %s is invalid', body) + raise InvalidRequest() + except Exception as exc: + # We do not treat malformed requests as notifications and return + # a response even when request_id is None. This seems in agreement + # with the examples in the specification. + return self._handle_error(exc, request_id) + + try: + method = body['method'] + try: + func = self._method_map[method] + except KeyError: + raise MethodNotFound(name=method) + + result = self._handle_requests(func, method, params) + if request_id is not None: + return { + "jsonrpc": "2.0", + "result": result, + "id": request_id + } + except Exception as exc: + result = self._handle_error(exc, request_id) + # We treat correctly formed requests without "id" as notifications + # and do not return any errors. + if request_id is not None: + return result + + def _handle_requests(self, func, name, params): + """Convert arguments and call a method. + + :param func: Callable object. + :param name: RPC call name for logging. + :param params: Keyword arguments. + :return: call result as JSON. + """ + # TODO(dtantsur): server-side version check? + params.pop('rpc.version', None) + + try: + context = params.pop('context') + except KeyError: + context = None + else: + # A valid context is required for deserialization + if not isinstance(context, dict): + raise InvalidParams( + _("Context must be a dictionary, if provided")) + + context = ir_context.RequestContext.from_dict(context) + params = {key: self.serializer.deserialize_entity(context, value) + for key, value in params.items()} + params['context'] = context + + LOG.debug('RPC %s with %s', name, params) + try: + result = func(**params) + # FIXME(dtantsur): we could use the inspect module, but + # oslo_messaging.expected_exceptions messes up signatures. 
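# For example, invoking a method with an unexpected keyword argument raises
# TypeError below, which is reported back to the client as JSON RPC error
# code -32602 (InvalidParams).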
+ except TypeError as exc: + raise InvalidParams(params=', '.join(params), + method=name, error=exc) + + if context is not None: + # Currently it seems that we can serialize even with invalid + # context, but I'm not sure it's guaranteed to be the case. + result = self.serializer.serialize_entity(context, result) + LOG.debug('RPC %s returned %s', name, result) + return result + + def start(self): + """Start serving this service using loaded configuration. + + :returns: None + """ + self.server.start() + + def stop(self): + """Stop serving this API. + + :returns: None + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + """ + self.server.wait() + + def reset(self): + """Reset server greenpool size to default. + + :returns: None + """ + self.server.reset() diff -Nru ironic-12.0.0/ironic/common/neutron.py ironic-12.1.0/ironic/common/neutron.py --- ironic-12.0.0/ironic/common/neutron.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/neutron.py 2019-03-21 20:07:40.000000000 +0000 @@ -14,13 +14,16 @@ from neutronclient.v2_0 import client as clientv20 from oslo_log import log from oslo_utils import uuidutils +import retrying +from ironic.api.controllers.v1 import types from ironic.common import context as ironic_context from ironic.common import exception from ironic.common.i18n import _ from ironic.common import keystone from ironic.common.pxe_utils import DHCP_CLIENT_ID from ironic.conf import CONF +from ironic import objects LOG = log.getLogger(__name__) @@ -31,6 +34,7 @@ _NEUTRON_SESSION = None VNIC_BAREMETAL = 'baremetal' +VNIC_SMARTNIC = 'smart-nic' PHYSNET_PARAM_NAME = 'provider:physical_network' """Name of the neutron network API physical network parameter.""" @@ -256,6 +260,18 @@ binding_profile = {'local_link_information': [portmap[ironic_port.uuid]]} body['port']['binding:profile'] = binding_profile + + is_smart_nic = is_smartnic_port(ironic_port) + if is_smart_nic: + link_info = binding_profile['local_link_information'][0] + LOG.debug('Setting hostname as host_id in case of Smart NIC, ' + 'port %(port_id)s, hostname %(hostname)s', + {'port_id': ironic_port.uuid, + 'hostname': link_info['hostname']}) + body['port']['binding:host_id'] = link_info['hostname'] + + # TODO(hamdyk): use portbindings.VNIC_SMARTNIC from neutron-lib + body['port']['binding:vnic_type'] = VNIC_SMARTNIC client_id = ironic_port.extra.get('client-id') if client_id: client_id_opt = {'opt_name': DHCP_CLIENT_ID, @@ -264,7 +280,11 @@ extra_dhcp_opts.append(client_id_opt) body['port']['extra_dhcp_opts'] = extra_dhcp_opts try: + if is_smart_nic: + wait_for_host_agent(client, body['port']['binding:host_id']) port = client.create_port(body) + if is_smart_nic: + wait_for_port_status(client, port['port']['id'], 'ACTIVE') except neutron_exceptions.NeutronClientException as e: failures.append(ironic_port.uuid) LOG.warning("Could not create neutron port for node's " @@ -342,6 +362,8 @@ '%(node_id)s.', {'vif_port_id': port['id'], 'node_id': node_uuid}) + if is_smartnic_port(port): + wait_for_host_agent(client, port['binding:host_id']) try: client.delete_port(port['id']) # NOTE(mgoddard): Ignore if the port was deleted by nova. 
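Taken together, the Smart NIC changes above amount to the following flow around port creation. This is a condensed restatement of the code in this module, not a drop-in snippet:

    # The agent on the Smart NIC host must be alive before the port is
    # created, and the port must become ACTIVE before deployment proceeds.
    if is_smartnic_port(ironic_port):
        wait_for_host_agent(client, body['port']['binding:host_id'])
    port = client.create_port(body)
    if is_smartnic_port(ironic_port):
        wait_for_port_status(client, port['port']['id'], 'ACTIVE')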
@@ -484,10 +506,58 @@ "in the nodes %(node)s port %(port)s", {'node': node.uuid, 'port': port.uuid}) return False
+ if (port.is_smartnic and not types.locallinkconnectiontype + .validate_for_smart_nic(port.local_link_connection)):
+ LOG.error("Smart NIC port must have port_id and hostname in " + "local_link_connection, port: %s", port['id'])
+ return False + if (not port.is_smartnic and types.locallinkconnectiontype
+ .validate_for_smart_nic(port.local_link_connection)): + LOG.error("Only Smart NIC ports can have port_id and
hostname " + "in local_link_connection, port: %s", port['id']) + return False return True + +def
_validate_agent(client, **kwargs): + """Check that the given neutron agent is alive + + :param client: Neutron client
+ :param kwargs: Additional parameters to pass to the neutron client + list_agents method. + :returns: A boolean
describing the agent status; if more than one agent + is returned by the client, True is returned if at least one of
+ them is alive. + :raises: NetworkError in case of failure contacting Neutron. + """ + try: + agents =
client.list_agents(**kwargs)['agents'] + for agent in agents: + if agent['alive']: + return True + return False
+ except neutron_exceptions.NeutronClientException: + raise exception.NetworkError('Failed to contact Neutron server')
+ + +def is_smartnic_port(port_data): + """Check whether the port is a Smart NIC port + + :param port_data: an instance
of ironic.objects.port.Port + or port data as dict. + :returns: A boolean indicating whether the port is a Smart NIC
+ port. + """ + if isinstance(port_data, objects.Port): + return port_data.supports_is_smartnic() and
port_data.is_smartnic + + if isinstance(port_data, dict): + return port_data.get('is_smartnic', False) +
+ LOG.warning('Unknown port data type: %(type)s', {'type': type(port_data)}) + return False + + def
_get_network_by_uuid_or_name(client, uuid_or_name, net_type=_('network'), **params): """Return a neutron network by
UUID or name. @@ -586,6 +656,74 @@ if segment[PHYSNET_PARAM_NAME]) + +@retrying.retry( + stop_max_attempt_number=
CONF.agent.neutron_agent_max_attempts, + retry_on_exception=lambda e: isinstance(e, exception.NetworkError),
+ wait_fixed=CONF.agent.neutron_agent_status_retry_interval * 1000 +) +def wait_for_host_agent(client, host_id,
target_state='up'): + """Wait for the neutron agent to reach the target state + + :param client: A Neutron client
object. + :param host_id: Agent host_id + :param target_state: up: wait for up status, + down: wait for down status
+ :returns: True if the agent state matches the requested + ``target_state``. + :raises: exception.Invalid if
'target_state' is not valid. + :raises: exception.NetworkError if host status didn't match the required + status after
max retry attempts. + """ + if target_state not in ['up', 'down']: + raise exception.Invalid( + 'Invalid requested
agent state to validate, accepted values: ' + 'up, down.
Requested state: %(target_state)s' % { 'target_state': target_state}) + + LOG.debug('Validating host %(host_id)s agent
is %(status)s', + {'host_id': host_id, + 'status': target_state}) + is_alive = _validate_agent(client, host=host_id)
+ LOG.debug('Agent on host %(host_id)s is %(status)s', + {'host_id': host_id, + 'status': 'up' if is_alive else
'down'}) + if ((target_state == 'up' and is_alive) or + (target_state == 'down' and not is_alive)): + return True
+ raise exception.NetworkError( + 'Agent on host %(host)s failed to reach state %(state)s' % { + 'host': host_id,
'state': target_state}) + + +@retrying.retry( + stop_max_attempt_number=CONF.agent.neutron_agent_max_attempts,
+ retry_on_exception=lambda e: isinstance(e, exception.NetworkError), + wait_fixed=
CONF.agent.neutron_agent_status_retry_interval * 1000 +) +def wait_for_port_status(client, port_id, status):
+ """Wait for the port status to become the desired status + + :param client: A Neutron client object. + :param
port_id: Neutron port_id + :param status: Port's target status, can be ACTIVE, DOWN ... etc. + :returns: True if the
port status matches the requested + ``status`` value. + :raises: InvalidParameterValue if the port does not exist.
+ :raises: exception.NetworkError if port status didn't match + the required status after max retry attempts. + """
+ LOG.debug('Validating Port %(port_id)s status is %(status)s', + {'port_id': port_id, 'status': status}) + port_info
= _get_port_by_uuid(client, port_id) + LOG.debug('Port %(port_id)s status is: %(status)s', + {'port_id': port_id,
'status': port_info['status']}) + if port_info['status'] == status: + return True + raise exception.NetworkError(
+ 'Port %(port_id)s failed to reach status %(status)s' % { + 'port_id': port_id, 'status': status}) + + class
NeutronNetworkInterfaceMixin(object): def get_cleaning_network_uuid(self, task): diff -Nru
ironic-12.0.0/ironic/common/policy.py ironic-12.1.0/ironic/common/policy.py --- ironic-12.0.0/ironic/common/policy.py
2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/policy.py 2019-03-21 20:07:40.000000000 +0000
@@ -404,6 +404,63 @@ {'path': '/conductors/{hostname}', 'method': 'GET'}]), ] +allocation_policies = [
+ policy.DocumentedRuleDefault( + 'baremetal:allocation:get', + 'rule:is_admin or rule:is_observer', + 'Retrieve
Allocation records', + [{'path': '/allocations', 'method': 'GET'}, + {'path': '/allocations/{allocation_id}',
'method': 'GET'}, + {'path': '/nodes/{node_ident}/allocation', 'method': 'GET'}]), + policy.DocumentedRuleDefault(
+ 'baremetal:allocation:create', + 'rule:is_admin', + 'Create Allocation records', + [{'path': '/allocations',
'method': 'POST'}]), + policy.DocumentedRuleDefault( + 'baremetal:allocation:delete', + 'rule:is_admin', + 'Delete
Allocation records', + [{'path': '/allocations/{allocation_id}', 'method': 'DELETE'}, + {'path':
'/nodes/{node_ident}/allocation', 'method': 'DELETE'}]), +] + +event_policies = [ + policy.DocumentedRuleDefault(
+ 'baremetal:events:post', + 'rule:is_admin', + 'Post events', + [{'path': '/events', 'method': 'POST'}]) +] + +
+deploy_template_policies = [ + policy.DocumentedRuleDefault( + 'baremetal:deploy_template:get', + 'rule:is_admin or
rule:is_observer', + 'Retrieve Deploy Template records', + [{'path': '/deploy_templates', 'method': 'GET'}, + {'path':
'/deploy_templates/{deploy_template_ident}', + 'method': 'GET'}]), + policy.DocumentedRuleDefault(
+ 'baremetal:deploy_template:create', + 'rule:is_admin', + 'Create Deploy Template
records', + [{'path': '/deploy_templates', 'method': 'POST'}]), + policy.DocumentedRuleDefault(
+ 'baremetal:deploy_template:delete', + 'rule:is_admin', + 'Delete Deploy Template records', + [{'path':
'/deploy_templates/{deploy_template_ident}', + 'method': 'DELETE'}]), + policy.DocumentedRuleDefault(
+ 'baremetal:deploy_template:update', + 'rule:is_admin', + 'Update Deploy Template records', + [{'path':
'/deploy_templates/{deploy_template_ident}', + 'method': 'PATCH'}]), +] + def list_policies(): policies =
itertools.chain( @@ -416,7 +473,10 @@ vendor_passthru_policies, utility_policies, volume_policies,
- conductor_policies + conductor_policies, + allocation_policies, + event_policies, + deploy_template_policies, )
return policies diff -Nru ironic-12.0.0/ironic/common/pxe_utils.py ironic-12.1.0/ironic/common/pxe_utils.py ---
ironic-12.0.0/ironic/common/pxe_utils.py 2018-12-19 10:02:33.000000000 +0000 +++
ironic-12.1.0/ironic/common/pxe_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -422,7 +422,7 @@ """Retrieves the DHCP
PXE boot options. :param task: A TaskManager instance. - :param ipxe_enabled: Default false boolean that siganls if
iPXE + :param ipxe_enabled: Default false boolean that signals if iPXE formatting should be returned by the method for
DHCP server configuration. :param url_boot: Default false boolean to inform the method if @@ -459,21 +459,41 @@
script_name = os.path.basename(CONF.pxe.ipxe_boot_script) # TODO(TheJulia): We should make this smarter to handle
unwrapped v6 # addresses, since the format is http://[ff80::1]:80/boot.ipxe. - # As opposed to requiring
configuraiton, we can eventually make this + # As opposed to requiring configuration, we can eventually make this
# dynamic, and would need to do similar then. ipxe_script_url = '/'.join([CONF.deploy.http_url, script_name]) # if the
request comes from dumb firmware send them the iPXE # boot image. if dhcp_provider_name == 'neutron': - # Neutron use
dnsmasq as default DHCP agent, add extra config - # to neutron "dhcp-match=set:ipxe,175" and use below option
+ # Neutron uses dnsmasq as the default DHCP agent. Neutron carries + # the configuration relating to the tags below.
The ipxe6 tag, + # added in the Stein cycle, identifies the iPXE User-Class + # directly and is only sent in DHCPv6. +
+ # NOTE(TheJulia): Let's send both and let neutron tag/sort it out, as + # an ip_version field is also transmitted.
Plus, given the + # semi-obscure nature of this, being more verbose and letting + # the DHCP server do the best thing
possible is likely the best + # course of action. dhcp_opts.append({'opt_name': "tag:!ipxe,%s" % boot_file_param,
'opt_value': boot_file}) + dhcp_opts.append({'opt_name': "tag:!ipxe6,%s" % boot_file_param, 'opt_value': boot_file})
dhcp_opts.append({'opt_name': "tag:ipxe,%s" % boot_file_param, 'opt_value': ipxe_script_url})
+ dhcp_opts.append({'opt_name': "tag:ipxe6,%s" % boot_file_param, + 'opt_value': ipxe_script_url}) else: # !175 ==
non-iPXE. # http://ipxe.org/howto/dhcpd#ipxe-specific_options + if ip_version == 6: + LOG.warning('IPv6 is enabled and
the DHCP driver appears set ' + 'to a plugin other than "neutron". Node %(name)s ' + 'may not receive proper DHCPv6
provided ' + 'boot parameters.', {'name': task.node.uuid}) + # NOTE(TheJulia): This was added for ISC DHCPd support;
however, it + # appears that ISC support was never added to neutron and is likely + # a downstream driver.
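# For illustration: with an iPXE-capable DHCP server such as ISC dhcpd,
# option 175 carries the iPXE encapsulated options, so the '!175'
# condition below matches clients that did not send it (plain PXE
# firmware) and hands them boot_file; the matching iPXE case (not shown
# in this hunk) serves the script URL instead.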
dhcp_opts.append({'opt_name': "!%s,%s" % (DHCP_IPXE_ENCAP_OPTS, boot_file_param), 'opt_value': boot_file})
@@ -629,7 +649,7 @@ return image_info -def get_image_info(node, mode='deploy'): +def get_image_info(node,
mode='deploy', ipxe_enabled=False): """Generate the paths for TFTP files for deploy or rescue images. This method
generates the paths for the deploy (or rescue) kernel and @@ -639,6 +659,8 @@ :param mode: Label indicating a deploy
or rescue operation being carried out on the node. Supported values are 'deploy' and 'rescue'. Defaults to 'deploy',
indicating deploy operation is being carried out. + :param ipxe_enabled: A boolean, defaulting to False, that tells
+ the method whether the caller is using iPXE. :returns: a dictionary whose keys are the names of the images
(deploy_kernel, deploy_ramdisk, or rescue_kernel, rescue_ramdisk) and values are the absolute paths of them.
@@ -648,7 +670,7 @@ d_info = parse_driver_info(node, mode=mode) return get_kernel_ramdisk_info( - node.uuid, d_info,
mode=mode) + node.uuid, d_info, mode=mode, ipxe_enabled=ipxe_enabled) def build_deploy_pxe_options(task, pxe_info,
mode='deploy', @@ -777,7 +799,12 @@ # NOTE(pas-ha) if it is takeover of ACTIVE node or node performing # unrescue
operation, first ensure that basic PXE configs and links # are in place before switching pxe config - if
(node.provision_state in [states.ACTIVE, states.UNRESCUING] + # NOTE(TheJulia): Also consider DEPLOYING a valid state
+ # for this check, as otherwise deployments can fail if the agent was + # booted outside the direct actions of the
+ # boot interface. + if (node.provision_state in [states.ACTIVE, states.UNRESCUING, + states.DEPLOYING] and not
os.path.isfile(pxe_config_path)): pxe_options = build_pxe_config_options(task, instance_image_info, service=True,
@@ -786,6 +813,7 @@ create_pxe_config(task, pxe_options, pxe_config_template, ipxe_enabled=ipxe_enabled) iwdi =
node.driver_internal_info.get('is_whole_disk_image') + deploy_utils.switch_pxe_config( pxe_config_path,
root_uuid_or_disk_id, boot_mode_utils.get_boot_mode(node), @@ -925,8 +953,9 @@ @image_cache.cleanup(priority=25) class
TFTPImageCache(image_cache.ImageCache): def __init__(self): + master_path = CONF.pxe.tftp_master_path or None
super(TFTPImageCache, self).__init__( - CONF.pxe.tftp_master_path, + master_path, # MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024, # min -> sec diff -Nru ironic-12.0.0/ironic/common/raid.py
ironic-12.1.0/ironic/common/raid.py --- ironic-12.0.0/ironic/common/raid.py 2018-12-19 10:02:33.000000000 +0000 +++
ironic-12.1.0/ironic/common/raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -126,3 +126,51 @@ node.properties =
properties node.save() + + +def filter_target_raid_config( + node, create_root_volume=True,
create_nonroot_volumes=True): + """Filter the target raid config based on root volume creation + + This method can be
used by any raid interface which wants to filter + the target raid config based on whether the root volume and/or
+ non-root volumes will be created. + + :param node: a node object + :param create_root_volume: A boolean, defaulting
to True; if False, + root volumes are filtered out. + :param create_nonroot_volumes: A boolean, defaulting to True; if
+ False, non-root volumes are filtered out. + :raises: MissingParameterValue, if node.target_raid_config is missing
+ or was found to be empty after skipping root volume and/or non-root + volumes.
+ :returns: the filtered target_raid_config + """ + if not node.target_raid_config: + raise
exception.MissingParameterValue( + _("Node %s has no target RAID configuration.") % node.uuid) + + target_raid_config =
node.target_raid_config.copy() + + error_msg_list = [] + if not create_root_volume:
+ target_raid_config['logical_disks'] = [ + x for x in target_raid_config['logical_disks'] + if not
x.get('is_root_volume')] + error_msg_list.append(_("skipping root volume")) + + if not create_nonroot_volumes:
+ target_raid_config['logical_disks'] = [ + x for x in target_raid_config['logical_disks'] + if
x.get('is_root_volume')] + error_msg_list.append(_("skipping non-root volumes")) + + if not
target_raid_config['logical_disks']: + error_msg = _(' and ').join(error_msg_list) + raise
exception.MissingParameterValue( + _("Node %(node)s has empty target RAID configuration " + "after %(msg)s.") %
{'node': node.uuid, 'msg': error_msg}) + + return target_raid_config diff -Nru
ironic-12.0.0/ironic/common/release_mappings.py ironic-12.1.0/ironic/common/release_mappings.py ---
ironic-12.0.0/ironic/common/release_mappings.py 2018-12-19 10:02:37.000000000 +0000 +++
ironic-12.1.0/ironic/common/release_mappings.py 2019-03-21 20:07:40.000000000 +0000 @@ -130,7 +130,7 @@
'VolumeTarget': ['1.0'], } }, - 'master': { + '12.0': { 'api': '1.49', 'rpc': '1.47', 'objects': { @@ -145,6 +145,40 @@
'VolumeTarget': ['1.0'], } }, + '12.1': { + 'api': '1.56', + 'rpc': '1.48', + 'objects': { + 'Allocation': ['1.0'],
+ 'Node': ['1.32', '1.31', '1.30'], + 'Conductor': ['1.3'], + 'Chassis': ['1.3'], + 'DeployTemplate': ['1.0', '1.1'],
+ 'Port': ['1.9'], + 'Portgroup': ['1.4'], + 'Trait': ['1.0'], + 'TraitList': ['1.0'], + 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'], + } + }, + 'master': { + 'api': '1.56', + 'rpc': '1.48', + 'objects': { + 'Allocation':
['1.0'], + 'Node': ['1.32'], + 'Conductor': ['1.3'], + 'Chassis': ['1.3'], + 'DeployTemplate': ['1.1'], + 'Port':
['1.9'], + 'Portgroup': ['1.4'], + 'Trait': ['1.0'], + 'TraitList': ['1.0'], + 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'], + } + }, } # NOTE(xek): Assign each named release to the appropriate semver. # # There
should be at most two named mappings here. -# NOTE(TheJulia): remove queens prior to the Stein release.
-RELEASE_MAPPING['queens'] = RELEASE_MAPPING['10.1'] +# NOTE(TheJulia): remove Rocky prior to the Train release.
RELEASE_MAPPING['rocky'] = RELEASE_MAPPING['11.1'] +RELEASE_MAPPING['stein'] = RELEASE_MAPPING['12.1'] # List of
available versions with named versions first; 'master' is excluded. RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) -
{'master'}, reverse=True) diff -Nru ironic-12.0.0/ironic/common/rpc.py ironic-12.1.0/ironic/common/rpc.py ---
ironic-12.0.0/ironic/common/rpc.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/rpc.py
2019-03-21 20:07:40.000000000 +0000 @@ -13,17 +13,15 @@ # License for the specific language governing permissions and
limitations # under the License.
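The new '12.1' and 'stein' entries above feed ironic's rolling-upgrade machinery: while old and new services coexist, RPC and object versions are pinned to the old release. A sketch of the relevant ironic.conf setting, assuming an upgrade from 12.0 (see the upgrade documentation for the full procedure):

    [DEFAULT]
    pin_release_version = 12.0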
-from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from osprofiler import profiler from ironic.common import context as ironic_context from ironic.common import exception +from ironic.conf import CONF -CONF = cfg.CONF - TRANSPORT = None NOTIFICATION_TRANSPORT = None SENSORS_NOTIFIER = None @@ -53,10 +51,10 @@ serializer=serializer, driver='noop') else: - VERSIONED_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, - serializer=serializer, - topics=['ironic_versioned_' - 'notifications']) + VERSIONED_NOTIFIER = messaging.Notifier( + NOTIFICATION_TRANSPORT, + serializer=serializer, + topics=CONF.versioned_notifications_topics) def cleanup(): diff -Nru ironic-12.0.0/ironic/common/rpc_service.py ironic-12.1.0/ironic/common/rpc_service.py --- ironic-12.0.0/ironic/common/rpc_service.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/rpc_service.py 2019-03-21 20:07:40.000000000 +0000 @@ -16,16 +16,19 @@ import signal +from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_service import service from oslo_utils import importutils from ironic.common import context +from ironic.common.json_rpc import server as json_rpc from ironic.common import rpc from ironic.objects import base as objects_base LOG = log.getLogger(__name__) +CONF = cfg.CONF class RPCService(service.Service): @@ -44,10 +47,14 @@ super(RPCService, self).start() admin_context = context.get_admin_context() - target = messaging.Target(topic=self.topic, server=self.host) - endpoints = [self.manager] serializer = objects_base.IronicObjectSerializer(is_server=True) - self.rpcserver = rpc.get_server(target, endpoints, serializer) + if CONF.rpc_transport == 'json-rpc': + self.rpcserver = json_rpc.WSGIService(self.manager, + serializer) + else: + target = messaging.Target(topic=self.topic, server=self.host) + endpoints = [self.manager] + self.rpcserver = rpc.get_server(target, endpoints, serializer) self.rpcserver.start() self.handle_signal() diff -Nru ironic-12.0.0/ironic/common/states.py ironic-12.1.0/ironic/common/states.py --- ironic-12.0.0/ironic/common/states.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/common/states.py 2019-03-21 20:07:40.000000000 +0000 @@ -223,7 +223,7 @@ UNRESCUEFAIL) """Transitional states in which we allow updating a node.""" -DELETE_ALLOWED_STATES = (AVAILABLE, MANAGEABLE, ENROLL, ADOPTFAIL) +DELETE_ALLOWED_STATES = (MANAGEABLE, ENROLL, ADOPTFAIL) """States in which node deletion is allowed.""" STABLE_STATES = (ENROLL, MANAGEABLE, AVAILABLE, ACTIVE, ERROR, RESCUE) @@ -243,6 +243,20 @@ to fail state. """ +_LOOKUP_ALLOWED_STATES = (DEPLOYING, DEPLOYWAIT, CLEANING, CLEANWAIT, + INSPECTING, RESCUING, RESCUEWAIT) +LOOKUP_ALLOWED_STATES = frozenset(_LOOKUP_ALLOWED_STATES) + +"""States when API lookups are normally allowed for nodes.""" + +_FASTTRACK_LOOKUP_ALLOWED_STATES = (ENROLL, MANAGEABLE, AVAILABLE, + DEPLOYING, DEPLOYWAIT, CLEANING, + CLEANWAIT, INSPECTING, RESCUING, + RESCUEWAIT) +FASTTRACK_LOOKUP_ALLOWED_STATES = frozenset(_FASTTRACK_LOOKUP_ALLOWED_STATES) +"""States where API lookups are permitted with fast track enabled.""" + + ############## # Power states ############## @@ -262,6 +276,14 @@ SOFT_POWER_OFF = 'soft power off' """ Node is in the process of soft power off. """ +################### +# Allocation states +################### + +ALLOCATING = 'allocating' + +# States ERROR and ACTIVE are reused. 
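The allocation states above suggest a simple client-side pattern: create an allocation, then poll it until it leaves 'allocating'. A rough sketch over the REST API (endpoint, token and resource class are hypothetical):

    import time
    import requests

    # Hypothetical endpoint/token; the allocation API appeared in version 1.52.
    headers = {"X-OpenStack-Ironic-API-Version": "1.52",
               "X-Auth-Token": "<token>"}
    base = "http://127.0.0.1:6385"

    alloc = requests.post(base + "/v1/allocations", headers=headers,
                          json={"resource_class": "baremetal"}).json()
    while alloc["state"] == "allocating":
        time.sleep(1)
        alloc = requests.get(base + "/v1/allocations/" + alloc["uuid"],
                             headers=headers).json()
    # On success, state == "active" and node_uuid is set; on failure,
    # state == "error" and last_error explains why.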
+ ##################### # State machine model diff -Nru ironic-12.0.0/ironic/conductor/allocations.py ironic-12.1.0/ironic/conductor/allocations.py --- ironic-12.0.0/ironic/conductor/allocations.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/conductor/allocations.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,233 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Functionality related to allocations.""" + +import random + +from ironic_lib import metrics_utils +from oslo_config import cfg +from oslo_log import log +import retrying + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import states +from ironic.conductor import task_manager +from ironic import objects + + +CONF = cfg.CONF +LOG = log.getLogger(__name__) +METRICS = metrics_utils.get_metrics_logger(__name__) + + +def do_allocate(context, allocation): + """Process the allocation. + + This call runs in a separate thread on a conductor. It finds suitable + nodes for the allocation and reserves one of them. + + This call does not raise exceptions since it's designed to work + asynchronously. + + :param context: an admin context + :param allocation: an allocation object + """ + try: + nodes = _candidate_nodes(context, allocation) + _allocate_node(context, allocation, nodes) + except exception.AllocationFailed as exc: + LOG.error(str(exc)) + _allocation_failed(allocation, exc) + except Exception as exc: + LOG.exception("Unexpected exception during processing of " + "allocation %s", allocation.uuid) + reason = _("Unexpected exception during allocation: %s") % exc + _allocation_failed(allocation, reason) + + +def verify_node_for_deallocation(node, allocation): + """Verify that allocation can be removed for the node. + + :param node: a node object + :param allocation: an allocation object associated with the node + """ + if node.maintenance: + # Allocations can always be removed in the maintenance mode. 
+ return + + if (node.target_provision_state + and node.provision_state not in states.UPDATE_ALLOWED_STATES): + msg = (_("Cannot remove allocation %(uuid)s for node %(node)s, " + "because the node is in state %(state)s where updates are " + "not allowed (and maintenance mode is off)") % + {'node': node.uuid, 'uuid': allocation.uuid, + 'state': node.provision_state}) + raise exception.InvalidState(msg) + + if node.provision_state == states.ACTIVE: + msg = (_("Cannot remove allocation %(uuid)s for node %(node)s, " + "because the node is active (and maintenance mode is off)") % + {'node': node.uuid, 'uuid': allocation.uuid}) + raise exception.InvalidState(msg) + + +def _allocation_failed(allocation, reason): + """Failure handler for the allocation.""" + try: + allocation.state = states.ERROR + allocation.last_error = str(reason) + allocation.save() + except Exception: + LOG.exception('Could not save the failed allocation %s', + allocation.uuid) + + +def _traits_match(traits, node): + return {t.trait for t in node.traits.objects}.issuperset(traits) + + +def _candidate_nodes(context, allocation): + """Get a list of candidate nodes for the allocation.""" + filters = {'resource_class': allocation.resource_class, + 'provision_state': states.AVAILABLE, + 'associated': False, + 'with_power_state': True, + 'maintenance': False} + if allocation.candidate_nodes: + # NOTE(dtantsur): we assume that candidate_nodes were converted to + # UUIDs on the API level. + filters['uuid_in'] = allocation.candidate_nodes + + nodes = objects.Node.list(context, filters=filters) + + if not nodes: + if allocation.candidate_nodes: + error = _("none of the requested nodes are available and match " + "the resource class %s") % allocation.resource_class + else: + error = _("no available nodes match the resource class %s") % ( + allocation.resource_class) + raise exception.AllocationFailed(uuid=allocation.uuid, error=error) + + # TODO(dtantsur): database-level filtering? + if allocation.traits: + traits = set(allocation.traits) + nodes = [n for n in nodes if _traits_match(traits, n)] + if not nodes: + error = (_("no suitable nodes have the requested traits %s") % + ', '.join(traits)) + raise exception.AllocationFailed(uuid=allocation.uuid, error=error) + + # NOTE(dtantsur): make sure that parallel allocations do not try the nodes + # in the same order. 
+ random.shuffle(nodes) + + LOG.debug('%(count)d nodes are candidates for allocation %(uuid)s', + {'count': len(nodes),
'uuid': allocation.uuid}) + return nodes + + +def _verify_node(node, allocation): + """Check that the node still
satisfies the request.""" + if node.maintenance: + LOG.debug('Node %s is now in maintenance, skipping', + node.uuid)
+ return False + + if node.instance_uuid: + LOG.debug('Node %(node)s is already associated with instance '
+ '%(inst)s, skipping', + {'node': node.uuid, 'inst': node.instance_uuid}) + return False + + if
node.provision_state != states.AVAILABLE: + LOG.debug('Node %s is no longer available, skipping', + node.uuid)
+ return False + + if node.resource_class != allocation.resource_class: + LOG.debug('Resource class of node %(node)s
no longer ' + 'matches requested resource class %(rsc)s for ' + 'allocation %(uuid)s, skipping', + {'node': node.uuid,
+ 'rsc': allocation.resource_class, + 'uuid': allocation.uuid}) + return False + + if allocation.traits and not
_traits_match(set(allocation.traits), node): + LOG.debug('List of traits of node %(node)s no longer ' + 'matches
requested traits %(traits)s for ' + 'allocation %(uuid)s, skipping', + {'node': node.uuid, + 'traits':
allocation.traits, + 'uuid': allocation.uuid}) + return False + + return True + + +# NOTE(dtantsur): instead of trying
to allocate each node +# node_locked_retry_attempts times, we try to allocate *any* node the same +# number of times.
This avoids getting stuck on a node reserved e.g. for the power +# sync periodic task. +@retrying.retry(
+ retry_on_exception=lambda e: isinstance(e, exception.AllocationFailed),
+ stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+ wait_fixed=CONF.conductor.node_locked_retry_interval * 1000) +def _allocate_node(context, allocation, nodes):
+ """Go through the list of nodes and try to allocate one of them.""" + retry_nodes = [] + for node in nodes: + try:
+ # NOTE(dtantsur): retries are done for all nodes above, so disable + # per-node retry. Also disable loading the
driver, since the + # current conductor may not have the required hardware type or + # interfaces (it's picked at
random). + with task_manager.acquire(context, node.uuid, shared=False, + retry=False, load_driver=False,
+ purpose='allocating') as task: + # NOTE(dtantsur): double-check the node details, since they + # could have changed
before we acquired the lock. + if not _verify_node(task.node, allocation): + continue + + allocation.node_id =
task.node.id + allocation.state = states.ACTIVE + # NOTE(dtantsur): the node.instance_uuid and allocation_id are
+ # updated inside of the save() call within the same + # transaction to avoid races. NodeAssociated can be raised if
+ # another process allocates this node first. + allocation.save() + LOG.info('Node %(node)s has been successfully
reserved for ' + 'allocation %(uuid)s', + {'node': node.uuid, 'uuid': allocation.uuid}) + return allocation + except
exception.NodeLocked: + LOG.debug('Node %s is currently locked, moving to the next one', + node.uuid)
+ retry_nodes.append(node) + except exception.NodeAssociated: + LOG.debug('Node %s is already associated, moving to
the next one', + node.uuid) + + # NOTE(dtantsur): rewrite the passed list to only contain the nodes that + # are worth
retrying. Do not include nodes that are no longer suitable.
+# NOTE(dtantsur): instead of trying to allocate each node
+# node_locked_retry_attempts times, we try to allocate *any* node the same
+# number of times. This avoids getting stuck on a node reserved e.g. for power
+# sync periodic task.
+@retrying.retry(
+    retry_on_exception=lambda e: isinstance(e, exception.AllocationFailed),
+    stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+    wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
+def _allocate_node(context, allocation, nodes):
+    """Go through the list of nodes and try to allocate one of them."""
+    retry_nodes = []
+    for node in nodes:
+        try:
+            # NOTE(dtantsur): retries are done for all nodes above, so disable
+            # per-node retry. Also disable loading the driver, since the
+            # current conductor may not have the required hardware type or
+            # interfaces (it's picked at random).
+            with task_manager.acquire(context, node.uuid, shared=False,
+                                      retry=False, load_driver=False,
+                                      purpose='allocating') as task:
+                # NOTE(dtantsur): double-check the node details, since they
+                # could have changed before we acquired the lock.
+                if not _verify_node(task.node, allocation):
+                    continue
+
+                allocation.node_id = task.node.id
+                allocation.state = states.ACTIVE
+                # NOTE(dtantsur): the node.instance_uuid and allocation_id are
+                # updated inside of the save() call within the same
+                # transaction to avoid races. NodeAssociated can be raised if
+                # another process allocates this node first.
+                allocation.save()
+                LOG.info('Node %(node)s has been successfully reserved for '
+                         'allocation %(uuid)s',
+                         {'node': node.uuid, 'uuid': allocation.uuid})
+                return allocation
+        except exception.NodeLocked:
+            LOG.debug('Node %s is currently locked, moving to the next one',
+                      node.uuid)
+            retry_nodes.append(node)
+        except exception.NodeAssociated:
+            LOG.debug('Node %s is already associated, moving to the next one',
+                      node.uuid)
+
+    # NOTE(dtantsur): rewrite the passed list to only contain the nodes that
+    # are worth retrying. Do not include nodes that are no longer suitable.
+    nodes[:] = retry_nodes
+
+    if nodes:
+        error = _('could not reserve any of %d suitable nodes') % len(nodes)
+    else:
+        error = _('all nodes were filtered out during reservation')
+
+    raise exception.AllocationFailed(uuid=allocation.uuid, error=error)
diff -Nru ironic-12.0.0/ironic/conductor/base_manager.py ironic-12.1.0/ironic/conductor/base_manager.py
--- ironic-12.0.0/ironic/conductor/base_manager.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conductor/base_manager.py	2019-03-21 20:07:40.000000000 +0000
@@ -33,6 +33,7 @@
 from ironic.common import release_mappings as versions
 from ironic.common import rpc
 from ironic.common import states
+from ironic.conductor import allocations
 from ironic.conductor import notification_utils as notify_utils
 from ironic.conductor import task_manager
 from ironic.conf import CONF
@@ -204,6 +205,13 @@
             LOG.critical('Failed to start keepalive')
             self.del_host()
 
+        # Resume allocations that started before the restart.
+        try:
+            self._spawn_worker(self._resume_allocations,
+                               ironic_context.get_admin_context())
+        except exception.NoFreeConductorWorker:
+            LOG.warning('Failed to start worker for resuming allocations.')
+
         self._started = True
 
     def _use_groups(self):
@@ -550,3 +558,11 @@
         finally:
             # Yield on every iteration
             eventlet.sleep(0)
+
+    def _resume_allocations(self, context):
+        """Resume unfinished allocations on restart."""
+        filters = {'state': states.ALLOCATING,
+                   'conductor_affinity': self.conductor.id}
+        for allocation in objects.Allocation.list(context, filters=filters):
+            LOG.debug('Resuming unfinished allocation %s', allocation.uuid)
+            allocations.do_allocate(context, allocation)
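The `_resume_allocations` hook above means a conductor restart does not strand half-finished allocations: anything still in the ALLOCATING state and pinned to this conductor is simply re-run. A reduced sketch with plain callables standing in for the object and worker layers (names hypothetical):

def resume_allocations(context, conductor_id, list_allocations, do_allocate):
    # conductor_affinity records which conductor started the allocation,
    # so each restarted conductor only picks up its own leftovers.
    filters = {'state': 'allocating', 'conductor_affinity': conductor_id}
    for allocation in list_allocations(context, filters=filters):
        do_allocate(context, allocation)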
diff -Nru ironic-12.0.0/ironic/conductor/manager.py ironic-12.1.0/ironic/conductor/manager.py
--- ironic-12.0.0/ironic/conductor/manager.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conductor/manager.py	2019-03-21 20:07:40.000000000 +0000
@@ -66,8 +66,10 @@
 from ironic.common import release_mappings as versions
 from ironic.common import states
 from ironic.common import swift
+from ironic.conductor import allocations
 from ironic.conductor import base_manager
 from ironic.conductor import notification_utils as notify_utils
+from ironic.conductor import steps as conductor_steps
 from ironic.conductor import task_manager
 from ironic.conductor import utils
 from ironic.conf import CONF
@@ -100,7 +102,7 @@
     # NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
     # NOTE(pas-ha): This also must be in sync with
     # ironic.common.release_mappings.RELEASE_MAPPING['master']
-    RPC_API_VERSION = '1.47'
+    RPC_API_VERSION = '1.48'
 
     target = messaging.Target(version=RPC_API_VERSION)
 
@@ -219,7 +221,12 @@
 
         driver_factory.check_and_update_node_interfaces(node_obj)
 
+        # NOTE(dtantsur): if we're updating the driver from an invalid value,
+        # loading the old driver may be impossible. Since we only need to
+        # update the node record in the database, skip loading the driver
+        # completely.
         with task_manager.acquire(context, node_id, shared=False,
+                                  load_driver=False,
                                   purpose='node update') as task:
             # Prevent instance_uuid overwriting
             if ('instance_uuid' in delta and node_obj.instance_uuid
@@ -239,6 +246,25 @@
                        'allowed': ', '.join(allowed_update_states),
                        'field': 'resource_class'})
 
+            if ('instance_uuid' in delta and task.node.allocation_id
+                    and not node_obj.instance_uuid):
+                if (not task.node.maintenance and task.node.provision_state
+                        not in allowed_update_states):
+                    action = _("Node %(node)s with an allocation cannot have "
+                               "instance_uuid removed unless it is in one of "
+                               "allowed (%(allowed)s) states or in "
+                               "maintenance mode.")
+                    raise exception.InvalidState(
+                        action % {'node': node_obj.uuid,
+                                  'allowed': ', '.join(allowed_update_states)})
+
+                try:
+                    allocation = objects.Allocation.get_by_id(
+                        context, task.node.allocation_id)
+                    allocation.destroy()
+                except exception.AllocationNotFound:
+                    pass
+
             node_obj.save()
 
         return node_obj
@@ -828,6 +854,7 @@
             task.driver.power.validate(task)
             task.driver.deploy.validate(task)
             utils.validate_instance_info_traits(task.node)
+            conductor_steps.validate_deploy_templates(task)
         except exception.InvalidParameterValue as e:
             raise exception.InstanceDeployFailure(
                 _("Failed to validate deploy or power info for node "
@@ -1018,6 +1045,13 @@
             node.driver_internal_info = driver_internal_info
             network.remove_vifs_from_node(task)
             node.save()
+            if node.allocation_id:
+                allocation = objects.Allocation.get_by_id(task.context,
+                                                          node.allocation_id)
+                allocation.destroy()
+                # The destroy() call above removes allocation_id and
+                # instance_uuid, refresh the node to get these changes.
+                node.refresh()
 
             # Begin cleaning
             task.process_event('clean')
@@ -1305,7 +1339,7 @@
             return
 
         try:
-            utils.set_node_cleaning_steps(task)
+            conductor_steps.set_node_cleaning_steps(task)
         except (exception.InvalidParameterValue,
                 exception.NodeCleaningFailure) as e:
             msg = (_('Cannot clean node %(node)s. Error: %(msg)s')
@@ -1631,10 +1665,48 @@
     @periodics.periodic(spacing=CONF.conductor.sync_power_state_interval,
                         enabled=CONF.conductor.sync_power_state_interval > 0)
     def _sync_power_states(self, context):
-        """Periodic task to sync power states for the nodes.
+        """Periodic task to sync power states for the nodes."""
+        filters = {'maintenance': False}
 
-        Attempt to grab a lock and sync only if the following
-        conditions are met:
+        # NOTE(etingof): prioritize non-responding nodes to fail them fast
+        nodes = sorted(
+            self.iter_nodes(fields=['id'], filters=filters),
+            key=lambda n: -self.power_state_sync_count.get(n[0], 0)
+        )
+
+        nodes_queue = queue.Queue()
+
+        for node_info in nodes:
+            nodes_queue.put(node_info)
+
+        number_of_workers = min(CONF.conductor.sync_power_state_workers,
+                                CONF.conductor.periodic_max_workers,
+                                nodes_queue.qsize())
+        futures = []
+
+        for worker_number in range(max(0, number_of_workers - 1)):
+            try:
+                futures.append(
+                    self._spawn_worker(self._sync_power_state_nodes_task,
+                                       context, nodes_queue))
+            except exception.NoFreeConductorWorker:
+                LOG.warning("There are no more conductor workers for "
+                            "power sync task. %(workers)d workers have "
+                            "already been spawned.",
+                            {'workers': worker_number})
+                break
+
+        try:
+            self._sync_power_state_nodes_task(context, nodes_queue)
+
+        finally:
+            waiters.wait_for_all(futures)
+
+    def _sync_power_state_nodes_task(self, context, nodes):
+        """Invokes power state sync on nodes from synchronized queue.
+
+        Attempt to grab a lock and sync only if the following conditions
+        are met:
 
         1) Node is mapped to this conductor.
         2) Node is not in maintenance mode.
@@ -1660,9 +1732,13 @@
         # (through to its DB API call) so that we can eliminate our call
         # and first set of checks below.
 
-        filters = {'maintenance': False}
-        node_iter = self.iter_nodes(fields=['id'], filters=filters)
-        for (node_uuid, driver, conductor_group, node_id) in node_iter:
+        while not self._shutdown:
+            try:
+                (node_uuid, driver, conductor_group,
+                 node_id) = nodes.get_nowait()
+            except queue.Empty:
+                break
+
             try:
                 # NOTE(dtantsur): start with a shared lock, upgrade if needed
                 with task_manager.acquire(context, node_uuid,
@@ -2130,6 +2206,7 @@
                     iface.validate(task)
                     if iface_name == 'deploy':
                         utils.validate_instance_info_traits(task.node)
+                        conductor_steps.validate_deploy_templates(task)
                     result = True
                 except (exception.InvalidParameterValue,
                         exception.UnsupportedDriverExtension) as e:
@@ -2193,15 +2270,18 @@
         # CLEANFAIL -> MANAGEABLE
         # INSPECTIONFAIL -> MANAGEABLE
         # DEPLOYFAIL -> DELETING
+        delete_allowed_states = states.DELETE_ALLOWED_STATES
+        if CONF.conductor.allow_deleting_available_nodes:
+            delete_allowed_states += (states.AVAILABLE,)
         if (not node.maintenance
                 and node.provision_state
-                not in states.DELETE_ALLOWED_STATES):
+                not in delete_allowed_states):
             msg = (_('Can not delete node "%(node)s" while it is in '
                      'provision state "%(state)s". Valid provision states '
                      'to perform deletion are: "%(valid_states)s", '
                      'or set the node into maintenance mode') %
                    {'node': node.uuid, 'state': node.provision_state,
-                    'valid_states': states.DELETE_ALLOWED_STATES})
+                    'valid_states': delete_allowed_states})
             raise exception.InvalidState(msg)
         if node.console_enabled:
             notify_utils.emit_console_notification(
@@ -2753,8 +2833,7 @@
             message = {'message_id': uuidutils.generate_uuid(),
                        'instance_uuid': instance_uuid,
                        'node_uuid': node_uuid,
-                       'timestamp': datetime.datetime.utcnow(),
-                       'event_type': 'hardware.ipmi.metrics.update'}
+                       'timestamp': datetime.datetime.utcnow()}
 
         try:
             lock_purpose = 'getting sensors data'
@@ -2767,6 +2846,16 @@
                               '%s as it is in maintenance mode',
                               task.node.uuid)
                     continue
+                # Add the node name, as the name would be handy for other
+                # notifier plugins
+                message['node_name'] = task.node.name
+                # We should convey the proper hardware type,
+                # which previously was hard coded to ipmi, but other
+                # drivers were transmitting other values under the
+                # guise of ipmi.
+                ev_type = 'hardware.{driver}.metrics'.format(
+                    driver=task.node.driver)
+                message['event_type'] = ev_type + '.update'
 
                 task.driver.management.validate(task)
                 sensors_data = task.driver.management.get_sensors_data(
@@ -2800,7 +2889,7 @@
                     self._filter_out_unsupported_types(sensors_data))
                 if message['payload']:
                     self.sensors_notifier.info(
-                        context, "hardware.ipmi.metrics", message)
+                        context, ev_type, message)
             finally:
                 # Yield on every iteration
                 eventlet.sleep(0)
@@ -3405,6 +3494,84 @@
                 objects.Trait.destroy(context, node_id=node_id,
                                       trait=trait)
 
+    @METRICS.timer('ConductorManager.create_allocation')
+    @messaging.expected_exceptions(exception.InvalidParameterValue)
+    def create_allocation(self, context, allocation):
+        """Create an allocation in the database.
+
+        :param context: an admin context
+        :param allocation: a created (but not saved to the database)
+            allocation object.
+        :returns: created allocation object.
+        :raises: InvalidParameterValue if some fields fail validation.
+        """
+        LOG.debug("RPC create_allocation called for allocation %s.",
+                  allocation.uuid)
+        allocation.conductor_affinity = self.conductor.id
+        allocation.create()
+
+        # Spawn an asynchronous worker to process the allocation. Copy it to
+        # avoid data races.
+        self._spawn_worker(allocations.do_allocate,
+                           context, allocation.obj_clone())
+
+        # Return the unfinished allocation
+        return allocation
+
+    @METRICS.timer('ConductorManager.destroy_allocation')
+    @messaging.expected_exceptions(exception.InvalidState)
+    def destroy_allocation(self, context, allocation):
+        """Delete an allocation.
+
+        :param context: request context.
+        :param allocation: allocation object.
+        :raises: InvalidState if the associated node is in the wrong provision
+            state to perform deallocation.
+        """
+        if allocation.node_id:
+            with task_manager.acquire(context, allocation.node_id,
+                                      purpose='allocation deletion',
+                                      shared=False) as task:
+                allocations.verify_node_for_deallocation(task.node, allocation)
+                # NOTE(dtantsur): remove the allocation while still holding
+                # the node lock to avoid races.
+                allocation.destroy()
+        else:
+            allocation.destroy()
+
+        LOG.info('Successfully deleted allocation %s', allocation.uuid)
+
+    @METRICS.timer('ConductorManager._check_orphan_allocations')
+    @periodics.periodic(
+        spacing=CONF.conductor.check_allocations_interval,
+        enabled=CONF.conductor.check_allocations_interval > 0)
+    def _check_orphan_allocations(self, context):
+        """Periodically checks the status of allocations that were taken over.
+
+        Periodically checks the allocations assigned to a conductor that
+        went offline, tries to take them over and finish them.
+
+        :param context: request context.
+        """
+        offline_conductors = self.dbapi.get_offline_conductors(field='id')
+        for conductor_id in offline_conductors:
+            filters = {'state': states.ALLOCATING,
+                       'conductor_affinity': conductor_id}
+            for allocation in objects.Allocation.list(context,
+                                                      filters=filters):
+                try:
+                    if not self.dbapi.take_over_allocation(allocation.id,
+                                                           conductor_id,
+                                                           self.conductor.id):
+                        # Another conductor has taken over, skipping
+                        continue
+
+                    LOG.debug('Taking over allocation %s', allocation.uuid)
+                    allocations.do_allocate(context, allocation)
+                except Exception:
+                    LOG.exception('Unexpected exception when taking over '
+                                  'allocation %s', allocation.uuid)
+
 
 @METRICS.timer('get_vendor_passthru_metadata')
 def get_vendor_passthru_metadata(route_dict):
@@ -3469,6 +3636,8 @@
 
     try:
         if configdrive:
+            if isinstance(configdrive, dict):
+                configdrive = utils.build_configdrive(node, configdrive)
             _store_configdrive(node, configdrive)
     except (exception.SwiftOperationError, exception.ConfigInvalid) as e:
         with excutils.save_and_reraise_exception():
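With the hunk above, `do_node_deploy` now also accepts a configdrive given as a dict and renders it via `utils.build_configdrive` (added later in this patch). A hypothetical payload illustrating the accepted shape; all three keys are optional:

configdrive = {
    # uuid/name in meta_data default to the node's own values.
    'meta_data': {'hostname': 'example-node-0'},
    'network_data': {'links': [], 'networks': [], 'services': []},
    # dict or list user_data is JSON-encoded before being packed.
    'user_data': {'message': 'example'},
}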
@@ -3524,7 +3693,7 @@
     try:
         # This gets the deploy steps (if any) and puts them in the node's
         # driver_internal_info['deploy_steps'].
-        utils.set_node_deployment_steps(task)
+        conductor_steps.set_node_deployment_steps(task)
     except exception.InstanceDeployFailure as e:
         with excutils.save_and_reraise_exception():
             utils.deploying_error_handler(
diff -Nru ironic-12.0.0/ironic/conductor/rpcapi.py ironic-12.1.0/ironic/conductor/rpcapi.py
--- ironic-12.0.0/ironic/conductor/rpcapi.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conductor/rpcapi.py	2019-03-21 20:07:40.000000000 +0000
@@ -25,10 +25,12 @@
 from ironic.common import exception
 from ironic.common import hash_ring
 from ironic.common.i18n import _
+from ironic.common.json_rpc import client as json_rpc
 from ironic.common import release_mappings as versions
 from ironic.common import rpc
 from ironic.conductor import manager
 from ironic.conf import CONF
+from ironic.db import api as dbapi
 from ironic.objects import base as objects_base
 
 
@@ -96,13 +98,14 @@
     |    1.45 - Added continue_node_deploy
     |    1.46 - Added reset_interfaces to update_node
     |    1.47 - Added support for conductor groups
+    |    1.48 - Added allocation API
 
     """
 
     # NOTE(rloo): This must be in sync with manager.ConductorManager's.
     # NOTE(pas-ha): This also must be in sync with
     # ironic.common.release_mappings.RELEASE_MAPPING['master']
-    RPC_API_VERSION = '1.47'
+    RPC_API_VERSION = '1.48'
 
     def __init__(self, topic=None):
         super(ConductorAPI, self).__init__()
@@ -110,14 +113,19 @@
         if self.topic is None:
             self.topic = manager.MANAGER_TOPIC
 
-        target = messaging.Target(topic=self.topic,
-                                  version='1.0')
         serializer = objects_base.IronicObjectSerializer()
         release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
         version_cap = (release_ver['rpc'] if release_ver
                        else self.RPC_API_VERSION)
+
+        if CONF.rpc_transport == 'json-rpc':
+            self.client = json_rpc.Client(serializer=serializer,
+                                          version_cap=version_cap)
+            self.topic = ''
+        else:
+            target = messaging.Target(topic=self.topic, version='1.0')
+            self.client = rpc.get_client(target, version_cap=version_cap,
+                                         serializer=serializer)
 
         use_groups = self.client.can_send_version('1.47')
         # NOTE(deva): this is going to be buggy
@@ -154,6 +162,16 @@
         hostname = self.get_conductor_for(node)
         return '%s.%s' % (self.topic, hostname)
 
+    def get_random_topic(self):
+        """Get an RPC topic for a random conductor service."""
+        conductors = dbapi.get_instance().get_online_conductors()
+        try:
+            hostname = random.choice(conductors)
+        except IndexError:
+            # There are no conductors - return 503 Service Unavailable
+            raise exception.TemporaryFailure()
+        return '%s.%s' % (self.topic, hostname)
+
     def get_topic_for_driver(self, driver_name):
         """Get RPC topic name for a conductor supporting the given driver.
 
@@ -1094,3 +1112,25 @@
         cctxt = self.client.prepare(topic=topic or self.topic, version='1.44')
         return cctxt.call(context, 'remove_node_traits', node_id=node_id,
                           traits=traits)
+
+    def create_allocation(self, context, allocation, topic=None):
+        """Create an allocation.
+
+        :param context: request context.
+        :param allocation: an allocation object.
+        :param topic: RPC topic. Defaults to self.topic.
+        """
+        cctxt = self.client.prepare(topic=topic or self.topic, version='1.48')
+        return cctxt.call(context, 'create_allocation', allocation=allocation)
+
+    def destroy_allocation(self, context, allocation, topic=None):
+        """Delete an allocation.
+
+        :param context: request context.
+        :param allocation: an allocation object.
+        :param topic: RPC topic. Defaults to self.topic.
+ :raises: InvalidState if the associated node is in the wrong provision + state to perform deallocation. + """ + cctxt = self.client.prepare(topic=topic or self.topic, version='1.48') + return cctxt.call(context, 'destroy_allocation', allocation=allocation) diff -Nru ironic-12.0.0/ironic/conductor/steps.py ironic-12.1.0/ironic/conductor/steps.py --- ironic-12.0.0/ironic/conductor/steps.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/conductor/steps.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,601 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from oslo_config import cfg +from oslo_log import log + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import states +from ironic.objects import deploy_template + +LOG = log.getLogger(__name__) +CONF = cfg.CONF + + +CLEANING_INTERFACE_PRIORITY = { + # When two clean steps have the same priority, their order is determined + # by which interface is implementing the clean step. The clean step of the + # interface with the highest value here, will be executed first in that + # case. + 'power': 5, + 'management': 4, + 'deploy': 3, + 'bios': 2, + 'raid': 1, +} + +DEPLOYING_INTERFACE_PRIORITY = { + # When two deploy steps have the same priority, their order is determined + # by which interface is implementing the step. The step of the interface + # with the highest value here, will be executed first in that case. + # TODO(rloo): If we think it makes sense to have the interface priorities + # the same for cleaning & deploying, replace the two with one e.g. + # 'INTERFACE_PRIORITIES'. + 'power': 5, + 'management': 4, + 'deploy': 3, + 'bios': 2, + 'raid': 1, +} + + +def _clean_step_key(step): + """Sort by priority, then interface priority in event of tie. + + :param step: cleaning step dict to get priority for. + """ + return (step.get('priority'), + CLEANING_INTERFACE_PRIORITY[step.get('interface')]) + + +def _deploy_step_key(step): + """Sort by priority, then interface priority in event of tie. + + :param step: deploy step dict to get priority for. + """ + return (step.get('priority'), + DEPLOYING_INTERFACE_PRIORITY[step.get('interface')]) + + +def _sorted_steps(steps, sort_step_key): + """Return a sorted list of steps. + + :param sort_step_key: If set, this is a method (key) used to sort the steps + from highest priority to lowest priority. For steps having the same + priority, they are sorted from highest interface priority to lowest. + :returns: A list of sorted step dictionaries. + """ + # Sort the steps from higher priority to lower priority + return sorted(steps, key=sort_step_key, reverse=True) + + +def _get_steps(task, interfaces, get_method, enabled=False, + sort_step_key=None): + """Get steps for task.node. + + :param task: A TaskManager object + :param interfaces: A dictionary of (key) interfaces and their + (value) priorities. These are the interfaces that will have steps of + interest. 
        The priorities are used to break ties between steps that have the
        same step priority.
+    :param get_method: The method used to get the steps from the node's
+        interface; a string.
+    :param enabled: If True, returns only enabled (priority > 0) steps. If
+        False, returns all steps.
+    :param sort_step_key: If set, this is a method (key) used to sort the
+        steps from highest priority to lowest priority. For steps having the
+        same priority, they are sorted from highest interface priority to
+        lowest.
+    :raises: NodeCleaningFailure or InstanceDeployFailure if there was a
+        problem getting the steps.
+    :returns: A list of step dictionaries
+    """
+    # Get steps from each interface
+    steps = list()
+    for interface in interfaces:
+        interface = getattr(task.driver, interface)
+        if interface:
+            interface_steps = [x for x in getattr(interface, get_method)(task)
+                               if not enabled or x['priority'] > 0]
+            steps.extend(interface_steps)
+    if sort_step_key:
+        steps = _sorted_steps(steps, sort_step_key)
+    return steps
+
+
+def _get_cleaning_steps(task, enabled=False, sort=True):
+    """Get cleaning steps for task.node.
+
+    :param task: A TaskManager object
+    :param enabled: If True, returns only enabled (priority > 0) steps. If
+        False, returns all clean steps.
+    :param sort: If True, the steps are sorted from highest priority to lowest
+        priority. For steps having the same priority, they are sorted from
+        highest interface priority to lowest.
+    :raises: NodeCleaningFailure if there was a problem getting the
+        clean steps.
+    :returns: A list of clean step dictionaries
+    """
+    sort_key = _clean_step_key if sort else None
+    return _get_steps(task, CLEANING_INTERFACE_PRIORITY, 'get_clean_steps',
+                      enabled=enabled, sort_step_key=sort_key)
+
+
+def _get_deployment_steps(task, enabled=False, sort=True):
+    """Get deployment steps for task.node.
+
+    :param task: A TaskManager object
+    :param enabled: If True, returns only enabled (priority > 0) steps. If
+        False, returns all deploy steps.
+    :param sort: If True, the steps are sorted from highest priority to lowest
+        priority. For steps having the same priority, they are sorted from
+        highest interface priority to lowest.
+    :raises: InstanceDeployFailure if there was a problem getting the
+        deploy steps.
+    :returns: A list of deploy step dictionaries
+    """
+    sort_key = _deploy_step_key if sort else None
+    return _get_steps(task, DEPLOYING_INTERFACE_PRIORITY, 'get_deploy_steps',
+                      enabled=enabled, sort_step_key=sort_key)
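To make the two-level ordering above concrete: steps sort by priority first, and the interface ranking only breaks ties. A small self-contained example using the same key shape as `_clean_step_key`:

CLEANING_INTERFACE_PRIORITY = {'power': 5, 'management': 4, 'deploy': 3,
                               'bios': 2, 'raid': 1}

def clean_step_key(step):
    return (step['priority'], CLEANING_INTERFACE_PRIORITY[step['interface']])

steps = [{'step': 'a', 'interface': 'deploy', 'priority': 10},
         {'step': 'b', 'interface': 'power', 'priority': 10},
         {'step': 'c', 'interface': 'raid', 'priority': 20}]
# Highest priority first; 'b' beats 'a' only via the interface rank.
ordered = [s['step'] for s in sorted(steps, key=clean_step_key, reverse=True)]
assert ordered == ['c', 'b', 'a']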
+def set_node_cleaning_steps(task):
+    """Set up the node with clean step information for cleaning.
+
+    For automated cleaning, get the clean steps from the driver.
+    For manual cleaning, the user's clean steps are known but need to be
+    validated against the driver's clean steps.
+
+    :raises: InvalidParameterValue if there is a problem with the user's
+        clean steps.
+    :raises: NodeCleaningFailure if there was a problem getting the
+        clean steps.
+    """
+    node = task.node
+    driver_internal_info = node.driver_internal_info
+
+    # For manual cleaning, the target provision state is MANAGEABLE, whereas
+    # for automated cleaning, it is AVAILABLE.
+    manual_clean = node.target_provision_state == states.MANAGEABLE
+
+    if not manual_clean:
+        # Get the prioritized steps for automated cleaning
+        driver_internal_info['clean_steps'] = _get_cleaning_steps(task,
+                                                                  enabled=True)
+    else:
+        # For manual cleaning, the list of cleaning steps was specified by the
+        # user and already saved in node.driver_internal_info['clean_steps'].
+        # Now that we know what the driver's available clean steps are, we can
+        # do further checks to validate the user's clean steps.
+        steps = node.driver_internal_info['clean_steps']
+        driver_internal_info['clean_steps'] = (
+            _validate_user_clean_steps(task, steps))
+
+    node.clean_step = {}
+    driver_internal_info['clean_step_index'] = None
+    node.driver_internal_info = driver_internal_info
+    node.save()
+
+
+def _get_deployment_templates(task):
+    """Get deployment templates for task.node.
+
+    Return deployment templates where the name of the deployment template
+    matches one of the node's instance traits (the subset of the node's traits
+    requested by the user via a flavor or image).
+
+    :param task: A TaskManager object
+    :returns: a list of DeployTemplate objects.
+    """
+    node = task.node
+    if not node.instance_info.get('traits'):
+        return []
+    instance_traits = node.instance_info['traits']
+    return deploy_template.DeployTemplate.list_by_names(task.context,
+                                                        instance_traits)
+
+
+def _get_steps_from_deployment_templates(task, templates):
+    """Get deployment template steps for task.node.
+
+    Given a list of deploy template objects, return a list of all deploy steps
+    combined.
+
+    :param task: A TaskManager object
+    :param templates: a list of deploy templates
+    :returns: A list of deploy step dictionaries
+    """
+    steps = []
+    # NOTE(mgoddard): The steps from the object include id, created_at, etc.,
+    # which we don't want to include when we assign them to
+    # node.driver_internal_info. Include only the relevant fields.
+    step_fields = ('interface', 'step', 'args', 'priority')
+    for template in templates:
+        steps.extend([{key: step[key] for key in step_fields}
+                      for step in template.steps])
+    return steps
+
+
+def _get_validated_steps_from_templates(task):
+    """Return a list of validated deploy steps from deploy templates.
+
+    Deployment template steps are those steps defined in deployment templates
+    where the name of the deployment template matches one of the node's
+    instance traits (the subset of the node's traits requested by the user via
+    a flavor or image). There may be many such matching templates, each with a
+    list of steps to execute.
+
+    This method gathers the steps from all matching deploy templates for a
+    node, and validates those steps against the node's driver interfaces,
+    raising an error if validation fails.
+
+    :param task: A TaskManager object
+    :raises: InvalidParameterValue if validation of steps fails.
+    :raises: InstanceDeployFailure if there was a problem getting the
+        deploy steps.
+    :returns: A list of validated deploy step dictionaries
+    """
+    # Gather deploy templates matching the node's instance traits.
+    templates = _get_deployment_templates(task)
+
+    # Gather deploy steps from deploy templates.
+    user_steps = _get_steps_from_deployment_templates(task, templates)
+
+    # Validate the steps.
+    error_prefix = (_('Validation of deploy steps from deploy templates '
+                      'matching this node\'s instance traits failed. Matching '
+                      'deploy templates: %(templates)s. Errors: ') %
+                    {'templates': ','.join(t.name for t in templates)})
+    return _validate_user_deploy_steps(task, user_steps,
+                                       error_prefix=error_prefix)
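The field-stripping in `_get_steps_from_deployment_templates` above keeps only the four keys that belong in `driver_internal_info`. A standalone illustration with a template faked as a plain dict:

step_fields = ('interface', 'step', 'args', 'priority')
template = {'steps': [{'interface': 'bios', 'step': 'apply_configuration',
                       'args': {'settings': []}, 'priority': 150,
                       'id': 42, 'created_at': '2019-03-21'}]}
steps = [{key: step[key] for key in step_fields}
         for step in template['steps']]
# Database bookkeeping fields (id, created_at, ...) are dropped.
assert steps == [{'interface': 'bios', 'step': 'apply_configuration',
                  'args': {'settings': []}, 'priority': 150}]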
+
+
+def _get_all_deployment_steps(task):
+    """Get deployment steps for task.node.
+
+    Deployment steps from matching deployment templates are combined with
+    those from driver interfaces, and all enabled steps are returned in
+    priority order.
+
+    :param task: A TaskManager object
+    :raises: InstanceDeployFailure if there was a problem getting the
+        deploy steps.
+    :returns: A list of deploy step dictionaries
+    """
+    # Gather deploy steps from deploy templates and validate.
+    # NOTE(mgoddard): although we've probably just validated the templates in
+    # do_node_deploy, they may have changed in the DB since we last checked,
+    # so validate again.
+    user_steps = _get_validated_steps_from_templates(task)
+
+    # Gather enabled deploy steps from drivers.
+    driver_steps = _get_deployment_steps(task, enabled=True, sort=False)
+
+    # Remove driver steps that have been disabled or overridden by user steps.
+    user_step_keys = {(s['interface'], s['step']) for s in user_steps}
+    steps = [s for s in driver_steps
+             if (s['interface'], s['step']) not in user_step_keys]
+
+    # Add enabled user steps.
+    enabled_user_steps = [s for s in user_steps if s['priority'] > 0]
+    steps.extend(enabled_user_steps)
+
+    return _sorted_steps(steps, _deploy_step_key)
+
+
+def set_node_deployment_steps(task):
+    """Set up the node with deployment step information for deploying.
+
+    Get the deploy steps from the driver.
+
+    :raises: InstanceDeployFailure if there was a problem getting the
+        deployment steps.
+    """
+    node = task.node
+    driver_internal_info = node.driver_internal_info
+    driver_internal_info['deploy_steps'] = _get_all_deployment_steps(task)
+    node.deploy_step = {}
+    driver_internal_info['deploy_step_index'] = None
+    node.driver_internal_info = driver_internal_info
+    node.save()
+
+
+def _step_id(step):
+    """Return the 'ID' of a deploy step.
+
+    The ID is a string, <interface>.<step>.
+
+    :param step: the step dictionary.
+    :return: the step's ID string.
+    """
+    return '.'.join([step['interface'], step['step']])
+
+
+def _validate_deploy_steps_unique(user_steps):
+    """Validate that deploy steps from deploy templates are unique.
+
+    :param user_steps: a list of user steps. A user step is a dictionary
+        with required keys 'interface', 'step', 'args', and 'priority'::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_step>,
+          'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
+          'priority': <priority_of_step> }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'args': {'force': True},
+          'priority': 10 }
+
+    :return: a list of validation error strings for the steps.
+    """
+    # Check for duplicate steps. Each interface/step combination can be
+    # specified at most once.
+    errors = []
+    counter = collections.Counter(_step_id(step) for step in user_steps)
+    duplicates = {step_id for step_id, count in counter.items() if count > 1}
+    if duplicates:
+        err = (_('deploy steps from all deploy templates matching this '
+                 'node\'s instance traits cannot have the same interface '
+                 'and step. Duplicate deploy steps for %(duplicates)s') %
+               {'duplicates': ', '.join(duplicates)})
+        errors.append(err)
+    return errors
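The duplicate check above leans on `collections.Counter`: any `<interface>.<step>` identifier seen more than once across the combined templates is an error. The same logic in isolation:

import collections

ids = ['bios.apply_configuration', 'deploy.deploy',
       'bios.apply_configuration']
counter = collections.Counter(ids)
duplicates = {step_id for step_id, count in counter.items() if count > 1}
assert duplicates == {'bios.apply_configuration'}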
+
+
+def _validate_user_step(task, user_step, driver_step, step_type):
+    """Validate a user-specified step.
+
+    :param task: A TaskManager object
+    :param user_step: a user step dictionary with required keys 'interface'
+        and 'step', and optional keys 'args' and 'priority'::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_step>,
+          'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
+          'priority': <priority_of_step> }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'args': {'force': True} }
+
+    :param driver_step: a driver step dictionary::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_step>,
+          'priority': <priority_of_step>,
+          'abortable': Optional for clean steps, absent for deploy steps.
+              <Boolean>.
+          'argsinfo': Optional. A dictionary of
+              {<arg_name>: <arg_info>} entries.
+              <arg_info> is a dictionary with
+                  { 'description': <description>,
+                    'required': <Boolean> } }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'priority': 10,
+          'abortable': True,
+          'argsinfo': {
+              'force': { 'description': 'Whether to force the upgrade',
+                         'required': False } } }
+
+    :param step_type: either 'clean' or 'deploy'.
+    :return: a list of validation error strings for the step.
+    """
+    errors = []
+    # Check that the user-specified arguments are valid
+    argsinfo = driver_step.get('argsinfo') or {}
+    user_args = user_step.get('args') or {}
+    unexpected = set(user_args) - set(argsinfo)
+    if unexpected:
+        error = (_('%(type)s step %(step)s has these unexpected arguments: '
+                   '%(unexpected)s') %
+                 {'type': step_type, 'step': user_step,
+                  'unexpected': ', '.join(unexpected)})
+        errors.append(error)
+
+    if step_type == 'clean' or user_step['priority'] > 0:
+        # Check that all required arguments were specified by the user
+        missing = []
+        for (arg_name, arg_info) in argsinfo.items():
+            if arg_info.get('required', False) and arg_name not in user_args:
+                msg = arg_name
+                if arg_info.get('description'):
+                    msg += ' (%(desc)s)' % {'desc': arg_info['description']}
+                missing.append(msg)
+        if missing:
+            error = (_('%(type)s step %(step)s is missing these required '
+                       'arguments: %(miss)s') %
+                     {'type': step_type, 'step': user_step,
+                      'miss': ', '.join(missing)})
+            errors.append(error)
+
+    if step_type == 'clean':
+        # Copy fields that should not be provided by a user
+        user_step['abortable'] = driver_step.get('abortable', False)
+        user_step['priority'] = driver_step.get('priority', 0)
+    elif user_step['priority'] > 0:
+        # 'core' deploy steps can only be disabled.
+
+        # NOTE(mgoddard): we'll need something a little more sophisticated to
+        # track core steps once we split out the single core step.
+        is_core = (driver_step['interface'] == 'deploy' and
+                   driver_step['step'] == 'deploy')
+        if is_core:
+            error = (_('deploy step %(step)s on interface %(interface)s is a '
+                       'core step and cannot be overridden by user steps. It '
+                       'may be disabled by setting the priority to 0') %
+                     {'step': user_step['step'],
+                      'interface': user_step['interface']})
+            errors.append(error)
+
+    return errors
+
+
+def _validate_user_steps(task, user_steps, driver_steps, step_type,
+                         error_prefix=None):
+    """Validate the user-specified steps.
+
+    :param task: A TaskManager object
+    :param user_steps: a list of user steps. A user step is a dictionary
+        with required keys 'interface' and 'step', and optional keys 'args'
+        and 'priority'::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_step>,
+          'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
+          'priority': <priority_of_step> }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'args': {'force': True} }
+
+    :param driver_steps: a list of driver steps::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_step>,
+          'priority': <priority_of_step>,
+          'abortable': Optional for clean steps, absent for deploy steps.
+              <Boolean>.
+          'argsinfo': Optional. A dictionary of
+              {<arg_name>: <arg_info>} entries.
+              <arg_info> is a dictionary with
+                  { 'description': <description>,
+                    'required': <Boolean> } }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'priority': 10,
+          'abortable': True,
+          'argsinfo': {
+              'force': { 'description': 'Whether to force the upgrade',
+                         'required': False } } }
+
+    :param step_type: either 'clean' or 'deploy'.
+    :param error_prefix: String to use as a prefix for exception messages, or
+        None.
+    :raises: InvalidParameterValue if validation of steps fails.
+    :raises: NodeCleaningFailure or InstanceDeployFailure if
+        there was a problem getting the steps from the driver.
+    :return: validated steps updated with information from the driver
+    """
+    errors = []
+
+    # Convert driver steps to a dict.
+    driver_steps = {_step_id(s): s for s in driver_steps}
+
+    for user_step in user_steps:
+        # Check if this user-specified step isn't supported by the driver
+        try:
+            driver_step = driver_steps[_step_id(user_step)]
+        except KeyError:
+            error = (_('node does not support this %(type)s step: %(step)s')
+                     % {'type': step_type, 'step': user_step})
+            errors.append(error)
+            continue
+
+        step_errors = _validate_user_step(task, user_step, driver_step,
+                                          step_type)
+        errors.extend(step_errors)
+
+    if step_type == 'deploy':
+        # Deploy steps should be unique across all combined templates.
+        dup_errors = _validate_deploy_steps_unique(user_steps)
+        errors.extend(dup_errors)
+
+    if errors:
+        err = error_prefix or ''
+        err += '; '.join(errors)
+        raise exception.InvalidParameterValue(err=err)
+
+    return user_steps
+
+
+def _validate_user_clean_steps(task, user_steps):
+    """Validate the user-specified clean steps.
+
+    :param task: A TaskManager object
+    :param user_steps: a list of clean steps. A clean step is a dictionary
+        with required keys 'interface' and 'step', and optional key 'args'::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_clean_step>,
+          'args': {<arg1>: <value1>, ..., <argn>: <valuen>} }
+
+        For example::
+
+        { 'interface': 'deploy',
+          'step': 'upgrade_firmware',
+          'args': {'force': True} }
+    :raises: InvalidParameterValue if validation of clean steps fails.
+    :raises: NodeCleaningFailure if there was a problem getting the
+        clean steps from the driver.
+    :return: validated clean steps updated with information from the driver
+    """
+    driver_steps = _get_cleaning_steps(task, enabled=False, sort=False)
+    return _validate_user_steps(task, user_steps, driver_steps, 'clean')
+
+
+def _validate_user_deploy_steps(task, user_steps, error_prefix=None):
+    """Validate the user-specified deploy steps.
+
+    :param task: A TaskManager object
+    :param user_steps: a list of deploy steps. A deploy step is a dictionary
+        with required keys 'interface', 'step', 'args', and 'priority'::
+
+        { 'interface': <driver_interface>,
+          'step': <name_of_deploy_step>,
+          'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
+          'priority': <priority_of_deploy_step> }
+
+        For example::
+
+        { 'interface': 'bios',
+          'step': 'apply_configuration',
+          'args': { 'settings': [ { 'foo': 'bar' } ] },
+          'priority': 150 }
+    :param error_prefix: String to use as a prefix for exception messages, or
+        None.
+    :raises: InvalidParameterValue if validation of deploy steps fails.
+    :raises: InstanceDeployFailure if there was a problem getting the deploy
+        steps from the driver.
+    :return: validated deploy steps updated with information from the driver
+    """
+    driver_steps = _get_deployment_steps(task, enabled=False, sort=False)
+    return _validate_user_steps(task, user_steps, driver_steps, 'deploy',
+                                error_prefix=error_prefix)
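The heart of the argument validation in `_validate_user_step` above is two set computations. A reduced, runnable sketch of the same checks:

def check_args(user_args, argsinfo):
    errors = []
    # Arguments the driver never declared.
    unexpected = set(user_args) - set(argsinfo)
    if unexpected:
        errors.append('unexpected arguments: %s'
                      % ', '.join(sorted(unexpected)))
    # Declared-required arguments the user left out.
    missing = [name for name, info in argsinfo.items()
               if info.get('required', False) and name not in user_args]
    if missing:
        errors.append('missing required arguments: %s' % ', '.join(missing))
    return errors

argsinfo = {'force': {'description': 'Whether to force', 'required': False},
            'image_url': {'required': True}}
assert check_args({'force': True, 'oops': 1}, argsinfo) == [
    'unexpected arguments: oops', 'missing required arguments: image_url']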
+def validate_deploy_templates(task):
+    """Validate the deploy templates for a node.
+
+    :param task: A TaskManager object
+    :raises: InvalidParameterValue if the instance has traits that map to
+        deploy steps that are unsupported by the node's driver interfaces.
+    :raises: InstanceDeployFailure if there was a problem getting the deploy
+        steps from the driver.
+    """
+    # Gather deploy steps from matching deploy templates and validate them.
+    _get_validated_steps_from_templates(task)
diff -Nru ironic-12.0.0/ironic/conductor/task_manager.py ironic-12.1.0/ironic/conductor/task_manager.py
--- ironic-12.0.0/ironic/conductor/task_manager.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conductor/task_manager.py	2019-03-21 20:07:40.000000000 +0000
@@ -149,20 +149,16 @@
         return wrapper
 
 
-def acquire(context, node_id, shared=False, purpose='unspecified action'):
+def acquire(context, *args, **kwargs):
     """Shortcut for acquiring a lock on a Node.
 
     :param context: Request context.
-    :param node_id: ID or UUID of node to lock.
-    :param shared: Boolean indicating whether to take a shared or exclusive
-        lock. Default: False.
-    :param purpose: human-readable purpose to put to debug logs.
     :returns: An instance of :class:`TaskManager`.
 
     """
     # NOTE(lintan): This is a workaround to set the context of periodic tasks.
     context.ensure_thread_contain_context()
-    return TaskManager(context, node_id, shared=shared, purpose=purpose)
+    return TaskManager(context, *args, **kwargs)
 
 
 class TaskManager(object):
@@ -174,7 +170,8 @@
     """
 
     def __init__(self, context, node_id, shared=False,
-                 purpose='unspecified action'):
+                 purpose='unspecified action', retry=True,
+                 load_driver=True):
         """Create a new TaskManager.
 
         Acquire a lock on a node. The lock can be either shared or
@@ -187,6 +184,10 @@
         :param shared: Boolean indicating whether to take a shared or
             exclusive lock. Default: False.
         :param purpose: human-readable purpose to put to debug logs.
+        :param retry: whether to retry locking if it fails. Default: True.
+        :param load_driver: whether to load the ``driver`` object. Set this to
+            False if loading the driver is undesired or
+            impossible.
         :raises: DriverNotFound
         :raises: InterfaceNotFoundInEntrypoint
         :raises: NodeNotFound
@@ -201,6 +202,7 @@
         self._node = None
         self.node_id = node_id
         self.shared = shared
+        self._retry = retry
 
         self.fsm = states.machine.copy()
         self._purpose = purpose
@@ -231,7 +233,10 @@
                     context, self.node.id)
                 self.volume_targets = objects.VolumeTarget.list_by_node_id(
                     context, self.node.id)
-            self.driver = driver_factory.build_driver_for_task(self)
+            if load_driver:
+                self.driver = driver_factory.build_driver_for_task(self)
+            else:
+                self.driver = None
 
         except Exception:
             with excutils.save_and_reraise_exception():
@@ -251,12 +256,17 @@
     def _lock(self):
         self._debug_timer.restart()
 
+        if self._retry:
+            attempts = CONF.conductor.node_locked_retry_attempts
+        else:
+            attempts = 1
+
         # NodeLocked exceptions can be annoying. Let's try to alleviate
         # some of that pain by retrying our lock attempts. The retrying
         # module expects a wait_fixed value in milliseconds.
         @retrying.retry(
             retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
-            stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+            stop_max_attempt_number=attempts,
             wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
         def reserve_node():
            self.node = objects.Node.reserve(self.context, CONF.host,
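A hypothetical caller of the extended `acquire()` signature above, in the style the allocation code in this patch uses for database-only updates (single lock attempt, no driver loading):

from ironic.conductor import task_manager

def touch_node_extra(context, node_uuid):
    # One lock attempt (retry=False) and no driver object, since we
    # only write node fields; purpose shows up in the debug logs.
    with task_manager.acquire(context, node_uuid, shared=False,
                              retry=False, load_driver=False,
                              purpose='example metadata update') as task:
        task.node.extra = dict(task.node.extra, touched=True)
        task.node.save()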
diff -Nru ironic-12.0.0/ironic/conductor/utils.py ironic-12.1.0/ironic/conductor/utils.py
--- ironic-12.0.0/ironic/conductor/utils.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conductor/utils.py	2019-03-21 20:07:40.000000000 +0000
@@ -12,12 +12,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import datetime
+import time
+
+from openstack.baremetal import configdrive as os_configdrive
 from oslo_config import cfg
 from oslo_log import log
+from oslo_serialization import jsonutils
 from oslo_service import loopingcall
 from oslo_utils import excutils
+from oslo_utils import timeutils
 import six
 
+from ironic.common import boot_devices
 from ironic.common import exception
 from ironic.common import faults
 from ironic.common.i18n import _
@@ -30,32 +37,6 @@
 LOG = log.getLogger(__name__)
 CONF = cfg.CONF
 
-CLEANING_INTERFACE_PRIORITY = {
-    # When two clean steps have the same priority, their order is determined
-    # by which interface is implementing the clean step. The clean step of the
-    # interface with the highest value here, will be executed first in that
-    # case.
-    'power': 5,
-    'management': 4,
-    'deploy': 3,
-    'bios': 2,
-    'raid': 1,
-}
-
-DEPLOYING_INTERFACE_PRIORITY = {
-    # When two deploy steps have the same priority, their order is determined
-    # by which interface is implementing the step. The step of the interface
-    # with the highest value here, will be executed first in that case.
-    # TODO(rloo): If we think it makes sense to have the interface priorities
-    # the same for cleaning & deploying, replace the two with one e.g.
-    # 'INTERFACE_PRIORITIES'.
-    'power': 5,
-    'management': 4,
-    'deploy': 3,
-    'bios': 2,
-    'raid': 1,
-}
-
 
 @task_manager.require_exclusive_lock
 def node_set_boot_device(task, device, persistent=False):
@@ -166,7 +147,7 @@
         timer = loopingcall.BackOffLoopingCall(_wait)
         return timer.start(initial_delay=1, timeout=retry_timeout).wait()
     except loopingcall.LoopingCallTimeOut:
-        LOG.error('Timed out after %(retry_timeout)s secs waiting for power '
+        LOG.error('Timed out after %(retry_timeout)s secs waiting for '
                   '%(state)s on node %(node_id)s.',
                   {'retry_timeout': retry_timeout,
                    'state': new_state, 'node_id': task.node.uuid})
@@ -623,236 +604,6 @@
                  {'node': node.uuid, 'power_state': power_state})
 
 
-def _clean_step_key(step):
-    """Sort by priority, then interface priority in event of tie.
-
-    :param step: cleaning step dict to get priority for.
-    """
-    return (step.get('priority'),
-            CLEANING_INTERFACE_PRIORITY[step.get('interface')])
-
-
-def _deploy_step_key(step):
-    """Sort by priority, then interface priority in event of tie.
-
-    :param step: deploy step dict to get priority for.
-    """
-    return (step.get('priority'),
-            DEPLOYING_INTERFACE_PRIORITY[step.get('interface')])
-
-
-def _get_steps(task, interfaces, get_method, enabled=False,
-               sort_step_key=None):
-    """Get steps for task.node.
-
-    :param task: A TaskManager object
-    :param interfaces: A dictionary of (key) interfaces and their
-        (value) priorities. These are the interfaces that will have steps of
-        interest. The priorities are used for deciding the priorities of steps
-        having the same priority.
-    :param get_method: The method used to get the steps from the node's
-        interface; a string.
-    :param enabled: If True, returns only enabled (priority > 0) steps. If
-        False, returns all steps.
-    :param sort_step_key: If set, this is a method (key) used to sort the
-        steps from highest priority to lowest priority. For steps having the
-        same priority, they are sorted from highest interface priority to
-        lowest.
-    :raises: NodeCleaningFailure or InstanceDeployFailure if there was a
-        problem getting the steps.
- :returns: A list of step dictionaries - """ - # Get steps from each interface - steps = list() - for interface in interfaces: - interface = getattr(task.driver, interface) - if interface: - interface_steps = [x for x in getattr(interface, get_method)(task) - if not enabled or x['priority'] > 0] - steps.extend(interface_steps) - if sort_step_key: - # Sort the steps from higher priority to lower priority - steps = sorted(steps, key=sort_step_key, reverse=True) - return steps - - -def _get_cleaning_steps(task, enabled=False, sort=True): - """Get cleaning steps for task.node. - - :param task: A TaskManager object - :param enabled: If True, returns only enabled (priority > 0) steps. If - False, returns all clean steps. - :param sort: If True, the steps are sorted from highest priority to lowest - priority. For steps having the same priority, they are sorted from - highest interface priority to lowest. - :raises: NodeCleaningFailure if there was a problem getting the - clean steps. - :returns: A list of clean step dictionaries - """ - sort_key = _clean_step_key if sort else None - return _get_steps(task, CLEANING_INTERFACE_PRIORITY, 'get_clean_steps', - enabled=enabled, sort_step_key=sort_key) - - -def _get_deployment_steps(task, enabled=False, sort=True): - """Get deployment steps for task.node. - - :param task: A TaskManager object - :param enabled: If True, returns only enabled (priority > 0) steps. If - False, returns all deploy steps. - :param sort: If True, the steps are sorted from highest priority to lowest - priority. For steps having the same priority, they are sorted from - highest interface priority to lowest. - :raises: InstanceDeployFailure if there was a problem getting the - deploy steps. - :returns: A list of deploy step dictionaries - """ - sort_key = _deploy_step_key if sort else None - return _get_steps(task, DEPLOYING_INTERFACE_PRIORITY, 'get_deploy_steps', - enabled=enabled, sort_step_key=sort_key) - - -def set_node_cleaning_steps(task): - """Set up the node with clean step information for cleaning. - - For automated cleaning, get the clean steps from the driver. - For manual cleaning, the user's clean steps are known but need to be - validated against the driver's clean steps. - - :raises: InvalidParameterValue if there is a problem with the user's - clean steps. - :raises: NodeCleaningFailure if there was a problem getting the - clean steps. - """ - node = task.node - driver_internal_info = node.driver_internal_info - - # For manual cleaning, the target provision state is MANAGEABLE, whereas - # for automated cleaning, it is AVAILABLE. - manual_clean = node.target_provision_state == states.MANAGEABLE - - if not manual_clean: - # Get the prioritized steps for automated cleaning - driver_internal_info['clean_steps'] = _get_cleaning_steps(task, - enabled=True) - else: - # For manual cleaning, the list of cleaning steps was specified by the - # user and already saved in node.driver_internal_info['clean_steps']. - # Now that we know what the driver's available clean steps are, we can - # do further checks to validate the user's clean steps. - steps = node.driver_internal_info['clean_steps'] - driver_internal_info['clean_steps'] = ( - _validate_user_clean_steps(task, steps)) - - node.clean_step = {} - driver_internal_info['clean_step_index'] = None - node.driver_internal_info = driver_internal_info - node.save() - - -def set_node_deployment_steps(task): - """Set up the node with deployment step information for deploying. - - Get the deploy steps from the driver. 
-
-    :raises: InstanceDeployFailure if there was a problem getting the
-        deployment steps.
-    """
-    node = task.node
-    driver_internal_info = node.driver_internal_info
-    driver_internal_info['deploy_steps'] = _get_deployment_steps(
-        task, enabled=True)
-    node.deploy_step = {}
-    driver_internal_info['deploy_step_index'] = None
-    node.driver_internal_info = driver_internal_info
-    node.save()
-
-
-def _validate_user_clean_steps(task, user_steps):
-    """Validate the user-specified clean steps.
-
-    :param task: A TaskManager object
-    :param user_steps: a list of clean steps. A clean step is a dictionary
-        with required keys 'interface' and 'step', and optional key 'args'::
-
-        { 'interface': <driver_interface>,
-          'step': <name_of_clean_step>,
-          'args': {<arg1>: <value1>, ..., <argn>: <valuen>} }
-
-        For example::
-
-        { 'interface': deploy',
-          'step': 'upgrade_firmware',
-          'args': {'force': True} }
-    :raises: InvalidParameterValue if validation of clean steps fails.
-    :raises: NodeCleaningFailure if there was a problem getting the
-        clean steps from the driver.
-    :return: validated clean steps update with information from the driver
-    """
-
-    def step_id(step):
-        return '.'.join([step['step'], step['interface']])
-
-    errors = []
-
-    # The clean steps from the driver. A clean step dictionary is of the form:
-    # { 'interface': <driver_interface>,
-    #   'step': <name_of_clean_step>,
-    #   'priority': <integer>
-    #   'abortable': Optional. <Boolean>.
-    #   'argsinfo': Optional. A dictionary of {<arg_name>:<arg_info>}
-    #       entries. <arg_info> is a dictionary with
-    #       { 'description': <description>,
-    #         'required': <Boolean> }
-    # }
-    driver_steps = {}
-    for s in _get_cleaning_steps(task, enabled=False, sort=False):
-        driver_steps[step_id(s)] = s
-
-    result = []
-    for user_step in user_steps:
-        # Check if this user_specified clean step isn't supported by the
-        # driver
-        try:
-            driver_step = driver_steps[step_id(user_step)]
-        except KeyError:
-            error = (_('node does not support this clean step: %(step)s')
-                     % {'step': user_step})
-            errors.append(error)
-            continue
-
-        # Check that the user-specified arguments are valid
-        argsinfo = driver_step.get('argsinfo') or {}
-        user_args = user_step.get('args') or {}
-        invalid = set(user_args) - set(argsinfo)
-        if invalid:
-            error = _('clean step %(step)s has these invalid arguments: '
-                      '%(invalid)s') % {'step': user_step,
-                                        'invalid': ', '.join(invalid)}
-            errors.append(error)
-
-        # Check that all required arguments were specified by the user
-        missing = []
-        for (arg_name, arg_info) in argsinfo.items():
-            if arg_info.get('required', False) and arg_name not in user_args:
-                msg = arg_name
-                if arg_info.get('description'):
-                    msg += ' (%(desc)s)' % {'desc': arg_info['description']}
-                missing.append(msg)
-        if missing:
-            error = _('clean step %(step)s is missing these required keyword '
-                      'arguments: %(miss)s') % {'step': user_step,
-                                                'miss': ', '.join(missing)}
-            errors.append(error)
-
-        # Copy fields that should not be provided by a user
-        user_step['abortable'] = driver_step.get('abortable', False)
-        user_step['priority'] = driver_step.get('priority', 0)
-        result.append(user_step)
-
-    if errors:
-        raise exception.InvalidParameterValue('; '.join(errors))
-    return result
-
-
 @task_manager.require_exclusive_lock
 def validate_port_physnet(task, port_obj):
     """Validate the consistency of physical networks of ports in a portgroup.
@@ -1000,3 +751,144 @@
     :param node: the node to consider
     """
     return not CONF.conductor.automated_clean and not node.automated_clean
+
+
+def power_on_node_if_needed(task):
+    """Powers on node if it is powered off and has a Smart NIC port
+
+    :param task: A TaskManager object
+    :returns: the previous power state or None if no changes were made
+    :raises: exception.NetworkError if agent status didn't match the required
+        status after max retry attempts.
+    """
+    if not task.driver.network.need_power_on(task):
+        return
+
+    previous_power_state = task.driver.power.get_power_state(task)
+    if previous_power_state == states.POWER_OFF:
+        node_set_boot_device(
+            task, boot_devices.BIOS, persistent=False)
+        node_power_action(task, states.POWER_ON)
+
+        # local import is necessary to avoid circular import
+        from ironic.common import neutron
+
+        host_id = None
+        for port in task.ports:
+            if neutron.is_smartnic_port(port):
+                link_info = port.local_link_connection
+                host_id = link_info['hostname']
+                break
+
+        if host_id:
+            LOG.debug('Waiting for host %(host)s agent to be down',
+                      {'host': host_id})
+
+            client = neutron.get_client(context=task.context)
+            neutron.wait_for_host_agent(
+                client, host_id, target_state='down')
+        return previous_power_state
+
+
+def restore_power_state_if_needed(task, power_state_to_restore):
+    """Change the node's power state if power_state_to_restore is not None
+
+    :param task: A TaskManager object
+    :param power_state_to_restore: power state
+    """
+    if power_state_to_restore:
+
+        # Sleep is required here in order to give neutron agent
+        # a chance to apply the changes before powering off.
+        # Using twice the polling interval of the agent
+        # "CONF.AGENT.polling_interval" would give the agent
+        # enough time to apply network changes.
+        time.sleep(CONF.agent.neutron_agent_poll_interval * 2)
+        node_power_action(task, power_state_to_restore)
+
+
+def build_configdrive(node, configdrive):
+    """Build a configdrive from provided meta_data, network_data and user_data.
+
+    If uuid or name is not provided in the meta_data, it defaults to the
+    node's uuid and name respectively.
+
+    :param node: an Ironic node object.
+    :param configdrive: A configdrive as a dict with keys ``meta_data``,
+        ``network_data`` and ``user_data`` (all optional).
+    :returns: A gzipped and base64 encoded configdrive as a string.
+    """
+    meta_data = configdrive.setdefault('meta_data', {})
+    meta_data.setdefault('uuid', node.uuid)
+    if node.name:
+        meta_data.setdefault('name', node.name)
+
+    user_data = configdrive.get('user_data')
+    if isinstance(user_data, (dict, list)):
+        user_data = jsonutils.dump_as_bytes(user_data)
+    elif user_data:
+        user_data = user_data.encode('utf-8')
+
+    LOG.debug('Building a configdrive for node %s', node.uuid)
+    return os_configdrive.build(meta_data, user_data=user_data,
+                                network_data=configdrive.get('network_data'))
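The `user_data` handling in `build_configdrive` above, in isolation: dicts and lists are JSON-encoded, text is UTF-8 encoded, and anything falsy is passed through. A small sketch of that dispatch (assumes oslo.serialization is available):

from oslo_serialization import jsonutils

def encode_user_data(user_data):
    if isinstance(user_data, (dict, list)):
        return jsonutils.dump_as_bytes(user_data)
    if user_data:
        return user_data.encode('utf-8')
    return None

assert encode_user_data({'a': 1}) == b'{"a": 1}'
assert encode_user_data(u'#!/bin/sh\n') == b'#!/bin/sh\n'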
+
+
+def fast_track_able(task):
+    """Checks if the operation can be a streamlined deployment sequence.
+
+    This is mainly focused on ensuring that we are able to quickly sequence
+    through operations if we already have a ramdisk heartbeating through
+    external means.
+
+    :param task: Taskmanager object
+    :returns: True if [deploy]fast_track is set to True, no iSCSI boot
+        configuration is present, and no last_error is present for
+        the node indicating that there was a recent failure.
+    """
+    return (CONF.deploy.fast_track
+            # TODO(TheJulia): Network model aside, we should be able to
+            # fast-track through initial sequence to complete deployment.
+            # This needs to be validated.
+            # TODO(TheJulia): Do we need a secondary guard? To prevent
+            # driving through this we could query the API endpoint of
+            # the agent with a short timeout such as 10 seconds, which
+            # would help verify if the node is online.
+            # TODO(TheJulia): Should we check the provisioning/deployment
+            # networks match config wise? Do we care? #decisionsdecisions
+            and task.driver.storage.should_write_image(task)
+            and task.node.last_error is None)
+
+
+def is_fast_track(task):
+    """Checks if a fast track is available.
+
+    This method first ensures that the node and conductor configuration
+    is valid to perform a fast track sequence, meaning that we already
+    have a ramdisk running through another means like discovery.
+    If not valid, False is returned.
+
+    The method then checks for the last agent heartbeat, and if it occurred
+    within the timeout set by [deploy]fast_track_timeout and the power
+    state for the machine is POWER_ON, then fast track is permitted.
+
+    :param task: A TaskManager object
+    :returns: True if the last heartbeat that was recorded was within
+        the [deploy]fast_track_timeout setting.
+    """
+    if not fast_track_able(task):
+        return False
+    # Use native datetime objects for conversion and comparison;
+    # slightly odd because of py2 compatibility. :(
+    last = datetime.datetime.strptime(
+        task.node.driver_internal_info.get(
+            'agent_last_heartbeat',
+            '1970-01-01T00:00:00.000000'),
+        "%Y-%m-%dT%H:%M:%S.%f")
+    # If we found nothing, we assume that the time is essentially epoch.
+    time_delta = datetime.timedelta(seconds=CONF.deploy.fast_track_timeout)
+    last_valid = timeutils.utcnow() - time_delta
+    # Checking the power state, because if we find the machine off due to
+    # any action, we can't actually fast track the node. :(
+    return (last_valid <= last
+            and task.driver.power.get_power_state(task) == states.POWER_ON)
diff -Nru ironic-12.0.0/ironic/conf/agent.py ironic-12.1.0/ironic/conf/agent.py
--- ironic-12.0.0/ironic/conf/agent.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conf/agent.py	2019-03-21 20:07:40.000000000 +0000
@@ -110,6 +110,21 @@
                help=_('This is the maximum number of attempts that will be '
                       'done for IPA commands that fails due to network '
                       'problems.')),
+    cfg.IntOpt('neutron_agent_poll_interval',
+               default=2,
+               help=_('The number of seconds Neutron agent will wait between '
+                      'polling for device changes. This value should be '
+                      'the same as CONF.AGENT.polling_interval in Neutron '
+                      'configuration.')),
+    cfg.IntOpt('neutron_agent_max_attempts',
+               default=100,
+               help=_('Max number of attempts to validate a Neutron agent '
+                      'status before raising network error for a '
+                      'dead agent.')),
+    cfg.IntOpt('neutron_agent_status_retry_interval',
+               default=10,
+               help=_('Wait time in seconds between attempts for validating '
+                      'Neutron agent status.')),
 ]
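Back in `is_fast_track` above, the freshness test is a plain timestamp comparison, and the epoch default means "never heartbeated" always fails it. A minimal sketch of that window check (the 300-second timeout is illustrative):

import datetime
from oslo_utils import timeutils

def heartbeat_is_fresh(last_heartbeat='1970-01-01T00:00:00.000000',
                       fast_track_timeout=300):
    last = datetime.datetime.strptime(last_heartbeat,
                                      "%Y-%m-%dT%H:%M:%S.%f")
    last_valid = timeutils.utcnow() - datetime.timedelta(
        seconds=fast_track_timeout)
    return last_valid <= last

assert not heartbeat_is_fresh()  # the epoch default is long expired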
" + "By default, ansible uses /usr/bin/python")), ] diff -Nru ironic-12.0.0/ironic/conf/conductor.py ironic-12.1.0/ironic/conf/conductor.py --- ironic-12.0.0/ironic/conf/conductor.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/conf/conductor.py 2019-03-21 20:07:40.000000000 +0000 @@ -24,7 +24,9 @@ default=100, min=3, help=_('The size of the workers greenthread pool. ' 'Note that 2 threads will be reserved by the conductor ' - 'itself for handling heart beats and periodic tasks.')), + 'itself for handling heart beats and periodic tasks. ' + 'On top of that, `sync_power_state_workers` will take ' + 'up to 7 green threads with the default value of 8.')), cfg.IntOpt('heartbeat_interval', default=10, help=_('Seconds between conductor heart beats.')), @@ -41,6 +43,9 @@ 'http:// or https://.')), cfg.IntOpt('heartbeat_timeout', default=60, + # We're using timedelta which can overflow if somebody sets this + # too high, so limit to a sane value of 10 years. + max=315576000, help=_('Maximum time (in seconds) since the last check-in ' 'of a conductor. A conductor is considered inactive ' 'when this time has been exceeded.')), @@ -58,6 +63,11 @@ min=1, help=_('Interval (seconds) between checks of rescue ' 'timeouts.')), + cfg.IntOpt('check_allocations_interval', + default=60, + min=0, + help=_('Interval between checks of orphaned allocations, ' + 'in seconds. Set to 0 to disable checks.')), cfg.IntOpt('deploy_callback_timeout', default=1800, help=_('Timeout (seconds) to wait for a callback from ' @@ -74,6 +84,11 @@ 'number of times Ironic should try syncing the ' 'hardware node power state with the node power state ' 'in DB')), + cfg.IntOpt('sync_power_state_workers', + default=8, min=1, + help=_('The maximum number of worker threads that can be ' + 'started simultaneously to sync nodes power states from ' + 'the periodic task.')), cfg.IntOpt('periodic_max_workers', default=8, help=_('Maximum number of worker threads that can be started ' @@ -184,6 +199,11 @@ '255 characters and is case insensitive. This ' 'conductor will only manage nodes with a matching ' '"conductor_group" field set on the node.')), + cfg.BoolOpt('allow_deleting_available_nodes', + default=True, + mutable=True, + help=_('Allow deleting nodes which are in state ' + '\'available\'. 
Defaults to True.')), ] diff -Nru ironic-12.0.0/ironic/conf/default.py ironic-12.1.0/ironic/conf/default.py --- ironic-12.0.0/ironic/conf/default.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/conf/default.py 2019-03-21 20:07:40.000000000 +0000 @@ -190,7 +190,8 @@ 'Setting this to more than one will cause additional ' 'conductor services to prepare deployment environments ' 'and potentially allow the Ironic cluster to recover ' - 'more quickly if a conductor instance is terminated.')), + 'more quickly if a conductor instance is terminated.'), + deprecated_for_removal=True), cfg.IntOpt('hash_ring_reset_interval', default=15, help=_('Time (in seconds) after which the hash ring is ' @@ -210,6 +211,10 @@ default=os.path.join('$pybasedir', 'common/isolinux_config.template'), help=_('Template file for isolinux configuration file.')), + cfg.StrOpt('grub_config_path', + default='/boot/grub/grub.cfg', + help=_('GRUB2 configuration file location on the UEFI ISO ' + 'images produced by ironic.')), cfg.StrOpt('grub_config_template', default=os.path.join('$pybasedir', 'common/grub_conf.template'), @@ -220,6 +225,18 @@ 'looked for in ' '"/usr/lib/syslinux/modules/bios/ldlinux.c32" and ' '"/usr/share/syslinux/ldlinux.c32".')), + cfg.StrOpt('esp_image', + help=_('Path to EFI System Partition image file. This file is ' + 'recommended for creating UEFI bootable ISO images ' + 'efficiently. ESP image should contain a ' + 'FAT12/16/32-formatted file system holding EFI boot ' + 'loaders (e.g. GRUB2) for each hardware architecture ' + 'ironic needs to boot. This option is only used when ' + 'neither ESP nor ISO deploy image is configured to ' + 'the node being deployed in which case ironic will ' + 'attempt to fetch ESP image from the configured ' + 'location or extract ESP image from UEFI-bootable ' + 'deploy ISO image.')), ] img_cache_opts = [ @@ -238,10 +255,11 @@ '"127.0.0.1".')), ] -# NOTE(mariojv) By default, accessing this option when it's unset will return -# None, indicating no notifications will be sent. oslo.config returns None by -# default for options without set defaults that aren't required. notification_opts = [ + # NOTE(mariojv) By default, accessing this option when it's unset will + # return None, indicating no notifications will be sent. oslo.config + # returns None by default for options without set defaults that aren't + # required. cfg.StrOpt('notification_level', choices=[('debug', _('"debug" level')), ('info', _('"info" level')), @@ -250,7 +268,22 @@ ('critical', _('"critical" level'))], help=_('Specifies the minimum level for which to send ' 'notifications. If not set, no notifications will ' - 'be sent. The default is for this option to be unset.')) + 'be sent. The default is for this option to be unset.')), + cfg.ListOpt( + 'versioned_notifications_topics', + default=['ironic_versioned_notifications'], + help=_(""" +Specifies the topics for the versioned notifications issued by Ironic. + +The default value is fine for most deployments and rarely needs to be changed. +However, if you have a third-party service that consumes versioned +notifications, it might be worth getting a topic for that service. +Ironic will send a message containing a versioned notification payload to each +topic queue in this list. + +The list of versioned notifications is visible in +https://docs.openstack.org/ironic/latest/admin/notifications.html +""")), ] path_opts = [ @@ -299,6 +332,12 @@ 'When doing a rolling upgrade from version N to version ' 'N+1, set (to pin) this to N. 
To unpin (default), leave ' 'it unset and the latest versions will be used.')), + cfg.StrOpt('rpc_transport', + default='oslo', + choices=[('oslo', _('use oslo.messaging transport')), + ('json-rpc', _('use JSON RPC transport'))], + help=_('Which RPC transport implementation to use between ' + 'conductor and API services')), ] utils_opts = [ diff -Nru ironic-12.0.0/ironic/conf/deploy.py ironic-12.1.0/ironic/conf/deploy.py --- ironic-12.0.0/ironic/conf/deploy.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/conf/deploy.py 2019-03-21 20:07:40.000000000 +0000 @@ -113,6 +113,27 @@ 'images for the direct deploy interface, when local ' 'HTTP service is incorporated to provide instance image ' 'instead of swift tempurls.')), + cfg.BoolOpt('fast_track', + default=False, + help=_('Whether to allow deployment agents to perform lookup, ' + 'heartbeat operations during initial states of a ' + 'machine lifecycle and by-pass the normal setup ' + 'procedures for a ramdisk. This feature also enables ' + 'power operations which are part of deployment ' + 'processes to be bypassed if the ramdisk has performed ' + 'a heartbeat operation using the fast_track_timeout ' + 'setting.')), + cfg.IntOpt('fast_track_timeout', + default=300, + min=0, + max=300, + help=_('Seconds for which the last heartbeat event is to be ' + 'considered valid for the purpose of a fast ' + 'track sequence. This setting should generally be ' + 'less than the number of seconds for "Power-On Self ' + 'Test" and typical ramdisk start-up. This value should ' + 'not exceed the [api]ramdisk_heartbeat_timeout ' + 'setting.')), ] diff -Nru ironic-12.0.0/ironic/conf/ibmc.py ironic-12.1.0/ironic/conf/ibmc.py --- ironic-12.0.0/ironic/conf/ibmc.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/conf/ibmc.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,35 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Version 1.0.0 + +from oslo_config import cfg + +from ironic.common.i18n import _ + +opts = [ + cfg.IntOpt('connection_attempts', + min=1, + default=5, + help=_('Maximum number of attempts to try to connect ' + 'to iBMC')), + cfg.IntOpt('connection_retry_interval', + min=1, + default=4, + help=_('Number of seconds to wait between attempts to ' + 'connect to iBMC')) +] + + +def register_opts(conf): + conf.register_opts(opts, group='ibmc') diff -Nru ironic-12.0.0/ironic/conf/__init__.py ironic-12.1.0/ironic/conf/__init__.py --- ironic-12.0.0/ironic/conf/__init__.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/conf/__init__.py 2019-03-21 20:07:40.000000000 +0000 @@ -30,11 +30,13 @@ from ironic.conf import drac from ironic.conf import glance from ironic.conf import healthcheck +from ironic.conf import ibmc from ironic.conf import ilo from ironic.conf import inspector from ironic.conf import ipmi from ironic.conf import irmc from ironic.conf import iscsi +from ironic.conf import json_rpc from ironic.conf import metrics from ironic.conf import metrics_statsd from ironic.conf import neutron @@ -62,11 +64,13 @@ dhcp.register_opts(CONF) glance.register_opts(CONF) healthcheck.register_opts(CONF) +ibmc.register_opts(CONF) ilo.register_opts(CONF) inspector.register_opts(CONF) ipmi.register_opts(CONF) irmc.register_opts(CONF) iscsi.register_opts(CONF) +json_rpc.register_opts(CONF) metrics.register_opts(CONF) metrics_statsd.register_opts(CONF) neutron.register_opts(CONF) diff -Nru ironic-12.0.0/ironic/conf/json_rpc.py ironic-12.1.0/ironic/conf/json_rpc.py --- ironic-12.0.0/ironic/conf/json_rpc.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/conf/json_rpc.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from ironic.common.i18n import _ +from ironic.conf import auth + +opts = [ + cfg.StrOpt('auth_strategy', + choices=[('noauth', _('no authentication')), + ('keystone', _('use the Identity service for ' + 'authentication'))], + help=_('Authentication strategy used by JSON RPC. 
Defaults to '
+                      'the global auth_strategy setting.')),
+    cfg.HostAddressOpt('host_ip',
+                       default='0.0.0.0',
+                       help=_('The IP address or hostname on which JSON RPC '
+                              'will listen.')),
+    cfg.PortOpt('port',
+                default=8089,
+                help=_('The port to use for JSON RPC')),
+    cfg.BoolOpt('use_ssl',
+                default=False,
+                help=_('Whether to use TLS for JSON RPC')),
+]
+
+
+def register_opts(conf):
+    conf.register_opts(opts, group='json_rpc')
+    auth.register_auth_opts(conf, 'json_rpc')
+
+
+def list_opts():
+    return opts + auth.add_auth_opts([])
diff -Nru ironic-12.0.0/ironic/conf/opts.py ironic-12.1.0/ironic/conf/opts.py
--- ironic-12.0.0/ironic/conf/opts.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conf/opts.py	2019-03-21 20:07:40.000000000 +0000
@@ -53,6 +53,7 @@
         ('ipmi', ironic.conf.ipmi.opts),
         ('irmc', ironic.conf.irmc.opts),
         ('iscsi', ironic.conf.iscsi.opts),
+        ('json_rpc', ironic.conf.json_rpc.list_opts()),
         ('metrics', ironic.conf.metrics.opts),
         ('metrics_statsd', ironic.conf.metrics_statsd.opts),
         ('neutron', ironic.conf.neutron.list_opts()),
@@ -88,9 +89,6 @@
                        'amqp=WARNING',
                        'amqplib=WARNING',
                        'qpid.messaging=INFO',
-                       # TODO(therve): when bug #1685148 is fixed in oslo.messaging, we
-                       # should be able to remove one of those 2 lines.
-                       'oslo_messaging=INFO',
                        'oslo.messaging=INFO',
                        'sqlalchemy=WARNING',
                        'stevedore=INFO',
diff -Nru ironic-12.0.0/ironic/conf/pxe.py ironic-12.1.0/ironic/conf/pxe.py
--- ironic-12.0.0/ironic/conf/pxe.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/conf/pxe.py	2019-03-21 20:07:40.000000000 +0000
@@ -76,7 +76,7 @@
                default='/tftpboot/master_images',
                help=_('On ironic-conductor node, directory where master TFTP '
                       'images are stored on disk. '
-                      'Setting to disables image caching.')),
+                      'Setting to the empty string disables image caching.')),
     cfg.IntOpt('dir_permission',
                help=_("The permission that will be applied to the TFTP "
                       "folders upon creation. This should be set to the "
diff -Nru ironic-12.0.0/ironic/db/api.py ironic-12.1.0/ironic/db/api.py
--- ironic-12.0.0/ironic/db/api.py	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/ironic/db/api.py	2019-03-21 20:07:40.000000000 +0000
@@ -98,6 +98,20 @@
         """
 
     @abc.abstractmethod
+    def check_node_list(self, idents):
+        """Check a list of node identities and map it to UUIDs.
+
+        This call takes a list of node names and/or UUIDs and tries to convert
+        them to UUIDs. It fails early if any identities cannot possibly be
+        used as names or UUIDs.
+
+        :param idents: List of identities.
+        :returns: A mapping from the requested identities to node UUIDs.
+        :raises: NodeNotFound if some identities were not found or cannot be
+            valid names or UUIDs.
+        """
+
+    @abc.abstractmethod
     def reserve_node(self, tag, node_id):
         """Reserve a node.
 
@@ -549,8 +563,16 @@
         """
 
     @abc.abstractmethod
-    def get_offline_conductors(self):
-        """Get a list conductor hostnames that are offline (dead).
+    def get_offline_conductors(self, field='hostname'):
+        """Get a list of conductors that are offline (dead).
+
+        :param field: A field to return, hostname by default.
+        :returns: A list of requested fields of offline conductors.
+        """
+
+    @abc.abstractmethod
+    def get_online_conductors(self):
+        """Get a list of conductor hostnames that are online and active.
 
         :returns: A list of conductor hostnames.
         """
@@ -902,13 +924,14 @@
         """
 
     @abc.abstractmethod
-    def check_versions(self):
+    def check_versions(self, ignore_models=()):
         """Checks the whole database for incompatible objects.
This scans all the tables in search of objects that are not supported; i.e., those that are not specified in `ironic.common.release_mappings.RELEASE_MAPPING`. + :param ignore_models: List of model names to skip. :returns: A Boolean. True if all the objects have supported versions; False otherwise. """ @@ -1078,3 +1101,194 @@ :returns: A list of BIOSSetting objects. :raises: NodeNotFound if the node is not found. """ + + @abc.abstractmethod + def get_allocation_by_id(self, allocation_id): + """Return an allocation representation. + + :param allocation_id: The id of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + + @abc.abstractmethod + def get_allocation_by_uuid(self, allocation_uuid): + """Return an allocation representation. + + :param allocation_uuid: The uuid of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + + @abc.abstractmethod + def get_allocation_by_name(self, name): + """Return an allocation representation. + + :param name: The logical name of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + + @abc.abstractmethod + def get_allocation_list(self, filters=None, limit=None, marker=None, + sort_key=None, sort_dir=None): + """Return a list of allocations. + + :param filters: Filters to apply. Defaults to None. + + :node_uuid: uuid of node + :state: allocation state + :resource_class: requested resource class + :param limit: Maximum number of allocations to return. + :param marker: The last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: Direction in which results should be sorted. + (asc, desc) + :returns: A list of allocations. + """ + + @abc.abstractmethod + def create_allocation(self, values): + """Create a new allocation. + + :param values: Dict of values to create an allocation with + :returns: An allocation + :raises: AllocationDuplicateName + :raises: AllocationAlreadyExists + """ + + @abc.abstractmethod + def update_allocation(self, allocation_id, values, update_node=True): + """Update properties of an allocation. + + :param allocation_id: Allocation ID + :param values: Dict of values to update. + :param update_node: If True and node_id is updated, update the node + with instance_uuid and traits from the allocation + :returns: An allocation. + :raises: AllocationNotFound + :raises: AllocationDuplicateName + :raises: InstanceAssociated + :raises: NodeAssociated + """ + + @abc.abstractmethod + def take_over_allocation(self, allocation_id, old_conductor_id, + new_conductor_id): + """Do a take over for an allocation. + + The allocation is only updated if the old conductor matches the + provided value, thus guarding against races. + + :param allocation_id: Allocation ID + :param old_conductor_id: The conductor ID we expect to be the current + ``conductor_affinity`` of the allocation. + :param new_conductor_id: The conductor ID of the new + ``conductor_affinity``. + :returns: True if the take over was successful, False otherwise. + :raises: AllocationNotFound + """ + + @abc.abstractmethod + def destroy_allocation(self, allocation_id): + """Destroy an allocation. + + :param allocation_id: Allocation ID + :raises: AllocationNotFound + """ + + @abc.abstractmethod + def create_deploy_template(self, values): + """Create a deployment template. + + :param values: A dict describing the deployment template. 
For example: + + :: + + { + 'uuid': uuidutils.generate_uuid(), + 'name': 'CUSTOM_DT1', + } + :raises: DeployTemplateDuplicateName if a deploy template with the same + name exists. + :raises: DeployTemplateAlreadyExists if a deploy template with the same + UUID exists. + :returns: A deploy template. + """ + + @abc.abstractmethod + def update_deploy_template(self, template_id, values): + """Update a deployment template. + + :param template_id: ID of the deployment template to update. + :param values: A dict describing the deployment template. For example: + + :: + + { + 'uuid': uuidutils.generate_uuid(), + 'name': 'CUSTOM_DT1', + } + :raises: DeployTemplateDuplicateName if a deploy template with the same + name exists. + :raises: DeployTemplateNotFound if the deploy template does not exist. + :returns: A deploy template. + """ + + @abc.abstractmethod + def destroy_deploy_template(self, template_id): + """Destroy a deployment template. + + :param template_id: ID of the deployment template to destroy. + :raises: DeployTemplateNotFound if the deploy template does not exist. + """ + + @abc.abstractmethod + def get_deploy_template_by_id(self, template_id): + """Retrieve a deployment template by ID. + + :param template_id: ID of the deployment template to retrieve. + :raises: DeployTemplateNotFound if the deploy template does not exist. + :returns: A deploy template. + """ + + @abc.abstractmethod + def get_deploy_template_by_uuid(self, template_uuid): + """Retrieve a deployment template by UUID. + + :param template_uuid: UUID of the deployment template to retrieve. + :raises: DeployTemplateNotFound if the deploy template does not exist. + :returns: A deploy template. + """ + + @abc.abstractmethod + def get_deploy_template_by_name(self, template_name): + """Retrieve a deployment template by name. + + :param template_name: name of the deployment template to retrieve. + :raises: DeployTemplateNotFound if the deploy template does not exist. + :returns: A deploy template. + """ + + @abc.abstractmethod + def get_deploy_template_list(self, limit=None, marker=None, + sort_key=None, sort_dir=None): + """Retrieve a list of deployment templates. + + :param limit: Maximum number of deploy templates to return. + :param marker: The last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: Direction in which results should be sorted. + (asc, desc) + :returns: A list of deploy templates. + """ + + @abc.abstractmethod + def get_deploy_template_list_by_names(self, names): + """Return a list of deployment templates with one of a list of names. + + :param names: List of names to filter by. + :returns: A list of deploy templates. + """ diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add extra column to deploy_templates + +Revision ID: 1e15e7122cc9 +Revises: 2aac7e0872f6 +Create Date: 2019-02-26 15:08:18.419157 + +""" + +# revision identifiers, used by Alembic. +revision = '1e15e7122cc9' +down_revision = '2aac7e0872f6' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('deploy_templates', + sa.Column('extra', sa.Text(), nullable=True)) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/28c44432c9c3_add_node_description.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/28c44432c9c3_add_node_description.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/28c44432c9c3_add_node_description.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/28c44432c9c3_add_node_description.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add node description + +Revision ID: 28c44432c9c3 +Revises: dd67b91a1981 +Create Date: 2019-01-23 13:54:08.850421 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '28c44432c9c3' +down_revision = '9cbeefa3763f' + + +def upgrade(): + op.add_column('nodes', sa.Column('description', sa.Text(), + nullable=True)) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Create deploy_templates and deploy_template_steps tables. + +Revision ID: 2aac7e0872f6 +Revises: 28c44432c9c3 +Create Date: 2018-12-27 11:49:15.029650 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '2aac7e0872f6' +down_revision = '28c44432c9c3' + + +def upgrade(): + op.create_table( + 'deploy_templates', + sa.Column('version', sa.String(length=15), nullable=True), + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False, + autoincrement=True), + sa.Column('uuid', sa.String(length=36)), + sa.Column('name', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('uuid', name='uniq_deploytemplates0uuid'), + sa.UniqueConstraint('name', name='uniq_deploytemplates0name'), + mysql_ENGINE='InnoDB', + mysql_DEFAULT_CHARSET='UTF8' + ) + + op.create_table( + 'deploy_template_steps', + sa.Column('version', sa.String(length=15), nullable=True), + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False, + autoincrement=True), + sa.Column('deploy_template_id', sa.Integer(), nullable=False, + autoincrement=False), + sa.Column('interface', sa.String(length=255), nullable=False), + sa.Column('step', sa.String(length=255), nullable=False), + sa.Column('args', sa.Text, nullable=False), + sa.Column('priority', sa.Integer, nullable=False), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['deploy_template_id'], + ['deploy_templates.id']), + sa.Index('deploy_template_id', 'deploy_template_id'), + sa.Index('deploy_template_steps_interface_idx', 'interface'), + sa.Index('deploy_template_steps_step_idx', 'step'), + mysql_ENGINE='InnoDB', + mysql_DEFAULT_CHARSET='UTF8' + ) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""add is_smartnic port attribute + +Revision ID: 9cbeefa3763f +Revises: dd67b91a1981 +Create Date: 2019-01-13 09:31:13.336479 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. 
+revision = '9cbeefa3763f' +down_revision = 'dd67b91a1981' + + +def upgrade(): + op.add_column('ports', sa.Column('is_smartnic', sa.Boolean(), + default=False)) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add Allocations table + +Revision ID: dd67b91a1981 +Revises: f190f9d00a11 +Create Date: 2018-12-10 15:24:30.555995 + +""" + +from alembic import op +from oslo_db.sqlalchemy import types +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'dd67b91a1981' +down_revision = 'f190f9d00a11' + + +def upgrade(): + op.create_table( + 'allocations', + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('version', sa.String(length=15), nullable=True), + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('uuid', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('node_id', sa.Integer(), nullable=True), + sa.Column('state', sa.String(length=15), nullable=False), + sa.Column('last_error', sa.Text(), nullable=True), + sa.Column('resource_class', sa.String(length=80), nullable=True), + sa.Column('traits', types.JsonEncodedList(), nullable=True), + sa.Column('candidate_nodes', types.JsonEncodedList(), nullable=True), + sa.Column('extra', types.JsonEncodedDict(), nullable=True), + sa.Column('conductor_affinity', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['conductor_affinity'], ['conductors.id'], ), + sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('name', name='uniq_allocations0name'), + sa.UniqueConstraint('uuid', name='uniq_allocations0uuid') + ) + op.add_column('nodes', sa.Column('allocation_id', sa.Integer(), + nullable=True)) + op.create_foreign_key(None, 'nodes', 'allocations', + ['allocation_id'], ['id']) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/f190f9d00a11_add_node_owner.py ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/f190f9d00a11_add_node_owner.py --- ironic-12.0.0/ironic/db/sqlalchemy/alembic/versions/f190f9d00a11_add_node_owner.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/alembic/versions/f190f9d00a11_add_node_owner.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""add_node_owner + +Revision ID: f190f9d00a11 +Revises: 93706939026c +Create Date: 2018-11-12 00:33:58.575100 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = 'f190f9d00a11' +down_revision = '93706939026c' + + +def upgrade(): + op.add_column('nodes', sa.Column('owner', sa.String(255), + nullable=True)) diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/api.py ironic-12.1.0/ironic/db/sqlalchemy/api.py --- ironic-12.0.0/ironic/db/sqlalchemy/api.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/api.py 2019-03-21 20:07:40.000000000 +0000 @@ -16,6 +16,7 @@ import collections import datetime +import json import threading from oslo_db import api as oslo_db_api @@ -38,6 +39,7 @@ from ironic.common import profiler from ironic.common import release_mappings from ironic.common import states +from ironic.common import utils from ironic.conf import CONF from ironic.db import api from ironic.db.sqlalchemy import models @@ -85,6 +87,14 @@ .options(joinedload('traits'))) +def _get_deploy_template_query_with_steps(): + """Return a query object for the DeployTemplate joined with steps. + + :returns: a query object. + """ + return model_query(models.DeployTemplate).options(joinedload('steps')) + + def model_query(model, *args, **kwargs): """Query helper for simpler session usage. @@ -182,6 +192,26 @@ return query.filter(models.Chassis.uuid == value) +def add_allocation_filter_by_node(query, value): + if strutils.is_int_like(value): + return query.filter_by(node_id=value) + else: + query = query.join(models.Node, + models.Allocation.node_id == models.Node.id) + return query.filter(models.Node.uuid == value) + + +def add_allocation_filter_by_conductor(query, value): + if strutils.is_int_like(value): + return query.filter_by(conductor_affinity=value) + else: + # Assume hostname and join with the conductor table + query = query.join( + models.Conductor, + models.Allocation.conductor_affinity == models.Conductor.id) + return query.filter(models.Conductor.hostname == value) + + def _paginate_query(model, limit=None, marker=None, sort_key=None, sort_dir=None, query=None): if not query: @@ -209,6 +239,42 @@ return query +def _zip_matching(a, b, key): + """Zip two unsorted lists, yielding matching items or None. + + Each zipped item is a tuple taking one of three forms: + + (a[i], b[j]) if a[i] and b[j] are equal. + (a[i], None) if a[i] is less than b[j] or b is empty. + (None, b[j]) if a[i] is greater than b[j] or a is empty. + + Note that the returned list may be longer than either of the two + lists. + + Adapted from https://stackoverflow.com/a/11426702. + + :param a: the first list. + :param b: the second list. + :param key: a function that generates a key used to compare items. + """ + a = collections.deque(sorted(a, key=key)) + b = collections.deque(sorted(b, key=key)) + while a and b: + k_a = key(a[0]) + k_b = key(b[0]) + if k_a == k_b: + yield a.popleft(), b.popleft() + elif k_a < k_b: + yield a.popleft(), None + else: + yield None, b.popleft() + # Consume any remaining items in each deque. 
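+    # NOTE(reviewer): illustrative example, not part of the upstream
+    # change: zipping a=[1, 3] and b=[2, 3] with key=lambda x: x yields
+    # (1, None), (None, 2), (3, 3); matching items pair up, and misses
+    # are padded with None on the side that lacks the item.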
+ for i in a: + yield i, None + for i in b: + yield None, i + + @profiler.trace_cls("db_api") class Connection(api.Connection): """SqlAlchemy connection.""" @@ -216,7 +282,7 @@ def __init__(self): pass - def _add_nodes_filters(self, query, filters): + def _validate_nodes_filters(self, filters): if filters is None: filters = dict() supported_filters = {'console_enabled', 'maintenance', 'driver', @@ -224,15 +290,20 @@ 'chassis_uuid', 'associated', 'reserved', 'reserved_by_any_of', 'provisioned_before', 'inspection_started_before', 'fault', - 'conductor_group'} + 'conductor_group', 'owner', 'uuid_in', + 'with_power_state', 'description_contains'} unsupported_filters = set(filters).difference(supported_filters) if unsupported_filters: msg = _("SqlAlchemy API does not support " "filtering by %s") % ', '.join(unsupported_filters) raise ValueError(msg) + return filters + + def _add_nodes_filters(self, query, filters): + filters = self._validate_nodes_filters(filters) for field in ['console_enabled', 'maintenance', 'driver', 'resource_class', 'provision_state', 'uuid', 'id', - 'fault', 'conductor_group']: + 'fault', 'conductor_group', 'owner']: if field in filters: query = query.filter_by(**{field: filters[field]}) if 'chassis_uuid' in filters: @@ -263,9 +334,50 @@ - (datetime.timedelta( seconds=filters['inspection_started_before']))) query = query.filter(models.Node.inspection_started_at < limit) + if 'uuid_in' in filters: + query = query.filter(models.Node.uuid.in_(filters['uuid_in'])) + if 'with_power_state' in filters: + if filters['with_power_state']: + query = query.filter(models.Node.power_state != sql.null()) + else: + query = query.filter(models.Node.power_state == sql.null()) + if 'description_contains' in filters: + keyword = filters['description_contains'] + if keyword is not None: + query = query.filter( + models.Node.description.like(r'%{}%'.format(keyword))) return query + def _add_allocations_filters(self, query, filters): + if filters is None: + filters = dict() + supported_filters = {'state', 'resource_class', 'node_uuid', + 'conductor_affinity'} + unsupported_filters = set(filters).difference(supported_filters) + if unsupported_filters: + msg = _("SqlAlchemy API does not support " + "filtering by %s") % ', '.join(unsupported_filters) + raise ValueError(msg) + + try: + node_uuid = filters.pop('node_uuid') + except KeyError: + pass + else: + query = add_allocation_filter_by_node(query, node_uuid) + + try: + conductor = filters.pop('conductor_affinity') + except KeyError: + pass + else: + query = add_allocation_filter_by_conductor(query, conductor) + + if filters: + query = query.filter_by(**filters) + return query + def get_nodeinfo_list(self, columns=None, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): # list-ify columns default values because it is bad form @@ -287,6 +399,39 @@ return _paginate_query(models.Node, limit, marker, sort_key, sort_dir, query) + def check_node_list(self, idents): + mapping = {} + if idents: + idents = set(idents) + else: + return mapping + + uuids = {i for i in idents if uuidutils.is_uuid_like(i)} + names = {i for i in idents if not uuidutils.is_uuid_like(i) + and utils.is_valid_logical_name(i)} + missing = idents - set(uuids) - set(names) + if missing: + # Such nodes cannot exist, bailing out early + raise exception.NodeNotFound( + _("Nodes cannot be found: %s") % ', '.join(missing)) + + query = model_query(models.Node.uuid, models.Node.name).filter( + sql.or_(models.Node.uuid.in_(uuids), + models.Node.name.in_(names)) + ) + 
for row in query: + if row[0] in idents: + mapping[row[0]] = row[0] + if row[1] and row[1] in idents: + mapping[row[1]] = row[0] + + missing = idents - set(mapping) + if missing: + raise exception.NodeNotFound( + _("Nodes cannot be found: %s") % ', '.join(missing)) + + return mapping + @oslo_db_api.retry_on_deadlock def reserve_node(self, tag, node_id): with _session_for_write(): @@ -304,7 +449,7 @@ host=node['reservation']) return node except NoResultFound: - raise exception.NodeNotFound(node_id) + raise exception.NodeNotFound(node=node_id) @oslo_db_api.retry_on_deadlock def release_node(self, tag, node_id): @@ -323,7 +468,7 @@ raise exception.NodeLocked(node=node.uuid, host=node['reservation']) except NoResultFound: - raise exception.NodeNotFound(node_id) + raise exception.NodeNotFound(node=node_id) @oslo_db_api.retry_on_deadlock def create_node(self, values): @@ -452,6 +597,11 @@ models.BIOSSetting).filter_by(node_id=node_id) bios_settings_query.delete() + # delete all allocations for this node + allocation_query = model_query( + models.Allocation).filter_by(node_id=node_id) + allocation_query.delete() + query.delete() def update_node(self, node_id, values): @@ -878,13 +1028,18 @@ d2c[key].add(cdr_row['hostname']) return d2c - def get_offline_conductors(self): + def get_offline_conductors(self, field='hostname'): + field = getattr(models.Conductor, field) interval = CONF.conductor.heartbeat_timeout limit = timeutils.utcnow() - datetime.timedelta(seconds=interval) - result = (model_query(models.Conductor).filter_by() - .filter(models.Conductor.updated_at < limit) - .all()) - return [row['hostname'] for row in result] + result = (model_query(field) + .filter(models.Conductor.updated_at < limit)) + return [row[0] for row in result] + + def get_online_conductors(self): + query = model_query(models.Conductor.hostname) + query = _filter_active_conductors(query) + return [row[0] for row in query] def list_conductor_hardware_interfaces(self, conductor_id): query = (model_query(models.ConductorHardwareInterfaces) @@ -935,7 +1090,7 @@ query = add_identity_filter(query, node_id) count = query.update({'provision_updated_at': timeutils.utcnow()}) if count == 0: - raise exception.NodeNotFound(node_id) + raise exception.NodeNotFound(node=node_id) def _check_node_exists(self, node_id): if not model_query(models.Node).filter_by(id=node_id).scalar(): @@ -1198,7 +1353,7 @@ model.version.notin_(versions))) return query.all() - def check_versions(self): + def check_versions(self, ignore_models=()): """Checks the whole database for incompatible objects. This scans all the tables in search of objects that are not supported; @@ -1206,38 +1361,45 @@ `ironic.common.release_mappings.RELEASE_MAPPING`. This includes objects that have null 'version' values. + :param ignore_models: List of model names to skip. :returns: A Boolean. True if all the objects have supported versions; False otherwise. """ object_versions = release_mappings.get_object_versions() for model in models.Base.__subclasses__(): - if model.__name__ in object_versions: - supported_versions = object_versions[model.__name__] - if not supported_versions: - continue - - # NOTE(mgagne): Additional safety check to detect old database - # version which does not have the 'version' columns available. - # This usually means a skip version upgrade is attempted - # from a version earlier than Pike which added - # those columns required for the next check. 
- engine = enginefacade.reader.get_engine() - if not db_utils.column_exists(engine, - model.__tablename__, - model.version.name): - raise exception.DatabaseVersionTooOld() - - # NOTE(rloo): we use model.version, not model, because we - # know that the object has a 'version' column - # but we don't know whether the entire object is - # compatible with its (old) DB representation. - # NOTE(rloo): .notin_ does not handle null: - # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.operators.ColumnOperators.notin_ - query = model_query(model.version).filter( - sql.or_(model.version == sql.null(), - model.version.notin_(supported_versions))) - if query.count(): - return False + if model.__name__ not in object_versions: + continue + + if model.__name__ in ignore_models: + continue + + supported_versions = object_versions[model.__name__] + if not supported_versions: + continue + + # NOTE(mgagne): Additional safety check to detect old database + # version which does not have the 'version' columns available. + # This usually means a skip version upgrade is attempted + # from a version earlier than Pike which added + # those columns required for the next check. + engine = enginefacade.reader.get_engine() + if not db_utils.column_exists(engine, + model.__tablename__, + model.version.name): + raise exception.DatabaseVersionTooOld() + + # NOTE(rloo): we use model.version, not model, because we + # know that the object has a 'version' column + # but we don't know whether the entire object is + # compatible with its (old) DB representation. + # NOTE(rloo): .notin_ does not handle null: + # http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.operators.ColumnOperators.notin_ + query = model_query(model.version).filter( + sql.or_(model.version == sql.null(), + model.version.notin_(supported_versions))) + if query.count(): + return False + return True @oslo_db_api.retry_on_deadlock @@ -1475,3 +1637,360 @@ .filter_by(node_id=node_id) .all()) return result + + def get_allocation_by_id(self, allocation_id): + """Return an allocation representation. + + :param allocation_id: The id of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + query = model_query(models.Allocation).filter_by(id=allocation_id) + try: + return query.one() + except NoResultFound: + raise exception.AllocationNotFound(allocation=allocation_id) + + def get_allocation_by_uuid(self, allocation_uuid): + """Return an allocation representation. + + :param allocation_uuid: The uuid of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + query = model_query(models.Allocation).filter_by(uuid=allocation_uuid) + try: + return query.one() + except NoResultFound: + raise exception.AllocationNotFound(allocation=allocation_uuid) + + def get_allocation_by_name(self, name): + """Return an allocation representation. + + :param name: The logical name of an allocation. + :returns: An allocation. + :raises: AllocationNotFound + """ + query = model_query(models.Allocation).filter_by(name=name) + try: + return query.one() + except NoResultFound: + raise exception.AllocationNotFound(allocation=name) + + def get_allocation_list(self, filters=None, limit=None, marker=None, + sort_key=None, sort_dir=None): + """Return a list of allocations. + + :param filters: Filters to apply. Defaults to None. + + :node_uuid: uuid of node + :state: allocation state + :resource_class: requested resource class + :param limit: Maximum number of allocations to return. 
+ :param marker: The last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: Direction in which results should be sorted. + (asc, desc) + :returns: A list of allocations. + """ + query = self._add_allocations_filters(model_query(models.Allocation), + filters) + return _paginate_query(models.Allocation, limit, marker, + sort_key, sort_dir, query) + + @oslo_db_api.retry_on_deadlock + def create_allocation(self, values): + """Create a new allocation. + + :param values: Dict of values to create an allocation with + :returns: An allocation + :raises: AllocationDuplicateName + :raises: AllocationAlreadyExists + """ + if not values.get('uuid'): + values['uuid'] = uuidutils.generate_uuid() + if not values.get('state'): + values['state'] = states.ALLOCATING + + allocation = models.Allocation() + allocation.update(values) + with _session_for_write() as session: + try: + session.add(allocation) + session.flush() + except db_exc.DBDuplicateEntry as exc: + if 'name' in exc.columns: + raise exception.AllocationDuplicateName( + name=values['name']) + else: + raise exception.AllocationAlreadyExists( + uuid=values['uuid']) + return allocation + + @oslo_db_api.retry_on_deadlock + def update_allocation(self, allocation_id, values, update_node=True): + """Update properties of an allocation. + + :param allocation_id: Allocation ID + :param values: Dict of values to update. + :param update_node: If True and node_id is updated, update the node + with instance_uuid and traits from the allocation + :returns: An allocation. + :raises: AllocationNotFound + :raises: AllocationDuplicateName + :raises: InstanceAssociated + :raises: NodeAssociated + """ + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing allocation.") + raise exception.InvalidParameterValue(err=msg) + + # These values are used in exception handling. They should always be + # initialized, but set them to None just in case. + instance_uuid = node_uuid = None + + with _session_for_write() as session: + try: + query = model_query(models.Allocation, session=session) + query = add_identity_filter(query, allocation_id) + ref = query.one() + ref.update(values) + instance_uuid = ref.uuid + + if 'node_id' in values and update_node: + node = model_query(models.Node, session=session).filter_by( + id=ref.node_id).with_lockmode('update').one() + node_uuid = node.uuid + if node.instance_uuid and node.instance_uuid != ref.uuid: + raise exception.NodeAssociated( + node=node.uuid, instance=node.instance_uuid) + iinfo = node.instance_info.copy() + iinfo['traits'] = ref.traits or [] + node.update({'allocation_id': ref.id, + 'instance_uuid': instance_uuid, + 'instance_info': iinfo}) + session.flush() + except NoResultFound: + raise exception.AllocationNotFound(allocation=allocation_id) + except db_exc.DBDuplicateEntry as exc: + if 'name' in exc.columns: + raise exception.AllocationDuplicateName( + name=values['name']) + elif 'instance_uuid' in exc.columns: + # Case when the allocation UUID is already used on some + # node as instance_uuid. + raise exception.InstanceAssociated( + instance_uuid=instance_uuid, node=node_uuid) + else: + raise + return ref + + @oslo_db_api.retry_on_deadlock + def take_over_allocation(self, allocation_id, old_conductor_id, + new_conductor_id): + """Do a take over for an allocation. + + The allocation is only updated if the old conductor matches the + provided value, thus guarding against races. 
+ + :param allocation_id: Allocation ID + :param old_conductor_id: The conductor ID we expect to be the current + ``conductor_affinity`` of the allocation. + :param new_conductor_id: The conductor ID of the new + ``conductor_affinity``. + :returns: True if the take over was successful, False otherwise. + :raises: AllocationNotFound + """ + with _session_for_write() as session: + try: + query = model_query(models.Allocation, session=session) + query = add_identity_filter(query, allocation_id) + # NOTE(dtantsur): the FOR UPDATE clause locks the allocation + ref = query.with_for_update().one() + if ref.conductor_affinity != old_conductor_id: + # Race detected, bailing out + return False + + ref.update({'conductor_affinity': new_conductor_id}) + session.flush() + except NoResultFound: + raise exception.AllocationNotFound(allocation=allocation_id) + else: + return True + + @oslo_db_api.retry_on_deadlock + def destroy_allocation(self, allocation_id): + """Destroy an allocation. + + :param allocation_id: Allocation ID or UUID + :raises: AllocationNotFound + """ + with _session_for_write() as session: + query = model_query(models.Allocation) + query = add_identity_filter(query, allocation_id) + + try: + ref = query.one() + except NoResultFound: + raise exception.AllocationNotFound(allocation=allocation_id) + + allocation_id = ref['id'] + + node_query = model_query(models.Node, session=session).filter_by( + allocation_id=allocation_id) + node_query.update({'allocation_id': None, 'instance_uuid': None}) + + query.delete() + + @staticmethod + def _get_deploy_template_steps(steps, deploy_template_id=None): + results = [] + for values in steps: + step = models.DeployTemplateStep() + step.update(values) + if deploy_template_id: + step['deploy_template_id'] = deploy_template_id + results.append(step) + return results + + @oslo_db_api.retry_on_deadlock + def create_deploy_template(self, values): + steps = values.get('steps', []) + values['steps'] = self._get_deploy_template_steps(steps) + + template = models.DeployTemplate() + template.update(values) + with _session_for_write() as session: + try: + session.add(template) + session.flush() + except db_exc.DBDuplicateEntry as e: + if 'name' in e.columns: + raise exception.DeployTemplateDuplicateName( + name=values['name']) + raise exception.DeployTemplateAlreadyExists( + uuid=values['uuid']) + return template + + def _update_deploy_template_steps(self, session, template_id, steps): + """Update the steps for a deploy template. + + :param session: DB session object. + :param template_id: deploy template ID. + :param steps: list of steps that should exist for the deploy template. + """ + + def _step_key(step): + """Compare two deploy template steps.""" + # NOTE(mgoddard): In python 3, dicts are not orderable so cannot be + # used as a sort key. Serialise the step arguments to a JSON string + # for comparison. Taken from https://stackoverflow.com/a/22003440. + sortable_args = json.dumps(step.args, sort_keys=True) + return step.interface, step.step, sortable_args, step.priority + + # List all existing steps for the template. + current_steps = (model_query(models.DeployTemplateStep) + .filter_by(deploy_template_id=template_id)) + + # List the new steps for the template. + new_steps = self._get_deploy_template_steps(steps, template_id) + + # The following is an efficient way to ensure that the steps in the + # database match those that have been requested. We compare the current + # and requested steps in a single pass using the _zip_matching + # function. 
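+        # NOTE(reviewer): illustrative clarification, not part of the
+        # upstream change: per _step_key above, two steps only match when
+        # interface, step name, JSON-serialised args and priority are all
+        # equal; any difference deletes the old row and inserts a new one.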
+ steps_to_create = [] + step_ids_to_delete = [] + for current_step, new_step in _zip_matching(current_steps, new_steps, + _step_key): + if current_step is None: + # No matching current step found for this new step - create. + steps_to_create.append(new_step) + elif new_step is None: + # No matching new step found for this current step - delete. + step_ids_to_delete.append(current_step.id) + # else: steps match, no work required. + + # Delete and create steps in bulk as necessary. + if step_ids_to_delete: + ((model_query(models.DeployTemplateStep) + .filter(models.DeployTemplateStep.id.in_(step_ids_to_delete))) + .delete(synchronize_session=False)) + if steps_to_create: + session.bulk_save_objects(steps_to_create) + + @oslo_db_api.retry_on_deadlock + def update_deploy_template(self, template_id, values): + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing deploy template.") + raise exception.InvalidParameterValue(err=msg) + + try: + with _session_for_write() as session: + # NOTE(mgoddard): Don't issue a joined query for the update as + # this does not work with PostgreSQL. + query = model_query(models.DeployTemplate) + query = add_identity_filter(query, template_id) + try: + ref = query.with_lockmode('update').one() + except NoResultFound: + raise exception.DeployTemplateNotFound( + template=template_id) + + # First, update non-step columns. + steps = values.pop('steps', None) + ref.update(values) + + # If necessary, update steps. + if steps is not None: + self._update_deploy_template_steps(session, ref.id, steps) + + # Return the updated template joined with all relevant fields. + query = _get_deploy_template_query_with_steps() + query = add_identity_filter(query, template_id) + return query.one() + except db_exc.DBDuplicateEntry as e: + if 'name' in e.columns: + raise exception.DeployTemplateDuplicateName( + name=values['name']) + raise + + @oslo_db_api.retry_on_deadlock + def destroy_deploy_template(self, template_id): + with _session_for_write(): + model_query(models.DeployTemplateStep).filter_by( + deploy_template_id=template_id).delete() + count = model_query(models.DeployTemplate).filter_by( + id=template_id).delete() + if count == 0: + raise exception.DeployTemplateNotFound(template=template_id) + + def _get_deploy_template(self, field, value): + """Helper method for retrieving a deploy template.""" + query = (_get_deploy_template_query_with_steps() + .filter_by(**{field: value})) + try: + return query.one() + except NoResultFound: + raise exception.DeployTemplateNotFound(template=value) + + def get_deploy_template_by_id(self, template_id): + return self._get_deploy_template('id', template_id) + + def get_deploy_template_by_uuid(self, template_uuid): + return self._get_deploy_template('uuid', template_uuid) + + def get_deploy_template_by_name(self, template_name): + return self._get_deploy_template('name', template_name) + + def get_deploy_template_list(self, limit=None, marker=None, + sort_key=None, sort_dir=None): + query = _get_deploy_template_query_with_steps() + return _paginate_query(models.DeployTemplate, limit, marker, + sort_key, sort_dir, query) + + def get_deploy_template_list_by_names(self, names): + query = (_get_deploy_template_query_with_steps() + .filter(models.DeployTemplate.name.in_(names))) + return query.all() diff -Nru ironic-12.0.0/ironic/db/sqlalchemy/models.py ironic-12.1.0/ironic/db/sqlalchemy/models.py --- ironic-12.0.0/ironic/db/sqlalchemy/models.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/db/sqlalchemy/models.py 
2019-03-21 20:07:40.000000000 +0000 @@ -179,6 +179,10 @@ protected = Column(Boolean, nullable=False, default=False, server_default=false()) protected_reason = Column(Text, nullable=True) + owner = Column(String(255), nullable=True) + allocation_id = Column(Integer, ForeignKey('allocations.id'), + nullable=True) + description = Column(Text, nullable=True) bios_interface = Column(String(255), nullable=True) boot_interface = Column(String(255), nullable=True) @@ -212,6 +216,7 @@ pxe_enabled = Column(Boolean, default=True) internal_info = Column(db_types.JsonEncodedDict) physical_network = Column(String(64), nullable=True) + is_smartnic = Column(Boolean, nullable=True, default=False) class Portgroup(Base): @@ -322,6 +327,69 @@ value = Column(Text, nullable=True) +class Allocation(Base): + """Represents an allocation of a node for deployment.""" + + __tablename__ = 'allocations' + __table_args__ = ( + schema.UniqueConstraint('name', name='uniq_allocations0name'), + schema.UniqueConstraint('uuid', name='uniq_allocations0uuid'), + table_args()) + id = Column(Integer, primary_key=True) + uuid = Column(String(36), nullable=False) + name = Column(String(255), nullable=True) + node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True) + state = Column(String(15), nullable=False) + last_error = Column(Text, nullable=True) + resource_class = Column(String(80), nullable=True) + traits = Column(db_types.JsonEncodedList) + candidate_nodes = Column(db_types.JsonEncodedList) + extra = Column(db_types.JsonEncodedDict) + # The last conductor to handle this allocation (internal field). + conductor_affinity = Column(Integer, ForeignKey('conductors.id'), + nullable=True) + + +class DeployTemplate(Base): + """Represents a deployment template.""" + + __tablename__ = 'deploy_templates' + __table_args__ = ( + schema.UniqueConstraint('uuid', name='uniq_deploytemplates0uuid'), + schema.UniqueConstraint('name', name='uniq_deploytemplates0name'), + table_args()) + id = Column(Integer, primary_key=True) + uuid = Column(String(36)) + name = Column(String(255), nullable=False) + extra = Column(db_types.JsonEncodedDict) + + +class DeployTemplateStep(Base): + """Represents a deployment step in a deployment template.""" + + __tablename__ = 'deploy_template_steps' + __table_args__ = ( + Index('deploy_template_id', 'deploy_template_id'), + Index('deploy_template_steps_interface_idx', 'interface'), + Index('deploy_template_steps_step_idx', 'step'), + table_args()) + id = Column(Integer, primary_key=True) + deploy_template_id = Column(Integer, ForeignKey('deploy_templates.id'), + nullable=False) + interface = Column(String(255), nullable=False) + step = Column(String(255), nullable=False) + args = Column(db_types.JsonEncodedDict, nullable=False) + priority = Column(Integer, nullable=False) + deploy_template = orm.relationship( + "DeployTemplate", + backref='steps', + primaryjoin=( + 'and_(DeployTemplateStep.deploy_template_id == ' + 'DeployTemplate.id)'), + foreign_keys=deploy_template_id + ) + + def get_class(model_name): """Returns the model class with the specified name. 
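
Review note: tying the new models to the DB API methods earlier in this diff, the ``values`` dict accepted by ``create_deploy_template`` would look roughly like the sketch below. This is illustrative only: ``db_api.get_instance()`` is assumed to return a ``Connection`` instance, and the step fields mirror the ``DeployTemplateStep`` columns (``interface``, ``step``, ``args``, ``priority``).

    from oslo_utils import uuidutils

    from ironic.db import api as db_api

    dbapi = db_api.get_instance()
    template = dbapi.create_deploy_template({
        'uuid': uuidutils.generate_uuid(),
        'name': 'CUSTOM_HYPERTHREADING_ON',
        'extra': {},
        # Each entry becomes one DeployTemplateStep row, linked back to
        # the template via deploy_template_id.
        'steps': [{
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {'settings': [{'name': 'LogicalProc',
                                   'value': 'Enabled'}]},
            'priority': 150,
        }],
    })
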
diff -Nru ironic-12.0.0/ironic/drivers/base.py ironic-12.1.0/ironic/drivers/base.py --- ironic-12.0.0/ironic/drivers/base.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/base.py 2019-03-21 20:07:40.000000000 +0000 @@ -213,7 +213,7 @@ 'argsinfo': method._clean_step_argsinfo, 'interface': instance.interface_type} instance.clean_steps.append(step) - elif getattr(method, '_is_deploy_step', False): + if getattr(method, '_is_deploy_step', False): # Create a DeployStep to represent this method step = {'step': method.__name__, 'priority': method._deploy_step_priority, @@ -1004,32 +1004,22 @@ driver=task.node.driver, extension='abort') +def cache_bios_settings(func): + """A decorator to cache bios settings after running the function. + + :param func: Function or method to wrap. + """ + @six.wraps(func) + def wrapped(self, task, *args, **kwargs): + result = func(self, task, *args, **kwargs) + self.cache_bios_settings(task) + return result + return wrapped + + class BIOSInterface(BaseInterface): interface_type = 'bios' - def __new__(cls, *args, **kwargs): - # Wrap the apply_configuration and factory_reset into a decorator - # which call cache_bios_settings() to update the node's BIOS setting - # table after apply_configuration and factory_reset have finished. - - super_new = super(BIOSInterface, cls).__new__ - instance = super_new(cls, *args, **kwargs) - - def wrapper(func): - @six.wraps(func) - def wrapped(task, *args, **kwargs): - result = func(task, *args, **kwargs) - instance.cache_bios_settings(task) - return result - return wrapped - - for n, method in inspect.getmembers(instance, inspect.ismethod): - if n == "apply_configuration": - instance.apply_configuration = wrapper(method) - elif n == "factory_reset": - instance.factory_reset = wrapper(method) - return instance - @abc.abstractmethod def apply_configuration(self, task, settings): """Validate & apply BIOS settings on the given node. @@ -1344,6 +1334,14 @@ """ pass + def need_power_on(self, task): + """Check if ironic node must be powered on before applying network changes + + :param task: A TaskManager instance. + :returns: Boolean. + """ + return False + @six.add_metaclass(abc.ABCMeta) class StorageInterface(BaseInterface): @@ -1440,7 +1438,7 @@ For automated cleaning, only steps with priorities greater than 0 are used. These steps are ordered by priority from highest value to lowest value. For steps with the same priority, they are ordered by driver - interface priority (see conductor.manager.CLEANING_INTERFACE_PRIORITY). + interface priority (see conductor.steps.CLEANING_INTERFACE_PRIORITY). execute_clean_step() will be called on each step. For manual cleaning, the clean steps will be executed in a similar fashion @@ -1516,7 +1514,7 @@ Only steps with priorities greater than 0 are used. These steps are ordered by priority from highest value to lowest value. For steps with the same priority, they are ordered by driver - interface priority (see conductor.manager.DEPLOYING_INTERFACE_PRIORITY). + interface priority (see conductor.steps.DEPLOYING_INTERFACE_PRIORITY). execute_deploy_step() will be called on each step. 
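    As a sketch, a decorated deploy step on an interface looks like:

        @deploy_step(priority=80)
        def write_image(self, task):
            pass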
Decorated deploy steps must take as the only positional argument, a diff -Nru ironic-12.0.0/ironic/drivers/cisco_ucs.py ironic-12.1.0/ironic/drivers/cisco_ucs.py --- ironic-12.0.0/ironic/drivers/cisco_ucs.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/cisco_ucs.py 2019-03-21 20:07:40.000000000 +0000 @@ -24,6 +24,10 @@ class CiscoUCSStandalone(ipmi.IPMIHardware): """Cisco UCS in standalone mode""" + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. + supported = False + @property def supported_management_interfaces(self): """List of supported management interfaces.""" @@ -40,6 +44,10 @@ class CiscoUCSManaged(CiscoUCSStandalone): """Cisco UCS under UCSM management""" + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. + supported = False + @property def supported_management_interfaces(self): """List of supported management interfaces.""" diff -Nru ironic-12.0.0/ironic/drivers/ibmc.py ironic-12.1.0/ironic/drivers/ibmc.py --- ironic-12.0.0/ironic/drivers/ibmc.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/ibmc.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,40 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iBMC Driver for managing HUAWEI Huawei 2288H V5, CH121 V5 series servers. +""" + +from ironic.drivers import generic +from ironic.drivers.modules.ibmc import management as ibmc_mgmt +from ironic.drivers.modules.ibmc import power as ibmc_power +from ironic.drivers.modules.ibmc import vendor as ibmc_vendor +from ironic.drivers.modules import noop + + +class IBMCHardware(generic.GenericHardware): + """Huawei iBMC hardware type.""" + + @property + def supported_management_interfaces(self): + """List of supported management interfaces.""" + return [ibmc_mgmt.IBMCManagement] + + @property + def supported_power_interfaces(self): + """List of supported power interfaces.""" + return [ibmc_power.IBMCPower] + + @property + def supported_vendor_interfaces(self): + """List of supported vendor interfaces.""" + return [ibmc_vendor.IBMCVendor, noop.NoVendor] diff -Nru ironic-12.0.0/ironic/drivers/ilo.py ironic-12.1.0/ironic/drivers/ilo.py --- ironic-12.0.0/ironic/drivers/ilo.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/ilo.py 2019-03-21 20:07:40.000000000 +0000 @@ -22,6 +22,7 @@ from ironic.drivers.modules.ilo import inspect from ironic.drivers.modules.ilo import management from ironic.drivers.modules.ilo import power +from ironic.drivers.modules.ilo import raid from ironic.drivers.modules.ilo import vendor from ironic.drivers.modules import inspector from ironic.drivers.modules import noop @@ -69,3 +70,15 @@ def supported_vendor_interfaces(self): """List of supported power interfaces.""" return [vendor.VendorPassthru, noop.NoVendor] + + +class Ilo5Hardware(IloHardware): + """iLO5 hardware type. 
+ + iLO5 hardware type is targeted for iLO5 based Proliant Gen10 servers. + """ + + @property + def supported_raid_interfaces(self): + """List of supported raid interfaces.""" + return [raid.Ilo5RAID, noop.NoRAID] diff -Nru ironic-12.0.0/ironic/drivers/modules/agent_base_vendor.py ironic-12.1.0/ironic/drivers/modules/agent_base_vendor.py --- ironic-12.0.0/ironic/drivers/modules/agent_base_vendor.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/agent_base_vendor.py 2019-03-21 20:07:40.000000000 +0000 @@ -28,6 +28,7 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states +from ironic.conductor import steps as conductor_steps from ironic.conductor import utils as manager_utils from ironic.conf import CONF from ironic.drivers.modules import agent_client @@ -66,6 +67,19 @@ 'older deploy ramdisks. Defaults to False. Optional.') } +__HEARTBEAT_RECORD_ONLY = (states.ENROLL, states.MANAGEABLE, + states.AVAILABLE) +_HEARTBEAT_RECORD_ONLY = frozenset(__HEARTBEAT_RECORD_ONLY) + +_HEARTBEAT_ALLOWED = (states.DEPLOYWAIT, states.CLEANWAIT, states.RESCUEWAIT) +HEARTBEAT_ALLOWED = frozenset(_HEARTBEAT_ALLOWED) + +_FASTTRACK_HEARTBEAT_ALLOWED = (states.DEPLOYWAIT, states.CLEANWAIT, + states.RESCUEWAIT, states.ENROLL, + states.MANAGEABLE, states.AVAILABLE, + states.DEPLOYING) +FASTTRACK_HEARTBEAT_ALLOWED = frozenset(_FASTTRACK_HEARTBEAT_ALLOWED) + def _get_client(): client = agent_client.AgentClient() @@ -260,7 +274,9 @@ @property def heartbeat_allowed_states(self): """Define node states where heartbeating is allowed""" - return (states.DEPLOYWAIT, states.CLEANWAIT, states.RESCUEWAIT) + if CONF.deploy.fast_track: + return FASTTRACK_HEARTBEAT_ALLOWED + return HEARTBEAT_ALLOWED @METRICS.timer('HeartbeatMixin.heartbeat') def heartbeat(self, task, callback_url, agent_version): @@ -271,7 +287,8 @@ :param agent_version: The version of the agent that is heartbeating """ # NOTE(pas-ha) immediately skip the rest if nothing to do - if task.node.provision_state not in self.heartbeat_allowed_states: + if (task.node.provision_state not in self.heartbeat_allowed_states + and not manager_utils.fast_track_able(task)): LOG.debug('Heartbeat from node %(node)s in unsupported ' 'provision state %(state)s, not taking any action.', {'node': task.node.uuid, @@ -288,13 +305,25 @@ node = task.node LOG.debug('Heartbeat from node %s', node.uuid) - driver_internal_info = node.driver_internal_info driver_internal_info['agent_url'] = callback_url driver_internal_info['agent_version'] = agent_version + # Record the last heartbeat event time in UTC, so we can make + # decisions about it later. Can be decoded to datetime object with: + # datetime.datetime.strptime(var, "%Y-%m-%d %H:%M:%S.%f") + driver_internal_info['agent_last_heartbeat'] = str( + timeutils.utcnow().isoformat()) node.driver_internal_info = driver_internal_info node.save() + if node.provision_state in _HEARTBEAT_RECORD_ONLY: + # We shouldn't take any additional action. The agent will + # silently continue to heartbeat to ironic until user initiated + # state change occurs causing it to match a state below. 
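+            # (_HEARTBEAT_RECORD_ONLY covers ENROLL, MANAGEABLE and
+            # AVAILABLE: a fast-track capable ramdisk may be running, but
+            # no provisioning action is in flight. Note that isoformat()
+            # above emits a 'T' separator, so the stored value parses back
+            # with "%Y-%m-%dT%H:%M:%S.%f".)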
+ LOG.debug('Heartbeat from %(node)s recorded to identify the ' + 'node as on-line.', {'node': task.node.uuid}) + return + # Async call backs don't set error state on their own # TODO(jimrollenhagen) improve error messages here msg = _('Failed checking if deploy is done.') @@ -324,7 +353,7 @@ # First, cache the clean steps self.refresh_clean_steps(task) # Then set/verify node clean steps and start cleaning - manager_utils.set_node_cleaning_steps(task) + conductor_steps.set_node_cleaning_steps(task) # The exceptions from RPC are not possible as we using cast # here manager_utils.notify_conductor_resume_clean(task) @@ -373,7 +402,10 @@ reason=fail_reason) task.process_event('resume') task.driver.rescue.clean_up(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) task.process_event('done') @@ -522,7 +554,7 @@ 'clean version mismatch. Resetting clean steps ' 'and rebooting the node.', node.uuid) try: - manager_utils.set_node_cleaning_steps(task) + conductor_steps.set_node_cleaning_steps(task) except exception.NodeCleaningFailure: msg = (_('Could not restart automated cleaning on node ' '%(node)s: %(err)s.') % @@ -642,9 +674,12 @@ log_and_raise_deployment_error(task, msg, exc=e) try: + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.remove_provisioning_network(task) task.driver.network.configure_tenant_networks(task) - + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) manager_utils.node_power_action(task, states.POWER_ON) except Exception as e: msg = (_('Error rebooting node %(node)s after deploy. ' @@ -735,7 +770,12 @@ log_and_raise_deployment_error(task, msg) try: - deploy_utils.try_set_boot_device(task, boot_devices.DISK) + persistent = True + if node.driver_info.get('force_persistent_boot_device', + 'Default') == 'Never': + persistent = False + deploy_utils.try_set_boot_device(task, boot_devices.DISK, + persistent=persistent) except Exception as e: msg = (_("Failed to change the boot device to %(boot_dev)s " "when deploying node %(node)s. Error: %(error)s") % diff -Nru ironic-12.0.0/ironic/drivers/modules/agent.py ironic-12.1.0/ironic/drivers/modules/agent.py --- ironic-12.0.0/ironic/drivers/modules/agent.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/agent.py 2019-03-21 20:07:40.000000000 +0000 @@ -455,7 +455,14 @@ :param task: a TaskManager instance. :returns: status of the deploy. One of ironic.common.states. """ - if task.driver.storage.should_write_image(task): + if manager_utils.is_fast_track(task): + LOG.debug('Performing a fast track deployment for %(node)s.', + {'node': task.node.uuid}) + # Update the database for the API and the task tracking resumes + # the state machine state going from DEPLOYWAIT -> DEPLOYING + task.process_event('wait') + self.continue_deploy(task) + elif task.driver.storage.should_write_image(task): manager_utils.node_power_action(task, states.REBOOT) return states.DEPLOYWAIT else: @@ -464,8 +471,12 @@ # This is not being done now as it is expected to be # refactored in the near future. 
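            # The power_on_node_if_needed/restore_power_state_if_needed
            # pair added below exists for ports flagged with the new
            # is_smartnic column: such ports can only be rebound while the
            # node is powered on (see NetworkInterface.need_power_on), so
            # the helpers bring the node up first and then restore the
            # previous power state; for ordinary ports they are a no-op.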
manager_utils.node_power_action(task, states.POWER_OFF) + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.remove_provisioning_network(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) task.driver.boot.prepare_instance(task) manager_utils.node_power_action(task, states.POWER_ON) LOG.info('Deployment to node %s done', task.node.uuid) @@ -489,11 +500,13 @@ manager_utils.node_power_action(task, states.POWER_OFF) task.driver.storage.detach_volumes(task) deploy_utils.tear_down_storage_configuration(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.unconfigure_tenant_networks(task) # NOTE(mgoddard): If the deployment was unsuccessful the node may have # ports on the provisioning network which were not deleted. task.driver.network.remove_provisioning_network(task) - + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) return states.DELETED @METRICS.timer('AgentDeploy.prepare') @@ -515,6 +528,12 @@ :raises: exception.InvalidParameterValue if network validation fails. :raises: any boot interface's prepare_ramdisk exceptions. """ + + def _update_instance_info(): + node.instance_info = ( + deploy_utils.build_instance_info_for_deploy(task)) + node.save() + node = task.node deploy_utils.populate_storage_driver_internal_info(task) if node.provision_state == states.DEPLOYING: @@ -541,21 +560,44 @@ task.driver.network.validate(task) else: ctx.reraise = True - - # Adding the node to provisioning network so that the dhcp - # options get added for the provisioning port. - manager_utils.node_power_action(task, states.POWER_OFF) + # Determine if this is a fast track sequence + fast_track_deploy = manager_utils.is_fast_track(task) + if fast_track_deploy: + # The agent has already recently checked in and we are + # configured to take that as an indicator that we can + # skip ahead. + LOG.debug('The agent for node %(node)s has recently checked ' + 'in, and the node power will remain unmodified.', + {'node': task.node.uuid}) + else: + # Powering off node to setup networking for port and + # ensure that the state is reset if it is inadvertently + # on for any unknown reason. + manager_utils.node_power_action(task, states.POWER_OFF) if task.driver.storage.should_write_image(task): # NOTE(vdrok): in case of rebuild, we have tenant network # already configured, unbind tenant ports if present + if not fast_track_deploy: + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) + task.driver.network.unconfigure_tenant_networks(task) task.driver.network.add_provisioning_network(task) + if not fast_track_deploy: + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) + else: + # Fast track sequence in progress + _update_instance_info() # Signal to storage driver to attach volumes task.driver.storage.attach_volumes(task) - if not task.driver.storage.should_write_image(task): + if (not task.driver.storage.should_write_image(task) + or fast_track_deploy): # We have nothing else to do as this is handled in the # backend storage system, and we can return to the caller # as we do not need to boot the agent to deploy. + # Alternatively, we could be in a fast track deployment + # and again, we should have nothing to do here. 
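+                # (Fast track is driven by [deploy]fast_track in the
+                # configuration plus a sufficiently recent agent
+                # heartbeat; see manager_utils.is_fast_track earlier in
+                # this method.)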
return if node.provision_state in (states.ACTIVE, states.UNRESCUING): # Call is due to conductor takeover @@ -563,9 +605,7 @@ elif node.provision_state != states.ADOPTING: if node.provision_state not in (states.RESCUING, states.RESCUEWAIT, states.RESCUE, states.RESCUEFAIL): - node.instance_info = ( - deploy_utils.build_instance_info_for_deploy(task)) - node.save() + _update_instance_info() if CONF.agent.manage_agent_boot: deploy_opts = deploy_utils.build_agent_options(node) task.driver.boot.prepare_ramdisk(task, deploy_opts) @@ -698,32 +738,10 @@ 'create_nonroot_volumes': create_nonroot_volumes, 'target_raid_config': node.target_raid_config}) - if not node.target_raid_config: - raise exception.MissingParameterValue( - _("Node %s has no target RAID configuration.") % node.uuid) - - target_raid_config = node.target_raid_config.copy() - - error_msg_list = [] - if not create_root_volume: - target_raid_config['logical_disks'] = [ - x for x in target_raid_config['logical_disks'] - if not x.get('is_root_volume')] - error_msg_list.append(_("skipping root volume")) - - if not create_nonroot_volumes: - error_msg_list.append(_("skipping non-root volumes")) - - target_raid_config['logical_disks'] = [ - x for x in target_raid_config['logical_disks'] - if x.get('is_root_volume')] - - if not target_raid_config['logical_disks']: - error_msg = _(' and ').join(error_msg_list) - raise exception.MissingParameterValue( - _("Node %(node)s has empty target RAID configuration " - "after %(msg)s.") % {'node': node.uuid, 'msg': error_msg}) - + target_raid_config = raid.filter_target_raid_config( + node, + create_root_volume=create_root_volume, + create_nonroot_volumes=create_nonroot_volumes) # Rewrite it back to the node object, but no need to save it as # we need to just send this to the agent ramdisk. 
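            # For example, create_root_volume=False keeps only logical
            # disks without is_root_volume set, create_nonroot_volumes=False
            # keeps only the root volume, and the helper raises
            # MissingParameterValue when the resulting list is empty; these
            # are the same checks the removed inline code performed.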
node.driver_internal_info['target_raid_config'] = target_raid_config @@ -828,8 +846,11 @@ task.node.save() task.driver.boot.clean_up_instance(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.unconfigure_tenant_networks(task) task.driver.network.add_rescuing_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) if CONF.agent.manage_agent_boot: ramdisk_opts = deploy_utils.build_agent_options(task.node) # prepare_ramdisk will set the boot device @@ -864,7 +885,10 @@ task.node.save() self.clean_up(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) task.driver.boot.prepare_instance(task) manager_utils.node_power_action(task, states.POWER_ON) @@ -916,4 +940,7 @@ manager_utils.remove_node_rescue_password(task.node, save=True) if CONF.agent.manage_agent_boot: task.driver.boot.clean_up_ramdisk(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.remove_rescuing_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) diff -Nru ironic-12.0.0/ironic/drivers/modules/ansible/deploy.py ironic-12.1.0/ironic/drivers/modules/ansible/deploy.py --- ironic-12.0.0/ironic/drivers/modules/ansible/deploy.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ansible/deploy.py 2019-03-21 20:07:40.000000000 +0000 @@ -36,6 +36,7 @@ from ironic.common import images from ironic.common import states from ironic.common import utils +from ironic.conductor import steps as conductor_steps from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.conf import CONF @@ -73,6 +74,8 @@ 'ansible_clean_steps_config': _('Name of the file inside the ' '"ansible_playbooks_path" folder with ' 'cleaning steps configuration. Optional.'), + 'ansible_python_interpreter': _('Absolute path to the python interpreter ' + 'on the managed machines. 
Optional.'), } COMMON_PROPERTIES = OPTIONAL_PROPERTIES @@ -101,6 +104,11 @@ return os.path.basename(playbook), user, key +def _get_python_interpreter(node): + return node.driver_info.get('ansible_python_interpreter', + CONF.ansible.default_python_interpreter) + + def _get_configdrive_path(basename): return os.path.join(CONF.tempdir, basename + '.cndrive') @@ -126,6 +134,9 @@ playbook = os.path.join(root, name) inventory = os.path.join(root, 'inventory') ironic_vars = {'ironic': extra_vars} + python_interpreter = _get_python_interpreter(node) + if python_interpreter: + ironic_vars['ansible_python_interpreter'] = python_interpreter args = [CONF.ansible.ansible_playbook_script, playbook, '-i', inventory, '-e', json.dumps(ironic_vars), @@ -440,7 +451,10 @@ def tear_down(self, task): """Tear down a previous deployment on the task's node.""" manager_utils.node_power_action(task, states.POWER_OFF) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.unconfigure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) return states.DELETED @METRICS.timer('AnsibleDeploy.prepare') @@ -451,7 +465,11 @@ if node.provision_state == states.DEPLOYING: # adding network-driver dependent provisioning ports manager_utils.node_power_action(task, states.POWER_OFF) + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.add_provisioning_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) if node.provision_state not in [states.ACTIVE, states.ADOPTING]: node.instance_info = deploy_utils.build_instance_info_for_deploy( task) @@ -530,11 +548,14 @@ :returns: None or states.CLEANWAIT for async prepare. """ node = task.node - manager_utils.set_node_cleaning_steps(task) + conductor_steps.set_node_cleaning_steps(task) if not node.driver_internal_info['clean_steps']: # no clean steps configured, nothing to do. return + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.add_cleaning_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) boot_opt = deploy_utils.build_agent_options(node) task.driver.boot.prepare_ramdisk(task, boot_opt) manager_utils.node_power_action(task, states.REBOOT) @@ -550,7 +571,10 @@ """ manager_utils.node_power_action(task, states.POWER_OFF) task.driver.boot.clean_up_ramdisk(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.remove_cleaning_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) @METRICS.timer('AnsibleDeploy.continue_deploy') def continue_deploy(self, task): @@ -622,8 +646,12 @@ manager_utils.node_power_action(task, states.POWER_OFF) else: manager_utils.node_power_action(task, states.POWER_OFF) + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.remove_provisioning_network(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) manager_utils.node_power_action(task, states.POWER_ON) except Exception as e: msg = (_('Error rebooting node %(node)s after deploy. 
' diff -Nru ironic-12.0.0/ironic/drivers/modules/cimc/management.py ironic-12.1.0/ironic/drivers/modules/cimc/management.py --- ironic-12.0.0/ironic/drivers/modules/cimc/management.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/cimc/management.py 2019-03-21 20:07:40.000000000 +0000 @@ -40,6 +40,10 @@ class CIMCManagement(base.ManagementInterface): + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. + supported = False + def get_properties(self): """Return the properties of the interface. diff -Nru ironic-12.0.0/ironic/drivers/modules/cimc/power.py ironic-12.1.0/ironic/drivers/modules/cimc/power.py --- ironic-12.0.0/ironic/drivers/modules/cimc/power.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/cimc/power.py 2019-03-21 20:07:40.000000000 +0000 @@ -81,6 +81,10 @@ class Power(base.PowerInterface): + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. + supported = False + def get_properties(self): """Return the properties of the interface. diff -Nru ironic-12.0.0/ironic/drivers/modules/console_utils.py ironic-12.1.0/ironic/drivers/modules/console_utils.py --- ironic-12.0.0/ironic/drivers/modules/console_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/console_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -305,9 +305,11 @@ console_host = CONF.console.socat_address if netutils.is_valid_ipv6(console_host): - arg = 'TCP6-LISTEN:%(port)s,bind=[%(host)s],reuseaddr' + arg = ('TCP6-LISTEN:%(port)s,bind=[%(host)s],reuseaddr,fork,' + 'max-children=1') else: - arg = 'TCP4-LISTEN:%(port)s,bind=%(host)s,reuseaddr' + arg = ('TCP4-LISTEN:%(port)s,bind=%(host)s,reuseaddr,fork,' + 'max-children=1') args.append(arg % {'host': console_host, 'port': port}) diff -Nru ironic-12.0.0/ironic/drivers/modules/deploy_utils.py ironic-12.1.0/ironic/drivers/modules/deploy_utils.py --- ironic-12.0.0/ironic/drivers/modules/deploy_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/deploy_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -735,19 +735,22 @@ """Return the PXE config template file name requested for deploy. This method returns PXE config template file to be used for deploy. - Architecture specific template file is searched first. BIOS/UEFI - template file is used if no valid architecture specific file found. + First specific pxe template is searched in the node. After that + architecture specific template file is searched. BIOS/UEFI template file + is used if no valid architecture specific file found. :param node: A single Node. :returns: The PXE config template file name. 
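    Lookup order: node.driver_info['pxe_template'] first, then
    CONF.pxe.pxe_config_template_by_arch keyed by the node's cpu_arch
    property, then CONF.pxe.uefi_pxe_config_template or
    CONF.pxe.pxe_config_template depending on the boot mode.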
""" - cpu_arch = node.properties.get('cpu_arch') - config_template = CONF.pxe.pxe_config_template_by_arch.get(cpu_arch) + config_template = node.driver_info.get("pxe_template", None) if config_template is None: - if boot_mode_utils.get_boot_mode(node) == 'uefi': - config_template = CONF.pxe.uefi_pxe_config_template - else: - config_template = CONF.pxe.pxe_config_template + cpu_arch = node.properties.get('cpu_arch') + config_template = CONF.pxe.pxe_config_template_by_arch.get(cpu_arch) + if config_template is None: + if boot_mode_utils.get_boot_mode(node) == 'uefi': + config_template = CONF.pxe.uefi_pxe_config_template + else: + config_template = CONF.pxe.pxe_config_template return config_template @@ -826,7 +829,7 @@ raise exception.InvalidParameterValue(_( "Image %s can not be found.") % image_href) except exception.ImageRefValidationFailed as e: - raise exception.InvalidParameterValue(e) + raise exception.InvalidParameterValue(err=e) missing_props = [] for prop in properties: @@ -896,7 +899,22 @@ :raises: InvalidParameterValue if cleaning network UUID config option has an invalid value. """ + fast_track = manager_utils.is_fast_track(task) + if not fast_track: + power_state_to_restore = manager_utils.power_on_node_if_needed(task) + + # WARNING(TheJulia): When fast track is available, trying to plug the + # cleaning network is problematic and in practice this may fail if + # cleaning/provisioning/discovery all take place on different + # networks when.. + # Translation: Here be a realistically unavoidable footgun + # fast track support. + # TODO(TheJulia): Lets improve this somehow such that the agent host + # gracefully handles these sorts of changes. task.driver.network.add_cleaning_network(task) + if not fast_track: + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) # Append required config parameters to node's driver_internal_info # to pass to IPA. @@ -906,7 +924,8 @@ ramdisk_opts = build_agent_options(task.node) task.driver.boot.prepare_ramdisk(task, ramdisk_opts) - manager_utils.node_power_action(task, states.REBOOT) + if not fast_track: + manager_utils.node_power_action(task, states.REBOOT) # Tell the conductor we are waiting for the agent to boot. return states.CLEANWAIT @@ -930,11 +949,18 @@ :raises: NetworkError, NodeCleaningFailure if the cleaning ports cannot be removed. """ - manager_utils.node_power_action(task, states.POWER_OFF) + fast_track = manager_utils.is_fast_track(task) + if not fast_track: + manager_utils.node_power_action(task, states.POWER_OFF) + if manage_boot: task.driver.boot.clean_up_ramdisk(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.remove_cleaning_network(task) + if not fast_track: + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) def get_image_instance_info(node): @@ -969,6 +995,10 @@ return info +_ERR_MSG_INVALID_DEPLOY = _("Cannot validate parameter for driver deploy. " + "Invalid parameter %(param)s. Reason: %(reason)s") + + def parse_instance_info(node): """Gets the instance specific Node deployment info. @@ -994,57 +1024,64 @@ i_info['image_source'])): i_info['kernel'] = info.get('kernel') i_info['ramdisk'] = info.get('ramdisk') - i_info['root_gb'] = info.get('root_gb') + i_info['root_gb'] = info.get('root_gb') error_msg = _("Cannot validate driver deploy. 
Some parameters were missing" " in node's instance_info") check_for_missing_params(i_info, error_msg) - # NOTE(vdrok): We're casting disk layout parameters to int only after - # ensuring that it is possible + # This is used in many places, so keep it even for whole-disk images. + # There is also a potential use case of creating an ephemeral partition via + # cloud-init and telling ironic to avoid metadata wipe via setting + # preserve_ephemeral (not saying it will work, but it seems possible). + preserve_ephemeral = info.get('preserve_ephemeral', False) + try: + i_info['preserve_ephemeral'] = ( + strutils.bool_from_string(preserve_ephemeral, strict=True)) + except ValueError as e: + raise exception.InvalidParameterValue( + _ERR_MSG_INVALID_DEPLOY % {'param': 'preserve_ephemeral', + 'reason': e}) + + if iwdi: + if i_info.get('swap_mb') or i_info.get('ephemeral_mb'): + err_msg_invalid = _("Cannot deploy whole disk image with " + "swap or ephemeral size set") + raise exception.InvalidParameterValue(err_msg_invalid) + else: + _validate_layout_properties(node, info, i_info) + + i_info['configdrive'] = info.get('configdrive') + + return i_info + + +def _validate_layout_properties(node, info, i_info): i_info['swap_mb'] = info.get('swap_mb', 0) i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0) - err_msg_invalid = _("Cannot validate parameter for driver deploy. " - "Invalid parameter %(param)s. Reason: %(reason)s") + # NOTE(vdrok): We're casting disk layout parameters to int only after + # ensuring that it is possible for param in DISK_LAYOUT_PARAMS: try: int(i_info[param]) except ValueError: reason = _("%s is not an integer value.") % i_info[param] - raise exception.InvalidParameterValue(err_msg_invalid % + raise exception.InvalidParameterValue(_ERR_MSG_INVALID_DEPLOY % {'param': param, 'reason': reason}) i_info['root_mb'] = 1024 * int(i_info['root_gb']) i_info['swap_mb'] = int(i_info['swap_mb']) i_info['ephemeral_mb'] = 1024 * int(i_info['ephemeral_gb']) - - if iwdi: - if i_info['swap_mb'] > 0 or i_info['ephemeral_mb'] > 0: - err_msg_invalid = _("Cannot deploy whole disk image with " - "swap or ephemeral size set") - raise exception.InvalidParameterValue(err_msg_invalid) i_info['ephemeral_format'] = info.get('ephemeral_format') - i_info['configdrive'] = info.get('configdrive') - if i_info['ephemeral_gb'] and not i_info['ephemeral_format']: i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format - preserve_ephemeral = info.get('preserve_ephemeral', False) - try: - i_info['preserve_ephemeral'] = ( - strutils.bool_from_string(preserve_ephemeral, strict=True)) - except ValueError as e: - raise exception.InvalidParameterValue( - err_msg_invalid % {'param': 'preserve_ephemeral', 'reason': e}) - # NOTE(Zhenguo): If rebuilding with preserve_ephemeral option, check # that the disk layout is unchanged. if i_info['preserve_ephemeral']: _check_disk_layout_unchanged(node, i_info) - return i_info - def _check_disk_layout_unchanged(node, i_info): """Check whether disk layout is unchanged. diff -Nru ironic-12.0.0/ironic/drivers/modules/drac/common.py ironic-12.1.0/ironic/drivers/modules/drac/common.py --- ironic-12.0.0/ironic/drivers/modules/drac/common.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/drac/common.py 2019-03-21 20:07:40.000000000 +0000 @@ -39,13 +39,9 @@ 'drac_protocol': _('protocol used for WS-Man endpoint; one of http, https;' ' default is "https". Optional.'), } -DEPRECATED_PROPERTIES = { - 'drac_host': _('IP address or hostname of the DRAC card. 
DEPRECATED, ' - 'PLEASE USE "drac_address" INSTEAD.'), -} + COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy() COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES) -COMMON_PROPERTIES.update(DEPRECATED_PROPERTIES) def parse_driver_info(node): @@ -63,21 +59,6 @@ driver_info = node.driver_info parsed_driver_info = {} - if 'drac_host' in driver_info and 'drac_address' not in driver_info: - LOG.warning('The driver_info["drac_host"] property is deprecated ' - 'and will be removed in the Pike release. Please ' - 'update the node %s driver_info field to use ' - '"drac_address" instead', node.uuid) - address = driver_info.pop('drac_host', None) - if address: - driver_info['drac_address'] = address - elif 'drac_host' in driver_info and 'drac_address' in driver_info: - LOG.warning('Both driver_info["drac_address"] and ' - 'driver_info["drac_host"] properties are ' - 'specified for node %s. Please remove the ' - '"drac_host" property from the node. Ignoring ' - '"drac_host" for now', node.uuid) - error_msgs = [] for param in REQUIRED_PROPERTIES: try: diff -Nru ironic-12.0.0/ironic/drivers/modules/drac/inspect.py ironic-12.1.0/ironic/drivers/modules/drac/inspect.py --- ironic-12.0.0/ironic/drivers/modules/drac/inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/drac/inspect.py 2019-03-21 20:07:40.000000000 +0000 @@ -23,6 +23,7 @@ from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states +from ironic.common import utils from ironic.drivers import base from ironic.drivers.modules.drac import common as drac_common from ironic import objects @@ -84,6 +85,14 @@ [self._calculate_cpus(cpu) for cpu in cpus]) properties['cpu_arch'] = 'x86_64' if cpus[0].arch64 else 'x86' + bios_settings = client.list_bios_settings() + current_capabilities = node.properties.get('capabilities', '') + new_capabilities = { + 'boot_mode': bios_settings["BootMode"].current_value.lower()} + capabilties = utils.get_updated_capabilities(current_capabilities, + new_capabilities) + properties['capabilities'] = capabilties + virtual_disks = client.list_virtual_disks() root_disk = self._guess_root_disk(virtual_disks) if root_disk: diff -Nru ironic-12.0.0/ironic/drivers/modules/drac/raid.py ironic-12.1.0/ironic/drivers/modules/drac/raid.py --- ironic-12.0.0/ironic/drivers/modules/drac/raid.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/drac/raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -841,9 +841,9 @@ for config_job_id in raid_config_job_ids: config_job = drac_job.get_job(node, job_id=config_job_id) - if config_job.state == 'Completed': + if config_job.status == 'Completed': finished_job_ids.append(config_job_id) - elif config_job.state == 'Failed': + elif config_job.status == 'Failed': finished_job_ids.append(config_job_id) self._set_raid_config_job_failure(node) diff -Nru ironic-12.0.0/ironic/drivers/modules/ibmc/management.py ironic-12.1.0/ironic/drivers/modules/ibmc/management.py --- ironic-12.0.0/ironic/drivers/modules/ibmc/management.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ibmc/management.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,237 @@ +# Copyright 2019 HUAWEI, Inc. All Rights Reserved. +# Copyright 2017 Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iBMC Management Interface +""" + +from oslo_log import log +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.conductor import task_manager +from ironic.drivers import base +from ironic.drivers.modules.ibmc import mappings +from ironic.drivers.modules.ibmc import utils + +constants = importutils.try_import('ibmc_client.constants') +ibmc_client = importutils.try_import('ibmc_client') + +LOG = log.getLogger(__name__) + + +class IBMCManagement(base.ManagementInterface): + + def __init__(self): + """Initialize the iBMC management interface + + :raises: DriverLoadError if the driver can't be loaded due to + missing dependencies + """ + super(IBMCManagement, self).__init__() + if not ibmc_client: + raise exception.DriverLoadError( + driver='ibmc', + reason=_('Unable to import the python-ibmcclient library')) + + def get_properties(self): + """Return the properties of the interface. + + :returns: dictionary of : entries. + """ + return utils.COMMON_PROPERTIES.copy() + + def validate(self, task): + """Validates the driver information needed by the iBMC driver. + + :param task: A TaskManager instance containing the node to act on. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + """ + utils.parse_driver_info(task.node) + + @utils.handle_ibmc_exception('get iBMC supported boot devices') + def get_supported_boot_devices(self, task): + """Get a list of the supported boot devices. + + :param task: a task from TaskManager. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + :returns: A list with the supported boot devices defined + in :mod:`ironic.common.boot_devices`. + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + boot_source_override = system.boot_source_override + return list(map(mappings.GET_BOOT_DEVICE_MAP.get, + boot_source_override.supported_boot_devices)) + + @task_manager.require_exclusive_lock + @utils.handle_ibmc_exception('set iBMC boot device') + def set_boot_device(self, task, device, persistent=False): + """Set the boot device for a node. + + :param task: A task from TaskManager. + :param device: The boot device, one of + :mod:`ironic.common.boot_device`. + :param persistent: Boolean value. True if the boot device will + persist to all future boots, False if not. + Default: False. 
+ :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + boot_device = mappings.SET_BOOT_DEVICE_MAP[device] + enabled = mappings.SET_BOOT_DEVICE_PERSISTENT_MAP[persistent] + conn.system.set_boot_source(boot_device, enabled=enabled) + + @utils.handle_ibmc_exception('get iBMC boot device') + def get_boot_device(self, task): + """Get the current boot device for a node. + + :param task: A task from TaskManager. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + :returns: a dictionary containing: + + :boot_device: + the boot device, one of :mod:`ironic.common.boot_devices` or + None if it is unknown. + :persistent: + Boolean value or None, True if the boot device persists, + False otherwise. None if it's disabled. + + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + boot_source_override = system.boot_source_override + boot_device = boot_source_override.target + enabled = boot_source_override.enabled + return { + 'boot_device': mappings.GET_BOOT_DEVICE_MAP.get(boot_device), + 'persistent': + mappings.GET_BOOT_DEVICE_PERSISTENT_MAP.get(enabled) + } + + def get_supported_boot_modes(self, task): + """Get a list of the supported boot modes. + + :param task: A task from TaskManager. + :returns: A list with the supported boot modes defined + in :mod:`ironic.common.boot_modes`. If boot + mode support can't be determined, empty list + is returned. + """ + return list(mappings.SET_BOOT_MODE_MAP) + + @task_manager.require_exclusive_lock + @utils.handle_ibmc_exception('set iBMC boot mode') + def set_boot_mode(self, task, mode): + """Set the boot mode for a node. + + Set the boot mode to use on next reboot of the node. + + :param task: A task from TaskManager. + :param mode: The boot mode, one of + :mod:`ironic.common.boot_modes`. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + boot_source_override = system.boot_source_override + boot_device = boot_source_override.target + boot_override = boot_source_override.enabled + + # Copied from redfish driver + # TODO(Qianbiao.NG) what if boot device is "NONE"? + if not boot_device: + error_msg = (_('Cannot change boot mode on node %(node)s ' + 'because its boot device is not set.') % + {'node': task.node.uuid}) + LOG.error(error_msg) + raise exception.IBMCError(error_msg) + + # TODO(Qianbiao.NG) what if boot override is "disabled"? 
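+            # (set_boot_source needs a target device, so the current
+            # device and override setting are re-sent further below
+            # together with the new mode.)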
+ if not boot_override: + i18n = _('Cannot change boot mode on node %(node)s ' + 'because its boot source override is not set.') + error_msg = i18n % {'node': task.node.uuid} + LOG.error(error_msg) + raise exception.IBMCError(error_msg) + + boot_mode = mappings.SET_BOOT_MODE_MAP[mode] + conn.system.set_boot_source(boot_device, + enabled=boot_override, + mode=boot_mode) + + @utils.handle_ibmc_exception('get iBMC boot mode') + def get_boot_mode(self, task): + """Get the current boot mode for a node. + + Provides the current boot mode of the node. + + :param task: A task from TaskManager. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + :returns: The boot mode, one of :mod:`ironic.common.boot_mode` or + None if it is unknown. + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + boot_source_override = system.boot_source_override + boot_mode = boot_source_override.mode + return mappings.GET_BOOT_MODE_MAP.get(boot_mode) + + def get_sensors_data(self, task): + """Get sensors data. + + Not implemented for this driver. + + :raises: NotImplementedError + """ + raise NotImplementedError() + + @task_manager.require_exclusive_lock + @utils.handle_ibmc_exception('inject iBMC NMI') + def inject_nmi(self, task): + """Inject NMI, Non Maskable Interrupt. + + Inject NMI (Non Maskable Interrupt) for a node immediately. + + :param task: A TaskManager instance containing the node to act on. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + conn.system.reset(constants.RESET_NMI) diff -Nru ironic-12.0.0/ironic/drivers/modules/ibmc/mappings.py ironic-12.1.0/ironic/drivers/modules/ibmc/mappings.py --- ironic-12.0.0/ironic/drivers/modules/ibmc/mappings.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ibmc/mappings.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,70 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +iBMC and Ironic constants mapping +""" + +from oslo_utils import importutils + +from ironic.common import boot_devices +from ironic.common import boot_modes +from ironic.common import states +from ironic.drivers.modules.ibmc import utils + +constants = importutils.try_import('ibmc_client.constants') + +if constants: + # Set power state mapping + SET_POWER_STATE_MAP = { + states.POWER_ON: constants.RESET_ON, + states.POWER_OFF: constants.RESET_FORCE_OFF, + states.REBOOT: constants.RESET_FORCE_RESTART, + states.SOFT_REBOOT: constants.RESET_FORCE_POWER_CYCLE, + states.SOFT_POWER_OFF: constants.RESET_GRACEFUL_SHUTDOWN, + } + + # Get power state mapping + GET_POWER_STATE_MAP = { + constants.SYSTEM_POWER_STATE_ON: states.POWER_ON, + constants.SYSTEM_POWER_STATE_OFF: states.POWER_OFF, + } + + # Boot device mapping + GET_BOOT_DEVICE_MAP = { + constants.BOOT_SOURCE_TARGET_NONE: 'none', + constants.BOOT_SOURCE_TARGET_PXE: boot_devices.PXE, + constants.BOOT_SOURCE_TARGET_FLOPPY: 'floppy', + constants.BOOT_SOURCE_TARGET_CD: boot_devices.CDROM, + constants.BOOT_SOURCE_TARGET_HDD: boot_devices.DISK, + constants.BOOT_SOURCE_TARGET_BIOS_SETUP: boot_devices.BIOS, + } + + SET_BOOT_DEVICE_MAP = utils.revert_dictionary(GET_BOOT_DEVICE_MAP) + + # Boot mode mapping + GET_BOOT_MODE_MAP = { + constants.BOOT_SOURCE_MODE_BIOS: boot_modes.LEGACY_BIOS, + constants.BOOT_SOURCE_MODE_UEFI: boot_modes.UEFI, + } + + SET_BOOT_MODE_MAP = utils.revert_dictionary(GET_BOOT_MODE_MAP) + + # Boot device persistent mapping + GET_BOOT_DEVICE_PERSISTENT_MAP = { + constants.BOOT_SOURCE_ENABLED_ONCE: False, + constants.BOOT_SOURCE_ENABLED_CONTINUOUS: True, + constants.BOOT_SOURCE_ENABLED_DISABLED: None, + } + + SET_BOOT_DEVICE_PERSISTENT_MAP = utils.revert_dictionary( + GET_BOOT_DEVICE_PERSISTENT_MAP) diff -Nru ironic-12.0.0/ironic/drivers/modules/ibmc/power.py ironic-12.1.0/ironic/drivers/modules/ibmc/power.py --- ironic-12.0.0/ironic/drivers/modules/ibmc/power.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ibmc/power.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,145 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iBMC Power Interface +""" + +from oslo_log import log +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import states +from ironic.conductor import task_manager +from ironic.conductor import utils as cond_utils +from ironic.drivers import base +from ironic.drivers.modules.ibmc import mappings +from ironic.drivers.modules.ibmc import utils + +constants = importutils.try_import('ibmc_client.constants') +ibmc_client = importutils.try_import('ibmc_client') + +LOG = log.getLogger(__name__) + +EXPECT_POWER_STATE_MAP = { + states.REBOOT: states.POWER_ON, + states.SOFT_REBOOT: states.POWER_ON, + states.SOFT_POWER_OFF: states.POWER_OFF, +} + + +class IBMCPower(base.PowerInterface): + + def __init__(self): + """Initialize the iBMC power interface. 
+ + :raises: DriverLoadError if the driver can't be loaded due to + missing dependencies + """ + super(IBMCPower, self).__init__() + if not ibmc_client: + raise exception.DriverLoadError( + driver='ibmc', + reason=_('Unable to import the python-ibmcclient library')) + + def get_properties(self): + """Return the properties of the interface. + + :returns: dictionary of : entries. + """ + return utils.COMMON_PROPERTIES.copy() + + def validate(self, task): + """Validates the driver information needed by the iBMC driver. + + :param task: A TaskManager instance containing the node to act on. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + """ + utils.parse_driver_info(task.node) + + @utils.handle_ibmc_exception('get iBMC power state') + def get_power_state(self, task): + """Get the current power state of the task's node. + + :param task: A TaskManager instance containing the node to act on. + :returns: A power state. One of :mod:`ironic.common.states`. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + return mappings.GET_POWER_STATE_MAP.get(system.power_state) + + @task_manager.require_exclusive_lock + @utils.handle_ibmc_exception('set iBMC power state') + def set_power_state(self, task, power_state, timeout=None): + """Set the power state of the task's node. + + :param task: A TaskManager instance containing the node to act on. + :param power_state: Any power state from :mod:`ironic.common.states`. + :param timeout: Time to wait for the node to reach the requested state. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue if a required parameter is missing. + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + reset_type = mappings.SET_POWER_STATE_MAP.get(power_state) + conn.system.reset(reset_type) + + target_state = EXPECT_POWER_STATE_MAP.get(power_state, power_state) + cond_utils.node_wait_for_power_state(task, target_state, + timeout=timeout) + + @task_manager.require_exclusive_lock + @utils.handle_ibmc_exception('reboot iBMC') + def reboot(self, task, timeout=None): + """Perform a hard reboot of the task's node. + + :param task: A TaskManager instance containing the node to act on. + :param timeout: Time to wait for the node to become powered on. + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue if a required parameter is missing. 
+ :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responses an error information + """ + ibmc = utils.parse_driver_info(task.node) + with ibmc_client.connect(**ibmc) as conn: + system = conn.system.get() + current_power_state = ( + mappings.GET_POWER_STATE_MAP.get(system.power_state) + ) + if current_power_state == states.POWER_ON: + conn.system.reset( + mappings.SET_POWER_STATE_MAP.get(states.REBOOT)) + else: + conn.system.reset( + mappings.SET_POWER_STATE_MAP.get(states.POWER_ON)) + + cond_utils.node_wait_for_power_state(task, states.POWER_ON, + timeout=timeout) + + def get_supported_power_states(self, task): + """Get a list of the supported power states. + + :param task: A TaskManager instance containing the node to act on. + Not used by this driver at the moment. + :returns: A list with the supported power states defined + in :mod:`ironic.common.states`. + """ + return list(mappings.SET_POWER_STATE_MAP) diff -Nru ironic-12.0.0/ironic/drivers/modules/ibmc/utils.py ironic-12.1.0/ironic/drivers/modules/ibmc/utils.py --- ironic-12.0.0/ironic/drivers/modules/ibmc/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ibmc/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,172 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iBMC Driver common utils +""" + +import os + +from oslo_log import log +from oslo_utils import importutils +from oslo_utils import netutils +from oslo_utils import strutils +import retrying +import six + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.conductor import task_manager +from ironic.conf import CONF + +ibmc_client = importutils.try_import('ibmcclient') +ibmc_error = importutils.try_import('ibmc_client.exceptions') + +LOG = log.getLogger(__name__) + +REQUIRED_PROPERTIES = { + 'ibmc_address': _('The URL address to the iBMC controller. It must ' + 'include the authority portion of the URL. ' + 'If the scheme is missing, https is assumed. ' + 'For example: https://mgmt.vendor.com. Required.'), + 'ibmc_username': _('User account with admin/server-profile access ' + 'privilege. Required.'), + 'ibmc_password': _('User account password. Required.'), +} + +OPTIONAL_PROPERTIES = { + 'ibmc_verify_ca': _('Either a Boolean value, a path to a CA_BUNDLE ' + 'file or directory with certificates of trusted ' + 'CAs. If set to True the driver will verify the ' + 'host certificates; if False the driver will ' + 'ignore verifying the SSL certificate. If it\'s ' + 'a path the driver will use the specified ' + 'certificate or one of the certificates in the ' + 'directory. Defaults to True. Optional.'), +} + +COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy() +COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES) + + +def parse_driver_info(node): + """Parse the information required for Ironic to connect to iBMC. 
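+
+    For example, a node's driver_info may carry (values illustrative):
+
+        {'ibmc_address': 'https://example.com',
+         'ibmc_username': 'admin',
+         'ibmc_password': 'secret',
+         'ibmc_verify_ca': False}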
+ + :param node: an Ironic node object + :returns: dictionary of parameters + :raises: InvalidParameterValue on malformed parameter(s) + :raises: MissingParameterValue on missing parameter(s) + """ + driver_info = node.driver_info or {} + missing_info = [key for key in REQUIRED_PROPERTIES + if not driver_info.get(key)] + if missing_info: + raise exception.MissingParameterValue(_( + 'Missing the following iBMC properties in node ' + '%(node)s driver_info: %(info)s') % {'node': node.uuid, + 'info': missing_info}) + + # Validate the iBMC address + address = driver_info['ibmc_address'] + parsed = netutils.urlsplit(address) + if not parsed.scheme: + address = 'https://%s' % address + parsed = netutils.urlsplit(address) + + if not parsed.netloc: + raise exception.InvalidParameterValue( + _('Invalid iBMC address %(address)s set in ' + 'driver_info/ibmc_address on node %(node)s') % + {'address': address, 'node': node.uuid}) + + # Check if verify_ca is a Boolean or a file/directory in the file-system + verify_ca = driver_info.get('ibmc_verify_ca', True) + if isinstance(verify_ca, six.string_types): + if not os.path.exists(verify_ca): + try: + verify_ca = strutils.bool_from_string(verify_ca, strict=True) + except ValueError: + raise exception.InvalidParameterValue( + _('Invalid value type set in driver_info/' + 'ibmc_verify_ca on node %(node)s. ' + 'The value should be a Boolean or the path ' + 'to a file/directory, not "%(value)s"' + ) % {'value': verify_ca, 'node': node.uuid}) + elif not isinstance(verify_ca, bool): + raise exception.InvalidParameterValue( + _('Invalid value type set in driver_info/ibmc_verify_ca ' + 'on node %(node)s. The value should be a Boolean or the path ' + 'to a file/directory, not "%(value)s"') % {'value': verify_ca, + 'node': node.uuid}) + return {'address': address, + 'username': driver_info.get('ibmc_username'), + 'password': driver_info.get('ibmc_password'), + 'verify_ca': verify_ca} + + +def revert_dictionary(d): + return {v: k for k, v in d.items()} + + +def handle_ibmc_exception(action): + """Decorator to handle iBMC client exception. + + Decorated functions must take a :class:`TaskManager` as the first + parameter. + """ + + def decorator(f): + + def should_retry(e): + connect_error = isinstance(e, exception.IBMCConnectionError) + if connect_error: + LOG.info(_('Failed to connect to iBMC, will retry now. ' + 'Max retry times is %(retry_times)d.'), + {'retry_times': CONF.ibmc.connection_attempts}) + return connect_error + + @retrying.retry( + retry_on_exception=should_retry, + stop_max_attempt_number=CONF.ibmc.connection_attempts, + wait_fixed=CONF.ibmc.connection_retry_interval * 1000) + @six.wraps(f) + def wrapper(*args, **kwargs): + # NOTE(dtantsur): this code could be written simpler, but then unit + # testing decorated functions is pretty hard, as we usually pass a + # Mock object instead of TaskManager there. 
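+                # (args[0] is 'self' for bound interface methods and the
+                # task itself for plain functions, hence the positional
+                # probe below instead of a keyword lookup.)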
+ if len(args) > 1: + is_task_mgr = isinstance(args[1], task_manager.TaskManager) + task = args[1] if is_task_mgr else args[0] + else: + task = args[0] + + node = task.node + + try: + return f(*args, **kwargs) + except ibmc_error.ConnectionError as e: + error = (_('Failed to connect to iBMC for node %(node)s, ' + 'Error: %(error)s') + % {'node': node.uuid, 'error': e}) + LOG.error(error) + raise exception.IBMCConnectionError(node=node.uuid, + error=error) + except ibmc_error.IBMCClientError as e: + error = (_('Failed to %(action)s for node %(node)s, ' + 'Error %(error)s') + % {'node': node.uuid, 'action': action, 'error': e}) + LOG.error(error) + raise exception.IBMCError(node=node.uuid, error=error) + + return wrapper + + return decorator diff -Nru ironic-12.0.0/ironic/drivers/modules/ibmc/vendor.py ironic-12.1.0/ironic/drivers/modules/ibmc/vendor.py --- ironic-12.0.0/ironic/drivers/modules/ibmc/vendor.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ibmc/vendor.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,87 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +iBMC Vendor Interface +""" + +from oslo_log import log +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.drivers import base +from ironic.drivers.modules.ibmc import utils + +ibmc_client = importutils.try_import('ibmc_client') + +LOG = log.getLogger(__name__) + + +class IBMCVendor(base.VendorInterface): + + def __init__(self): + """Initialize the iBMC vendor interface. + + :raises: DriverLoadError if the driver can't be loaded due to + missing dependencies + """ + super(IBMCVendor, self).__init__() + if not ibmc_client: + raise exception.DriverLoadError( + driver='ibmc', + reason=_('Unable to import the python-ibmcclient library')) + + def validate(self, task, method=None, **kwargs): + """Validate vendor-specific actions. + + If invalid, raises an exception; otherwise returns None. + + :param task: A task from TaskManager. + :param method: Method to be validated + :param kwargs: Info for action. + :raises: UnsupportedDriverExtension if 'method' can not be mapped to + the supported interfaces. + :raises: InvalidParameterValue if kwargs does not contain 'method'. + :raises: MissingParameterValue + """ + utils.parse_driver_info(task.node) + + def get_properties(self): + """Return the properties of the interface. + + :returns: dictionary of : entries. + """ + return utils.COMMON_PROPERTIES.copy() + + @base.passthru(['GET'], async_call=False, + description=_('Returns a dictionary, ' + 'containing node boot up sequence, ' + 'in ascending order')) + @utils.handle_ibmc_exception('get iBMC boot up sequence') + def boot_up_seq(self, task, **kwargs): + """List boot type order of the node. + + :param task: A TaskManager instance containing the node to act on. + :param kwargs: Not used. + :raises: InvalidParameterValue if kwargs does not contain 'method'. 
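A GET passthru such as ``boot_up_seq`` is normally reached through the node's vendor_passthru endpoint; the sketch below assumes the standard bare metal API route, and the endpoint, node UUID and token are placeholders.

    import requests

    resp = requests.get(
        'http://127.0.0.1:6385/v1/nodes/<node-uuid>/vendor_passthru',
        params={'method': 'boot_up_seq'},
        headers={'X-Auth-Token': '<token>'})
    print(resp.json())  # e.g. {'boot_up_sequence': [...]} per the docstring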
+ :raises: MissingParameterValue + :raises: IBMCConnectionError when it fails to connect to iBMC + :raises: IBMCError when iBMC responds with an error + :returns: A dictionary, containing node boot up sequence, + in ascending order. + """ + driver_info = utils.parse_driver_info(task.node) + with ibmc_client.connect(**driver_info) as conn: + system = conn.system.get() + boot_sequence = system.boot_sequence + return {'boot_up_sequence': boot_sequence} diff -Nru ironic-12.0.0/ironic/drivers/modules/ilo/bios.py ironic-12.1.0/ironic/drivers/modules/ilo/bios.py --- ironic-12.0.0/ironic/drivers/modules/ilo/bios.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ilo/bios.py 2019-03-21 20:07:40.000000000 +0000 @@ -21,12 +21,14 @@ from ironic.common import exception from ironic.common.i18n import _ +from ironic.common import states +from ironic.conductor import utils as manager_utils from ironic.drivers import base +from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.ilo import common as ilo_common from ironic import objects LOG = logging.getLogger(__name__) - METRICS = metrics_utils.get_metrics_logger(__name__) ilo_error = importutils.try_import('proliantutils.exception') @@ -51,6 +53,100 @@ """ ilo_common.parse_driver_info(task.node) + def _execute_pre_boot_bios_step(self, task, step, data=None): + """Perform operations required prior to the reboot. + + Depending on the clean step, it executes the operations required + and moves the node to CLEANWAIT state prior to reboot. + :param task: a task from TaskManager. + :param step: name of the clean step to be performed + :param data: if the clean step is apply_configuration it holds + the settings data. + :raises: NodeCleaningFailure if any expected condition fails + """ + node = task.node + + if step not in ('apply_configuration', 'factory_reset'): + errmsg = _('Could not find the step %(step)s for the ' + 'node %(node)s.') + raise exception.NodeCleaningFailure( + errmsg % {'step': step, 'node': node.uuid}) + + try: + ilo_object = ilo_common.get_ilo_object(node) + ilo_object.set_bios_settings(data) if step == ( + 'apply_configuration') else ilo_object.reset_bios_to_default() + except (exception.MissingParameterValue, + exception.InvalidParameterValue, + ilo_error.IloError, + ilo_error.IloCommandNotSupportedError) as ir_exception: + errmsg = _('Clean step %(step)s failed ' + 'on the node %(node)s with error: %(err)s') + raise exception.NodeCleaningFailure( + errmsg % {'step': step, 'node': node.uuid, + 'err': ir_exception}) + + deploy_opts = deploy_utils.build_agent_options(node) + task.driver.boot.prepare_ramdisk(task, deploy_opts) + manager_utils.node_power_action(task, states.REBOOT) + + driver_internal_info = node.driver_internal_info + driver_internal_info['cleaning_reboot'] = True + driver_internal_info['skip_current_clean_step'] = False + + if step == 'apply_configuration': + driver_internal_info['apply_bios'] = True + else: + driver_internal_info['reset_bios'] = True + + node.driver_internal_info = driver_internal_info + node.save() + return states.CLEANWAIT + + def _execute_post_boot_bios_step(self, task, step): + """Perform operations required after the reboot. + + Caches BIOS settings in the database and clears the flags associated + with the clean step post reboot. + :param task: a task from TaskManager. 
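Reduced to its skeleton, the pre/post reboot split that these helpers implement looks roughly like this sketch; 'CLEANWAIT' stands in for states.CLEANWAIT and the flag name mirrors the apply_configuration case.

    def two_phase_clean_step(node):
        info = node.driver_internal_info
        if not info.get('apply_bios'):
            # First pass: stage the change, set the flags; a reboot follows.
            info['apply_bios'] = True
            info['cleaning_reboot'] = True
            node.driver_internal_info = info
            node.save()
            return 'CLEANWAIT'
        # Second pass, after the reboot: check the result, clear the flag.
        info.pop('apply_bios', None)
        node.driver_internal_info = info
        node.save()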
+ :param step: name of the clean step to be performed + :raises: NodeCleaningFailure if it fails any conditions expected + """ + node = task.node + + driver_internal_info = node.driver_internal_info + driver_internal_info.pop('apply_bios', None) + driver_internal_info.pop('reset_bios', None) + task.node.driver_internal_info = driver_internal_info + task.node.save() + + if step not in ('apply_configuration', 'factory_reset'): + errmsg = _('Could not find the step %(step)s for the ' + 'node %(node)s.') + raise exception.NodeCleaningFailure( + errmsg % {'step': step, 'node': node.uuid}) + + try: + ilo_object = ilo_common.get_ilo_object(node) + status = ilo_object.get_bios_settings_result() + except (exception.MissingParameterValue, + exception.InvalidParameterValue, + ilo_error.IloError, + ilo_error.IloCommandNotSupportedError) as ir_exception: + + errmsg = _('Clean step %(step)s failed ' + 'on the node %(node)s with error: %(err)s') + raise exception.NodeCleaningFailure( + errmsg % {'step': step, 'node': node.uuid, + 'err': ir_exception}) + + if status.get('status') == 'failed': + errmsg = _('Clean step %(step)s failed ' + 'on the node %(node)s with error: %(err)s') + raise exception.NodeCleaningFailure( + errmsg % {'step': step, 'node': node.uuid, + 'err': status.get('results')}) + @METRICS.timer('IloBIOS.apply_configuration') @base.clean_step(priority=0, abortable=False, argsinfo={ 'settings': { @@ -58,6 +154,7 @@ 'required': True } }) + @base.cache_bios_settings def apply_configuration(self, task, settings): """Applies the provided configuration on the node. @@ -67,27 +164,21 @@ the node fails. """ + node = task.node + driver_internal_info = node.driver_internal_info data = {} for setting in settings: data.update({setting['name']: setting['value']}) - - node = task.node - - errmsg = _("Clean step \"apply_configuration\" failed " - "on node %(node)s with error: %(err)s") - - try: - ilo_object = ilo_common.get_ilo_object(node) - ilo_object.set_bios_settings(data) - except (exception.MissingParameterValue, - exception.InvalidParameterValue, - ilo_error.IloError, - ilo_error.IloCommandNotSupportedError) as ir_exception: - raise exception.NodeCleaningFailure( - errmsg % {'node': node.uuid, 'err': ir_exception}) + if not driver_internal_info.get('apply_bios'): + return self._execute_pre_boot_bios_step( + task, 'apply_configuration', data) + else: + return self._execute_post_boot_bios_step( + task, 'apply_configuration') @METRICS.timer('IloBIOS.factory_reset') @base.clean_step(priority=0, abortable=False) + @base.cache_bios_settings def factory_reset(self, task): """Reset the BIOS settings to factory configuration. 
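For reference, the ``settings`` argument taken by apply_configuration is a list of name/value dicts that the step flattens into a plain mapping; the setting names below are invented.

    settings = [
        {'name': 'BootMode', 'value': 'Uefi'},
        {'name': 'ProcVirtualization', 'value': 'Enabled'},
    ]
    data = {s['name']: s['value'] for s in settings}
    # data == {'BootMode': 'Uefi', 'ProcVirtualization': 'Enabled'}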
@@ -97,19 +188,12 @@ """ node = task.node + driver_internal_info = node.driver_internal_info - errmsg = _("Clean step \"factory_reset\" failed " - "on node %(node)s with error: %(err)s") - - try: - ilo_object = ilo_common.get_ilo_object(node) - ilo_object.reset_bios_to_default() - except (exception.MissingParameterValue, - exception.InvalidParameterValue, - ilo_error.IloError, - ilo_error.IloCommandNotSupportedError) as ir_exception: - raise exception.NodeCleaningFailure( - errmsg % {'node': node.uuid, 'err': ir_exception}) + if not driver_internal_info.get('reset_bios'): + return self._execute_pre_boot_bios_step(task, 'factory_reset') + else: + return self._execute_post_boot_bios_step(task, 'factory_reset') @METRICS.timer('IloBIOS.cache_bios_settings') def cache_bios_settings(self, task): @@ -127,7 +211,7 @@ "on node %(node)s with error: %(err)s") try: ilo_object = ilo_common.get_ilo_object(node) - bios_settings = ilo_object.get_pending_bios_settings() + bios_settings = ilo_object.get_current_bios_settings() except (exception.MissingParameterValue, exception.InvalidParameterValue, diff -Nru ironic-12.0.0/ironic/drivers/modules/ilo/boot.py ironic-12.1.0/ironic/drivers/modules/ilo/boot.py --- ironic-12.0.0/ironic/drivers/modules/ilo/boot.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ilo/boot.py 2019-03-21 20:07:40.000000000 +0000 @@ -198,8 +198,10 @@ boot_iso_tmp_file = fileobj.name images.create_boot_iso(task.context, boot_iso_tmp_file, kernel_href, ramdisk_href, - deploy_iso_uuid, root_uuid, - kernel_params, boot_mode) + deploy_iso_href=deploy_iso_uuid, + root_uuid=root_uuid, + kernel_params=kernel_params, + boot_mode=boot_mode) if CONF.ilo.use_web_server_for_images: boot_iso_url = ( diff -Nru ironic-12.0.0/ironic/drivers/modules/ilo/inspect.py ironic-12.1.0/ironic/drivers/modules/ilo/inspect.py --- ironic-12.0.0/ironic/drivers/modules/ilo/inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ilo/inspect.py 2019-03-21 20:07:40.000000000 +0000 @@ -233,7 +233,9 @@ # hardwares, the method inspect_hardware() doesn't raise an error # for these capabilities. capabilities = _get_capabilities(task.node, ilo_object) + model = None if capabilities: + model = capabilities.get('server_model') valid_cap = _create_supported_capabilities_dict(capabilities) capabilities = utils.get_updated_capabilities( task.node.properties.get('capabilities'), valid_cap) @@ -241,6 +243,23 @@ node_properties['capabilities'] = capabilities task.node.properties = node_properties + # The RIBCL (Gen8) protocol cannot determine whether a NIC + # is physically connected with a cable or not when the server + # is not provisioned. RIS (Gen9) can detect this for a few NIC + # adapters, but not for all. However, it is possible to determine + # this using the Redfish (Gen10) protocol. Hence proliantutils + # returns ALL MACs for Gen8 and Gen9, while it returns + # only active MACs for Gen10. A warning is logged so that + # the user knows to remove the ironic ports created for + # inactive NICs on Gen8 and Gen9. + servers = ['Gen8', 'Gen9'] + if model is not None and any(serv in model for serv in servers): + LOG.warning('iLO cannot determine if the NICs are physically ' + 'connected or not for ProLiant Gen8 and Gen9 servers. ' + 'Hence returns all the MACs present on the server. 
' + 'Please remove the ironic ports created for inactive ' + 'NICs manually for the node %(node)s', + {"node": task.node.uuid}) task.node.save() # Create ports for the nics detected. diff -Nru ironic-12.0.0/ironic/drivers/modules/ilo/raid.py ironic-12.1.0/ironic/drivers/modules/ilo/raid.py --- ironic-12.0.0/ironic/drivers/modules/ilo/raid.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ilo/raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,235 @@ +# Copyright 2018 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +iLO5 RAID specific methods +""" + +from ironic_lib import metrics_utils +from oslo_log import log as logging +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common.i18n import _ +from ironic.common import raid +from ironic.common import states +from ironic.conductor import utils as manager_utils +from ironic import conf +from ironic.drivers import base +from ironic.drivers.modules import deploy_utils +from ironic.drivers.modules.ilo import common as ilo_common + + +LOG = logging.getLogger(__name__) +CONF = conf.CONF +METRICS = metrics_utils.get_metrics_logger(__name__) + +ilo_error = importutils.try_import('proliantutils.exception') + + +class Ilo5RAID(base.RAIDInterface): + """Implementation of OOB RAIDInterface for iLO5.""" + + def get_properties(self): + """Return the properties of the interface.""" + return ilo_common.REQUIRED_PROPERTIES + + def _set_clean_failed(self, task, msg, exc): + LOG.error("RAID configuration job failed for node %(node)s. 
" + "Message: '%(message)s'.", + {'node': task.node.uuid, 'message': msg}) + task.node.last_error = msg + task.process_event('fail') + + def _set_driver_internal_true_value(self, task, *keys): + driver_internal_info = task.node.driver_internal_info + for key in keys: + driver_internal_info[key] = True + task.node.driver_internal_info = driver_internal_info + task.node.save() + + def _set_driver_internal_false_value(self, task, *keys): + driver_internal_info = task.node.driver_internal_info + for key in keys: + driver_internal_info[key] = False + task.node.driver_internal_info = driver_internal_info + task.node.save() + + def _pop_driver_internal_values(self, task, *keys): + driver_internal_info = task.node.driver_internal_info + for key in keys: + driver_internal_info.pop(key, None) + task.node.driver_internal_info = driver_internal_info + task.node.save() + + def _prepare_for_read_raid(self, task, raid_step): + deploy_opts = deploy_utils.build_agent_options(task.node) + task.driver.boot.prepare_ramdisk(task, deploy_opts) + manager_utils.node_power_action(task, states.REBOOT) + if raid_step == 'create_raid': + self._set_driver_internal_true_value( + task, 'ilo_raid_create_in_progress') + else: + self._set_driver_internal_true_value( + task, 'ilo_raid_delete_in_progress') + self._set_driver_internal_true_value(task, 'cleaning_reboot') + self._set_driver_internal_false_value(task, 'skip_current_clean_step') + + @METRICS.timer('Ilo5RAID.create_configuration') + @base.clean_step(priority=0, abortable=False, argsinfo={ + 'create_root_volume': { + 'description': ( + 'This specifies whether to create the root volume. ' + 'Defaults to `True`.' + ), + 'required': False + }, + 'create_nonroot_volumes': { + 'description': ( + 'This specifies whether to create the non-root volumes. ' + 'Defaults to `True`.' + ), + 'required': False + } + }) + def create_configuration(self, task, create_root_volume=True, + create_nonroot_volumes=True): + """Create a RAID configuration on a bare metal using agent ramdisk. + + This method creates a RAID configuration on the given node. + + :param task: a TaskManager instance. + :param create_root_volume: If True, a root volume is created + during RAID configuration. Otherwise, no root volume is + created. Default is True. + :param create_nonroot_volumes: If True, non-root volumes are + created. If False, no non-root volumes are created. Default + is True. + :raises: MissingParameterValue, if node.target_raid_config is missing + or was found to be empty after skipping root volume and/or non-root + volumes. + :raises: NodeCleaningFailure, on failure to execute step. 
+ """ + node = task.node + target_raid_config = raid.filter_target_raid_config( + node, create_root_volume=create_root_volume, + create_nonroot_volumes=create_nonroot_volumes) + driver_internal_info = node.driver_internal_info + driver_internal_info['target_raid_config'] = target_raid_config + LOG.debug("Calling OOB RAID create_configuration for node %(node)s " + "with the following target RAID configuration: %(target)s", + {'node': node.uuid, 'target': target_raid_config}) + ilo_object = ilo_common.get_ilo_object(node) + + try: + # Raid configuration in progress, checking status + if not driver_internal_info.get('ilo_raid_create_in_progress'): + ilo_object.create_raid_configuration(target_raid_config) + self._prepare_for_read_raid(task, 'create_raid') + return states.CLEANWAIT + else: + # Raid configuration is done, updating raid_config + raid_conf = ( + ilo_object.read_raid_configuration( + raid_config=target_raid_config)) + if len(raid_conf['logical_disks']): + raid.update_raid_info(node, raid_conf) + LOG.debug("Node %(uuid)s raid create clean step is done.", + {'uuid': node.uuid}) + self._pop_driver_internal_values( + task, 'ilo_raid_create_in_progress', + 'cleaning_reboot', 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + else: + # Raid configuration failed + msg = "Unable to create raid" + self._pop_driver_internal_values( + task, 'ilo_raid_create_in_progress', + 'cleaning_reboot', 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + raise exception.NodeCleaningFailure( + "Clean step create_configuration failed " + "on node %(node)s with error: %(err)s" % + {'node': node.uuid, 'err': msg}) + except ilo_error.IloError as ilo_exception: + operation = (_("Failed to create raid configuration on node %s") + % node.uuid) + self._pop_driver_internal_values(task, + 'ilo_raid_create_in_progress', + 'cleaning_reboot', + 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + self._set_clean_failed(task, operation, ilo_exception) + + @METRICS.timer('Ilo5RAID.delete_configuration') + @base.clean_step(priority=0, abortable=False) + def delete_configuration(self, task): + """Delete the RAID configuration. + + :param task: a TaskManager instance containing the node to act on. + :raises: NodeCleaningFailure, on failure to execute step. 
+ """ + node = task.node + LOG.debug("OOB RAID delete_configuration invoked for node %s.", + node.uuid) + driver_internal_info = node.driver_internal_info + ilo_object = ilo_common.get_ilo_object(node) + + try: + # Raid configuration in progress, checking status + if not driver_internal_info.get('ilo_raid_delete_in_progress'): + ilo_object.delete_raid_configuration() + self._prepare_for_read_raid(task, 'delete_raid') + return states.CLEANWAIT + else: + # Raid configuration is done, updating raid_config + raid_conf = ilo_object.read_raid_configuration() + if not len(raid_conf['logical_disks']): + node.raid_config = {} + LOG.debug("Node %(uuid)s raid delete clean step is done.", + {'uuid': node.uuid}) + self._pop_driver_internal_values( + task, 'ilo_raid_delete_in_progress', + 'cleaning_reboot', 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + else: + # Raid configuration failed + msg = ("Unable to delete this logical disks: %s" % + raid_conf['logical_disks']) + self._pop_driver_internal_values( + task, 'ilo_raid_delete_in_progress', + 'cleaning_reboot', 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + raise exception.NodeCleaningFailure( + "Clean step delete_configuration failed " + "on node %(node)s with error: %(err)s" % + {'node': node.uuid, 'err': msg}) + except ilo_error.IloLogicalDriveNotFoundError: + LOG.info("No logical drive found to delete on node %(node)s", + {'node': node.uuid}) + except ilo_error.IloError as ilo_exception: + operation = (_("Failed to delete raid configuration on node %s") + % node.uuid) + self._pop_driver_internal_values(task, + 'ilo_raid_delete_in_progress', + 'cleaning_reboot', + 'skip_current_clean_step') + node.driver_internal_info = driver_internal_info + node.save() + self._set_clean_failed(task, operation, ilo_exception) diff -Nru ironic-12.0.0/ironic/drivers/modules/inspect_utils.py ironic-12.1.0/ironic/drivers/modules/inspect_utils.py --- ironic-12.0.0/ironic/drivers/modules/inspect_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/inspect_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -21,31 +21,32 @@ LOG = logging.getLogger(__name__) -def create_ports_if_not_exist(task, macs): - """Create ironic ports for the mac addresses. - - Creates ironic ports for the mac addresses returned with inspection - or as requested by operator. +def create_ports_if_not_exist( + task, macs, get_mac_address=lambda x: x[1]): + """Create ironic ports from MAC addresses data dict. + + Creates ironic ports from MAC addresses data returned with inspection or + as requested by operator. Helper argument to detect the MAC address + ``get_mac_address`` defaults to 'value' part of MAC address dict key-value + pair. :param task: A TaskManager instance. - :param macs: A dictionary of port numbers to mac addresses - returned by node inspection. - + :param macs: A dictionary of MAC addresses returned by node inspection. + :param get_mac_address: a function to get the MAC address from mac item. + A mac item is the dict key-value pair of the previous ``macs`` + argument. 
""" node = task.node - for port_num, mac in macs.items(): - # TODO(etingof): detect --pxe-enabled flag + for k_v_pair in macs.items(): + mac = get_mac_address(k_v_pair) port_dict = {'address': mac, 'node_id': node.id} port = objects.Port(task.context, **port_dict) try: port.create() - LOG.info("Port %(port_num)s created for MAC address %(address)s " - "for node %(node)s", {'address': mac, 'node': node.uuid, - 'port_num': port_num}) + LOG.info("Port created for MAC address %(address)s for node " + "%(node)s", {'address': mac, 'node': node.uuid}) except exception.MACAlreadyExists: - LOG.warning("Port %(port_num)s already exists for " - "MAC address %(address)s for node " - "%(node)s", {'address': mac, - 'node': node.uuid, - 'port_num': port_num}) + LOG.warning("Port already exists for MAC address %(address)s " + "for node %(node)s", + {'address': mac, 'node': node.uuid}) diff -Nru ironic-12.0.0/ironic/drivers/modules/ipmitool.py ironic-12.1.0/ironic/drivers/modules/ipmitool.py --- ironic-12.0.0/ironic/drivers/modules/ipmitool.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ipmitool.py 2019-03-21 20:07:40.000000000 +0000 @@ -72,6 +72,9 @@ } OPTIONAL_PROPERTIES = { 'ipmi_password': _("password. Optional."), + 'ipmi_hex_kg_key': _('Kg key for IPMIv2 authentication. ' + 'The key is expected in hexadecimal format. ' + 'Optional.'), 'ipmi_port': _("remote IPMI RMCP port. Optional."), 'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of " "%s. Optional.") % ', '.join(VALID_PRIV_LEVELS), @@ -282,6 +285,7 @@ address = info.get('ipmi_address') username = info.get('ipmi_username') password = six.text_type(info.get('ipmi_password', '')) + hex_kg_key = info.get('ipmi_hex_kg_key') dest_port = info.get('ipmi_port') port = info.get('ipmi_terminal_port') priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR') @@ -361,11 +365,16 @@ " can be one of %(valid_levels)s") % {'priv_level': priv_level, 'valid_levels': valid_priv_lvls}) + if hex_kg_key and len(hex_kg_key) % 2 != 0: + raise exception.InvalidParameterValue(_( + "Number of ipmi_hex_kg_key characters is not even")) + return { 'address': address, 'dest_port': dest_port, 'username': username, 'password': password, + 'hex_kg_key': hex_kg_key, 'port': port, 'uuid': node.uuid, 'priv_level': priv_level, @@ -404,30 +413,14 @@ {'node': driver_info['uuid'], 'cmd': popen_obj.cmd}) -def _exec_ipmitool(driver_info, command, check_exit_code=None, - kill_on_timeout=False): - """Execute the ipmitool command. - - :param driver_info: the ipmitool parameters for accessing a node. - :param command: the ipmitool command to be executed. - :param check_exit_code: Single bool, int, or list of allowed exit codes. - :param kill_on_timeout: if `True`, kill unresponsive ipmitool on - `min_command_interval` timeout. Default is `False`. Makes no - effect on Windows. - :returns: (stdout, stderr) from executing the command. - :raises: PasswordFileFailedToCreate from creating or writing to the - temporary file. - :raises: processutils.ProcessExecutionError from executing the command. 
- - """ +def _get_ipmitool_args(driver_info, pw_file=None): ipmi_version = ('lanplus' if driver_info['protocol_version'] == '2.0' else 'lan') + args = ['ipmitool', - '-I', - ipmi_version, - '-H', - driver_info['address'], + '-I', ipmi_version, + '-H', driver_info['address'], '-L', driver_info['priv_level'] ] @@ -439,11 +432,46 @@ args.append('-U') args.append(driver_info['username']) + if driver_info['hex_kg_key']: + args.append('-y') + args.append(driver_info['hex_kg_key']) + for name, option in BRIDGING_OPTIONS: if driver_info[name] is not None: args.append(option) args.append(driver_info[name]) + if pw_file: + args.append('-f') + args.append(pw_file) + + if CONF.debug: + args.append('-v') + + # ensure all arguments are strings + args = [str(arg) for arg in args] + + return args + + +def _exec_ipmitool(driver_info, command, check_exit_code=None, + kill_on_timeout=False): + """Execute the ipmitool command. + + :param driver_info: the ipmitool parameters for accessing a node. + :param command: the ipmitool command to be executed. + :param check_exit_code: Single bool, int, or list of allowed exit codes. + :param kill_on_timeout: if `True`, kill unresponsive ipmitool on + `min_command_interval` timeout. Default is `False`. Makes no + effect on Windows. + :returns: (stdout, stderr) from executing the command. + :raises: PasswordFileFailedToCreate from creating or writing to the + temporary file. + :raises: processutils.ProcessExecutionError from executing the command. + + """ + args = _get_ipmitool_args(driver_info) + timeout = CONF.ipmi.command_retry_timeout # specify retry timing more precisely, if supported @@ -986,7 +1014,8 @@ 'to False, so not sending ipmi boot-timeout-disable', {'node_uuid', task.node.uuid}) - if task.node.driver_info.get('ipmi_force_boot_device', False): + ifbd = task.node.driver_info.get('ipmi_force_boot_device', False) + if strutils.bool_from_string(ifbd): driver_utils.force_persistent_boot(task, device, persistent) @@ -1052,8 +1081,9 @@ """ driver_info = task.node.driver_info driver_internal_info = task.node.driver_internal_info + ifbd = driver_info.get('ipmi_force_boot_device', False) - if (driver_info.get('ipmi_force_boot_device', False) + if (strutils.bool_from_string(ifbd) and driver_internal_info.get('persistent_boot_device') and driver_internal_info.get('is_next_boot_persistent', True)): return { @@ -1275,13 +1305,7 @@ :param pw_file: password file to be used in ipmitool command :returns: returns a command string for ipmitool """ - user = driver_info.get('username') - user = ' -U {}'.format(user) if user else '' - return ("ipmitool -H %(address)s -I lanplus" - "%(user)s -f %(pwfile)s" - % {'address': driver_info['address'], - 'user': user, - 'pwfile': pw_file}) + return ' '.join(_get_ipmitool_args(driver_info, pw_file=pw_file)) def _start_console(self, driver_info, start_method): """Start a remote console for the node. 
@@ -1299,15 +1323,8 @@ pw_file = console_utils.make_persistent_password_file( path, driver_info['password'] or '\0') ipmi_cmd = self._get_ipmi_cmd(driver_info, pw_file) + ipmi_cmd += ' sol activate' - for name, option in BRIDGING_OPTIONS: - if driver_info[name] is not None: - ipmi_cmd = " ".join([ipmi_cmd, - option, driver_info[name]]) - - if CONF.debug: - ipmi_cmd += " -v" - ipmi_cmd += " sol activate" try: start_method(driver_info['uuid'], driver_info['port'], ipmi_cmd) except (exception.ConsoleError, exception.ConsoleSubprocessFailed): diff -Nru ironic-12.0.0/ironic/drivers/modules/ipxe.py ironic-12.1.0/ironic/drivers/modules/ipxe.py --- ironic-12.0.0/ironic/drivers/modules/ipxe.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ipxe.py 2019-03-21 20:07:40.000000000 +0000 @@ -120,10 +120,6 @@ :param ramdisk_params: the parameters to be passed to the ramdisk. pxe driver passes these parameters as kernel command-line arguments. - :param mode: Label indicating a deploy or rescue operation - being carried out on the node. Supported values are - 'deploy' and 'rescue'. Defaults to 'deploy', indicating - deploy operation is being carried out. :returns: None :raises: MissingParameterValue, if some information is missing in node's driver_info or instance_info. @@ -133,6 +129,11 @@ operation failed on the node. """ node = task.node + + # Label indicating a deploy or rescue operation being carried out on + # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like + # state, the mode is set to 'deploy', indicating deploy operation is + # being carried out. mode = deploy_utils.rescue_or_deploy_mode(node) # NOTE(mjturek): At this point, the ipxe boot script should @@ -147,7 +148,8 @@ provider = dhcp_factory.DHCPFactory() provider.update_dhcp(task, dhcp_opts) - pxe_info = pxe_utils.get_image_info(node, mode=mode) + pxe_info = pxe_utils.get_image_info(node, mode=mode, + ipxe_enabled=True) # NODE: Try to validate and fetch instance images only # if we are in DEPLOYING state. @@ -165,9 +167,14 @@ pxe_utils.create_pxe_config(task, pxe_options, pxe_config_template, ipxe_enabled=True) - persistent = strutils.bool_from_string( - node.driver_info.get('force_persistent_boot_device', - False)) + persistent = False + value = node.driver_info.get('force_persistent_boot_device', + 'Default') + if value in {'Always', 'Default', 'Never'}: + if value == 'Always': + persistent = True + else: + persistent = strutils.bool_from_string(value, False) manager_utils.node_set_boot_device(task, boot_devices.PXE, persistent=persistent) @@ -265,8 +272,12 @@ # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes # during takeover if boot_device and task.node.provision_state != states.ACTIVE: + persistent = True + if node.driver_info.get('force_persistent_boot_device', + 'Default') == 'Never': + persistent = False manager_utils.node_set_boot_device(task, boot_device, - persistent=True) + persistent=persistent) @METRICS.timer('iPXEBoot.clean_up_instance') def clean_up_instance(self, task): diff -Nru ironic-12.0.0/ironic/drivers/modules/irmc/bios.py ironic-12.1.0/ironic/drivers/modules/irmc/bios.py --- ironic-12.0.0/ironic/drivers/modules/irmc/bios.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/irmc/bios.py 2019-03-21 20:07:40.000000000 +0000 @@ -61,6 +61,7 @@ 'required': True } }) + @base.cache_bios_settings def apply_configuration(self, task, settings): """Applies BIOS configuration on the given node. 
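The resolution of the reworked force_persistent_boot_device flag during ramdisk preparation can be written in isolation as the sketch below; it mirrors only the prepare_ramdisk branch (prepare_instance and takeover treat everything except 'Never' as persistent).

    from oslo_utils import strutils

    def resolve_persistent(value='Default'):
        # New tri-state values win; legacy 'True'/'False' still parse.
        if value in ('Always', 'Default', 'Never'):
            return value == 'Always'
        return strutils.bool_from_string(value, False)

    # resolve_persistent('Always') -> True
    # resolve_persistent('Never')  -> False
    # resolve_persistent('true')   -> True (deprecated boolean spelling)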
@@ -98,6 +99,7 @@ operation='Apply BIOS configuration', error=e) @METRICS.timer('IRMCBIOS.factory_reset') + @base.cache_bios_settings def factory_reset(self, task): """Reset BIOS configuration to factory default on the given node. diff -Nru ironic-12.0.0/ironic/drivers/modules/irmc/boot.py ironic-12.1.0/ironic/drivers/modules/irmc/boot.py --- ironic-12.0.0/ironic/drivers/modules/irmc/boot.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/irmc/boot.py 2019-03-21 20:07:40.000000000 +0000 @@ -314,8 +314,10 @@ images.create_boot_iso(task.context, boot_iso_fullpathname, kernel_href, ramdisk_href, - deploy_iso_href, root_uuid, - kernel_params, boot_mode) + deploy_iso_href=deploy_iso_href, + root_uuid=root_uuid, + kernel_params=kernel_params, + boot_mode=boot_mode) driver_internal_info['irmc_boot_iso'] = boot_iso_filename diff -Nru ironic-12.0.0/ironic/drivers/modules/iscsi_deploy.py ironic-12.1.0/ironic/drivers/modules/iscsi_deploy.py --- ironic-12.0.0/ironic/drivers/modules/iscsi_deploy.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/iscsi_deploy.py 2019-03-21 20:07:40.000000000 +0000 @@ -408,20 +408,35 @@ :returns: deploy state DEPLOYWAIT. """ node = task.node - if task.driver.storage.should_write_image(task): + if manager_utils.is_fast_track(task): + LOG.debug('Performing a fast track deployment for %(node)s.', + {'node': task.node.uuid}) + deploy_utils.cache_instance_image(task.context, node) + check_image_size(task) + # Update the database for the API and the task tracking; this + # resumes the state machine, going from DEPLOYWAIT -> DEPLOYING + task.process_event('wait') + self.continue_deploy(task) + elif task.driver.storage.should_write_image(task): + # Standard deploy process + deploy_utils.cache_instance_image(task.context, node) check_image_size(task) manager_utils.node_power_action(task, states.REBOOT) - return states.DEPLOYWAIT else: + # Boot to a Storage Volume + + # TODO(TheJulia): At some point, we should de-dupe this code + # as it is nearly identical to the agent deploy interface. + # This is not being done now as it is expected to be + # refactored in the near future. manager_utils.node_power_action(task, states.POWER_OFF) + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.remove_provisioning_network(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) task.driver.boot.prepare_instance(task) manager_utils.node_power_action(task, states.POWER_ON) @@ -447,10 +462,13 @@ manager_utils.node_power_action(task, states.POWER_OFF) task.driver.storage.detach_volumes(task) deploy_utils.tear_down_storage_configuration(task) + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.unconfigure_tenant_networks(task) # NOTE(mgoddard): If the deployment was unsuccessful the node may have # ports on the provisioning network which were not deleted. task.driver.network.remove_provisioning_network(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) return states.DELETED @METRICS.timer('ISCSIDeploy.prepare') @@ -479,19 +497,38 @@ task.driver.boot.prepare_instance(task) else: if node.provision_state == states.DEPLOYING: - # Adding the node to provisioning network so that the dhcp - # options get added for the provisioning port. 
- manager_utils.node_power_action(task, states.POWER_OFF) + fast_track_deploy = manager_utils.is_fast_track(task) + if fast_track_deploy: + # The agent has already recently checked in and we are + # configured to take that as an indicator that we can + # skip ahead. + LOG.debug('The agent for node %(node)s has recently ' + 'checked in, and the node power will remain ' + 'unmodified.', + {'node': task.node.uuid}) + else: + # Adding the node to provisioning network so that the dhcp + # options get added for the provisioning port. + manager_utils.node_power_action(task, states.POWER_OFF) # NOTE(vdrok): in case of rebuild, we have tenant network # already configured, unbind tenant ports if present if task.driver.storage.should_write_image(task): + if not fast_track_deploy: + power_state_to_restore = ( + manager_utils.power_on_node_if_needed(task)) task.driver.network.unconfigure_tenant_networks(task) task.driver.network.add_provisioning_network(task) + if not fast_track_deploy: + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) task.driver.storage.attach_volumes(task) - if not task.driver.storage.should_write_image(task): + if (not task.driver.storage.should_write_image(task) + or fast_track_deploy): # We have nothing else to do as this is handled in the # backend storage system, and we can return to the caller # as we do not need to boot the agent to deploy. + # Alternatively, we are in a fast track deployment + # and have nothing else to do. return deploy_opts = deploy_utils.build_agent_options(node) diff -Nru ironic-12.0.0/ironic/drivers/modules/network/common.py ironic-12.1.0/ironic/drivers/modules/network/common.py --- ironic-12.0.0/ironic/drivers/modules/network/common.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/network/common.py 2019-03-21 20:07:40.000000000 +0000 @@ -266,11 +266,26 @@ if client_id_opt: body['port']['extra_dhcp_opts'] = [client_id_opt] + is_smart_nic = neutron.is_smartnic_port(port_like_obj) + if is_smart_nic: + link_info = local_link_info[0] + LOG.debug('Setting hostname as host_id in case of Smart NIC, ' + 'port %(port_id)s, hostname %(hostname)s', + {'port_id': vif_id, + 'hostname': link_info['hostname']}) + body['port']['binding:host_id'] = link_info['hostname'] + body['port']['binding:vnic_type'] = neutron.VNIC_SMARTNIC + if not client: client = neutron.get_client(context=task.context) + if is_smart_nic: + neutron.wait_for_host_agent(client, body['port']['binding:host_id']) + try: client.update_port(vif_id, body) + if is_smart_nic: + neutron.wait_for_port_status(client, vif_id, 'ACTIVE') except neutron_exceptions.ConnectionFailed as e: msg = (_('Could not add public network VIF %(vif)s ' 'to node %(node)s, possible network issue. 
%(exc)s') % diff -Nru ironic-12.0.0/ironic/drivers/modules/network/neutron.py ironic-12.1.0/ironic/drivers/modules/network/neutron.py --- ironic-12.0.0/ironic/drivers/modules/network/neutron.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/network/neutron.py 2019-03-21 20:07:40.000000000 +0000 @@ -258,4 +258,22 @@ or port_like_obj.extra.get('vif_port_id')) if not vif_port_id: continue + + is_smart_nic = neutron.is_smartnic_port(port_like_obj) + if is_smart_nic: + client = neutron.get_client(context=task.context) + link_info = port_like_obj.local_link_connection + neutron.wait_for_host_agent(client, link_info['hostname']) + neutron.unbind_neutron_port(vif_port_id, context=task.context) + + def need_power_on(self, task): + """Check if the node has any Smart NIC ports + + :param task: A TaskManager instance. + :return: A boolean to indicate Smart NIC port presence + """ + for port in task.ports: + if neutron.is_smartnic_port(port): + return True + return False diff -Nru ironic-12.0.0/ironic/drivers/modules/noop.py ironic-12.1.0/ironic/drivers/modules/noop.py --- ironic-12.0.0/ironic/drivers/modules/noop.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/noop.py 2019-03-21 20:07:40.000000000 +0000 @@ -15,7 +15,7 @@ """ Dummy interface implementations for use as defaults with optional interfaces. -Note that unlike fake implementatios, these do not pass validation and raise +Note that unlike fake implementations, these do not pass validation and raise exceptions for user-accessible actions. """ diff -Nru ironic-12.0.0/ironic/drivers/modules/pxe_base.py ironic-12.1.0/ironic/drivers/modules/pxe_base.py --- ironic-12.0.0/ironic/drivers/modules/pxe_base.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/pxe_base.py 2019-03-21 20:07:40.000000000 +0000 @@ -31,10 +31,16 @@ "mounted at boot time. Required."), } OPTIONAL_PROPERTIES = { - 'force_persistent_boot_device': _("True to enable persistent behavior " - "when the boot device is set during " - "deploy and cleaning operations. " - "Defaults to False. Optional."), + 'force_persistent_boot_device': _("Controls the persistency of boot order " + "changes. 'Always' will make all " + "changes persistent, 'Default' will " + "make all but the final one upon " + "instance deployment non-persistent, " + "and 'Never' will make no persistent " + "changes at all. The old values 'True' " + "and 'False' are still supported but " + "deprecated in favor of the new ones." + "Defaults to 'Default'. Optional."), } RESCUE_PROPERTIES = { 'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value ' diff -Nru ironic-12.0.0/ironic/drivers/modules/pxe.py ironic-12.1.0/ironic/drivers/modules/pxe.py --- ironic-12.0.0/ironic/drivers/modules/pxe.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/pxe.py 2019-03-21 20:07:40.000000000 +0000 @@ -127,10 +127,6 @@ :param ramdisk_params: the parameters to be passed to the ramdisk. pxe driver passes these parameters as kernel command-line arguments. - :param mode: Label indicating a deploy or rescue operation - being carried out on the node. Supported values are - 'deploy' and 'rescue'. Defaults to 'deploy', indicating - deploy operation is being carried out. :returns: None :raises: MissingParameterValue, if some information is missing in node's driver_info or instance_info. @@ -140,6 +136,11 @@ operation failed on the node. 
""" node = task.node + + # Label indicating a deploy or rescue operation being carried out on + # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like + # state, the mode is set to 'deploy', indicating deploy operation is + # being carried out. mode = deploy_utils.rescue_or_deploy_mode(node) ipxe_enabled = CONF.pxe.ipxe_enabled if ipxe_enabled: @@ -172,9 +173,15 @@ pxe_utils.create_pxe_config(task, pxe_options, pxe_config_template, ipxe_enabled=CONF.pxe.ipxe_enabled) - persistent = strutils.bool_from_string( - node.driver_info.get('force_persistent_boot_device', - False)) + + persistent = False + value = node.driver_info.get('force_persistent_boot_device', + 'Default') + if value in {'Always', 'Default', 'Never'}: + if value == 'Always': + persistent = True + else: + persistent = strutils.bool_from_string(value, False) manager_utils.node_set_boot_device(task, boot_devices.PXE, persistent=persistent) @@ -274,8 +281,12 @@ # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes # during takeover if boot_device and task.node.provision_state != states.ACTIVE: + persistent = True + if node.driver_info.get('force_persistent_boot_device', + 'Default') == 'Never': + persistent = False manager_utils.node_set_boot_device(task, boot_device, - persistent=True) + persistent=persistent) @METRICS.timer('PXEBoot.clean_up_instance') def clean_up_instance(self, task): @@ -329,7 +340,10 @@ # IDEA(TheJulia): Maybe a "trusted environment" mode flag # that we otherwise fail validation on for drivers that # require explicit security postures. + power_state_to_restore = manager_utils.power_on_node_if_needed(task) task.driver.network.configure_tenant_networks(task) + manager_utils.restore_power_state_if_needed( + task, power_state_to_restore) # calling boot.prepare_instance will also set the node # to PXE boot, and update PXE templates accordingly diff -Nru ironic-12.0.0/ironic/drivers/modules/redfish/bios.py ironic-12.1.0/ironic/drivers/modules/redfish/bios.py --- ironic-12.0.0/ironic/drivers/modules/redfish/bios.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/redfish/bios.py 2019-03-21 20:07:40.000000000 +0000 @@ -79,6 +79,7 @@ task.context, node_id, delete_names) @base.clean_step(priority=0) + @base.cache_bios_settings def factory_reset(self, task): """Reset the BIOS settings of the node to the factory default. @@ -110,6 +111,7 @@ 'required': True } }) + @base.cache_bios_settings def apply_configuration(self, task, settings): """Apply the BIOS settings to the node. 
diff -Nru ironic-12.0.0/ironic/drivers/modules/redfish/inspect.py ironic-12.1.0/ironic/drivers/modules/redfish/inspect.py --- ironic-12.0.0/ironic/drivers/modules/redfish/inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/redfish/inspect.py 2019-03-21 20:07:44.000000000 +0000 @@ -17,12 +17,15 @@ from oslo_utils import importutils from oslo_utils import units +from ironic.common import boot_modes from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states +from ironic.common import utils from ironic.drivers import base from ironic.drivers.modules import inspect_utils from ironic.drivers.modules.redfish import utils as redfish_utils +from ironic.drivers import utils as drivers_utils LOG = log.getLogger(__name__) @@ -37,6 +40,11 @@ sushy.PROCESSOR_ARCH_OEM: 'oem' } + BOOT_MODE_MAP = { + sushy.BOOT_SOURCE_MODE_UEFI: boot_modes.UEFI, + sushy.BOOT_SOURCE_MODE_BIOS: boot_modes.LEGACY_BIOS + } + class RedfishInspect(base.InspectInterface): @@ -115,6 +123,8 @@ simple_storage_size = 0 try: + LOG.debug("Attempting to discover system simple storage size for " + "node %(node)s", {'node': task.node.uuid}) if (system.simple_storage and system.simple_storage.disks_sizes_bytes): simple_storage_size = [ @@ -124,7 +134,7 @@ simple_storage_size = simple_storage_size[0] - except sushy.SushyError as ex: + except sushy.exceptions.SushyError as ex: LOG.debug("No simple storage information discovered " "for node %(node)s: %(err)s", {'node': task.node.uuid, 'err': ex}) @@ -132,6 +142,8 @@ storage_size = 0 try: + LOG.debug("Attempting to discover system storage volume size for " + "node %(node)s", {'node': task.node.uuid}) if system.storage and system.storage.volumes_sizes_bytes: storage_size = [ size for size in system.storage.volumes_sizes_bytes @@ -140,11 +152,28 @@ storage_size = storage_size[0] - except sushy.SushyError as ex: + except sushy.exceptions.SushyError as ex: LOG.debug("No storage volume information discovered " "for node %(node)s: %(err)s", {'node': task.node.uuid, 'err': ex}) + try: + if not storage_size: + LOG.debug("Attempting to discover system storage drive size " + "for node %(node)s", {'node': task.node.uuid}) + if system.storage and system.storage.drives_sizes_bytes: + storage_size = [ + size for size in system.storage.drives_sizes_bytes + if size >= 4 * units.Gi + ] or [0] + + storage_size = storage_size[0] + + except sushy.exceptions.SushyError as ex: + LOG.debug("No storage drive information discovered " + "for node %(node)s: %(err)s", {'node': task.node.uuid, + 'err': ex}) + # NOTE(etingof): pick the smallest disk larger than 4G among available if simple_storage_size and storage_size: local_gb = min(simple_storage_size, storage_size) @@ -161,10 +190,19 @@ if local_gb: inspected_properties['local_gb'] = str(local_gb) - else: LOG.warning("Could not provide a valid storage size configured " - "for node %(node)s", {'node': task.node.uuid}) + "for node %(node)s. 
Assuming this is a disk-less node", + {'node': task.node.uuid}) + inspected_properties['local_gb'] = '0' + + if system.boot.mode: + if not drivers_utils.get_node_capability(task.node, 'boot_mode'): + capabilities = utils.get_updated_capabilities( + inspected_properties.get('capabilities', ''), + {'boot_mode': BOOT_MODE_MAP[system.boot.mode]}) + + inspected_properties['capabilities'] = capabilities valid_keys = self.ESSENTIAL_PROPERTIES missing_keys = valid_keys - set(inspected_properties) @@ -183,12 +221,21 @@ 'node': task.node.uuid}) if (system.ethernet_interfaces and - system.ethernet_interfaces.eth_summary): - macs = system.ethernet_interfaces.eth_summary - - # Create ports for the nics detected. - inspect_utils.create_ports_if_not_exist(task, macs) + system.ethernet_interfaces.summary): + macs = system.ethernet_interfaces.summary + # Create ports for the discovered NICs being in 'enabled' state + enabled_macs = {nic_mac: nic_state + for nic_mac, nic_state in macs.items() + if nic_state == sushy.STATE_ENABLED} + if enabled_macs: + inspect_utils.create_ports_if_not_exist( + task, enabled_macs, get_mac_address=lambda x: x[0]) + else: + LOG.warning("Not attempting to create any port as no NICs " + "were discovered in 'enabled' state for node " + "%(node)s: %(mac_data)s", + {'mac_data': macs, 'node': task.node.uuid}) else: LOG.warning("No NIC information discovered " "for node %(node)s", {'node': task.node.uuid}) diff -Nru ironic-12.0.0/ironic/drivers/modules/ucs/management.py ironic-12.1.0/ironic/drivers/modules/ucs/management.py --- ironic-12.0.0/ironic/drivers/modules/ucs/management.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ucs/management.py 2019-03-21 20:07:40.000000000 +0000 @@ -44,6 +44,10 @@ class UcsManagement(base.ManagementInterface): + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. + supported = False + def get_properties(self): return ucs_helper.COMMON_PROPERTIES diff -Nru ironic-12.0.0/ironic/drivers/modules/ucs/power.py ironic-12.1.0/ironic/drivers/modules/ucs/power.py --- ironic-12.0.0/ironic/drivers/modules/ucs/power.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/ucs/power.py 2019-03-21 20:07:40.000000000 +0000 @@ -74,6 +74,9 @@ This PowerInterface class provides a mechanism for controlling the power state of servers managed by Cisco UCS Manager. """ + # NOTE(TheJulia): Deprecated due to a lack of operating third party + # CI, which stopped reporting during the Stein development cycle. 
+ supported = False def get_properties(self): """Returns common properties of the driver.""" diff -Nru ironic-12.0.0/ironic/drivers/modules/xclarity/common.py ironic-12.1.0/ironic/drivers/modules/xclarity/common.py --- ironic-12.0.0/ironic/drivers/modules/xclarity/common.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/xclarity/common.py 2019-03-21 20:07:40.000000000 +0000 @@ -144,7 +144,7 @@ msg = (_("Error validating node driver info, " "server uuid: %s missing xclarity_hardware_id") % node.uuid) - raise exception.MissingParameterValue(error=msg) + raise exception.MissingParameterValue(err=msg) return xclarity_hardware_id diff -Nru ironic-12.0.0/ironic/drivers/modules/xclarity/management.py ironic-12.1.0/ironic/drivers/modules/xclarity/management.py --- ironic-12.0.0/ironic/drivers/modules/xclarity/management.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/xclarity/management.py 2019-03-21 20:07:40.000000000 +0000 @@ -38,6 +38,9 @@ boot_devices.BIOS: 'Boot To F1' } +BOOT_DEVICE_MAPPING_FROM_XCLARITY = { + v: k for k, v in BOOT_DEVICE_MAPPING_TO_XCLARITY.items()} + SUPPORTED_BOOT_DEVICES = [ boot_devices.PXE, boot_devices.DISK, @@ -78,10 +81,11 @@ """It validates if the boot device is supported by XClarity. :param task: a task from TaskManager. - :param boot_device: the boot device, one of [PXE, DISK, CDROM, BIOS] + :param boot_device: the boot device in XClarity format, one of + ['PXE Network', 'Hard Disk 0', 'CD/DVD Rom', 'Boot To F1'] :raises: InvalidParameterValue if the boot device is not supported. """ - if boot_device not in SUPPORTED_BOOT_DEVICES: + if boot_device not in BOOT_DEVICE_MAPPING_FROM_XCLARITY: raise exception.InvalidParameterValue( _("Unsupported boot device %(device)s for node: %(node)s ") % {"device": boot_device, "node": task.node.uuid} @@ -95,6 +99,7 @@ :returns: a dictionary containing: :boot_device: the boot device, one of [PXE, DISK, CDROM, BIOS] :persistent: Whether the boot device will persist or not + It returns None if boot device is unknown. :raises: InvalidParameterValue if the boot device is unknown :raises: XClarityError if the communication with XClarity fails """ @@ -117,22 +122,38 @@ primary = None boot_order = boot_info['bootOrder']['bootOrderList'] for item in boot_order: - current = item.get('currentBootOrderDevices', None) - boot_type = item.get('bootType', None) + current = item.get('currentBootOrderDevices') + if current is None: + LOG.warning( + 'Current boot order is None from XClarity for ' + 'node %(node)s. 
Please check the hardware and ' + 'XClarity connection', {'node': node.uuid, }) + return {'boot_device': None, 'persistent': None} + else: + primary = current[0] + boot_type = item.get('bootType') if boot_type == "SingleUse": persistent = False - primary = current[0] if primary != 'None': - boot_device = {'boot_device': primary, - 'persistent': persistent} - self._validate_whether_supported_boot_device(primary) + self._validate_supported_boot_device(task, primary) + boot_device = { + 'boot_device': + BOOT_DEVICE_MAPPING_FROM_XCLARITY.get(primary), + 'persistent': persistent + } return boot_device elif boot_type == "Permanent": persistent = True - boot_device = {'boot_device': current[0], - 'persistent': persistent} - self._validate_supported_boot_device(task, primary) - return boot_device + if primary != 'None': + self._validate_supported_boot_device(task, primary) + boot_device = { + 'boot_device': + BOOT_DEVICE_MAPPING_FROM_XCLARITY.get(primary), + 'persistent': persistent + } + return boot_device + else: + return {'boot_device': None, 'persistent': None} @METRICS.timer('XClarityManagement.set_boot_device') @task_manager.require_exclusive_lock @@ -149,12 +170,13 @@ specified. :raises: XClarityError if the communication with XClarity fails """ - self._validate_supported_boot_device(task=task, boot_device=device) + node = task.node + xc_device = self._translate_ironic_to_xclarity(device) - server_hardware_id = task.node.driver_info.get('server_hardware_id') + server_hardware_id = common.get_server_hardware_id(node) LOG.debug("Setting boot device to %(device)s for node %(node)s", - {"device": device, "node": task.node.uuid}) - self._set_boot_device(task, server_hardware_id, device, + {"device": device, "node": node.uuid}) + self._set_boot_device(task, server_hardware_id, xc_device, singleuse=not persistent) @METRICS.timer('XClarityManagement.get_sensors_data') @@ -189,36 +211,34 @@ client = common.get_xclarity_client(node) boot_info = client.get_node_all_boot_info( server_hardware_id) - xclarity_boot_device = self._translate_ironic_to_xclarity( - new_primary_boot_device) current = [] LOG.debug( ("Setting boot device to %(device)s for XClarity " "node %(node)s"), - {'device': xclarity_boot_device, 'node': node.uuid} + {'device': new_primary_boot_device, 'node': node.uuid} ) for item in boot_info['bootOrder']['bootOrderList']: if singleuse and item['bootType'] == 'SingleUse': - item['currentBootOrderDevices'][0] = xclarity_boot_device + item['currentBootOrderDevices'][0] = new_primary_boot_device elif not singleuse and item['bootType'] == 'Permanent': current = item['currentBootOrderDevices'] - if xclarity_boot_device == current[0]: + if new_primary_boot_device == current[0]: return - if xclarity_boot_device in current: - current.remove(xclarity_boot_device) - current.insert(0, xclarity_boot_device) + if new_primary_boot_device in current: + current.remove(new_primary_boot_device) + current.insert(0, new_primary_boot_device) item['currentBootOrderDevices'] = current try: client.set_node_boot_info(server_hardware_id, boot_info, - xclarity_boot_device, + new_primary_boot_device, singleuse) except xclarity_client_exceptions.XClarityError as xclarity_exc: LOG.error( ('Error setting boot device %(boot_device)s for the XClarity ' 'node %(node)s. 
Error: %(error)s'), - {'boot_device': xclarity_boot_device, 'node': node.uuid, + {'boot_device': new_primary_boot_device, 'node': node.uuid, 'error': xclarity_exc} ) raise exception.XClarityError(error=xclarity_exc) diff -Nru ironic-12.0.0/ironic/drivers/modules/xclarity/power.py ironic-12.1.0/ironic/drivers/modules/xclarity/power.py --- ironic-12.0.0/ironic/drivers/modules/xclarity/power.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/modules/xclarity/power.py 2019-03-21 20:07:40.000000000 +0000 @@ -62,7 +62,7 @@ server_hardware_id = common.get_server_hardware_id(node) try: power_state = client.get_node_power_status(server_hardware_id) - except xclarity_client_exceptions.XClarityException as xclarity_exc: + except xclarity_client_exceptions.XClarityError as xclarity_exc: LOG.error( ("Error getting power state for node %(node)s. Error: " "%(error)s"), diff -Nru ironic-12.0.0/ironic/drivers/utils.py ironic-12.1.0/ironic/drivers/utils.py --- ironic-12.0.0/ironic/drivers/utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/drivers/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -18,6 +18,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import base64 +from oslo_utils import strutils from oslo_utils import timeutils import six @@ -192,8 +193,8 @@ :param task: Node object. :param driver_info: Node driver_info. """ - - if driver_info.get('force_boot_device', False): + ifbd = driver_info.get('force_boot_device', False) + if strutils.bool_from_string(ifbd): driver_internal_info = task.node.driver_internal_info if driver_internal_info.get('is_next_boot_persistent') is False: driver_internal_info.pop('is_next_boot_persistent', None) diff -Nru ironic-12.0.0/ironic/objects/allocation.py ironic-12.1.0/ironic/objects/allocation.py --- ironic-12.0.0/ironic/objects/allocation.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/objects/allocation.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,300 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_utils import strutils +from oslo_utils import uuidutils +from oslo_versionedobjects import base as object_base + +from ironic.common import exception +from ironic.common import utils +from ironic.db import api as dbapi +from ironic.objects import base +from ironic.objects import fields as object_fields +from ironic.objects import notification + + +@base.IronicObjectRegistry.register +class Allocation(base.IronicObject, object_base.VersionedObjectDictCompat): + # Version 1.0: Initial version + VERSION = '1.0' + + dbapi = dbapi.get_instance() + + fields = { + 'id': object_fields.IntegerField(), + 'uuid': object_fields.UUIDField(nullable=True), + 'name': object_fields.StringField(nullable=True), + 'node_id': object_fields.IntegerField(nullable=True), + 'state': object_fields.StringField(nullable=True), + 'last_error': object_fields.StringField(nullable=True), + 'resource_class': object_fields.StringField(nullable=True), + 'traits': object_fields.ListOfStringsField(nullable=True), + 'candidate_nodes': object_fields.ListOfStringsField(nullable=True), + 'extra': object_fields.FlexibleDictField(nullable=True), + 'conductor_affinity': object_fields.IntegerField(nullable=True), + } + + def _convert_to_version(self, target_version, + remove_unavailable_fields=True): + """Convert to the target version. + + Convert the object to the target version. The target version may be + the same, older, or newer than the version of the object. This is + used for DB interactions as well as for serialization/deserialization. + + :param target_version: the desired version of the object + :param remove_unavailable_fields: True to remove fields that are + unavailable in the target version; set this to True when + (de)serializing. False to set the unavailable fields to appropriate + values; set this to False for DB interactions. + """ + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable_classmethod + @classmethod + def get(cls, context, allocation_ident): + """Find an allocation by its ID, UUID or name. + + :param allocation_ident: The ID, UUID or name of an allocation. + :param context: Security context + :returns: An :class:`Allocation` object. + :raises: InvalidIdentity + + """ + if strutils.is_int_like(allocation_ident): + return cls.get_by_id(context, allocation_ident) + elif uuidutils.is_uuid_like(allocation_ident): + return cls.get_by_uuid(context, allocation_ident) + elif utils.is_valid_logical_name(allocation_ident): + return cls.get_by_name(context, allocation_ident) + else: + raise exception.InvalidIdentity(identity=allocation_ident) + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_id(cls, context, allocation_id): + """Find an allocation by its integer ID. + + :param cls: the :class:`Allocation` + :param context: Security context + :param allocation_id: The ID of an allocation. + :returns: An :class:`Allocation` object. 
+ :raises: AllocationNotFound + + """ + db_allocation = cls.dbapi.get_allocation_by_id(allocation_id) + allocation = cls._from_db_object(context, cls(), db_allocation) + return allocation + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_uuid(cls, context, uuid): + """Find an allocation by its UUID. + + :param cls: the :class:`Allocation` + :param context: Security context + :param uuid: The UUID of an allocation. + :returns: An :class:`Allocation` object. + :raises: AllocationNotFound + + """ + db_allocation = cls.dbapi.get_allocation_by_uuid(uuid) + allocation = cls._from_db_object(context, cls(), db_allocation) + return allocation + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_name(cls, context, name): + """Find an allocation based by its name. + + :param cls: the :class:`Allocation` + :param context: Security context + :param name: The name of an allocation. + :returns: An :class:`Allocation` object. + :raises: AllocationNotFound + + """ + db_allocation = cls.dbapi.get_allocation_by_name(name) + allocation = cls._from_db_object(context, cls(), db_allocation) + return allocation + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable_classmethod + @classmethod + def list(cls, context, filters=None, limit=None, marker=None, + sort_key=None, sort_dir=None): + """Return a list of Allocation objects. + + :param cls: the :class:`Allocation` + :param context: Security context. + :param filters: Filters to apply. + :param limit: Maximum number of resources to return in a single result. + :param marker: Pagination marker for large data sets. + :param sort_key: Column to sort results by. + :param sort_dir: Direction to sort. "asc" or "desc". + :returns: A list of :class:`Allocation` object. + :raises: InvalidParameterValue + + """ + db_allocations = cls.dbapi.get_allocation_list(filters=filters, + limit=limit, + marker=marker, + sort_key=sort_key, + sort_dir=sort_dir) + return cls._from_db_object_list(context, db_allocations) + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable + def create(self, context=None): + """Create a Allocation record in the DB. + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. 
+ A context should be set when instantiating the + object, e.g.: Allocation(context) + :raises: AllocationDuplicateName, AllocationAlreadyExists + + """ + values = self.do_version_changes_for_db() + db_allocation = self.dbapi.create_allocation(values) + self._from_db_object(self._context, self, db_allocation) + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable + def destroy(self, context=None): + """Delete the Allocation from the DB. + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: Allocation(context) + :raises: AllocationNotFound + + """ + self.dbapi.destroy_allocation(self.uuid) + self.obj_reset_changes() + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable + def save(self, context=None): + """Save updates to this Allocation. + + Updates will be made column by column based on the result + of self.what_changed(). + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: Allocation(context) + :raises: AllocationNotFound, AllocationDuplicateName + + """ + updates = self.do_version_changes_for_db() + updated_allocation = self.dbapi.update_allocation(self.uuid, updates) + self._from_db_object(self._context, self, updated_allocation) + + # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable + # methods can be used in the future to replace current explicit RPC calls. + # Implications of calling new remote procedures should be thought through. + # @object_base.remotable + def refresh(self, context=None): + """Loads updates for this Allocation. + + Loads an allocation with the same uuid from the database and + checks for updated attributes. Updates are applied from + the loaded allocation column by column, if there are any updates. + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. 
+ A context should be set when instantiating the + object, e.g.: Allocation(context) + :raises: AllocationNotFound + + """ + current = self.get_by_uuid(self._context, uuid=self.uuid) + self.obj_refresh(current) + self.obj_reset_changes() + + +@base.IronicObjectRegistry.register +class AllocationCRUDNotification(notification.NotificationBase): + """Notification when ironic creates, updates or deletes an allocation.""" + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'payload': object_fields.ObjectField('AllocationCRUDPayload') + } + + +@base.IronicObjectRegistry.register +class AllocationCRUDPayload(notification.NotificationPayloadBase): + # Version 1.0: Initial version + VERSION = '1.0' + + SCHEMA = { + 'candidate_nodes': ('allocation', 'candidate_nodes'), + 'created_at': ('allocation', 'created_at'), + 'extra': ('allocation', 'extra'), + 'last_error': ('allocation', 'last_error'), + 'name': ('allocation', 'name'), + 'resource_class': ('allocation', 'resource_class'), + 'state': ('allocation', 'state'), + 'traits': ('allocation', 'traits'), + 'updated_at': ('allocation', 'updated_at'), + 'uuid': ('allocation', 'uuid') + } + + fields = { + 'uuid': object_fields.UUIDField(nullable=True), + 'name': object_fields.StringField(nullable=True), + 'node_uuid': object_fields.StringField(nullable=True), + 'state': object_fields.StringField(nullable=True), + 'last_error': object_fields.StringField(nullable=True), + 'resource_class': object_fields.StringField(nullable=True), + 'traits': object_fields.ListOfStringsField(nullable=True), + 'candidate_nodes': object_fields.ListOfStringsField(nullable=True), + 'extra': object_fields.FlexibleDictField(nullable=True), + 'created_at': object_fields.DateTimeField(nullable=True), + 'updated_at': object_fields.DateTimeField(nullable=True), + } + + def __init__(self, allocation, node_uuid=None): + super(AllocationCRUDPayload, self).__init__(node_uuid=node_uuid) + self.populate_schema(allocation=allocation) diff -Nru ironic-12.0.0/ironic/objects/deploy_template.py ironic-12.1.0/ironic/objects/deploy_template.py --- ironic-12.0.0/ironic/objects/deploy_template.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/objects/deploy_template.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,281 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
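Allocation.get in allocation.py above dispatches on the shape of the identifier before choosing a lookup. A standalone sketch of that classification, using the same oslo.utils helpers (classify() is illustrative, not an ironic function):

    from oslo_utils import strutils, uuidutils

    def classify(ident):
        # Mirrors the ordering in Allocation.get: integer ID first,
        # then UUID; ironic additionally validates the logical name
        # and raises InvalidIdentity for anything else.
        if strutils.is_int_like(ident):
            return 'get_by_id'
        if uuidutils.is_uuid_like(ident):
            return 'get_by_uuid'
        return 'get_by_name'

    print(classify('42'))                                    # get_by_id
    print(classify('3bf138ba-6d71-44e7-b6a1-ca9cac17103e'))  # get_by_uuid
    print(classify('my-allocation'))                         # get_by_name

The DeployTemplate object introduced next exposes the same get_by_id / get_by_uuid / get_by_name trio.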
+ +from oslo_versionedobjects import base as object_base + +from ironic.db import api as db_api +from ironic.objects import base +from ironic.objects import fields as object_fields +from ironic.objects import notification + + +@base.IronicObjectRegistry.register +class DeployTemplate(base.IronicObject, object_base.VersionedObjectDictCompat): + # Version 1.0: Initial version + # Version 1.1: Added 'extra' field + VERSION = '1.1' + + dbapi = db_api.get_instance() + + fields = { + 'id': object_fields.IntegerField(), + 'uuid': object_fields.UUIDField(nullable=False), + 'name': object_fields.StringField(nullable=False), + 'steps': object_fields.ListOfFlexibleDictsField(nullable=False), + 'extra': object_fields.FlexibleDictField(nullable=True), + } + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable + def create(self, context=None): + """Create a DeployTemplate record in the DB. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :raises: DeployTemplateDuplicateName if a deploy template with the same + name exists. + :raises: DeployTemplateAlreadyExists if a deploy template with the same + UUID exists. + """ + values = self.do_version_changes_for_db() + db_template = self.dbapi.create_deploy_template(values) + self._from_db_object(self._context, self, db_template) + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable + def save(self, context=None): + """Save updates to this DeployTemplate. + + Column-wise updates will be made based on the result of + self.what_changed(). + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context) + :raises: DeployTemplateDuplicateName if a deploy template with the same + name exists. + :raises: DeployTemplateNotFound if the deploy template does not exist. + """ + updates = self.do_version_changes_for_db() + db_template = self.dbapi.update_deploy_template(self.uuid, updates) + self._from_db_object(self._context, self, db_template) + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + def destroy(self): + """Delete the DeployTemplate from the DB. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :raises: DeployTemplateNotFound if the deploy template no longer + appears in the database. 
+ """ + self.dbapi.destroy_deploy_template(self.id) + self.obj_reset_changes() + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_id(cls, context, template_id): + """Find a deploy template based on its integer ID. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :param template_id: The ID of a deploy template. + :raises: DeployTemplateNotFound if the deploy template no longer + appears in the database. + :returns: a :class:`DeployTemplate` object. + """ + db_template = cls.dbapi.get_deploy_template_by_id(template_id) + template = cls._from_db_object(context, cls(), db_template) + return template + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_uuid(cls, context, uuid): + """Find a deploy template based on its UUID. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :param uuid: The UUID of a deploy template. + :raises: DeployTemplateNotFound if the deploy template no longer + appears in the database. + :returns: a :class:`DeployTemplate` object. + """ + db_template = cls.dbapi.get_deploy_template_by_uuid(uuid) + template = cls._from_db_object(context, cls(), db_template) + return template + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + @classmethod + def get_by_name(cls, context, name): + """Find a deploy template based on its name. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :param name: The name of a deploy template. + :raises: DeployTemplateNotFound if the deploy template no longer + appears in the database. + :returns: a :class:`DeployTemplate` object. + """ + db_template = cls.dbapi.get_deploy_template_by_name(name) + template = cls._from_db_object(context, cls(), db_template) + return template + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + @classmethod + def list(cls, context, limit=None, marker=None, sort_key=None, + sort_dir=None): + """Return a list of DeployTemplate objects. 
+ + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :param limit: maximum number of resources to return in a single result. + :param marker: pagination marker for large data sets. + :param sort_key: column to sort results by. + :param sort_dir: direction to sort. "asc" or "desc". + :returns: a list of :class:`DeployTemplate` objects. + """ + db_templates = cls.dbapi.get_deploy_template_list( + limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) + return cls._from_db_object_list(context, db_templates) + + # NOTE(mgoddard): We don't want to enable RPC on this call just yet. + # Remotable methods can be used in the future to replace current explicit + # RPC calls. Implications of calling new remote procedures should be + # thought through. + # @object_base.remotable_classmethod + @classmethod + def list_by_names(cls, context, names): + """Return a list of DeployTemplate objects matching a set of names. + + :param context: security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: DeployTemplate(context). + :param names: a list of names to filter by. + :returns: a list of :class:`DeployTemplate` objects. + """ + db_templates = cls.dbapi.get_deploy_template_list_by_names(names) + return cls._from_db_object_list(context, db_templates) + + def refresh(self, context=None): + """Loads updates for this deploy template. + + Loads a deploy template with the same uuid from the database and + checks for updated attributes. Updates are applied from + the loaded template column by column, if there are any updates. + + :param context: Security context. NOTE: This should only + be used internally by the indirection_api. + Unfortunately, RPC requires context as the first + argument, even though we don't use it. + A context should be set when instantiating the + object, e.g.: Port(context) + :raises: DeployTemplateNotFound if the deploy template no longer + appears in the database. 
+ """ + current = self.get_by_uuid(self._context, uuid=self.uuid) + self.obj_refresh(current) + self.obj_reset_changes() + + +@base.IronicObjectRegistry.register +class DeployTemplateCRUDNotification(notification.NotificationBase): + """Notification emitted on deploy template API operations.""" + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'payload': object_fields.ObjectField('DeployTemplateCRUDPayload') + } + + +@base.IronicObjectRegistry.register +class DeployTemplateCRUDPayload(notification.NotificationPayloadBase): + # Version 1.0: Initial version + VERSION = '1.0' + + SCHEMA = { + 'created_at': ('deploy_template', 'created_at'), + 'extra': ('deploy_template', 'extra'), + 'name': ('deploy_template', 'name'), + 'steps': ('deploy_template', 'steps'), + 'updated_at': ('deploy_template', 'updated_at'), + 'uuid': ('deploy_template', 'uuid') + } + + fields = { + 'created_at': object_fields.DateTimeField(nullable=True), + 'extra': object_fields.FlexibleDictField(nullable=True), + 'name': object_fields.StringField(nullable=False), + 'steps': object_fields.ListOfFlexibleDictsField(nullable=False), + 'updated_at': object_fields.DateTimeField(nullable=True), + 'uuid': object_fields.UUIDField() + } + + def __init__(self, deploy_template, **kwargs): + super(DeployTemplateCRUDPayload, self).__init__(**kwargs) + self.populate_schema(deploy_template=deploy_template) diff -Nru ironic-12.0.0/ironic/objects/fields.py ironic-12.1.0/ironic/objects/fields.py --- ironic-12.0.0/ironic/objects/fields.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/objects/fields.py 2019-03-21 20:07:40.000000000 +0000 @@ -106,6 +106,10 @@ super(FlexibleDictField, self)._null(obj, attr) +class ListOfFlexibleDictsField(object_fields.AutoTypedField): + AUTO_TYPE = object_fields.List(FlexibleDict()) + + class EnumField(object_fields.EnumField): pass diff -Nru ironic-12.0.0/ironic/objects/__init__.py ironic-12.1.0/ironic/objects/__init__.py --- ironic-12.0.0/ironic/objects/__init__.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/objects/__init__.py 2019-03-21 20:07:40.000000000 +0000 @@ -24,9 +24,11 @@ # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. 
+ __import__('ironic.objects.allocation') __import__('ironic.objects.bios') __import__('ironic.objects.chassis') __import__('ironic.objects.conductor') + __import__('ironic.objects.deploy_template') __import__('ironic.objects.node') __import__('ironic.objects.port') __import__('ironic.objects.portgroup') diff -Nru ironic-12.0.0/ironic/objects/node.py ironic-12.1.0/ironic/objects/node.py --- ironic-12.0.0/ironic/objects/node.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/objects/node.py 2019-03-21 20:07:40.000000000 +0000 @@ -66,7 +66,10 @@ # Version 1.27: Add conductor_group field # Version 1.28: Add automated_clean field # Version 1.29: Add protected and protected_reason fields - VERSION = '1.29' + # Version 1.30: Add owner field + # Version 1.31: Add allocation_id field + # Version 1.32: Add description field + VERSION = '1.32' dbapi = db_api.get_instance() @@ -135,6 +138,7 @@ 'automated_clean': objects.fields.BooleanField(nullable=True), 'protected': objects.fields.BooleanField(), 'protected_reason': object_fields.StringField(nullable=True), + 'allocation_id': object_fields.IntegerField(nullable=True), 'bios_interface': object_fields.StringField(nullable=True), 'boot_interface': object_fields.StringField(nullable=True), @@ -149,6 +153,8 @@ 'storage_interface': object_fields.StringField(nullable=True), 'vendor_interface': object_fields.StringField(nullable=True), 'traits': object_fields.ObjectField('TraitList', nullable=True), + 'owner': object_fields.StringField(nullable=True), + 'description': object_fields.StringField(nullable=True), } def as_dict(self, secure=False): @@ -493,18 +499,6 @@ node = cls._from_db_object(context, cls(), db_node) return node - def _convert_fault_field(self, target_version, - remove_unavailable_fields=True): - fault_is_set = self.obj_attr_is_set('fault') - if target_version >= (1, 25): - if not fault_is_set: - self.fault = None - elif fault_is_set: - if remove_unavailable_fields: - delattr(self, 'fault') - elif self.fault is not None: - self.fault = None - def _convert_deploy_step_field(self, target_version, remove_unavailable_fields=True): # NOTE(rloo): Typically we set the value to None. However, @@ -581,6 +575,12 @@ should be set to None (or removed). Version 1.29: protected was added. For versions prior to this, it should be set to False (or removed). + Version 1.30: owner was added. For versions prior to this, it should be + set to None or removed. + Version 1.31: allocation_id was added. For versions prior to this, it + should be set to None (or removed). + Version 1.32: description was added. For versions prior to this, it + should be set to None (or removed). 
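Each of the version notes above maps to an _adjust_field_to_version call later in this hunk. A schematic sketch of that downgrade rule (simplified; not ironic's exact helper):

    def adjust_field(obj, name, default, target_minor, added_minor, remove):
        # The field exists from `added_minor` on; newer targets need nothing.
        if target_minor >= added_minor or not hasattr(obj, name):
            return
        if remove:
            delattr(obj, name)            # (de)serializing: drop the field
        elif getattr(obj, name) != default:
            setattr(obj, name, default)   # DB path: pin to the default

    class Obj(object):
        owner = 'alice'

    o = Obj()
    adjust_field(o, 'owner', None, target_minor=29, added_minor=30,
                 remove=False)
    print(o.owner)  # None -- a 1.29 consumer never sees an owner value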
:param target_version: the desired version of the object :param remove_unavailable_fields: True to remove fields that are @@ -592,8 +592,9 @@ # Convert the different fields depending on version fields = [('rescue_interface', 22), ('traits', 23), - ('bios_interface', 24), ('automated_clean', 28), - ('protected_reason', 29)] + ('bios_interface', 24), ('fault', 25), + ('automated_clean', 28), ('protected_reason', 29), + ('owner', 30), ('allocation_id', 31), ('description', 32)] for name, minor in fields: self._adjust_field_to_version(name, None, target_version, 1, minor, remove_unavailable_fields) @@ -602,7 +603,6 @@ self._adjust_field_to_version('protected', False, target_version, 1, 29, remove_unavailable_fields) - self._convert_fault_field(target_version, remove_unavailable_fields) self._convert_deploy_step_field(target_version, remove_unavailable_fields) self._convert_conductor_group_field(target_version, @@ -626,6 +626,7 @@ 'console_enabled': ('node', 'console_enabled'), 'created_at': ('node', 'created_at'), 'deploy_step': ('node', 'deploy_step'), + 'description': ('node', 'description'), 'driver': ('node', 'driver'), 'extra': ('node', 'extra'), 'inspection_finished_at': ('node', 'inspection_finished_at'), @@ -648,6 +649,7 @@ 'rescue_interface': ('node', 'rescue_interface'), 'storage_interface': ('node', 'storage_interface'), 'vendor_interface': ('node', 'vendor_interface'), + 'owner': ('node', 'owner'), 'power_state': ('node', 'power_state'), 'properties': ('node', 'properties'), 'protected': ('node', 'protected'), @@ -674,13 +676,16 @@ # Version 1.9: Add deploy_step field exposed via API. # Version 1.10: Add conductor_group field exposed via API. # Version 1.11: Add protected and protected_reason fields exposed via API. - VERSION = '1.11' + # Version 1.12: Add node owner field. + # Version 1.13: Add description field. + VERSION = '1.13' fields = { 'clean_step': object_fields.FlexibleDictField(nullable=True), 'conductor_group': object_fields.StringField(nullable=True), 'console_enabled': object_fields.BooleanField(nullable=True), 'created_at': object_fields.DateTimeField(nullable=True), 'deploy_step': object_fields.FlexibleDictField(nullable=True), + 'description': object_fields.StringField(nullable=True), 'driver': object_fields.StringField(nullable=True), 'extra': object_fields.FlexibleDictField(nullable=True), 'inspection_finished_at': object_fields.DateTimeField(nullable=True), @@ -703,6 +708,7 @@ 'storage_interface': object_fields.StringField(nullable=True), 'vendor_interface': object_fields.StringField(nullable=True), 'name': object_fields.StringField(nullable=True), + 'owner': object_fields.StringField(nullable=True), 'power_state': object_fields.StringField(nullable=True), 'properties': object_fields.FlexibleDictField(nullable=True), 'protected': object_fields.BooleanField(nullable=True), @@ -754,7 +760,9 @@ # Version 1.9: Parent NodePayload version 1.9 # Version 1.10: Parent NodePayload version 1.10 # Version 1.11: Parent NodePayload version 1.11 - VERSION = '1.11' + # Version 1.12: Parent NodePayload version 1.12 + # Version 1.13: Parent NodePayload version 1.13 + VERSION = '1.13' fields = { # "to_power" indicates the future target_power_state of the node. 
A @@ -806,7 +814,9 @@ # Version 1.9: Parent NodePayload version 1.9 # Version 1.10: Parent NodePayload version 1.10 # Version 1.11: Parent NodePayload version 1.11 - VERSION = '1.11' + # Version 1.12: Parent NodePayload version 1.12 + # Version 1.13: Parent NodePayload version 1.13 + VERSION = '1.13' fields = { 'from_power': object_fields.StringField(nullable=True) @@ -842,7 +852,9 @@ # Version 1.9: Parent NodePayload version 1.9 # Version 1.10: Parent NodePayload version 1.10 # Version 1.11: Parent NodePayload version 1.11 - VERSION = '1.11' + # Version 1.12: Parent NodePayload version 1.12 + # Version 1.13: Parent NodePayload version 1.13 + VERSION = '1.13' SCHEMA = dict(NodePayload.SCHEMA, **{'instance_info': ('node', 'instance_info')}) @@ -885,7 +897,9 @@ # Version 1.7: Parent NodePayload version 1.9 # Version 1.8: Parent NodePayload version 1.10 # Version 1.9: Parent NodePayload version 1.11 - VERSION = '1.9' + # Version 1.10: Parent NodePayload version 1.12 + # Version 1.11: Parent NodePayload version 1.13 + VERSION = '1.11' SCHEMA = dict(NodePayload.SCHEMA, **{'instance_info': ('node', 'instance_info'), diff -Nru ironic-12.0.0/ironic/objects/port.py ironic-12.1.0/ironic/objects/port.py --- ironic-12.0.0/ironic/objects/port.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/objects/port.py 2019-03-21 20:07:40.000000000 +0000 @@ -41,7 +41,7 @@ # NOTE(rloo): if we introduce newer port versions in the same cycle, # we could add those versions along with 1.8. This is only so we don't # duplicate work; it isn't necessary. - db_ports = Port.dbapi.get_not_versions('Port', ['1.8']) + db_ports = Port.dbapi.get_not_versions('Port', ['1.8', '1.9']) total = len(db_ports) max_count = max_count or total done = 0 @@ -71,7 +71,8 @@ # Version 1.8: Migrate/copy extra['vif_port_id'] to # internal_info['tenant_vif_port_id'] (not an explicit db # change) - VERSION = '1.8' + # Version 1.9: Add support for Smart NIC port + VERSION = '1.9' dbapi = dbapi.get_instance() @@ -87,6 +88,8 @@ 'pxe_enabled': object_fields.BooleanField(), 'internal_info': object_fields.FlexibleDictField(nullable=True), 'physical_network': object_fields.StringField(nullable=True), + 'is_smartnic': object_fields.BooleanField(nullable=True, + default=False), } def _convert_to_version(self, target_version, @@ -106,6 +109,9 @@ .extra value to internal_info. There is nothing to do here when downgrading to an older version. + Version 1.9: remove is_smartnic field for unsupported versions if + remove_unavailable_fields is True. + :param target_version: the desired version of the object :param remove_unavailable_fields: True to remove fields that are unavailable in the target version; set this to True when @@ -140,6 +146,24 @@ # DB: set unavailable fields to their default. self.physical_network = None + # Convert is_smartnic field. + is_smartnic_set = self.obj_attr_is_set('is_smartnic') + if target_version >= (1, 9): + # Target version supports is_smartnic. Set it to its default + # value if it is not set. + if not is_smartnic_set: + self.is_smartnic = False + + # handle is_smartnic field in older version + elif is_smartnic_set: + # Target version does not support is_smartnic, and it is set. + if remove_unavailable_fields: + # (De)serialising: remove unavailable fields. + delattr(self, 'is_smartnic') + elif self.is_smartnic is not False: + # DB: set unavailable fields to their default. + self.is_smartnic = False + # NOTE(xek): We don't want to enable RPC on this call just yet. 
Remotable # methods can be used in the future to replace current explicit RPC calls. # Implications of calling new remote procedures should be thought through. @@ -393,6 +417,15 @@ """ return cls.supports_version((1, 7)) + @classmethod + def supports_is_smartnic(cls): + """Return whether is_smartnic field is supported. + + :returns: Whether is_smartnic field is supported + :raises: ovo_exception.IncompatibleObjectVersion + """ + return cls.supports_version((1, 9)) + @base.IronicObjectRegistry.register class PortCRUDNotification(notification.NotificationBase): @@ -410,7 +443,8 @@ # Version 1.0: Initial version # Version 1.1: Add "portgroup_uuid" field # Version 1.2: Add "physical_network" field - VERSION = '1.2' + # Version 1.3: Add "is_smartnic" field + VERSION = '1.3' SCHEMA = { 'address': ('port', 'address'), @@ -420,7 +454,8 @@ 'physical_network': ('port', 'physical_network'), 'created_at': ('port', 'created_at'), 'updated_at': ('port', 'updated_at'), - 'uuid': ('port', 'uuid') + 'uuid': ('port', 'uuid'), + 'is_smartnic': ('port', 'is_smartnic'), } fields = { @@ -434,7 +469,9 @@ 'physical_network': object_fields.StringField(nullable=True), 'created_at': object_fields.DateTimeField(nullable=True), 'updated_at': object_fields.DateTimeField(nullable=True), - 'uuid': object_fields.UUIDField() + 'uuid': object_fields.UUIDField(), + 'is_smartnic': object_fields.BooleanField(nullable=True, + default=False), } def __init__(self, port, node_uuid, portgroup_uuid): diff -Nru ironic-12.0.0/ironic/releasenotes/notes/add-protection-for-available-nodes-25f163d69782ef63.yaml ironic-12.1.0/ironic/releasenotes/notes/add-protection-for-available-nodes-25f163d69782ef63.yaml --- ironic-12.0.0/ironic/releasenotes/notes/add-protection-for-available-nodes-25f163d69782ef63.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/releasenotes/notes/add-protection-for-available-nodes-25f163d69782ef63.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,12 @@ +--- +features: + - Adds option 'allow_deleting_available_nodes' to control whether nodes in + state 'available' should be deletable (which is and stays the default). + Setting this option to False will remove 'available' from the list of + states in which nodes can be deleted from ironic. It hence provides + protection against accidental removal of nodes which are ready for + allocation (and is meant as a safeguard for the operational effort to + bring nodes into this state). For backwards compatibility reasons, the + default value for this option is True. The other states in which nodes + can be deleted from ironic ('manageable', 'enroll', and 'adoptfail') + remain unchanged. This option can be changed without service restart. diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_allocation.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_allocation.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_allocation.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_allocation.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,743 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for the API /allocations/ methods. +""" + +import datetime + +import fixtures +import mock +from oslo_config import cfg +from oslo_utils import timeutils +from oslo_utils import uuidutils +import six +from six.moves import http_client +from six.moves.urllib import parse as urlparse +from wsme import types as wtypes + +from ironic.api.controllers import base as api_base +from ironic.api.controllers import v1 as api_v1 +from ironic.api.controllers.v1 import allocation as api_allocation +from ironic.api.controllers.v1 import notification_utils +from ironic.common import exception +from ironic.conductor import rpcapi +from ironic import objects +from ironic.objects import fields as obj_fields +from ironic.tests import base +from ironic.tests.unit.api import base as test_api_base +from ironic.tests.unit.api import utils as apiutils +from ironic.tests.unit.objects import utils as obj_utils + + +class TestAllocationObject(base.TestCase): + + def test_allocation_init(self): + allocation_dict = apiutils.allocation_post_data(node_id=None) + del allocation_dict['extra'] + allocation = api_allocation.Allocation(**allocation_dict) + self.assertEqual(wtypes.Unset, allocation.extra) + + +class TestListAllocations(test_api_base.BaseApiTest): + headers = {api_base.Version.string: str(api_v1.max_version())} + + def setUp(self): + super(TestListAllocations, self).setUp() + self.node = obj_utils.create_test_node(self.context, name='node-1') + + def test_empty(self): + data = self.get_json('/allocations', headers=self.headers) + self.assertEqual([], data['allocations']) + + def test_one(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + data = self.get_json('/allocations', headers=self.headers) + self.assertEqual(allocation.uuid, data['allocations'][0]["uuid"]) + self.assertEqual(allocation.name, data['allocations'][0]['name']) + self.assertEqual({}, data['allocations'][0]["extra"]) + self.assertEqual(self.node.uuid, data['allocations'][0]["node_uuid"]) + # never expose the node_id + self.assertNotIn('node_id', data['allocations'][0]) + + def test_get_one(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + data = self.get_json('/allocations/%s' % allocation.uuid, + headers=self.headers) + self.assertEqual(allocation.uuid, data['uuid']) + self.assertEqual({}, data["extra"]) + self.assertEqual(self.node.uuid, data["node_uuid"]) + # never expose the node_id + self.assertNotIn('node_id', data) + + def test_get_one_with_json(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + data = self.get_json('/allocations/%s.json' % allocation.uuid, + headers=self.headers) + self.assertEqual(allocation.uuid, data['uuid']) + + def test_get_one_with_json_in_name(self): + allocation = obj_utils.create_test_allocation(self.context, + name='pg.json', + node_id=self.node.id) + data = self.get_json('/allocations/%s' % allocation.uuid, + headers=self.headers) + self.assertEqual(allocation.uuid, data['uuid']) + + def test_get_one_with_suffix(self): + allocation = obj_utils.create_test_allocation(self.context, + name='pg.1', + node_id=self.node.id) + data = self.get_json('/allocations/%s' % allocation.uuid, + headers=self.headers) + self.assertEqual(allocation.uuid, data['uuid']) + + def test_get_one_custom_fields(self): + allocation = obj_utils.create_test_allocation(self.context, + 
node_id=self.node.id) + fields = 'resource_class,extra' + data = self.get_json( + '/allocations/%s?fields=%s' % (allocation.uuid, fields), + headers=self.headers) + # We always append "links" + self.assertItemsEqual(['resource_class', 'extra', 'links'], data) + + def test_get_collection_custom_fields(self): + fields = 'uuid,extra' + for i in range(3): + obj_utils.create_test_allocation( + self.context, + node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % i) + + data = self.get_json( + '/allocations?fields=%s' % fields, + headers=self.headers) + + self.assertEqual(3, len(data['allocations'])) + for allocation in data['allocations']: + # We always append "links" + self.assertItemsEqual(['uuid', 'extra', 'links'], allocation) + + def test_get_custom_fields_invalid_fields(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + fields = 'uuid,spongebob' + response = self.get_json( + '/allocations/%s?fields=%s' % (allocation.uuid, fields), + headers=self.headers, expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertIn('spongebob', response.json['error_message']) + + def test_get_one_invalid_api_version(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + response = self.get_json( + '/allocations/%s' % (allocation.uuid), + headers={api_base.Version.string: str(api_v1.min_version())}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_get_one_invalid_api_version_without_check(self): + # Invalid name, but the check happens after the microversion check. + response = self.get_json( + '/allocations/ba!na!na!', + headers={api_base.Version.string: str(api_v1.min_version())}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_many(self): + allocations = [] + for id_ in range(5): + allocation = obj_utils.create_test_allocation( + self.context, node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocations.append(allocation.uuid) + data = self.get_json('/allocations', headers=self.headers) + self.assertEqual(len(allocations), len(data['allocations'])) + + uuids = [n['uuid'] for n in data['allocations']] + six.assertCountEqual(self, allocations, uuids) + + def test_links(self): + uuid = uuidutils.generate_uuid() + obj_utils.create_test_allocation(self.context, + uuid=uuid, + node_id=self.node.id) + data = self.get_json('/allocations/%s' % uuid, headers=self.headers) + self.assertIn('links', data) + self.assertEqual(2, len(data['links'])) + self.assertIn(uuid, data['links'][0]['href']) + for l in data['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark, + headers=self.headers)) + + def test_collection_links(self): + allocations = [] + for id_ in range(5): + allocation = obj_utils.create_test_allocation( + self.context, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocations.append(allocation.uuid) + data = self.get_json('/allocations/?limit=3', headers=self.headers) + self.assertEqual(3, len(data['allocations'])) + + next_marker = data['allocations'][-1]['uuid'] + self.assertIn(next_marker, data['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + allocations = [] + for id_ in range(5): + allocation = 
obj_utils.create_test_allocation( + self.context, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocations.append(allocation.uuid) + data = self.get_json('/allocations', headers=self.headers) + self.assertEqual(3, len(data['allocations'])) + + next_marker = data['allocations'][-1]['uuid'] + self.assertIn(next_marker, data['next']) + + def test_get_collection_pagination_no_uuid(self): + fields = 'node_uuid' + limit = 2 + allocations = [] + for id_ in range(3): + allocation = obj_utils.create_test_allocation( + self.context, + node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocations.append(allocation) + + data = self.get_json( + '/allocations?fields=%s&limit=%s' % (fields, limit), + headers=self.headers) + + self.assertEqual(limit, len(data['allocations'])) + self.assertIn('marker=%s' % allocations[limit - 1].uuid, data['next']) + + def test_allocation_get_all_invalid_api_version(self): + obj_utils.create_test_allocation( + self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), + name='allocation_1') + response = self.get_json('/allocations', + headers={api_base.Version.string: '1.14'}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_sort_key(self): + allocations = [] + for id_ in range(3): + allocation = obj_utils.create_test_allocation( + self.context, + node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocations.append(allocation.uuid) + data = self.get_json('/allocations?sort_key=uuid', + headers=self.headers) + uuids = [n['uuid'] for n in data['allocations']] + self.assertEqual(sorted(allocations), uuids) + + def test_sort_key_invalid(self): + invalid_keys_list = ['foo', 'extra', 'internal_info', 'properties'] + for invalid_key in invalid_keys_list: + response = self.get_json('/allocations?sort_key=%s' % invalid_key, + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertIn(invalid_key, response.json['error_message']) + + def test_sort_key_allowed(self): + allocation_uuids = [] + for id_ in range(3, 0, -1): + allocation = obj_utils.create_test_allocation( + self.context, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % id_) + allocation_uuids.append(allocation.uuid) + allocation_uuids.reverse() + data = self.get_json('/allocations?sort_key=name', + headers=self.headers) + data_uuids = [p['uuid'] for p in data['allocations']] + self.assertEqual(allocation_uuids, data_uuids) + + def test_get_all_by_state(self): + for i in range(5): + if i < 3: + state = 'allocating' + else: + state = 'active' + obj_utils.create_test_allocation( + self.context, + state=state, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % i) + data = self.get_json("/allocations?state=allocating", + headers=self.headers) + self.assertEqual(3, len(data['allocations'])) + + def test_get_all_by_node_name(self): + for i in range(5): + if i < 3: + node_id = self.node.id + else: + node_id = 100000 + i + obj_utils.create_test_allocation( + self.context, + node_id=node_id, + uuid=uuidutils.generate_uuid(), + name='allocation%s' % i) + data = self.get_json("/allocations?node=%s" % self.node.name, + headers=self.headers) + self.assertEqual(3, len(data['allocations'])) + + def test_get_all_by_node_uuid(self): + obj_utils.create_test_allocation(self.context, node_id=self.node.id) + data = self.get_json('/allocations?node=%s' % 
(self.node.uuid), + headers=self.headers) + self.assertEqual(1, len(data['allocations'])) + + def test_get_all_by_non_existing_node(self): + obj_utils.create_test_allocation(self.context, node_id=self.node.id) + response = self.get_json('/allocations?node=banana', + headers=self.headers, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + + def test_get_by_node_resource(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + data = self.get_json('/nodes/%s/allocation' % self.node.uuid, + headers=self.headers) + self.assertEqual(allocation.uuid, data['uuid']) + self.assertEqual({}, data["extra"]) + self.assertEqual(self.node.uuid, data["node_uuid"]) + + def test_get_by_node_resource_invalid_api_version(self): + obj_utils.create_test_allocation(self.context, node_id=self.node.id) + response = self.get_json( + '/nodes/%s/allocation' % self.node.uuid, + headers={api_base.Version.string: str(api_v1.min_version())}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_get_by_node_resource_with_fields(self): + obj_utils.create_test_allocation(self.context, node_id=self.node.id) + data = self.get_json('/nodes/%s/allocation?fields=name,extra' % + self.node.uuid, + headers=self.headers) + self.assertNotIn('uuid', data) + self.assertIn('name', data) + self.assertEqual({}, data["extra"]) + + def test_get_by_node_resource_and_id(self): + allocation = obj_utils.create_test_allocation(self.context, + node_id=self.node.id) + response = self.get_json('/nodes/%s/allocation/%s' % (self.node.uuid, + allocation.uuid), + headers=self.headers, expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_by_node_resource_not_existed(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + res = self.get_json('/node/%s/allocation' % node.uuid, + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) + + def test_by_node_invalid_node(self): + res = self.get_json('/node/%s/allocation' % uuidutils.generate_uuid(), + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) + + +class TestPatch(test_api_base.BaseApiTest): + headers = {api_base.Version.string: str(api_v1.max_version())} + + def setUp(self): + super(TestPatch, self).setUp() + self.allocation = obj_utils.create_test_allocation(self.context) + + def test_update_not_allowed(self): + response = self.patch_json('/allocations/%s' % self.allocation.uuid, + [{'path': '/extra/foo', + 'value': 'bar', + 'op': 'add'}], + expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + +def _create_locally(_api, _ctx, allocation, _topic): + allocation.create() + return allocation + + +@mock.patch.object(rpcapi.ConductorAPI, 'create_allocation', _create_locally) +class TestPost(test_api_base.BaseApiTest): + headers = {api_base.Version.string: str(api_v1.max_version())} + + def setUp(self): + super(TestPost, self).setUp() + self.mock_get_topic = self.useFixture( + fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_random_topic') + ).mock + self.mock_get_topic.return_value = 'some-topic' + + @mock.patch.object(notification_utils, 
'_emit_api_notification') + @mock.patch.object(timeutils, 'utcnow', autospec=True) + def test_create_allocation(self, mock_utcnow, mock_notify): + adict = apiutils.allocation_post_data() + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + response = self.post_json('/allocations', adict, + headers=self.headers) + self.assertEqual(http_client.CREATED, response.status_int) + self.assertEqual(adict['uuid'], response.json['uuid']) + self.assertEqual('allocating', response.json['state']) + self.assertIsNone(response.json['node_uuid']) + self.assertEqual([], response.json['candidate_nodes']) + self.assertEqual([], response.json['traits']) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(adict['uuid'], result['uuid']) + self.assertFalse(result['updated_at']) + self.assertIsNone(result['node_uuid']) + self.assertEqual([], result['candidate_nodes']) + self.assertEqual([], result['traits']) + return_created_at = timeutils.parse_isotime( + result['created_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_created_at) + # Check location header + self.assertIsNotNone(response.location) + expected_location = '/v1/allocations/%s' % adict['uuid'] + self.assertEqual(urlparse.urlparse(response.location).path, + expected_location) + mock_notify.assert_has_calls([ + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.END), + ]) + + def test_create_allocation_invalid_api_version(self): + adict = apiutils.allocation_post_data() + response = self.post_json( + '/allocations', adict, headers={api_base.Version.string: '1.50'}, + expect_errors=True) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_create_allocation_doesnt_contain_id(self): + with mock.patch.object(self.dbapi, 'create_allocation', + wraps=self.dbapi.create_allocation) as cp_mock: + adict = apiutils.allocation_post_data(extra={'foo': 123}) + self.post_json('/allocations', adict, headers=self.headers) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(adict['extra'], result['extra']) + cp_mock.assert_called_once_with(mock.ANY) + # Check that 'id' is not in first arg of positional args + self.assertNotIn('id', cp_mock.call_args[0][0]) + + @mock.patch.object(notification_utils.LOG, 'exception', autospec=True) + @mock.patch.object(notification_utils.LOG, 'warning', autospec=True) + def test_create_allocation_generate_uuid(self, mock_warn, mock_except): + adict = apiutils.allocation_post_data() + del adict['uuid'] + response = self.post_json('/allocations', adict, headers=self.headers) + result = self.get_json('/allocations/%s' % response.json['uuid'], + headers=self.headers) + self.assertTrue(uuidutils.is_uuid_like(result['uuid'])) + self.assertFalse(mock_warn.called) + self.assertFalse(mock_except.called) + + @mock.patch.object(notification_utils, '_emit_api_notification') + @mock.patch.object(objects.Allocation, 'create') + def test_create_allocation_error(self, mock_create, mock_notify): + mock_create.side_effect = Exception() + adict = apiutils.allocation_post_data() + self.post_json('/allocations', adict, headers=self.headers, + expect_errors=True) + mock_notify.assert_has_calls([ + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + 
obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.ERROR, + obj_fields.NotificationStatus.ERROR), + ]) + + def test_create_allocation_with_candidate_nodes(self): + node1 = obj_utils.create_test_node(self.context, + name='node-1') + node2 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + adict = apiutils.allocation_post_data( + candidate_nodes=[node1.name, node2.uuid]) + response = self.post_json('/allocations', adict, + headers=self.headers) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(adict['uuid'], result['uuid']) + self.assertEqual([node1.uuid, node2.uuid], result['candidate_nodes']) + + def test_create_allocation_valid_extra(self): + adict = apiutils.allocation_post_data( + extra={'str': 'foo', 'int': 123, 'float': 0.1, 'bool': True, + 'list': [1, 2], 'none': None, 'dict': {'cat': 'meow'}}) + self.post_json('/allocations', adict, headers=self.headers) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(adict['extra'], result['extra']) + + def test_create_allocation_with_no_extra(self): + adict = apiutils.allocation_post_data() + del adict['extra'] + response = self.post_json('/allocations', adict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + + def test_create_allocation_no_mandatory_field_resource_class(self): + adict = apiutils.allocation_post_data() + del adict['resource_class'] + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_resource_class_too_long(self): + adict = apiutils.allocation_post_data() + adict['resource_class'] = 'f' * 81 + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_with_traits(self): + adict = apiutils.allocation_post_data() + adict['traits'] = ['CUSTOM_GPU', 'CUSTOM_FOO_BAR'] + response = self.post_json('/allocations', adict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + self.assertEqual(['CUSTOM_GPU', 'CUSTOM_FOO_BAR'], + response.json['traits']) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(adict['uuid'], result['uuid']) + self.assertEqual(['CUSTOM_GPU', 'CUSTOM_FOO_BAR'], + result['traits']) + + def test_create_allocation_invalid_trait(self): + adict = apiutils.allocation_post_data() + adict['traits'] = ['CUSTOM_GPU', 'FOO_BAR'] + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_invalid_candidate_node_format(self): + adict = apiutils.allocation_post_data( + candidate_nodes=['invalid-format']) + response = 
self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_candidate_node_not_found(self): + adict = apiutils.allocation_post_data( + candidate_nodes=['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e']) + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_candidate_node_invalid(self): + adict = apiutils.allocation_post_data( + candidate_nodes=['this/is/not a/node/name']) + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_create_allocation_name_ok(self): + name = 'foo' + adict = apiutils.allocation_post_data(name=name) + self.post_json('/allocations', adict, headers=self.headers) + result = self.get_json('/allocations/%s' % adict['uuid'], + headers=self.headers) + self.assertEqual(name, result['name']) + + def test_create_allocation_name_invalid(self): + name = 'aa:bb_cc' + adict = apiutils.allocation_post_data(name=name) + response = self.post_json('/allocations', adict, headers=self.headers, + expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + + def test_create_by_node_not_allowed(self): + node = obj_utils.create_test_node(self.context) + adict = apiutils.allocation_post_data() + response = self.post_json('/nodes/%s/allocation' % node.uuid, + adict, headers=self.headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_create_with_node_uuid_not_allowed(self): + adict = apiutils.allocation_post_data() + adict['node_uuid'] = uuidutils.generate_uuid() + response = self.post_json('/allocations', adict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + +@mock.patch.object(rpcapi.ConductorAPI, 'destroy_allocation') +class TestDelete(test_api_base.BaseApiTest): + headers = {api_base.Version.string: str(api_v1.max_version())} + + def setUp(self): + super(TestDelete, self).setUp() + self.node = obj_utils.create_test_node(self.context) + self.allocation = obj_utils.create_test_allocation( + self.context, node_id=self.node.id, name='alloc1') + + self.mock_get_topic = self.useFixture( + fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_random_topic') + ).mock + + @mock.patch.object(notification_utils, '_emit_api_notification') + def test_delete_allocation_by_id(self, mock_notify, mock_destroy): + self.delete('/allocations/%s' % self.allocation.uuid, + headers=self.headers) + self.assertTrue(mock_destroy.called) + mock_notify.assert_has_calls([ + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START, + node_uuid=self.node.uuid), + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + 
obj_fields.NotificationStatus.END, + node_uuid=self.node.uuid), + ]) + + @mock.patch.object(notification_utils, '_emit_api_notification') + def test_delete_allocation_node_locked(self, mock_notify, mock_destroy): + self.node.reserve(self.context, 'fake', self.node.uuid) + mock_destroy.side_effect = exception.NodeLocked(node='fake-node', + host='fake-host') + ret = self.delete('/allocations/%s' % self.allocation.uuid, + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.CONFLICT, ret.status_code) + self.assertTrue(ret.json['error_message']) + self.assertTrue(mock_destroy.called) + mock_notify.assert_has_calls([ + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START, + node_uuid=self.node.uuid), + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.ERROR, + obj_fields.NotificationStatus.ERROR, + node_uuid=self.node.uuid), + ]) + + def test_delete_allocation_invalid_api_version(self, mock_destroy): + response = self.delete('/allocations/%s' % self.allocation.uuid, + expect_errors=True, + headers={api_base.Version.string: '1.14'}) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_delete_allocation_invalid_api_version_without_check(self, + mock_destroy): + # Invalid name, but the check happens after the microversion check. + response = self.delete('/allocations/ba!na!na1', + expect_errors=True, + headers={api_base.Version.string: '1.14'}) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_delete_allocation_by_name(self, mock_destroy): + self.delete('/allocations/%s' % self.allocation.name, + headers=self.headers) + self.assertTrue(mock_destroy.called) + + def test_delete_allocation_by_name_with_json(self, mock_destroy): + self.delete('/allocations/%s.json' % self.allocation.name, + headers=self.headers) + self.assertTrue(mock_destroy.called) + + def test_delete_allocation_by_name_not_existed(self, mock_destroy): + res = self.delete('/allocations/%s' % 'blah', expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) + + @mock.patch.object(notification_utils, '_emit_api_notification') + def test_delete_allocation_by_node(self, mock_notify, mock_destroy): + self.delete('/nodes/%s/allocation' % self.node.uuid, + headers=self.headers) + self.assertTrue(mock_destroy.called) + mock_notify.assert_has_calls([ + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START, + node_uuid=self.node.uuid), + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.END, + node_uuid=self.node.uuid), + ]) + + def test_delete_allocation_by_node_not_existed(self, mock_destroy): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + res = self.delete('/nodes/%s/allocation' % node.uuid, + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) + + def test_delete_allocation_invalid_node(self, mock_destroy): + res = self.delete('/nodes/%s/allocation' % uuidutils.generate_uuid(), + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) + + def test_delete_allocation_by_node_invalid_api_version(self, mock_destroy): + obj_utils.create_test_allocation(self.context, node_id=self.node.id) + response = self.delete( + '/nodes/%s/allocation' % self.node.uuid, + 
headers={api_base.Version.string: str(api_v1.min_version())}, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + self.assertFalse(mock_destroy.called) diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_deploy_template.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_deploy_template.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_deploy_template.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_deploy_template.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,956 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for the API /deploy_templates/ methods. +""" + +import datetime + +import mock +from oslo_config import cfg +from oslo_utils import timeutils +from oslo_utils import uuidutils +import six +from six.moves import http_client +from six.moves.urllib import parse as urlparse + +from ironic.api.controllers import base as api_base +from ironic.api.controllers import v1 as api_v1 +from ironic.api.controllers.v1 import deploy_template as api_deploy_template +from ironic.api.controllers.v1 import notification_utils +from ironic.common import exception +from ironic import objects +from ironic.objects import fields as obj_fields +from ironic.tests import base +from ironic.tests.unit.api import base as test_api_base +from ironic.tests.unit.api import utils as test_api_utils +from ironic.tests.unit.objects import utils as obj_utils + + +def _obj_to_api_step(obj_step): + """Convert a deploy step in 'object' form to one in 'API' form.""" + return { + 'interface': obj_step['interface'], + 'step': obj_step['step'], + 'args': obj_step['args'], + 'priority': obj_step['priority'], + } + + +class TestDeployTemplateObject(base.TestCase): + + def test_deploy_template_init(self): + template_dict = test_api_utils.deploy_template_post_data() + template = api_deploy_template.DeployTemplate(**template_dict) + self.assertEqual(template_dict['uuid'], template.uuid) + self.assertEqual(template_dict['name'], template.name) + self.assertEqual(template_dict['extra'], template.extra) + for t_dict_step, t_step in zip(template_dict['steps'], template.steps): + self.assertEqual(t_dict_step['interface'], t_step.interface) + self.assertEqual(t_dict_step['step'], t_step.step) + self.assertEqual(t_dict_step['args'], t_step.args) + self.assertEqual(t_dict_step['priority'], t_step.priority) + + def test_deploy_template_sample(self): + sample = api_deploy_template.DeployTemplate.sample(expand=False) + self.assertEqual('534e73fa-1014-4e58-969a-814cc0cb9d43', sample.uuid) + self.assertEqual('CUSTOM_RAID1', sample.name) + self.assertEqual({'foo': 'bar'}, sample.extra) + + +class BaseDeployTemplatesAPITest(test_api_base.BaseApiTest): + headers = {api_base.Version.string: str(api_v1.max_version())} + invalid_version_headers = {api_base.Version.string: '1.54'} + + +class TestListDeployTemplates(BaseDeployTemplatesAPITest): + + def test_empty(self): + data = self.get_json('/deploy_templates', 
headers=self.headers) + self.assertEqual([], data['deploy_templates']) + + def test_one(self): + template = obj_utils.create_test_deploy_template(self.context) + data = self.get_json('/deploy_templates', headers=self.headers) + self.assertEqual(1, len(data['deploy_templates'])) + self.assertEqual(template.uuid, data['deploy_templates'][0]['uuid']) + self.assertEqual(template.name, data['deploy_templates'][0]['name']) + self.assertNotIn('steps', data['deploy_templates'][0]) + self.assertNotIn('extra', data['deploy_templates'][0]) + + def test_get_one(self): + template = obj_utils.create_test_deploy_template(self.context) + data = self.get_json('/deploy_templates/%s' % template.uuid, + headers=self.headers) + self.assertEqual(template.uuid, data['uuid']) + self.assertEqual(template.name, data['name']) + self.assertEqual(template.extra, data['extra']) + for t_dict_step, t_step in zip(data['steps'], template.steps): + self.assertEqual(t_dict_step['interface'], t_step['interface']) + self.assertEqual(t_dict_step['step'], t_step['step']) + self.assertEqual(t_dict_step['args'], t_step['args']) + self.assertEqual(t_dict_step['priority'], t_step['priority']) + + def test_get_one_with_json(self): + template = obj_utils.create_test_deploy_template(self.context) + data = self.get_json('/deploy_templates/%s.json' % template.uuid, + headers=self.headers) + self.assertEqual(template.uuid, data['uuid']) + + def test_get_one_with_suffix(self): + template = obj_utils.create_test_deploy_template(self.context, + name='CUSTOM_DT1') + data = self.get_json('/deploy_templates/%s' % template.uuid, + headers=self.headers) + self.assertEqual(template.uuid, data['uuid']) + + def test_get_one_custom_fields(self): + template = obj_utils.create_test_deploy_template(self.context) + fields = 'name,steps' + data = self.get_json( + '/deploy_templates/%s?fields=%s' % (template.uuid, fields), + headers=self.headers) + # We always append "links" + self.assertItemsEqual(['name', 'steps', 'links'], data) + + def test_get_collection_custom_fields(self): + fields = 'uuid,steps' + for i in range(3): + obj_utils.create_test_deploy_template( + self.context, + uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % i) + + data = self.get_json( + '/deploy_templates?fields=%s' % fields, + headers=self.headers) + + self.assertEqual(3, len(data['deploy_templates'])) + for template in data['deploy_templates']: + # We always append "links" + self.assertItemsEqual(['uuid', 'steps', 'links'], template) + + def test_get_custom_fields_invalid_fields(self): + template = obj_utils.create_test_deploy_template(self.context) + fields = 'uuid,spongebob' + response = self.get_json( + '/deploy_templates/%s?fields=%s' % (template.uuid, fields), + headers=self.headers, expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertIn('spongebob', response.json['error_message']) + + def test_get_all_invalid_api_version(self): + obj_utils.create_test_deploy_template(self.context) + response = self.get_json('/deploy_templates', + headers=self.invalid_version_headers, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def test_get_one_invalid_api_version(self): + template = obj_utils.create_test_deploy_template(self.context) + response = self.get_json( + '/deploy_templates/%s' % (template.uuid), + headers=self.invalid_version_headers, + expect_errors=True) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + + def 
test_detail_query(self): + template = obj_utils.create_test_deploy_template(self.context) + data = self.get_json('/deploy_templates?detail=True', + headers=self.headers) + self.assertEqual(template.uuid, data['deploy_templates'][0]['uuid']) + self.assertIn('name', data['deploy_templates'][0]) + self.assertIn('steps', data['deploy_templates'][0]) + self.assertIn('extra', data['deploy_templates'][0]) + + def test_detail_query_false(self): + obj_utils.create_test_deploy_template(self.context) + data1 = self.get_json('/deploy_templates', headers=self.headers) + data2 = self.get_json( + '/deploy_templates?detail=False', headers=self.headers) + self.assertEqual(data1['deploy_templates'], data2['deploy_templates']) + + def test_detail_using_query_false_and_fields(self): + obj_utils.create_test_deploy_template(self.context) + data = self.get_json( + '/deploy_templates?detail=False&fields=steps', + headers=self.headers) + self.assertIn('steps', data['deploy_templates'][0]) + self.assertNotIn('uuid', data['deploy_templates'][0]) + self.assertNotIn('extra', data['deploy_templates'][0]) + + def test_detail_using_query_and_fields(self): + obj_utils.create_test_deploy_template(self.context) + response = self.get_json( + '/deploy_templates?detail=True&fields=name', headers=self.headers, + expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + + def test_many(self): + templates = [] + for id_ in range(5): + template = obj_utils.create_test_deploy_template( + self.context, uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % id_) + templates.append(template.uuid) + data = self.get_json('/deploy_templates', headers=self.headers) + self.assertEqual(len(templates), len(data['deploy_templates'])) + + uuids = [n['uuid'] for n in data['deploy_templates']] + six.assertCountEqual(self, templates, uuids) + + def test_links(self): + uuid = uuidutils.generate_uuid() + obj_utils.create_test_deploy_template(self.context, uuid=uuid) + data = self.get_json('/deploy_templates/%s' % uuid, + headers=self.headers) + self.assertIn('links', data) + self.assertEqual(2, len(data['links'])) + self.assertIn(uuid, data['links'][0]['href']) + for l in data['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark, + headers=self.headers)) + + def test_collection_links(self): + templates = [] + for id_ in range(5): + template = obj_utils.create_test_deploy_template( + self.context, uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % id_) + templates.append(template.uuid) + data = self.get_json('/deploy_templates/?limit=3', + headers=self.headers) + self.assertEqual(3, len(data['deploy_templates'])) + + next_marker = data['deploy_templates'][-1]['uuid'] + self.assertIn(next_marker, data['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + templates = [] + for id_ in range(5): + template = obj_utils.create_test_deploy_template( + self.context, uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % id_) + templates.append(template.uuid) + data = self.get_json('/deploy_templates', headers=self.headers) + self.assertEqual(3, len(data['deploy_templates'])) + + next_marker = data['deploy_templates'][-1]['uuid'] + self.assertIn(next_marker, data['next']) + + def test_get_collection_pagination_no_uuid(self): + fields = 'name' + limit = 2 + templates = [] + for id_ in range(3): + template = obj_utils.create_test_deploy_template( + self.context, + uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' 
% id_) + templates.append(template) + + data = self.get_json( + '/deploy_templates?fields=%s&limit=%s' % (fields, limit), + headers=self.headers) + + self.assertEqual(limit, len(data['deploy_templates'])) + self.assertIn('marker=%s' % templates[limit - 1].uuid, data['next']) + + def test_sort_key(self): + templates = [] + for id_ in range(3): + template = obj_utils.create_test_deploy_template( + self.context, + uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % id_) + templates.append(template.uuid) + data = self.get_json('/deploy_templates?sort_key=uuid', + headers=self.headers) + uuids = [n['uuid'] for n in data['deploy_templates']] + self.assertEqual(sorted(templates), uuids) + + def test_sort_key_invalid(self): + invalid_keys_list = ['extra', 'foo', 'steps'] + for invalid_key in invalid_keys_list: + path = '/deploy_templates?sort_key=%s' % invalid_key + response = self.get_json(path, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertIn(invalid_key, response.json['error_message']) + + def _test_sort_key_allowed(self, detail=False): + template_uuids = [] + for id_ in range(3, 0, -1): + template = obj_utils.create_test_deploy_template( + self.context, + uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%s' % id_) + template_uuids.append(template.uuid) + template_uuids.reverse() + url = '/deploy_templates?sort_key=name&detail=%s' % str(detail) + data = self.get_json(url, headers=self.headers) + data_uuids = [p['uuid'] for p in data['deploy_templates']] + self.assertEqual(template_uuids, data_uuids) + + def test_sort_key_allowed(self): + self._test_sort_key_allowed() + + def test_detail_sort_key_allowed(self): + self._test_sort_key_allowed(detail=True) + + def test_sensitive_data_masked(self): + template = obj_utils.get_test_deploy_template(self.context) + template.steps[0]['args']['password'] = 'correcthorsebatterystaple' + template.create() + data = self.get_json('/deploy_templates/%s' % template.uuid, + headers=self.headers) + + self.assertEqual("******", data['steps'][0]['args']['password']) + + +@mock.patch.object(objects.DeployTemplate, 'save', autospec=True) +class TestPatch(BaseDeployTemplatesAPITest): + + def setUp(self): + super(TestPatch, self).setUp() + self.template = obj_utils.create_test_deploy_template( + self.context, name='CUSTOM_DT1') + + def _test_update_ok(self, mock_save, patch): + response = self.patch_json('/deploy_templates/%s' % self.template.uuid, + patch, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + mock_save.assert_called_once_with(mock.ANY) + return response + + def _test_update_bad_request(self, mock_save, patch, error_msg): + response = self.patch_json('/deploy_templates/%s' % self.template.uuid, + patch, expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_code) + self.assertTrue(response.json['error_message']) + self.assertIn(error_msg, response.json['error_message']) + self.assertFalse(mock_save.called) + return response + + @mock.patch.object(notification_utils, '_emit_api_notification', + autospec=True) + def test_update_by_id(self, mock_notify, mock_save): + name = 'CUSTOM_DT2' + patch = [{'path': '/name', 'value': name, 'op': 'add'}] + response = self._test_update_ok(mock_save, patch) + self.assertEqual(name, 
response.json['name']) + + mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'update', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.END)]) + + def test_update_by_name(self, mock_save): + steps = [{ + 'interface': 'bios', + 'step': 'apply_configuration', + 'args': {'foo': 'bar'}, + 'priority': 42 + }] + patch = [{'path': '/steps', 'value': steps, 'op': 'replace'}] + response = self.patch_json('/deploy_templates/%s' % self.template.name, + patch, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + mock_save.assert_called_once_with(mock.ANY) + self.assertEqual(steps, response.json['steps']) + + def test_update_by_name_with_json(self, mock_save): + interface = 'bios' + path = '/deploy_templates/%s.json' % self.template.name + response = self.patch_json(path, + [{'path': '/steps/0/interface', + 'value': interface, + 'op': 'replace'}], + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(interface, response.json['steps'][0]['interface']) + + def test_update_name_standard_trait(self, mock_save): + name = 'HW_CPU_X86_VMX' + patch = [{'path': '/name', 'value': name, 'op': 'replace'}] + response = self._test_update_ok(mock_save, patch) + self.assertEqual(name, response.json['name']) + + def test_update_name_custom_trait(self, mock_save): + name = 'CUSTOM_DT2' + patch = [{'path': '/name', 'value': name, 'op': 'replace'}] + response = self._test_update_ok(mock_save, patch) + self.assertEqual(name, response.json['name']) + + def test_update_invalid_name(self, mock_save): + self._test_update_bad_request( + mock_save, + [{'path': '/name', 'value': 'aa:bb_cc', 'op': 'replace'}], + 'Deploy template name must be a valid trait') + + def test_update_by_id_invalid_api_version(self, mock_save): + name = 'CUSTOM_DT2' + headers = self.invalid_version_headers + response = self.patch_json('/deploy_templates/%s' % self.template.uuid, + [{'path': '/name', + 'value': name, + 'op': 'add'}], + headers=headers, + expect_errors=True) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + self.assertFalse(mock_save.called) + + def test_update_by_name_old_api_version(self, mock_save): + name = 'CUSTOM_DT2' + response = self.patch_json('/deploy_templates/%s' % self.template.name, + [{'path': '/name', + 'value': name, + 'op': 'add'}], + expect_errors=True) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + self.assertFalse(mock_save.called) + + def test_update_not_found(self, mock_save): + name = 'CUSTOM_DT2' + uuid = uuidutils.generate_uuid() + response = self.patch_json('/deploy_templates/%s' % uuid, + [{'path': '/name', + 'value': name, + 'op': 'add'}], + expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + self.assertTrue(response.json['error_message']) + self.assertFalse(mock_save.called) + + @mock.patch.object(notification_utils, '_emit_api_notification', + autospec=True) + def test_replace_name_already_exist(self, mock_notify, mock_save): + name = 'CUSTOM_DT2' + obj_utils.create_test_deploy_template(self.context, + uuid=uuidutils.generate_uuid(), + name=name) + mock_save.side_effect = 
exception.DeployTemplateAlreadyExists( + uuid=self.template.uuid) + response = self.patch_json('/deploy_templates/%s' % self.template.uuid, + [{'path': '/name', + 'value': name, + 'op': 'replace'}], + expect_errors=True, + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CONFLICT, response.status_code) + self.assertTrue(response.json['error_message']) + mock_save.assert_called_once_with(mock.ANY) + mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'update', + obj_fields.NotificationLevel.ERROR, + obj_fields.NotificationStatus.ERROR)]) + + def test_replace_invalid_name_too_long(self, mock_save): + name = 'CUSTOM_' + 'X' * 249 + patch = [{'path': '/name', 'op': 'replace', 'value': name}] + self._test_update_bad_request( + mock_save, patch, 'Deploy template name must be a valid trait') + + def test_replace_invalid_name_not_a_trait(self, mock_save): + name = 'not-a-trait' + patch = [{'path': '/name', 'op': 'replace', 'value': name}] + self._test_update_bad_request( + mock_save, patch, 'Deploy template name must be a valid trait') + + def test_replace_invalid_name_none(self, mock_save): + patch = [{'path': '/name', 'op': 'replace', 'value': None}] + self._test_update_bad_request( + mock_save, patch, "Deploy template name cannot be None") + + def test_replace_duplicate_step(self, mock_save): + # interface & step combination must be unique. + steps = [ + { + 'interface': 'raid', + 'step': 'create_configuration', + 'args': {'foo': '%d' % i}, + 'priority': i, + } + for i in range(2) + ] + patch = [{'path': '/steps', 'op': 'replace', 'value': steps}] + self._test_update_bad_request( + mock_save, patch, "Duplicate deploy steps") + + def test_replace_invalid_step_interface_fail(self, mock_save): + step = { + 'interface': 'foo', + 'step': 'apply_configuration', + 'args': {'foo': 'bar'}, + 'priority': 42 + } + patch = [{'path': '/steps/0', 'op': 'replace', 'value': step}] + self._test_update_bad_request( + mock_save, patch, "Invalid input for field/attribute interface.") + + def test_replace_non_existent_step_fail(self, mock_save): + step = { + 'interface': 'bios', + 'step': 'apply_configuration', + 'args': {'foo': 'bar'}, + 'priority': 42 + } + patch = [{'path': '/steps/1', 'op': 'replace', 'value': step}] + self._test_update_bad_request( + mock_save, patch, "list assignment index out of range") + + def test_replace_empty_step_list_fail(self, mock_save): + patch = [{'path': '/steps', 'op': 'replace', 'value': []}] + self._test_update_bad_request( + mock_save, patch, 'No deploy steps specified') + + def _test_remove_not_allowed(self, mock_save, field, error_msg): + patch = [{'path': '/%s' % field, 'op': 'remove'}] + self._test_update_bad_request(mock_save, patch, error_msg) + + def test_remove_uuid(self, mock_save): + self._test_remove_not_allowed( + mock_save, 'uuid', + "'/uuid' is an internal attribute and can not be updated") + + def test_remove_name(self, mock_save): + self._test_remove_not_allowed( + mock_save, 'name', + "'/name' is a mandatory attribute and can not be removed") + + def test_remove_steps(self, mock_save): + self._test_remove_not_allowed( + mock_save, 'steps', + "'/steps' is a mandatory attribute and can not be removed") + + def test_remove_foo(self, mock_save): + self._test_remove_not_allowed( + mock_save, 'foo', "can't remove non-existent object 'foo'") + + def 
test_replace_step_invalid_interface(self, mock_save): + patch = [{'path': '/steps/0/interface', 'op': 'replace', + 'value': 'foo'}] + self._test_update_bad_request( + mock_save, patch, "Invalid input for field/attribute interface.") + + def test_replace_multi(self, mock_save): + steps = [ + { + 'interface': 'raid', + 'step': 'create_configuration%d' % i, + 'args': {}, + 'priority': 10, + } + for i in range(3) + ] + template = obj_utils.create_test_deploy_template( + self.context, uuid=uuidutils.generate_uuid(), name='CUSTOM_DT2', + steps=steps) + + # mutate steps so we replace all of them + for step in steps: + step['priority'] = step['priority'] + 1 + + patch = [] + for i, step in enumerate(steps): + patch.append({'path': '/steps/%s' % i, + 'value': step, + 'op': 'replace'}) + response = self.patch_json('/deploy_templates/%s' % template.uuid, + patch, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(steps, response.json['steps']) + mock_save.assert_called_once_with(mock.ANY) + + def test_remove_multi(self, mock_save): + steps = [ + { + 'interface': 'raid', + 'step': 'create_configuration%d' % i, + 'args': {}, + 'priority': 10, + } + for i in range(3) + ] + template = obj_utils.create_test_deploy_template( + self.context, uuid=uuidutils.generate_uuid(), name='CUSTOM_DT2', + steps=steps) + + # Removing one step from the collection + steps.pop(1) + response = self.patch_json('/deploy_templates/%s' % template.uuid, + [{'path': '/steps/1', + 'op': 'remove'}], + headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(steps, response.json['steps']) + mock_save.assert_called_once_with(mock.ANY) + + def test_remove_non_existent_property_fail(self, mock_save): + patch = [{'path': '/non-existent', 'op': 'remove'}] + self._test_update_bad_request( + mock_save, patch, + "can't remove non-existent object 'non-existent'") + + def test_remove_non_existent_step_fail(self, mock_save): + patch = [{'path': '/steps/1', 'op': 'remove'}] + self._test_update_bad_request( + mock_save, patch, "can't remove non-existent object '1'") + + def test_remove_only_step_fail(self, mock_save): + patch = [{'path': '/steps/0', 'op': 'remove'}] + self._test_update_bad_request( + mock_save, patch, "No deploy steps specified") + + def test_remove_non_existent_step_property_fail(self, mock_save): + patch = [{'path': '/steps/0/non-existent', 'op': 'remove'}] + self._test_update_bad_request( + mock_save, patch, + "can't remove non-existent object 'non-existent'") + + def test_add_root_non_existent(self, mock_save): + patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}] + self._test_update_bad_request( + mock_save, patch, "Adding a new attribute (/foo)") + + def test_add_too_high_index_step_fail(self, mock_save): + step = { + 'interface': 'bios', + 'step': 'apply_configuration', + 'args': {'foo': 'bar'}, + 'priority': 42 + } + patch = [{'path': '/steps/2', 'op': 'add', 'value': step}] + self._test_update_bad_request( + mock_save, patch, "can't insert outside of list") + + def test_add_multi(self, mock_save): + steps = [ + { + 'interface': 'raid', + 'step': 'create_configuration%d' % i, + 'args': {}, + 'priority': 10, + } + for i in range(3) + ] + patch = [] + for i, step in enumerate(steps): + patch.append({'path': '/steps/%d' % i, + 'value': step, + 'op': 'add'}) + response = self.patch_json('/deploy_templates/%s' % 
self.template.uuid, + patch, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + self.assertEqual(steps, response.json['steps'][:-1]) + self.assertEqual(_obj_to_api_step(self.template.steps[0]), + response.json['steps'][-1]) + mock_save.assert_called_once_with(mock.ANY) + + +class TestPost(BaseDeployTemplatesAPITest): + + @mock.patch.object(notification_utils, '_emit_api_notification', + autospec=True) + @mock.patch.object(timeutils, 'utcnow', autospec=True) + def test_create(self, mock_utcnow, mock_notify): + tdict = test_api_utils.post_get_test_deploy_template() + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + response = self.post_json('/deploy_templates', tdict, + headers=self.headers) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/deploy_templates/%s' % tdict['uuid'], + headers=self.headers) + self.assertEqual(tdict['uuid'], result['uuid']) + self.assertFalse(result['updated_at']) + return_created_at = timeutils.parse_isotime( + result['created_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_created_at) + # Check location header + self.assertIsNotNone(response.location) + expected_location = '/v1/deploy_templates/%s' % tdict['uuid'] + self.assertEqual(expected_location, + urlparse.urlparse(response.location).path) + mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.END)]) + + def test_create_invalid_api_version(self): + tdict = test_api_utils.post_get_test_deploy_template() + response = self.post_json( + '/deploy_templates', tdict, headers=self.invalid_version_headers, + expect_errors=True) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_create_doesnt_contain_id(self): + with mock.patch.object( + self.dbapi, 'create_deploy_template', + wraps=self.dbapi.create_deploy_template) as mock_create: + tdict = test_api_utils.post_get_test_deploy_template() + self.post_json('/deploy_templates', tdict, headers=self.headers) + self.get_json('/deploy_templates/%s' % tdict['uuid'], + headers=self.headers) + mock_create.assert_called_once_with(mock.ANY) + # Check that 'id' is not in first arg of positional args + self.assertNotIn('id', mock_create.call_args[0][0]) + + @mock.patch.object(notification_utils.LOG, 'exception', autospec=True) + @mock.patch.object(notification_utils.LOG, 'warning', autospec=True) + def test_create_generate_uuid(self, mock_warn, mock_except): + tdict = test_api_utils.post_get_test_deploy_template() + del tdict['uuid'] + response = self.post_json('/deploy_templates', tdict, + headers=self.headers) + result = self.get_json('/deploy_templates/%s' % response.json['uuid'], + headers=self.headers) + self.assertTrue(uuidutils.is_uuid_like(result['uuid'])) + self.assertFalse(mock_warn.called) + self.assertFalse(mock_except.called) + + @mock.patch.object(notification_utils, '_emit_api_notification', + autospec=True) + @mock.patch.object(objects.DeployTemplate, 'create', autospec=True) + def test_create_error(self, mock_create, mock_notify): + mock_create.side_effect = Exception() + tdict = test_api_utils.post_get_test_deploy_template() + self.post_json('/deploy_templates', tdict, headers=self.headers, + expect_errors=True) + 
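# A failed create should emit a START notification followed by ERROR. +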
mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'create', + obj_fields.NotificationLevel.ERROR, + obj_fields.NotificationStatus.ERROR)]) + + def _test_create_ok(self, tdict): + response = self.post_json('/deploy_templates', tdict, + headers=self.headers) + self.assertEqual(http_client.CREATED, response.status_int) + + def _test_create_bad_request(self, tdict, error_msg): + response = self.post_json('/deploy_templates', tdict, + expect_errors=True, headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + self.assertIn(error_msg, response.json['error_message']) + + def test_create_long_name(self): + name = 'CUSTOM_' + 'X' * 248 + tdict = test_api_utils.post_get_test_deploy_template(name=name) + self._test_create_ok(tdict) + + def test_create_standard_trait_name(self): + name = 'HW_CPU_X86_VMX' + tdict = test_api_utils.post_get_test_deploy_template(name=name) + self._test_create_ok(tdict) + + def test_create_name_invalid_too_long(self): + name = 'CUSTOM_' + 'X' * 249 + tdict = test_api_utils.post_get_test_deploy_template(name=name) + self._test_create_bad_request( + tdict, 'Deploy template name must be a valid trait') + + def test_create_name_invalid_not_a_trait(self): + name = 'not-a-trait' + tdict = test_api_utils.post_get_test_deploy_template(name=name) + self._test_create_bad_request( + tdict, 'Deploy template name must be a valid trait') + + def test_create_steps_invalid_duplicate(self): + steps = [ + { + 'interface': 'raid', + 'step': 'create_configuration', + 'args': {'foo': '%d' % i}, + 'priority': i, + } + for i in range(2) + ] + tdict = test_api_utils.post_get_test_deploy_template(steps=steps) + self._test_create_bad_request(tdict, "Duplicate deploy steps") + + def _test_create_no_mandatory_field(self, field): + tdict = test_api_utils.post_get_test_deploy_template() + del tdict[field] + self._test_create_bad_request(tdict, "Mandatory field missing") + + def test_create_no_mandatory_field_name(self): + self._test_create_no_mandatory_field('name') + + def test_create_no_mandatory_field_steps(self): + self._test_create_no_mandatory_field('steps') + + def _test_create_no_mandatory_step_field(self, field): + tdict = test_api_utils.post_get_test_deploy_template() + del tdict['steps'][0][field] + self._test_create_bad_request(tdict, "Mandatory field missing") + + def test_create_no_mandatory_step_field_interface(self): + self._test_create_no_mandatory_step_field('interface') + + def test_create_no_mandatory_step_field_step(self): + self._test_create_no_mandatory_step_field('step') + + def test_create_no_mandatory_step_field_args(self): + self._test_create_no_mandatory_step_field('args') + + def test_create_no_mandatory_step_field_priority(self): + self._test_create_no_mandatory_step_field('priority') + + def _test_create_invalid_field(self, field, value, error_msg): + tdict = test_api_utils.post_get_test_deploy_template() + tdict[field] = value + self._test_create_bad_request(tdict, error_msg) + + def test_create_invalid_field_name(self): + self._test_create_invalid_field( + 'name', 42, 'Invalid input for field/attribute name') + + def test_create_invalid_field_name_none(self): + self._test_create_invalid_field( + 'name', None, "Deploy template name cannot be None") + + def test_create_invalid_field_steps(self): + 
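# 'steps' must be a list of steps; a dict is rejected by type validation. +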
self._test_create_invalid_field( + 'steps', {}, "Invalid input for field/attribute template") + + def test_create_invalid_field_empty_steps(self): + self._test_create_invalid_field( + 'steps', [], "No deploy steps specified") + + def test_create_invalid_field_extra(self): + self._test_create_invalid_field( + 'extra', 42, "Invalid input for field/attribute template") + + def test_create_invalid_field_foo(self): + self._test_create_invalid_field( + 'foo', 'bar', "Unknown attribute for argument template: foo") + + def _test_create_invalid_step_field(self, field, value, error_msg=None): + tdict = test_api_utils.post_get_test_deploy_template() + tdict['steps'][0][field] = value + if error_msg is None: + error_msg = "Invalid input for field/attribute" + self._test_create_bad_request(tdict, error_msg) + + def test_create_invalid_step_field_interface1(self): + self._test_create_invalid_step_field('interface', [3]) + + def test_create_invalid_step_field_interface2(self): + self._test_create_invalid_step_field('interface', 'foo') + + def test_create_invalid_step_field_step(self): + self._test_create_invalid_step_field('step', 42) + + def test_create_invalid_step_field_args1(self): + self._test_create_invalid_step_field('args', 'not a dict') + + def test_create_invalid_step_field_args2(self): + self._test_create_invalid_step_field('args', []) + + def test_create_invalid_step_field_priority(self): + self._test_create_invalid_step_field('priority', 'not a number') + + def test_create_invalid_step_field_negative_priority(self): + self._test_create_invalid_step_field('priority', -1) + + def test_create_invalid_step_field_foo(self): + self._test_create_invalid_step_field( + 'foo', 'bar', "Unknown attribute for argument template.steps: foo") + + def test_create_step_string_priority(self): + tdict = test_api_utils.post_get_test_deploy_template() + tdict['steps'][0]['priority'] = '42' + self._test_create_ok(tdict) + + def test_create_complex_step_args(self): + tdict = test_api_utils.post_get_test_deploy_template() + tdict['steps'][0]['args'] = {'foo': [{'bar': 'baz'}]} + self._test_create_ok(tdict) + + +@mock.patch.object(objects.DeployTemplate, 'destroy', autospec=True) +class TestDelete(BaseDeployTemplatesAPITest): + + def setUp(self): + super(TestDelete, self).setUp() + self.template = obj_utils.create_test_deploy_template(self.context) + + @mock.patch.object(notification_utils, '_emit_api_notification', + autospec=True) + def test_delete_by_uuid(self, mock_notify, mock_destroy): + self.delete('/deploy_templates/%s' % self.template.uuid, + headers=self.headers) + mock_destroy.assert_called_once_with(mock.ANY) + mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.START), + mock.call(mock.ANY, mock.ANY, 'delete', + obj_fields.NotificationLevel.INFO, + obj_fields.NotificationStatus.END)]) + + def test_delete_by_uuid_with_json(self, mock_destroy): + self.delete('/deploy_templates/%s.json' % self.template.uuid, + headers=self.headers) + mock_destroy.assert_called_once_with(mock.ANY) + + def test_delete_by_name(self, mock_destroy): + self.delete('/deploy_templates/%s' % self.template.name, + headers=self.headers) + mock_destroy.assert_called_once_with(mock.ANY) + + def test_delete_by_name_with_json(self, mock_destroy): + self.delete('/deploy_templates/%s.json' % self.template.name, + headers=self.headers) + mock_destroy.assert_called_once_with(mock.ANY) + + def test_delete_invalid_api_version(self, mock_dpt): + response = 
self.delete('/deploy_templates/%s' % self.template.uuid, + expect_errors=True, + headers=self.invalid_version_headers) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_delete_old_api_version(self, mock_dpt): + # Names like CUSTOM_1 were not valid in API 1.1, but the check should + # go after the microversion check. + response = self.delete('/deploy_templates/%s' % self.template.name, + expect_errors=True) + self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) + + def test_delete_by_name_non_existent(self, mock_dpt): + res = self.delete('/deploy_templates/%s' % 'blah', expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.NOT_FOUND, res.status_code) diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_event.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_event.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_event.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_event.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,180 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for the API /events methods. +""" + +import mock +from six.moves import http_client + +from ironic.api.controllers import base as api_base +from ironic.api.controllers.v1 import types +from ironic.api.controllers.v1 import versions +from ironic.tests.unit.api import base as test_api_base +from ironic.tests.unit.api.utils import fake_event_validator + + +def get_fake_port_event(): + return {'event': 'network.bind_port', + 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', + 'mac_address': 'de:ad:ca:fe:ba:be', + 'status': 'ACTIVE', + 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:vnic_type': 'baremetal'} + + +class TestPost(test_api_base.BaseApiTest): + + def setUp(self): + super(TestPost, self).setUp() + self.headers = {api_base.Version.string: str( + versions.max_version_string())} + + @mock.patch.object(types.EventType, 'event_validators', + {'valid.event': fake_event_validator}) + @mock.patch.object(types.EventType, 'valid_events', {'valid.event'}) + def test_events(self): + events_dict = {'events': [{'event': 'valid.event'}]} + response = self.post_json('/events', events_dict, headers=self.headers) + self.assertEqual(http_client.NO_CONTENT, response.status_int) + + @mock.patch.object(types.EventType, 'event_validators', + {'valid.event1': fake_event_validator, + 'valid.event2': fake_event_validator, + 'valid.event3': fake_event_validator}) + @mock.patch.object(types.EventType, 'valid_events', + {'valid.event1', 'valid.event2', 'valid.event3'}) + def test_multiple_events(self): + events_dict = {'events': [{'event': 'valid.event1'}, + {'event': 'valid.event2'}, + {'event': 'valid.event3'}]} + response = self.post_json('/events', events_dict, headers=self.headers) + self.assertEqual(http_client.NO_CONTENT, response.status_int) + + def test_events_does_not_contain_event(self): + 
events_dict = {'events': [{'INVALID': 'fake.event'}]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + @mock.patch.object(types.EventType, 'event_validators', + {'valid.event': fake_event_validator}) + def test_events_invalid_event(self): + events_dict = {'events': [{'event': 'invalid.event'}]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_unknown_event_property(self): + events_dict = {'events': [{'event': 'network.unbind_port', + 'UNKNOWN': 'EVENT_PROPERTY'}]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_bind_port_events(self): + events_dict = {'events': [get_fake_port_event()]} + response = self.post_json('/events', events_dict, headers=self.headers) + self.assertEqual(http_client.NO_CONTENT, response.status_int) + + def test_network_unbind_port_events(self): + events_dict = {'events': [get_fake_port_event()]} + events_dict['events'][0].update({'event': 'network.unbind_port'}) + response = self.post_json('/events', events_dict, headers=self.headers) + self.assertEqual(http_client.NO_CONTENT, response.status_int) + + def test_network_delete_port_events(self): + events_dict = {'events': [get_fake_port_event()]} + events_dict['events'][0].update({'event': 'network.delete_port'}) + response = self.post_json('/events', events_dict, headers=self.headers) + self.assertEqual(http_client.NO_CONTENT, response.status_int) + + def test_network_port_event_invalid_mac_address(self): + port_evt = get_fake_port_event() + port_evt.update({'mac_address': 'INVALID_MAC_ADDRESS'}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_port_event_invalid_device_id(self): + port_evt = get_fake_port_event() + port_evt.update({'device_id': 'DEVICE_ID_SHOULD_BE_UUID'}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_port_event_invalid_port_id(self): + port_evt = get_fake_port_event() + port_evt.update({'port_id': 'PORT_ID_SHOULD_BE_UUID'}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_port_event_invalid_status(self): + port_evt = get_fake_port_event() + 
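# Replace the valid string status with a list to trigger type validation. +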
port_evt.update({'status': ['status', 'SHOULD', 'BE', 'TEXT']}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_port_event_invalid_binding_vnic_type(self): + port_evt = get_fake_port_event() + port_evt.update({'binding:vnic_type': ['binding:vnic_type', 'SHOULD', + 'BE', 'TEXT']}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_network_port_event_invalid_binding_host_id(self): + port_evt = get_fake_port_event() + port_evt.update({'binding:host_id': ['binding:host_id', 'IS', + 'NODE_UUID', 'IN', 'IRONIC']}) + events_dict = {'events': [port_evt]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=self.headers) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + @mock.patch.object(types.EventType, 'event_validators', + {'valid.event': fake_event_validator}) + @mock.patch.object(types.EventType, 'valid_events', {'valid.event'}) + def test_events_unsupported_api_version(self): + headers = {api_base.Version.string: '1.50'} + events_dict = {'events': [{'event': 'valid.event'}]} + response = self.post_json('/events', events_dict, expect_errors=True, + headers=headers) + self.assertEqual(http_client.NOT_FOUND, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_node.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_node.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_node.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_node.py 2019-03-21 20:07:40.000000000 +0000 @@ -132,6 +132,7 @@ self.assertNotIn('automated_clean', data['nodes'][0]) self.assertNotIn('protected', data['nodes'][0]) self.assertNotIn('protected_reason', data['nodes'][0]) + self.assertNotIn('owner', data['nodes'][0]) def test_get_one(self): node = obj_utils.create_test_node(self.context, @@ -173,6 +174,9 @@ self.assertIn('automated_clean', data) self.assertIn('protected', data) self.assertIn('protected_reason', data) + self.assertIn('owner', data) + self.assertNotIn('allocation_id', data) + self.assertIn('allocation_uuid', data) def test_get_one_with_json(self): # Test backward compatibility with guess_content_type_from_ext @@ -326,6 +330,29 @@ self.assertTrue(data['protected']) self.assertEqual('reason!', data['protected_reason']) + def test_node_owner_hidden_in_lower_version(self): + self._test_node_field_hidden_in_lower_version('owner', + '1.49', '1.50') + + def test_node_owner_null_field(self): + node = obj_utils.create_test_node(self.context, owner=None) + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: '1.50'}) + self.assertIsNone(data['owner']) + + def test_node_owner_present(self): + node = obj_utils.create_test_node(self.context, + owner="akindofmagic") + data = 
self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: '1.50'}) + self.assertEqual(data['owner'], "akindofmagic") + + def test_node_description_null_field(self): + node = obj_utils.create_test_node(self.context, description=None) + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: '1.51'}) + self.assertIsNone(data['description']) + def test_get_one_custom_fields(self): node = obj_utils.create_test_node(self.context, chassis_id=self.chassis.id) @@ -517,6 +544,30 @@ headers={api_base.Version.string: '1.49'}) self.assertIn('conductor', response) + def test_get_owner_fields(self): + node = obj_utils.create_test_node(self.context, owner='fred') + fields = 'owner' + response = self.get_json('/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: '1.50'}) + self.assertIn('owner', response) + + def test_get_description_field(self): + node = obj_utils.create_test_node(self.context, + description='useful piece') + fields = 'description' + response = self.get_json('/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: '1.51'}) + self.assertIn('description', response) + + def test_get_with_allocation(self): + allocation = obj_utils.create_test_allocation(self.context) + node = obj_utils.create_test_node(self.context, + allocation_id=allocation.id) + fields = 'allocation_uuid' + response = self.get_json('/nodes/%s?fields=%s' % (node.uuid, fields), + headers={api_base.Version.string: '1.52'}) + self.assertEqual(allocation.uuid, response['allocation_uuid']) + def test_detail(self): node = obj_utils.create_test_node(self.context, chassis_id=self.chassis.id) @@ -550,8 +601,11 @@ self.assertIn('automated_clean', data['nodes'][0]) self.assertIn('protected', data['nodes'][0]) self.assertIn('protected_reason', data['nodes'][0]) + self.assertIn('owner', data['nodes'][0]) # never expose the chassis_id self.assertNotIn('chassis_id', data['nodes'][0]) + self.assertNotIn('allocation_id', data['nodes'][0]) + self.assertIn('allocation_uuid', data['nodes'][0]) def test_detail_using_query(self): node = obj_utils.create_test_node(self.context, @@ -582,6 +636,7 @@ self.assertIn('automated_clean', data['nodes'][0]) self.assertIn('protected', data['nodes'][0]) self.assertIn('protected_reason', data['nodes'][0]) + self.assertIn('owner', data['nodes'][0]) for field in api_utils.V31_FIELDS: self.assertIn(field, data['nodes'][0]) # never expose the chassis_id @@ -762,6 +817,17 @@ '/nodes/detail', headers={api_base.Version.string: '1.37'}) self.assertEqual(['CUSTOM_1'], new_data['nodes'][0]["traits"]) + def test_hide_fields_in_newer_versions_description(self): + node = obj_utils.create_test_node(self.context, + description="useful piece") + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: "1.50"}) + self.assertNotIn('description', data) + + data = self.get_json('/nodes/%s' % node.uuid, + headers={api_base.Version.string: "1.51"}) + self.assertEqual('useful piece', data['description']) + def test_many(self): nodes = [] for id in range(5): @@ -1575,7 +1641,7 @@ def test_get_nodes_by_conductor_not_allowed(self): response = self.get_json('/nodes?conductor=rocky.rocks', - headers={api_base.Version.string: "1.47"}, + headers={api_base.Version.string: "1.48"}, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) @@ -1608,6 +1674,79 @@ self.assertNotIn(node1.uuid, uuids) self.assertIn(node2.uuid, uuids) + def 
test_get_nodes_by_conductor_no_valid_host(self): + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + + self.mock_get_conductor_for.side_effect = exception.NoValidHost( + reason='hey a conductor just goes vacation') + response = self.get_json('/nodes?conductor=like.shadows', + headers={api_base.Version.string: "1.49"}) + self.assertEqual([], response['nodes']) + + self.mock_get_conductor_for.side_effect = exception.TemporaryFailure( + reason='this must be conductor strike') + response = self.get_json('/nodes?conductor=like.shadows', + headers={api_base.Version.string: "1.49"}) + self.assertEqual([], response['nodes']) + + self.mock_get_conductor_for.side_effect = exception.IronicException( + 'Some unexpected thing happened') + response = self.get_json('/nodes?conductor=fake.conductor', + headers={api_base.Version.string: "1.49"}, + expect_errors=True) + self.assertIn('Some unexpected thing happened', + response.json['error_message']) + + def test_get_nodes_by_owner(self): + node1 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + owner='fred') + node2 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + owner='bob') + + for base_url in ('/nodes', '/nodes/detail'): + data = self.get_json(base_url + '?owner=fred', + headers={api_base.Version.string: "1.50"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node1.uuid, uuids) + self.assertNotIn(node2.uuid, uuids) + data = self.get_json(base_url + '?owner=bob', + headers={api_base.Version.string: "1.50"}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node2.uuid, uuids) + self.assertNotIn(node1.uuid, uuids) + + def test_get_nodes_by_owner_not_allowed(self): + for url in ('/nodes?owner=fred', + '/nodes/detail?owner=fred'): + response = self.get_json( + url, headers={api_base.Version.string: "1.48"}, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + self.assertTrue(response.json['error_message']) + + def test_get_nodes_by_description(self): + node1 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + description='some cats here') + node2 = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + description='some dogs there') + data = self.get_json('/nodes?description_contains=cat', + headers={api_base.Version.string: '1.51'}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node1.uuid, uuids) + self.assertNotIn(node2.uuid, uuids) + + data = self.get_json('/nodes?description_contains=dog', + headers={api_base.Version.string: '1.51'}) + uuids = [n['uuid'] for n in data['nodes']] + self.assertIn(node2.uuid, uuids) + self.assertNotIn(node1.uuid, uuids) + def test_get_console_information(self): node = obj_utils.create_test_node(self.context) expected_console_info = {'test': 'test-data'} @@ -2185,6 +2324,19 @@ self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) self.assertTrue(response.json['error_message']) + @mock.patch('pecan.request') + def test__update_changed_fields_lowers_conductor_group(self, + mock_pecan_req): + mock_pecan_req.version.minor = versions.MINOR_MAX_VERSION + controller = api_node.NodesController() + + node_dict = self.node.as_dict() + node_dict['conductor_group'] = 'NEW-GROUP' + node_obj = api_node.Node(**node_dict) + + controller._update_changed_fields(node_obj, self.node) + self.assertEqual('new-group', self.node.conductor_group) + 
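# _update_changed_fields folds conductor_group to lower case, as asserted above.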
@mock.patch("pecan.request") def test__update_changed_fields_remove_chassis_uuid(self, mock_pecan_req): mock_pecan_req.version.minor = versions.MINOR_MAX_VERSION @@ -2688,6 +2840,19 @@ self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertTrue(response.json['error_message']) + def test_patch_allocation_uuid_forbidden(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/allocation_uuid', + 'op': 'replace', + 'value': uuidutils.generate_uuid()}], + headers={api_base.Version.string: "1.52"}, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_code) + self.assertTrue(response.json['error_message']) + def test_update_conductor_group(self): node = obj_utils.create_test_node(self.context, uuid=uuidutils.generate_uuid()) @@ -2783,6 +2948,19 @@ response = self.patch_json('/nodes/%s' % node.uuid, [{'path': '/protected_reason', 'value': 'reason!', + 'op': 'replace'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_owner(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.50'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/owner', + 'value': 'meow', 'op': 'replace'}], headers=headers) self.assertEqual('application/json', response.content_type) @@ -2815,6 +2993,62 @@ self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertTrue(response.json['error_message']) + def test_update_owner_old_api(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.47'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/owner', + 'value': 'meow', + 'op': 'replace'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) + + def test_update_description(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.51'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/description', + 'value': 'meow', + 'op': 'replace'}], + headers=headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.OK, response.status_code) + + def test_update_description_oversize(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + desc = '12345678' * 512 + 'last weed' + self.mock_update_node.return_value = node + headers = {api_base.Version.string: '1.51'} + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/description', + 'value': desc, + 'op': 'replace'}], + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_code) + + def test_patch_allocation_forbidden(self): + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid()) + response = self.patch_json('/nodes/%s' % node.uuid, + [{'path': '/allocation_uuid', + 'op': 'replace', + 'value': 
uuidutils.generate_uuid()}], + headers={api_base.Version.string: + str(api_v1.max_version())}, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_code) + self.assertTrue(response.json['error_message']) + def _create_node_locally(node): driver_factory.check_and_update_node_interfaces(node) @@ -3422,6 +3656,46 @@ self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) + def test_create_node_owner(self): + ndict = test_api_utils.post_get_test_node(owner='cowsay') + response = self.post_json('/nodes', ndict, + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/nodes/%s' % ndict['uuid'], + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual('cowsay', result['owner']) + + def test_create_node_owner_old_api_version(self): + headers = {api_base.Version.string: '1.32'} + ndict = test_api_utils.post_get_test_node(owner='bob') + response = self.post_json('/nodes', ndict, headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + + def test_create_node_description(self): + node = test_api_utils.post_get_test_node(description='useful stuff') + response = self.post_json('/nodes', node, + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual(http_client.CREATED, response.status_int) + result = self.get_json('/nodes/%s' % node['uuid'], + headers={api_base.Version.string: + str(api_v1.max_version())}) + self.assertEqual('useful stuff', result['description']) + + def test_create_node_description_oversize(self): + desc = '12345678' * 512 + 'last weed' + node = test_api_utils.post_get_test_node(description=desc) + response = self.post_json('/nodes', node, + headers={api_base.Version.string: + str(api_v1.max_version())}, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + class TestDelete(test_api_base.BaseApiTest): @@ -3847,6 +4121,42 @@ self.assertEqual(urlparse.urlparse(ret.location).path, expected_location) + def test_provision_with_deploy_configdrive_as_dict(self): + ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid, + {'target': states.ACTIVE, + 'configdrive': {'user_data': 'foo'}}, + headers={api_base.Version.string: '1.56'}) + self.assertEqual(http_client.ACCEPTED, ret.status_code) + self.assertEqual(b'', ret.body) + self.mock_dnd.assert_called_once_with(context=mock.ANY, + node_id=self.node.uuid, + rebuild=False, + configdrive={'user_data': 'foo'}, + topic='test-topic') + + def test_provision_with_deploy_configdrive_as_dict_all_fields(self): + fake_cd = {'user_data': {'serialize': 'me'}, + 'meta_data': {'hostname': 'example.com'}, + 'network_data': {'links': []}} + ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid, + {'target': states.ACTIVE, + 'configdrive': fake_cd}, + headers={api_base.Version.string: '1.56'}) + self.assertEqual(http_client.ACCEPTED, ret.status_code) + self.assertEqual(b'', ret.body) + self.mock_dnd.assert_called_once_with(context=mock.ANY, + node_id=self.node.uuid, + rebuild=False, + configdrive=fake_cd, + topic='test-topic') + + def test_provision_with_deploy_configdrive_as_dict_unsupported(self): + ret = 
self.put_json('/nodes/%s/states/provision' % self.node.uuid, + {'target': states.ACTIVE, + 'configdrive': {'user_data': 'foo'}}, + expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, ret.status_code) + def test_provision_with_rebuild(self): node = self.node node.provision_state = states.ACTIVE diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_port.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_port.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_port.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_port.py 2019-03-21 20:07:40.000000000 +0000 @@ -351,6 +351,18 @@ headers={api_base.Version.string: "1.34"}) self.assertNotIn('physical_network', data) + def test_hide_fields_in_newer_versions_is_smartnic(self): + port = obj_utils.create_test_port(self.context, node_id=self.node.id, + is_smartnic=True) + data = self.get_json( + '/ports/%s' % port.uuid, + headers={api_base.Version.string: "1.52"}) + self.assertNotIn('is_smartnic', data) + + data = self.get_json('/ports/%s' % port.uuid, + headers={api_base.Version.string: "1.53"}) + self.assertTrue(data['is_smartnic']) + def test_get_collection_custom_fields(self): fields = 'uuid,extra' for i in range(3): @@ -436,6 +448,24 @@ expect_errors=True) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + def test_get_custom_fields_is_smartnic(self): + port = obj_utils.create_test_port(self.context, node_id=self.node.id, + is_smartnic=True) + fields = 'uuid,is_smartnic' + response = self.get_json( + '/ports/%s?fields=%s' % (port.uuid, fields), + headers={api_base.Version.string: "1.52"}, + expect_errors=True) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + + response = self.get_json( + '/ports/%s?fields=%s' % (port.uuid, fields), + headers={api_base.Version.string: "1.53"}) + + # 'links' field is always retrieved in the response + # regardless of which fields are specified. 
+ self.assertItemsEqual(['uuid', 'is_smartnic', 'links'], response) + def test_detail(self): llc = {'switch_info': 'switch', 'switch_id': 'aa:bb:cc:dd:ee:ff', 'port_id': 'Gig0/1'} @@ -445,7 +475,8 @@ portgroup_id=portgroup.id, pxe_enabled=False, local_link_connection=llc, - physical_network='physnet1') + physical_network='physnet1', + is_smartnic=True) data = self.get_json( '/ports/detail', headers={api_base.Version.string: str(api_v1.max_version())} @@ -458,6 +489,7 @@ self.assertIn('local_link_connection', data['ports'][0]) self.assertIn('portgroup_uuid', data['ports'][0]) self.assertIn('physical_network', data['ports'][0]) + self.assertIn('is_smartnic', data['ports'][0]) # never expose the node_id and portgroup_id self.assertNotIn('node_id', data['ports'][0]) self.assertNotIn('portgroup_id', data['ports'][0]) @@ -1680,6 +1712,7 @@ pdict.pop('pxe_enabled') pdict.pop('extra') pdict.pop('physical_network') + pdict.pop('is_smartnic') headers = {api_base.Version.string: str(api_v1.min_version())} response = self.post_json('/ports', pdict, headers=headers) self.assertEqual('application/json', response.content_type) @@ -2071,6 +2104,7 @@ pdict = post_get_test_port(pxe_enabled=False, extra={'vif_port_id': 'foo'}) pdict.pop('physical_network') + pdict.pop('is_smartnic') response = self.post_json('/ports', pdict, headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) @@ -2227,6 +2261,64 @@ self.assertIn('maximum character', response.json['error_message']) self.assertFalse(mock_create.called) + def test_create_port_with_is_smartnic(self, mock_create): + llc = {'hostname': 'host1', 'port_id': 'rep0-0'} + pdict = post_get_test_port(is_smartnic=True, node_uuid=self.node.uuid, + local_link_connection=llc) + response = self.post_json('/ports', pdict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, + 'test-topic') + self.assertTrue(response.json['is_smartnic']) + port = objects.Port.get(self.context, pdict['uuid']) + self.assertTrue(port.is_smartnic) + + def test_create_port_with_is_smartnic_default_value(self, mock_create): + pdict = post_get_test_port(node_uuid=self.node.uuid) + response = self.post_json('/ports', pdict, headers=self.headers) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.CREATED, response.status_int) + mock_create.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, + 'test-topic') + self.assertFalse(response.json['is_smartnic']) + port = objects.Port.get(self.context, pdict['uuid']) + self.assertFalse(port.is_smartnic) + + def test_create_port_with_is_smartnic_old_api_version(self, mock_create): + pdict = post_get_test_port(is_smartnic=True, node_uuid=self.node.uuid) + headers = {api_base.Version.string: '1.52'} + response = self.post_json('/ports', pdict, + headers=headers, + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) + self.assertFalse(mock_create.called) + + def test_create_port_with_is_smartnic_missing_hostname(self, mock_create): + llc = {'switch_info': 'switch', + 'switch_id': 'aa:bb:cc:dd:ee:ff', + 'port_id': 'Gig0/1'} + pdict = post_get_test_port(is_smartnic=True, + node_uuid=self.node.uuid, + local_link_connection=llc) + response = self.post_json('/ports', pdict, + 
headers=self.headers, expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertFalse(mock_create.called) + + def test_create_port_with_is_smartnic_missing_port_id(self, mock_create): + llc = {'switch_info': 'switch', + 'switch_id': 'aa:bb:cc:dd:ee:ff', + 'hostname': 'host'} + pdict = post_get_test_port(is_smartnic=True, + node_uuid=self.node.uuid, + local_link_connection=llc) + response = self.post_json('/ports', pdict, + headers=self.headers, expect_errors=True) + self.assertEqual(http_client.BAD_REQUEST, response.status_int) + self.assertFalse(mock_create.called) + @mock.patch.object(rpcapi.ConductorAPI, 'destroy_port') class TestDelete(test_api_base.BaseApiTest): diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_ramdisk.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_ramdisk.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_ramdisk.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_ramdisk.py 2019-03-21 20:07:40.000000000 +0000 @@ -24,6 +24,7 @@ from ironic.api.controllers import base as api_base from ironic.api.controllers import v1 as api_v1 from ironic.api.controllers.v1 import ramdisk +from ironic.common import states from ironic.conductor import rpcapi from ironic.tests.unit.api import base as test_api_base from ironic.tests.unit.objects import utils as obj_utils @@ -160,6 +161,17 @@ set(data['node'])) self._check_config(data) + def test_fast_deploy_lookup(self): + CONF.set_override('fast_track', True, 'deploy') + for provision_state in [states.ENROLL, states.MANAGEABLE, + states.AVAILABLE]: + self.node.provision_state = provision_state + data = self.get_json( + '/lookup?addresses=%s&node_uuid=%s' % + (','.join(self.addresses), self.node.uuid), + headers={api_base.Version.string: str(api_v1.max_version())}) + self.assertEqual(self.node.uuid, data['node']['uuid']) + @mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for', lambda *n: 'test-topic') diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_types.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_types.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_types.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_types.py 2019-03-21 20:07:40.000000000 +0000 @@ -27,6 +27,7 @@ from ironic.common import exception from ironic.common import utils from ironic.tests import base +from ironic.tests.unit.api.utils import fake_event_validator class TestMacAddressType(base.TestCase): @@ -323,14 +324,14 @@ self.assertRaisesRegex(exception.Invalid, 'are invalid keys', v.validate, value) - def test_local_link_connection_type_missing_mandatory_key(self): + def test_local_link_connection_type_missing_local_link_mandatory_key(self): v = types.locallinkconnectiontype value = {'switch_id': '0a:1b:2c:3d:4e:5f', 'switch_info': 'value3'} self.assertRaisesRegex(exception.Invalid, 'Missing mandatory', v.validate, value) - def test_local_link_connection_type_without_optional_key(self): + def test_local_link_connection_type_local_link_keys_mandatory(self): v = types.locallinkconnectiontype value = {'switch_id': '0a:1b:2c:3d:4e:5f', 'port_id': 'value2'} @@ -341,6 +342,34 @@ value = {} self.assertItemsEqual(value, v.validate(value)) + def test_local_link_connection_type_smart_nic_keys_mandatory(self): + v = types.locallinkconnectiontype + value = {'port_id': 'rep0-0', + 'hostname': 'hostname'} + self.assertTrue(v.validate_for_smart_nic(value)) + 
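+        # validate() also accepts this Smart NIC form (port_id plus
+        # hostname) as an alternative to the switch-based key set.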
self.assertTrue(v.validate(value)) + + def test_local_link_connection_type_smart_nic_keys_with_optional(self): + v = types.locallinkconnectiontype + value = {'port_id': 'rep0-0', + 'hostname': 'hostname', + 'switch_id': '0a:1b:2c:3d:4e:5f', + 'switch_info': 'sw_info'} + self.assertTrue(v.validate_for_smart_nic(value)) + self.assertTrue(v.validate(value)) + + def test_local_link_connection_type_smart_nic_keys_hostname_missing(self): + v = types.locallinkconnectiontype + value = {'port_id': 'rep0-0'} + self.assertFalse(v.validate_for_smart_nic(value)) + self.assertRaises(exception.Invalid, v.validate, value) + + def test_local_link_connection_type_smart_nic_keys_port_id_missing(self): + v = types.locallinkconnectiontype + value = {'hostname': 'hostname'} + self.assertFalse(v.validate_for_smart_nic(value)) + self.assertRaises(exception.Invalid, v.validate, value) + @mock.patch("pecan.request", mock.Mock(version=mock.Mock(minor=10))) class TestVifType(base.TestCase): @@ -365,3 +394,62 @@ v = types.viftype self.assertRaises(exception.InvalidUuidOrName, v.frombasetype, {'id': 5678}) + + +class TestEventType(base.TestCase): + + def setUp(self): + super(TestEventType, self).setUp() + self.v = types.eventtype + + @mock.patch.object(types.EventType, 'event_validators', + {'valid.event': fake_event_validator}) + @mock.patch.object(types.EventType, 'valid_events', set(['valid.event'])) + def test_simple_event_type(self): + value = {'event': 'valid.event'} + self.assertItemsEqual(value, self.v.validate(value)) + + @mock.patch.object(types.EventType, 'valid_events', set(['valid.event'])) + def test_invalid_event_type(self): + value = {'event': 'invalid.event'} + self.assertRaisesRegex(exception.Invalid, + 'invalid.event is not one of valid events:', + self.v.validate, value) + + def test_event_missing_madatory_field(self): + value = {'invalid': 'invalid'} + self.assertRaisesRegex(exception.Invalid, 'Missing mandatory keys:', + self.v.validate, value) + + def test_network_port_event(self): + value = {'event': 'network.bind_port', + 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', + 'mac_address': 'de:ad:ca:fe:ba:be', + 'status': 'ACTIVE', + 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:vnic_type': 'baremetal' + } + self.assertItemsEqual(value, self.v.validate(value)) + + def test_invalid_mac_network_port_event(self): + value = {'event': 'network.bind_port', + 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', + 'mac_address': 'INVALID_MAC_ADDRESS', + 'status': 'ACTIVE', + 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:vnic_type': 'baremetal' + } + self.assertRaisesRegex(exception.Invalid, + 'Event validation failure for mac_address.', + self.v.validate, value) + + def test_missing_mandatory_fields_network_port_event(self): + value = {'event': 'network.bind_port', + 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', + 'binding:vnic_type': 'baremetal' + } + self.assertRaisesRegex(exception.Invalid, 'Missing mandatory keys:', + self.v.validate, value) diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_utils.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_utils.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_utils.py 2019-03-21 20:07:40.000000000 
+0000 @@ -26,6 +26,7 @@ from ironic.api.controllers.v1 import node as api_node from ironic.api.controllers.v1 import utils from ironic.common import exception +from ironic.common import policy from ironic.common import states from ironic import objects from ironic.tests import base @@ -80,6 +81,40 @@ utils.validate_trait(large) self.assertRaises(wsme.exc.ClientSideError, utils.validate_trait, large + "1") + # Check custom error prefix. + self.assertRaisesRegex(wsme.exc.ClientSideError, + "spongebob", + utils.validate_trait, "invalid", "spongebob") + + def test_apply_jsonpatch(self): + doc = {"foo": {"bar": "baz"}} + patch = [{"op": "add", "path": "/foo/answer", "value": 42}] + result = utils.apply_jsonpatch(doc, patch) + expected = {"foo": {"bar": "baz", "answer": 42}} + self.assertEqual(expected, result) + + def test_apply_jsonpatch_no_add_root_attr(self): + doc = {} + patch = [{"op": "add", "path": "/foo", "value": 42}] + self.assertRaisesRegex(wsme.exc.ClientSideError, + "Adding a new attribute", + utils.apply_jsonpatch, doc, patch) + + def test_apply_jsonpatch_remove_non_existent(self): + # Raises a KeyError. + doc = {} + patch = [{"op": "remove", "path": "/foo"}] + self.assertRaisesRegex(exception.PatchError, + "can't remove non-existent object 'foo'", + utils.apply_jsonpatch, doc, patch) + + def test_apply_jsonpatch_replace_non_existent_list_item(self): + # Raises an IndexError. + doc = [] + patch = [{"op": "replace", "path": "/0", "value": 42}] + self.assertRaisesRegex(exception.PatchError, + "list assignment index out of range", + utils.apply_jsonpatch, doc, patch) def test_get_patch_values_no_path(self): patch = [{'path': '/name', 'op': 'update', 'value': 'node-0'}] @@ -496,18 +531,48 @@ def test_check_allow_configdrive_fails(self, mock_request): mock_request.version.minor = 35 self.assertRaises(wsme.exc.ClientSideError, - utils.check_allow_configdrive, states.DELETED) + utils.check_allow_configdrive, states.DELETED, + "abcd") + self.assertRaises(wsme.exc.ClientSideError, + utils.check_allow_configdrive, states.ACTIVE, + {'meta_data': {}}) mock_request.version.minor = 34 self.assertRaises(wsme.exc.ClientSideError, - utils.check_allow_configdrive, states.REBUILD) + utils.check_allow_configdrive, states.REBUILD, + "abcd") @mock.patch.object(pecan, 'request', spec_set=['version']) def test_check_allow_configdrive(self, mock_request): mock_request.version.minor = 35 - utils.check_allow_configdrive(states.ACTIVE) - utils.check_allow_configdrive(states.REBUILD) + utils.check_allow_configdrive(states.ACTIVE, "abcd") + utils.check_allow_configdrive(states.REBUILD, "abcd") mock_request.version.minor = 34 - utils.check_allow_configdrive(states.ACTIVE) + utils.check_allow_configdrive(states.ACTIVE, "abcd") + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_configdrive_as_dict(self, mock_request): + mock_request.version.minor = 56 + utils.check_allow_configdrive(states.ACTIVE, {'meta_data': {}}) + utils.check_allow_configdrive(states.ACTIVE, {'meta_data': {}, + 'network_data': {}, + 'user_data': {}}) + utils.check_allow_configdrive(states.ACTIVE, {'user_data': 'foo'}) + utils.check_allow_configdrive(states.ACTIVE, {'user_data': ['foo']}) + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_check_allow_configdrive_as_dict_invalid(self, mock_request): + mock_request.version.minor = 56 + self.assertRaises(wsme.exc.ClientSideError, + utils.check_allow_configdrive, states.REBUILD, + {'foo': 'bar'}) + for key in ['meta_data', 'network_data']: 
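+            # meta_data and network_data must be JSON objects; a plain
+            # string is rejected even at microversion 1.56.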
+ self.assertRaises(wsme.exc.ClientSideError, + utils.check_allow_configdrive, states.REBUILD, + {key: 'a string'}) + for key in ['meta_data', 'network_data', 'user_data']: + self.assertRaises(wsme.exc.ClientSideError, + utils.check_allow_configdrive, states.REBUILD, + {key: 42}) @mock.patch.object(pecan, 'request', spec_set=['version']) def test_allow_rescue_interface(self, mock_request): @@ -523,6 +588,20 @@ mock_request.version.minor = 40 self.assertFalse(utils.allow_inspect_abort()) + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_port_is_smartnic(self, mock_request): + mock_request.version.minor = 53 + self.assertTrue(utils.allow_port_is_smartnic()) + mock_request.version.minor = 52 + self.assertFalse(utils.allow_port_is_smartnic()) + + @mock.patch.object(pecan, 'request', spec_set=['version']) + def test_allow_deploy_templates(self, mock_request): + mock_request.version.minor = 55 + self.assertTrue(utils.allow_deploy_templates()) + mock_request.version.minor = 54 + self.assertFalse(utils.allow_deploy_templates()) + class TestNodeIdent(base.TestCase): @@ -710,6 +789,20 @@ sorted(utils.get_controller_reserved_names( api_node.NodesController))) + @mock.patch.object(pecan, 'request', spec_set=["context"]) + @mock.patch.object(policy, 'authorize', spec=True) + def test_check_policy(self, mock_authorize, mock_pr): + utils.check_policy('fake-policy') + cdict = pecan.request.context.to_policy_values() + mock_authorize.assert_called_once_with('fake-policy', cdict, cdict) + + @mock.patch.object(pecan, 'request', spec_set=["context"]) + @mock.patch.object(policy, 'authorize', spec=True) + def test_check_policy_forbidden(self, mock_authorize, mock_pr): + mock_authorize.side_effect = exception.HTTPForbidden(resource='fake') + self.assertRaises(exception.HTTPForbidden, + utils.check_policy, 'fake-policy') + class TestPortgroupIdent(base.TestCase): def setUp(self): diff -Nru ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_versions.py ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_versions.py --- ironic-12.0.0/ironic/tests/unit/api/controllers/v1/test_versions.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/controllers/v1/test_versions.py 2019-03-21 20:07:40.000000000 +0000 @@ -80,6 +80,11 @@ self.assertEqual(versions._MAX_VERSION_STRING, versions.max_version_string()) + def test_max_version_not_pinned_in_release_mappings(self): + CONF.set_override('pin_release_version', None) + self.assertEqual(release_mappings.RELEASE_MAPPING['master']['api'], + versions.max_version_string()) + @mock.patch('ironic.common.release_mappings.RELEASE_MAPPING', autospec=True) def test_max_version_pinned(self, mock_release_mapping): diff -Nru ironic-12.0.0/ironic/tests/unit/api/utils.py ironic-12.1.0/ironic/tests/unit/api/utils.py --- ironic-12.0.0/ironic/tests/unit/api/utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/api/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -20,9 +20,11 @@ import json from ironic.api.controllers.v1 import chassis as chassis_controller +from ironic.api.controllers.v1 import deploy_template as dt_controller from ironic.api.controllers.v1 import node as node_controller from ironic.api.controllers.v1 import port as port_controller from ironic.api.controllers.v1 import portgroup as portgroup_controller +from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api.controllers.v1 import volume_connector as vc_controller from 
ironic.api.controllers.v1 import volume_target as vt_controller @@ -100,6 +102,7 @@ node.pop('chassis_id') node.pop('tags') node.pop('traits') + node.pop('allocation_id') # NOTE(jroll): pop out fields that were introduced in later API versions, # unless explicitly requested. Otherwise, these will cause tests using @@ -183,3 +186,40 @@ node = db_utils.get_test_node() portgroup['node_uuid'] = kw.get('node_uuid', node['uuid']) return portgroup + + +_ALLOCATION_POST_FIELDS = {'resource_class', 'uuid', 'traits', + 'candidate_nodes', 'name', 'extra'} + + +def allocation_post_data(**kw): + """Return an Allocation object without internal attributes.""" + allocation = db_utils.get_test_allocation(**kw) + return {key: value for key, value in allocation.items() + if key in _ALLOCATION_POST_FIELDS} + + +def fake_event_validator(v): + """A fake event validator""" + return v + + +def deploy_template_post_data(**kw): + """Return a DeployTemplate object without internal attributes.""" + template = db_utils.get_test_deploy_template(**kw) + # These values are not part of the API object + template.pop('version') + # Remove internal attributes from each step. + step_internal = types.JsonPatchType.internal_attrs() + step_internal.append('deploy_template_id') + template['steps'] = [remove_internal(step, step_internal) + for step in template['steps']] + # Remove internal attributes from the template. + dt_patch = dt_controller.DeployTemplatePatchType + internal = dt_patch.internal_attrs() + return remove_internal(template, internal) + + +def post_get_test_deploy_template(**kw): + """Return a DeployTemplate object with appropriate attributes.""" + return deploy_template_post_data(**kw) diff -Nru ironic-12.0.0/ironic/tests/unit/cmd/test_dbsync.py ironic-12.1.0/ironic/tests/unit/cmd/test_dbsync.py --- ironic-12.0.0/ironic/tests/unit/cmd/test_dbsync.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/cmd/test_dbsync.py 2019-03-21 20:07:40.000000000 +0000 @@ -36,20 +36,38 @@ self.context = context.get_admin_context() self.db_cmds = dbsync.DBCommand() - def test__check_versions(self): + def test_check_obj_versions(self): with mock.patch.object(self.dbapi, 'check_versions', autospec=True) as mock_check_versions: mock_check_versions.return_value = True - self.db_cmds._check_versions() - mock_check_versions.assert_called_once_with() + msg = self.db_cmds.check_obj_versions() + self.assertIsNone(msg) + mock_check_versions.assert_called_once_with(ignore_models=()) - def test__check_versions_bad(self): + def test_check_obj_versions_bad(self): with mock.patch.object(self.dbapi, 'check_versions', autospec=True) as mock_check_versions: mock_check_versions.return_value = False - exit = self.assertRaises(SystemExit, self.db_cmds._check_versions) - mock_check_versions.assert_called_once_with() - self.assertEqual(2, exit.code) + msg = self.db_cmds.check_obj_versions() + self.assertIsNotNone(msg) + mock_check_versions.assert_called_once_with(ignore_models=()) + + def test_check_obj_versions_ignore_models(self): + with mock.patch.object(self.dbapi, 'check_versions', + autospec=True) as mock_check_versions: + mock_check_versions.return_value = True + msg = self.db_cmds.check_obj_versions(ignore_missing_tables=True) + self.assertIsNone(msg) + mock_check_versions.assert_called_once_with( + ignore_models=dbsync.NEW_MODELS) + + @mock.patch.object(dbsync.DBCommand, 'check_obj_versions', autospec=True) + def test_check_versions_bad(self, mock_check_versions): + mock_check_versions.return_value = 'This is bad' + exit = 
self.assertRaises(SystemExit, self.db_cmds._check_versions) + mock_check_versions.assert_called_once_with( + mock.ANY, ignore_missing_tables=False) + self.assertEqual(2, exit.code) @mock.patch.object(dbsync, 'ONLINE_MIGRATIONS', autospec=True) def test__run_migration_functions(self, mock_migrations): diff -Nru ironic-12.0.0/ironic/tests/unit/cmd/test_status.py ironic-12.1.0/ironic/tests/unit/cmd/test_status.py --- ironic-12.0.0/ironic/tests/unit/cmd/test_status.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/cmd/test_status.py 2019-03-21 20:07:40.000000000 +0000 @@ -12,8 +12,10 @@ # License for the specific language governing permissions and limitations # under the License. +import mock from oslo_upgradecheck.upgradecheck import Code +from ironic.cmd import dbsync from ironic.cmd import status from ironic.tests.unit.db import base as db_base @@ -24,7 +26,14 @@ super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() - def test__check_placeholder(self): - check_result = self.cmd._check_placeholder() - self.assertEqual( - Code.SUCCESS, check_result.code) + def test__check_obj_versions(self): + check_result = self.cmd._check_obj_versions() + self.assertEqual(Code.SUCCESS, check_result.code) + + @mock.patch.object(dbsync.DBCommand, 'check_obj_versions', autospec=True) + def test__check_obj_versions_bad(self, mock_check): + msg = 'This is bad' + mock_check.return_value = msg + check_result = self.cmd._check_obj_versions() + self.assertEqual(Code.FAILURE, check_result.code) + self.assertEqual(msg, check_result.details) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_cinder.py ironic-12.1.0/ironic/tests/unit/common/test_cinder.py --- ironic-12.0.0/ironic/tests/unit/common/test_cinder.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_cinder.py 2019-03-21 20:07:40.000000000 +0000 @@ -195,6 +195,7 @@ self.node = object_utils.create_test_node( self.context, instance_uuid=uuidutils.generate_uuid()) + self.mount_point = 'ironic_mountpoint' @mock.patch.object(cinderclient.volumes.VolumeManager, 'attach', autospec=True) @@ -239,7 +240,8 @@ mock_reserve.assert_called_once_with(mock.ANY, volume_id) mock_init.assert_called_once_with(mock.ANY, volume_id, connector) mock_attach.assert_called_once_with(mock.ANY, volume_id, - self.node.instance_uuid, None) + self.node.instance_uuid, + self.mount_point) mock_set_meta.assert_called_once_with(mock.ANY, volume_id, {'bar': 'baz'}) mock_get.assert_called_once_with(mock.ANY, volume_id) @@ -271,7 +273,6 @@ 'ironic_volume_uuid': '000-001'}}] volumes = [volume_id, 'already_attached'] - connector = {'foo': 'bar'} mock_create_meta.return_value = {'bar': 'baz'} mock_get.side_effect = [ @@ -294,7 +295,8 @@ mock_reserve.assert_called_once_with(mock.ANY, volume_id) mock_init.assert_called_once_with(mock.ANY, volume_id, connector) mock_attach.assert_called_once_with(mock.ANY, volume_id, - self.node.instance_uuid, None) + self.node.instance_uuid, + self.mount_point) mock_set_meta.assert_called_once_with(mock.ANY, volume_id, {'bar': 'baz'}) @@ -355,7 +357,7 @@ mock.ANY, '111111111-0000-0000-0000-000000000003', connector) mock_attach.assert_called_once_with( mock.ANY, '111111111-0000-0000-0000-000000000003', - self.node.instance_uuid, None) + self.node.instance_uuid, self.mount_point) mock_set_meta.assert_called_once_with( mock.ANY, '111111111-0000-0000-0000-000000000003', {'bar': 'baz'}) @@ -446,7 +448,8 @@ mock_reserve.assert_called_once_with(mock.ANY, volume_id) 
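+        # The attach flow is now expected to receive the mount point from
+        # setUp ('ironic_mountpoint') rather than None, as asserted below.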
mock_init.assert_called_once_with(mock.ANY, volume_id, connector) mock_attach.assert_called_once_with(mock.ANY, volume_id, - self.node.instance_uuid, None) + self.node.instance_uuid, + self.mount_point) mock_get.assert_called_once_with(mock.ANY, volume_id) mock_is_attached.assert_called_once_with(mock.ANY, mock_get.return_value) @@ -496,7 +499,8 @@ mock_reserve.assert_called_once_with(mock.ANY, volume_id) mock_init.assert_called_once_with(mock.ANY, volume_id, connector) mock_attach.assert_called_once_with(mock.ANY, volume_id, - self.node.instance_uuid, None) + self.node.instance_uuid, + self.mount_point) mock_set_meta.assert_called_once_with(mock.ANY, volume_id, {'bar': 'baz'}) mock_get.assert_called_once_with(mock.ANY, volume_id) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_exception.py ironic-12.1.0/ironic/tests/unit/common/test_exception.py --- ironic-12.0.0/ironic/tests/unit/common/test_exception.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_exception.py 2019-03-21 20:07:40.000000000 +0000 @@ -32,7 +32,7 @@ class TestIronicException(base.TestCase): - def test___init__(self): + def test___str__encoding(self): expected = b'\xc3\xa9\xe0\xaf\xb2\xe0\xbe\x84' if six.PY3: expected = expected.decode('utf-8') @@ -40,6 +40,11 @@ exc = exception.IronicException(message) self.assertEqual(expected, exc.__str__()) + def test___str__non_string(self): + exc = exception.IronicException(42) + self.assertEqual("42", exc.__str__()) + self.assertEqual(u"42", exc.__unicode__()) + @mock.patch.object(exception.LOG, 'error', autospec=True) def test___init___invalid_kwarg(self, log_mock): self.config(fatal_exception_format_errors=False) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_images.py ironic-12.1.0/ironic/tests/unit/common/test_images.py --- ironic-12.0.0/ironic/tests/unit/common/test_images.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_images.py 2019-03-21 20:07:40.000000000 +0000 @@ -438,9 +438,9 @@ @mock.patch.object(os.path, 'relpath', autospec=True) @mock.patch.object(os, 'walk', autospec=True) @mock.patch.object(utils, 'mount', autospec=True) - def test__mount_deploy_iso_fail_no_efibootimg(self, mount_mock, - walk_mock, relpath_mock, - umount_mock): + def test__mount_deploy_iso_fail_no_esp_imageimg(self, mount_mock, + walk_mock, relpath_mock, + umount_mock): walk_mock.return_value = [('/tmpdir1/EFI/ubuntu', [], ['grub.cfg']), ('/tmpdir1/isolinux', [], ['isolinux.bin', 'isolinux.cfg'])] @@ -489,26 +489,22 @@ @mock.patch.object(images, '_mount_deploy_iso', autospec=True) @mock.patch.object(utils, 'tempdir', autospec=True) @mock.patch.object(images, '_generate_cfg', autospec=True) - def test_create_isolinux_image_for_uefi( + def test_create_isolinux_image_for_uefi_with_deploy_iso( self, gen_cfg_mock, tempdir_mock, mount_mock, execute_mock, write_to_file_mock, create_root_fs_mock, umount_mock): files_info = { 'path/to/kernel': 'vmlinuz', 'path/to/ramdisk': 'initrd', - CONF.isolinux_bin: 'isolinux/isolinux.bin', - 'path/to/grub': 'relpath/to/grub.cfg', - 'sourceabspath/to/efiboot.img': 'path/to/efiboot.img' + 'sourceabspath/to/efiboot.img': 'path/to/efiboot.img', + 'path/to/grub': 'relpath/to/grub.cfg' } - cfg = "cfg" - cfg_file = 'tmpdir/isolinux/isolinux.cfg' + grubcfg = "grubcfg" grub_file = 'tmpdir/relpath/to/grub.cfg' - gen_cfg_mock.side_effect = cfg, grubcfg + gen_cfg_mock.side_effect = (grubcfg,) params = ['a=b', 'c'] - isolinux_options = {'kernel': '/vmlinuz', - 'ramdisk': '/initrd'} grub_options 
= {'linux': '/vmlinuz', 'initrd': '/initrd'} @@ -525,28 +521,66 @@ mount_mock.return_value = (uefi_path_info, e_img_rel_path, grub_rel_path) - images.create_isolinux_image_for_uefi('tgt_file', 'path/to/deploy_iso', + images.create_isolinux_image_for_uefi('tgt_file', 'path/to/kernel', 'path/to/ramdisk', + deploy_iso='path/to/deploy_iso', kernel_params=params) mount_mock.assert_called_once_with('path/to/deploy_iso', 'mountdir') create_root_fs_mock.assert_called_once_with('tmpdir', files_info) - gen_cfg_mock.assert_any_call(params, CONF.isolinux_config_template, - isolinux_options) - write_to_file_mock.assert_any_call(cfg_file, cfg) gen_cfg_mock.assert_any_call(params, CONF.grub_config_template, grub_options) write_to_file_mock.assert_any_call(grub_file, grubcfg) execute_mock.assert_called_once_with( - 'mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", '-cache-inodes', '-J', - '-l', '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table', - '-b', 'isolinux/isolinux.bin', '-eltorito-alt-boot', - '-e', 'path/to/efiboot.img', '-no-emul-boot', - '-o', 'tgt_file', 'tmpdir') + 'mkisofs', '-r', '-V', 'VMEDIA_BOOT_ISO', '-l', '-e', + 'path/to/efiboot.img', '-no-emul-boot', '-o', 'tgt_file', 'tmpdir') umount_mock.assert_called_once_with('mountdir') @mock.patch.object(images, '_create_root_fs', autospec=True) @mock.patch.object(utils, 'write_to_file', autospec=True) + @mock.patch.object(utils, 'execute', autospec=True) + @mock.patch.object(utils, 'tempdir', autospec=True) + @mock.patch.object(images, '_generate_cfg', autospec=True) + def test_create_isolinux_image_for_uefi_with_esp_image( + self, gen_cfg_mock, tempdir_mock, execute_mock, + write_to_file_mock, create_root_fs_mock): + + files_info = { + 'path/to/kernel': 'vmlinuz', + 'path/to/ramdisk': 'initrd', + 'sourceabspath/to/efiboot.img': 'boot/grub/efiboot.img', + 'tmpdir/boot/grub/grub.cfg': 'boot/grub/grub.cfg' + } + + grubcfg = "grubcfg" + grub_file = 'tmpdir/boot/grub/grub.cfg' + gen_cfg_mock.side_effect = (grubcfg,) + + params = ['a=b', 'c'] + grub_options = {'linux': '/vmlinuz', + 'initrd': '/initrd'} + + mock_file_handle = mock.MagicMock(spec=file) + mock_file_handle.__enter__.return_value = 'tmpdir' + mock_file_handle1 = mock.MagicMock(spec=file) + mock_file_handle1.__enter__.return_value = 'mountdir' + tempdir_mock.side_effect = mock_file_handle, mock_file_handle1 + + images.create_isolinux_image_for_uefi( + 'tgt_file', 'path/to/kernel', 'path/to/ramdisk', + esp_image='sourceabspath/to/efiboot.img', + kernel_params=params) + create_root_fs_mock.assert_called_once_with('tmpdir', files_info) + gen_cfg_mock.assert_any_call(params, CONF.grub_config_template, + grub_options) + write_to_file_mock.assert_any_call(grub_file, grubcfg) + execute_mock.assert_called_once_with( + 'mkisofs', '-r', '-V', 'VMEDIA_BOOT_ISO', '-l', '-e', + 'boot/grub/efiboot.img', '-no-emul-boot', '-o', 'tgt_file', + 'tmpdir') + + @mock.patch.object(images, '_create_root_fs', autospec=True) + @mock.patch.object(utils, 'write_to_file', autospec=True) @mock.patch.object(utils, 'tempdir', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) @mock.patch.object(images, '_generate_cfg', autospec=True) @@ -626,9 +660,10 @@ self.assertRaises(exception.ImageCreationFailed, images.create_isolinux_image_for_uefi, - 'tgt_file', 'path/to/deployiso', + 'tgt_file', 'path/to/kernel', - 'path/to/ramdisk') + 'path/to/ramdisk', + deploy_iso='path/to/deployiso') umount_mock.assert_called_once_with('mountdir') @mock.patch.object(images, '_create_root_fs', autospec=True) @@ -671,9 
+706,10 @@ self.assertRaises(exception.ImageCreationFailed, images.create_isolinux_image_for_uefi, - 'tgt_file', 'path/to/deployiso', + 'tgt_file', 'path/to/kernel', - 'path/to/ramdisk') + 'path/to/ramdisk', + deploy_iso='path/to/deployiso') umount_mock.assert_called_once_with('mountdir') @mock.patch.object(images, '_create_root_fs', autospec=True) @@ -700,15 +736,17 @@ @mock.patch.object(images, 'create_isolinux_image_for_uefi', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @mock.patch.object(utils, 'tempdir', autospec=True) - def test_create_boot_iso_for_uefi( + def test_create_boot_iso_for_uefi_deploy_iso( self, tempdir_mock, fetch_images_mock, create_isolinux_mock): mock_file_handle = mock.MagicMock(spec=file) mock_file_handle.__enter__.return_value = 'tmpdir' tempdir_mock.return_value = mock_file_handle - images.create_boot_iso('ctx', 'output_file', 'kernel-uuid', - 'ramdisk-uuid', 'deploy_iso-uuid', - 'root-uuid', 'kernel-params', 'uefi') + images.create_boot_iso( + 'ctx', 'output_file', 'kernel-uuid', + 'ramdisk-uuid', deploy_iso_href='deploy_iso-uuid', + root_uuid='root-uuid', kernel_params='kernel-params', + boot_mode='uefi') fetch_images_mock.assert_any_call( 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') @@ -719,21 +757,53 @@ params = ['root=UUID=root-uuid', 'kernel-params'] create_isolinux_mock.assert_called_once_with( - 'output_file', 'tmpdir/deploy_iso-uuid', 'tmpdir/kernel-uuid', - 'tmpdir/ramdisk-uuid', params) + 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', + deploy_iso='tmpdir/deploy_iso-uuid', esp_image=None, + kernel_params=params) + + @mock.patch.object(images, 'create_isolinux_image_for_uefi', autospec=True) + @mock.patch.object(images, 'fetch', autospec=True) + @mock.patch.object(utils, 'tempdir', autospec=True) + def test_create_boot_iso_for_uefi_esp_image( + self, tempdir_mock, fetch_images_mock, create_isolinux_mock): + mock_file_handle = mock.MagicMock(spec=file) + mock_file_handle.__enter__.return_value = 'tmpdir' + tempdir_mock.return_value = mock_file_handle + + images.create_boot_iso( + 'ctx', 'output_file', 'kernel-uuid', + 'ramdisk-uuid', esp_image_href='efiboot-uuid', + root_uuid='root-uuid', kernel_params='kernel-params', + boot_mode='uefi') + + fetch_images_mock.assert_any_call( + 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') + fetch_images_mock.assert_any_call( + 'ctx', 'ramdisk-uuid', 'tmpdir/ramdisk-uuid') + fetch_images_mock.assert_any_call( + 'ctx', 'efiboot-uuid', 'tmpdir/efiboot-uuid') + + params = ['root=UUID=root-uuid', 'kernel-params'] + create_isolinux_mock.assert_called_once_with( + 'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', + deploy_iso=None, esp_image='tmpdir/efiboot-uuid', + kernel_params=params) @mock.patch.object(images, 'create_isolinux_image_for_uefi', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @mock.patch.object(utils, 'tempdir', autospec=True) - def test_create_boot_iso_for_uefi_for_hrefs( + def test_create_boot_iso_for_uefi_deploy_iso_for_hrefs( self, tempdir_mock, fetch_images_mock, create_isolinux_mock): mock_file_handle = mock.MagicMock(spec=file) mock_file_handle.__enter__.return_value = 'tmpdir' tempdir_mock.return_value = mock_file_handle - images.create_boot_iso('ctx', 'output_file', 'http://kernel-href', - 'http://ramdisk-href', 'http://deploy_iso-href', - 'root-uuid', 'kernel-params', 'uefi') + images.create_boot_iso( + 'ctx', 'output_file', 'http://kernel-href', 'http://ramdisk-href', + deploy_iso_href='http://deploy_iso-href', + root_uuid='root-uuid', 
kernel_params='kernel-params', + boot_mode='uefi') + expected_calls = [mock.call('ctx', 'http://kernel-href', 'tmpdir/kernel-href'), mock.call('ctx', 'http://ramdisk-href', @@ -743,8 +813,37 @@ fetch_images_mock.assert_has_calls(expected_calls) params = ['root=UUID=root-uuid', 'kernel-params'] create_isolinux_mock.assert_called_once_with( - 'output_file', 'tmpdir/deploy_iso-href', 'tmpdir/kernel-href', - 'tmpdir/ramdisk-href', params) + 'output_file', 'tmpdir/kernel-href', 'tmpdir/ramdisk-href', + deploy_iso='tmpdir/deploy_iso-href', esp_image=None, + kernel_params=params) + + @mock.patch.object(images, 'create_isolinux_image_for_uefi', autospec=True) + @mock.patch.object(images, 'fetch', autospec=True) + @mock.patch.object(utils, 'tempdir', autospec=True) + def test_create_boot_iso_for_uefi_esp_image_for_hrefs( + self, tempdir_mock, fetch_images_mock, create_isolinux_mock): + mock_file_handle = mock.MagicMock(spec=file) + mock_file_handle.__enter__.return_value = 'tmpdir' + tempdir_mock.return_value = mock_file_handle + + images.create_boot_iso( + 'ctx', 'output_file', 'http://kernel-href', 'http://ramdisk-href', + esp_image_href='http://efiboot-href', + root_uuid='root-uuid', kernel_params='kernel-params', + boot_mode='uefi') + + expected_calls = [mock.call('ctx', 'http://kernel-href', + 'tmpdir/kernel-href'), + mock.call('ctx', 'http://ramdisk-href', + 'tmpdir/ramdisk-href'), + mock.call('ctx', 'http://efiboot-href', + 'tmpdir/efiboot-href')] + fetch_images_mock.assert_has_calls(expected_calls) + params = ['root=UUID=root-uuid', 'kernel-params'] + create_isolinux_mock.assert_called_once_with( + 'output_file', 'tmpdir/kernel-href', 'tmpdir/ramdisk-href', + deploy_iso=None, esp_image='tmpdir/efiboot-href', + kernel_params=params) @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True) @mock.patch.object(images, 'fetch', autospec=True) @@ -757,7 +856,8 @@ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid', 'ramdisk-uuid', 'deploy_iso-uuid', - 'root-uuid', 'kernel-params', 'bios') + 'efiboot-uuid', 'root-uuid', 'kernel-params', + 'bios') fetch_images_mock.assert_any_call( 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') @@ -788,7 +888,8 @@ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid', 'ramdisk-uuid', 'deploy_iso-uuid', - 'root-uuid', 'kernel-params', None) + 'efiboot-uuid', 'root-uuid', 'kernel-params', + None) fetch_images_mock.assert_any_call( 'ctx', 'kernel-uuid', 'tmpdir/kernel-uuid') diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_json_rpc.py ironic-12.1.0/ironic/tests/unit/common/test_json_rpc.py --- ironic-12.0.0/ironic/tests/unit/common/test_json_rpc.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_json_rpc.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,495 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
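The new test_json_rpc.py module pins down ironic's JSON-RPC transport. The wire format it exercises is plain JSON-RPC 2.0 over HTTP POST: a request carries 'jsonrpc', 'method', 'params' and an optional 'id', and omitting the id turns the request into a notification that is answered with 204 and an empty body. A minimal sketch of one such exchange, assuming a server on port 8089 (the port the client tests below post to); the host and method names are placeholders:

    import requests

    def json_rpc_call(host, method, params, request_id='abcd'):
        # Build a JSON-RPC 2.0 request; drop 'id' to send a notification.
        body = {'jsonrpc': '2.0', 'method': method,
                'params': params, 'id': request_id}
        reply = requests.post('http://%s:8089' % host, json=body).json()
        if 'error' in reply:
            # e.g. {'code': -32601, 'message': 'Method banana was not found'}
            raise RuntimeError('%(code)s: %(message)s' % reply['error'])
        return reply['result']

    # json_rpc_call('conductor.example.com', 'success',
    #               {'context': {'user_name': 'admin'}, 'x': 42})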
+ +import fixtures +import mock +import oslo_messaging +import webob + +from ironic.common import context as ir_ctx +from ironic.common import exception +from ironic.common.json_rpc import client +from ironic.common.json_rpc import server +from ironic import objects +from ironic.objects import base as objects_base +from ironic.tests import base as test_base +from ironic.tests.unit.objects import utils as obj_utils + + +class FakeManager(object): + + def success(self, context, x, y=0): + assert isinstance(context, ir_ctx.RequestContext) + assert context.user_name == 'admin' + return x - y + + def with_node(self, context, node): + assert isinstance(context, ir_ctx.RequestContext) + assert isinstance(node, objects.Node) + node.extra['answer'] = 42 + return node + + def no_result(self, context): + assert isinstance(context, ir_ctx.RequestContext) + return None + + def no_context(self): + return 42 + + def fail(self, context, message): + assert isinstance(context, ir_ctx.RequestContext) + raise exception.IronicException(message) + + @oslo_messaging.expected_exceptions(exception.Invalid) + def expected(self, context, message): + assert isinstance(context, ir_ctx.RequestContext) + raise exception.Invalid(message) + + def crash(self, context): + raise RuntimeError('boom') + + def init_host(self, context): + assert False, "This should not be exposed" + + def _private(self, context): + assert False, "This should not be exposed" + + # This should not be exposed either + value = 42 + + +class TestService(test_base.TestCase): + + def setUp(self): + super(TestService, self).setUp() + self.config(auth_strategy='noauth', group='json_rpc') + self.server_mock = self.useFixture(fixtures.MockPatch( + 'oslo_service.wsgi.Server', autospec=True)).mock + + self.serializer = objects_base.IronicObjectSerializer(is_server=True) + self.service = server.WSGIService(FakeManager(), self.serializer) + self.app = self.service._application + self.ctx = {'user_name': 'admin'} + + def _request(self, name=None, params=None, expected_error=None, + request_id='abcd', **kwargs): + body = { + 'jsonrpc': '2.0', + } + if request_id is not None: + body['id'] = request_id + if name is not None: + body['method'] = name + if params is not None: + body['params'] = params + if 'json_body' not in kwargs: + kwargs['json_body'] = body + kwargs.setdefault('method', 'POST') + kwargs.setdefault('headers', {'Content-Type': 'application/json'}) + + request = webob.Request.blank("/", **kwargs) + response = request.get_response(self.app) + self.assertEqual(response.status_code, + expected_error or (200 if request_id else 204)) + if request_id is not None: + if expected_error: + self.assertEqual(expected_error, + response.json_body['error']['code']) + else: + return response.json_body + else: + self.assertFalse(response.text) + + def _check(self, body, result=None, error=None, request_id='abcd'): + self.assertEqual('2.0', body.pop('jsonrpc')) + self.assertEqual(request_id, body.pop('id')) + if error is not None: + self.assertEqual({'error': error}, body) + else: + self.assertEqual({'result': result}, body) + + def test_success(self): + body = self._request('success', {'context': self.ctx, 'x': 42}) + self._check(body, result=42) + + def test_success_no_result(self): + body = self._request('no_result', {'context': self.ctx}) + self._check(body, result=None) + + def test_notification(self): + body = self._request('no_result', {'context': self.ctx}, + request_id=None) + self.assertIsNone(body) + + def test_no_context(self): + body = 
self._request('no_context') + self._check(body, result=42) + + def test_serialize_objects(self): + node = obj_utils.get_test_node(self.context) + node = self.serializer.serialize_entity(self.context, node) + body = self._request('with_node', {'context': self.ctx, 'node': node}) + self.assertNotIn('error', body) + self.assertIsInstance(body['result'], dict) + node = self.serializer.deserialize_entity(self.context, body['result']) + self.assertEqual({'answer': 42}, node.extra) + + def test_non_json_body(self): + for body in (b'', b'???', b"\xc3\x28"): + request = webob.Request.blank("/", method='POST', body=body) + response = request.get_response(self.app) + self._check( + response.json_body, + error={ + 'message': server.ParseError._msg_fmt, + 'code': -32700, + }, + request_id=None) + + def test_invalid_requests(self): + bodies = [ + # Invalid requests with request ID. + {'method': 'no_result', 'id': 'abcd', + 'params': {'context': self.ctx}}, + {'jsonrpc': '2.0', 'id': 'abcd', 'params': {'context': self.ctx}}, + # These do not count as notifications, since they're malformed. + {'method': 'no_result', 'params': {'context': self.ctx}}, + {'jsonrpc': '2.0', 'params': {'context': self.ctx}}, + 42, + # We do not implement batched requests. + [], + [{'jsonrpc': '2.0', 'method': 'no_result', + 'params': {'context': self.ctx}}], + ] + for body in bodies: + body = self._request(json_body=body) + self._check( + body, + error={ + 'message': server.InvalidRequest._msg_fmt, + 'code': -32600, + }, + request_id=body.get('id')) + + def test_malformed_context(self): + body = self._request(json_body={'jsonrpc': '2.0', 'id': 'abcd', + 'method': 'no_result', + 'params': {'context': 42}}) + self._check( + body, + error={ + 'message': 'Context must be a dictionary, if provided', + 'code': -32602, + }) + + def test_expected_failure(self): + body = self._request('fail', {'context': self.ctx, + 'message': 'some error'}) + self._check(body, + error={ + 'message': 'some error', + 'code': 500, + 'data': { + 'class': 'ironic.common.exception.IronicException' + } + }) + + def test_expected_failure_oslo(self): + # Check that exceptions wrapped by oslo's expected_exceptions get + # unwrapped correctly. 
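+        # (The decorator wraps them in oslo_messaging.ExpectedException;
+        # the server must surface the original class and its 400 code
+        # instead of reporting an unexpected 500.)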
+ body = self._request('expected', {'context': self.ctx, + 'message': 'some error'}) + self._check(body, + error={ + 'message': 'some error', + 'code': 400, + 'data': { + 'class': 'ironic.common.exception.Invalid' + } + }) + + @mock.patch.object(server.LOG, 'exception', autospec=True) + def test_unexpected_failure(self, mock_log): + body = self._request('crash', {'context': self.ctx}) + self._check(body, + error={ + 'message': 'boom', + 'code': 500, + }) + self.assertTrue(mock_log.called) + + def test_method_not_found(self): + body = self._request('banana', {'context': self.ctx}) + self._check(body, + error={ + 'message': 'Method banana was not found', + 'code': -32601, + }) + + def test_no_blacklisted_methods(self): + for name in ('__init__', '_private', 'init_host', 'value'): + body = self._request(name, {'context': self.ctx}) + self._check(body, + error={ + 'message': 'Method %s was not found' % name, + 'code': -32601, + }) + + def test_missing_argument(self): + body = self._request('success', {'context': self.ctx}) + # The exact error message depends on the Python version + self.assertEqual(-32602, body['error']['code']) + self.assertNotIn('result', body) + + def test_method_not_post(self): + self._request('success', {'context': self.ctx, 'x': 42}, + method='GET', expected_error=405) + + def test_authenticated(self): + self.config(auth_strategy='keystone', group='json_rpc') + self.service = server.WSGIService(FakeManager(), self.serializer) + self.app = self.server_mock.call_args[0][2] + self._request('success', {'context': self.ctx, 'x': 42}, + expected_error=401) + + def test_authenticated_no_admin_role(self): + self.config(auth_strategy='keystone', group='json_rpc') + self._request('success', {'context': self.ctx, 'x': 42}, + expected_error=403) + + +@mock.patch.object(client, '_get_session', autospec=True) +class TestClient(test_base.TestCase): + + def setUp(self): + super(TestClient, self).setUp() + self.serializer = objects_base.IronicObjectSerializer(is_server=True) + self.client = client.Client(self.serializer) + self.ctx_json = self.context.to_dict() + + def test_can_send_version(self, mock_session): + self.assertTrue(self.client.can_send_version('1.42')) + self.client = client.Client(self.serializer, version_cap='1.42') + self.assertTrue(self.client.can_send_version('1.42')) + self.assertTrue(self.client.can_send_version('1.0')) + self.assertFalse(self.client.can_send_version('1.99')) + self.assertFalse(self.client.can_send_version('2.0')) + + def test_call_success(self, mock_session): + response = mock_session.return_value.post.return_value + response.json.return_value = { + 'jsonrpc': '2.0', + 'result': 42 + } + cctx = self.client.prepare('foo.example.com') + self.assertEqual('example.com', cctx.host) + result = cctx.call(self.context, 'do_something', answer=42) + self.assertEqual(42, result) + mock_session.return_value.post.assert_called_once_with( + 'http://example.com:8089', + json={'jsonrpc': '2.0', + 'method': 'do_something', + 'params': {'answer': 42, 'context': self.ctx_json}, + 'id': self.context.request_id}) + + def test_call_success_with_version(self, mock_session): + response = mock_session.return_value.post.return_value + response.json.return_value = { + 'jsonrpc': '2.0', + 'result': 42 + } + cctx = self.client.prepare('foo.example.com', version='1.42') + self.assertEqual('example.com', cctx.host) + result = cctx.call(self.context, 'do_something', answer=42) + self.assertEqual(42, result) + mock_session.return_value.post.assert_called_once_with( + 
'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json,
+                             'rpc.version': '1.42'},
+                  'id': self.context.request_id})
+
+    def test_call_success_with_version_and_cap(self, mock_session):
+        self.client = client.Client(self.serializer, version_cap='1.99')
+        response = mock_session.return_value.post.return_value
+        response.json.return_value = {
+            'jsonrpc': '2.0',
+            'result': 42
+        }
+        cctx = self.client.prepare('foo.example.com', version='1.42')
+        self.assertEqual('example.com', cctx.host)
+        result = cctx.call(self.context, 'do_something', answer=42)
+        self.assertEqual(42, result)
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json,
+                             'rpc.version': '1.42'},
+                  'id': self.context.request_id})
+
+    def test_cast_success(self, mock_session):
+        cctx = self.client.prepare('foo.example.com')
+        self.assertEqual('example.com', cctx.host)
+        result = cctx.cast(self.context, 'do_something', answer=42)
+        self.assertIsNone(result)
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json}})
+
+    def test_cast_success_with_version(self, mock_session):
+        cctx = self.client.prepare('foo.example.com', version='1.42')
+        self.assertEqual('example.com', cctx.host)
+        result = cctx.cast(self.context, 'do_something', answer=42)
+        self.assertIsNone(result)
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json,
+                             'rpc.version': '1.42'}})
+
+    def test_call_serialization(self, mock_session):
+        node = obj_utils.get_test_node(self.context)
+        node_json = self.serializer.serialize_entity(self.context, node)
+        response = mock_session.return_value.post.return_value
+        response.json.return_value = {
+            'jsonrpc': '2.0',
+            'result': node_json
+        }
+        cctx = self.client.prepare('foo.example.com')
+        self.assertEqual('example.com', cctx.host)
+        result = cctx.call(self.context, 'do_something', node=node)
+        self.assertIsInstance(result, objects.Node)
+        self.assertEqual(result.uuid, node.uuid)
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'node': node_json, 'context': self.ctx_json},
+                  'id': self.context.request_id})
+
+    def test_call_failure(self, mock_session):
+        response = mock_session.return_value.post.return_value
+        response.json.return_value = {
+            'jsonrpc': '2.0',
+            'error': {
+                'code': 418,
+                'message': 'I am a teapot',
+                'data': {
+                    'class': 'ironic.common.exception.Invalid'
+                }
+            }
+        }
+        cctx = self.client.prepare('foo.example.com')
+        self.assertEqual('example.com', cctx.host)
+        # Make sure that the class is restored correctly for expected errors.
+        exc = self.assertRaises(exception.Invalid,
+                                cctx.call,
+                                self.context, 'do_something', answer=42)
+        # Code from the body has priority over the one in the class.
+        self.assertEqual(418, exc.code)
+        self.assertIn('I am a teapot', str(exc))
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json},
+                  'id': self.context.request_id})
+
+    def test_call_unexpected_failure(self, mock_session):
+        response = mock_session.return_value.post.return_value
+        response.json.return_value = {
+            'jsonrpc': '2.0',
+            'error': {
+                'code': 500,
+                'message': 'AttributeError',
+            }
+        }
+        cctx = self.client.prepare('foo.example.com')
+        self.assertEqual('example.com', cctx.host)
+        exc = self.assertRaises(exception.IronicException,
+                                cctx.call,
+                                self.context, 'do_something', answer=42)
+        self.assertEqual(500, exc.code)
+        self.assertIn('Unexpected error', str(exc))
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json},
+                  'id': self.context.request_id})
+
+    def test_call_failure_with_foreign_class(self, mock_session):
+        # This should not happen, but provide an additional safeguard
+        response = mock_session.return_value.post.return_value
+        response.json.return_value = {
+            'jsonrpc': '2.0',
+            'error': {
+                'code': 500,
+                'message': 'AttributeError',
+                'data': {
+                    'class': 'AttributeError'
+                }
+            }
+        }
+        cctx = self.client.prepare('foo.example.com')
+        self.assertEqual('example.com', cctx.host)
+        exc = self.assertRaises(exception.IronicException,
+                                cctx.call,
+                                self.context, 'do_something', answer=42)
+        self.assertEqual(500, exc.code)
+        self.assertIn('Unexpected error', str(exc))
+        mock_session.return_value.post.assert_called_once_with(
+            'http://example.com:8089',
+            json={'jsonrpc': '2.0',
+                  'method': 'do_something',
+                  'params': {'answer': 42, 'context': self.ctx_json},
+                  'id': self.context.request_id})
+
+    def test_cast_failure(self, mock_session):
+        # Cast cannot return normal failures, but make sure we ignore them
+        # even if the server sends something in violation of the protocol
+        # (or because it's a low-level error like HTTP Forbidden).
+ response = mock_session.return_value.post.return_value + response.json.return_value = { + 'jsonrpc': '2.0', + 'error': { + 'code': 418, + 'message': 'I am a teapot', + 'data': { + 'class': 'ironic.common.exception.IronicException' + } + } + } + cctx = self.client.prepare('foo.example.com') + self.assertEqual('example.com', cctx.host) + result = cctx.cast(self.context, 'do_something', answer=42) + self.assertIsNone(result) + mock_session.return_value.post.assert_called_once_with( + 'http://example.com:8089', + json={'jsonrpc': '2.0', + 'method': 'do_something', + 'params': {'answer': 42, 'context': self.ctx_json}}) + + def test_call_failure_with_version_and_cap(self, mock_session): + self.client = client.Client(self.serializer, version_cap='1.42') + cctx = self.client.prepare('foo.example.com', version='1.99') + self.assertRaisesRegex(RuntimeError, + "requested version 1.99, maximum allowed " + "version is 1.42", + cctx.call, self.context, 'do_something', + answer=42) + self.assertFalse(mock_session.return_value.post.called) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_neutron.py ironic-12.1.0/ironic/tests/unit/common/test_neutron.py --- ironic-12.0.0/ironic/tests/unit/common/test_neutron.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_neutron.py 2019-03-21 20:07:40.000000000 +0000 @@ -10,6 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. +import time + from keystoneauth1 import loading as kaloading import mock from neutronclient.common import exceptions as neutron_client_exc @@ -185,6 +187,8 @@ 'mac_address': '52:54:00:cf:2d:32'} self.network_uuid = uuidutils.generate_uuid() self.client_mock = mock.Mock() + self.client_mock.list_agents.return_value = { + 'agents': [{'alive': True}]} patcher = mock.patch('ironic.common.neutron.get_client', return_value=self.client_mock, autospec=True) patcher.start() @@ -582,6 +586,203 @@ self.assertTrue(res) self.assertFalse(log_mock.warning.called) + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_validate_port_info_neutron_with_smartnic_and_link_info( + self, log_mock): + self.node.network_interface = 'neutron' + self.node.save() + llc = {'hostname': 'host1', 'port_id': 'rep0-0'} + port = object_utils.create_test_port( + self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:33', local_link_connection=llc, + is_smartnic=True) + res = neutron.validate_port_info(self.node, port) + self.assertTrue(res) + self.assertFalse(log_mock.error.called) + + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_validate_port_info_neutron_with_no_smartnic_and_link_info( + self, log_mock): + self.node.network_interface = 'neutron' + self.node.save() + llc = {'hostname': 'host1', 'port_id': 'rep0-0'} + port = object_utils.create_test_port( + self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:33', local_link_connection=llc, + is_smartnic=False) + res = neutron.validate_port_info(self.node, port) + self.assertFalse(res) + self.assertTrue(log_mock.error.called) + + @mock.patch.object(neutron, 'LOG', autospec=True) + def test_validate_port_info_neutron_with_smartnic_and_no_link_info( + self, log_mock): + self.node.network_interface = 'neutron' + self.node.save() + llc = {'switch_id': 'switch', 'port_id': 'rep0-0'} + port = object_utils.create_test_port( + self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:33', 
local_link_connection=llc, + is_smartnic=True) + res = neutron.validate_port_info(self.node, port) + self.assertFalse(res) + self.assertTrue(log_mock.error.called) + + def test_validate_agent_up(self): + self.client_mock.list_agents.return_value = { + 'agents': [{'alive': True}]} + self.assertTrue(neutron._validate_agent(self.client_mock)) + + def test_validate_agent_down(self): + self.client_mock.list_agents.return_value = { + 'agents': [{'alive': False}]} + self.assertFalse(neutron._validate_agent(self.client_mock)) + + def test_is_smartnic_port_true(self): + port = self.ports[0] + port.is_smartnic = True + self.assertTrue(neutron.is_smartnic_port(port)) + + def test_is_smartnic_port_false(self): + port = self.ports[0] + self.assertFalse(neutron.is_smartnic_port(port)) + + @mock.patch.object(neutron, '_validate_agent') + @mock.patch.object(time, 'sleep') + def test_wait_for_host_agent_up_target_state_up( + self, sleep_mock, validate_agent_mock): + validate_agent_mock.return_value = True + self.assertTrue(neutron.wait_for_host_agent( + self.client_mock, 'hostname')) + sleep_mock.assert_not_called() + + @mock.patch.object(neutron, '_validate_agent') + @mock.patch.object(time, 'sleep') + def test_wait_for_host_agent_down_target_state_up( + self, sleep_mock, validate_agent_mock): + validate_agent_mock.return_value = False + self.assertRaises(exception.NetworkError, + neutron.wait_for_host_agent, + self.client_mock, 'hostname') + + @mock.patch.object(neutron, '_validate_agent') + @mock.patch.object(time, 'sleep') + def test_wait_for_host_agent_up_target_state_down( + self, sleep_mock, validate_agent_mock): + validate_agent_mock.return_value = True + self.assertRaises(exception.NetworkError, + neutron.wait_for_host_agent, + self.client_mock, 'hostname', target_state='down') + + @mock.patch.object(neutron, '_validate_agent') + @mock.patch.object(time, 'sleep') + def test_wait_for_host_agent_down_target_state_down( + self, sleep_mock, validate_agent_mock): + validate_agent_mock.return_value = False + self.assertTrue( + neutron.wait_for_host_agent(self.client_mock, 'hostname', + target_state='down')) + sleep_mock.assert_not_called() + + @mock.patch.object(neutron, '_get_port_by_uuid') + @mock.patch.object(time, 'sleep') + def test_wait_for_port_status_up(self, sleep_mock, get_port_mock): + get_port_mock.return_value = {'status': 'ACTIVE'} + neutron.wait_for_port_status(self.client_mock, 'port_id', 'ACTIVE') + sleep_mock.assert_not_called() + + @mock.patch.object(neutron, '_get_port_by_uuid') + @mock.patch.object(time, 'sleep') + def test_wait_for_port_status_down(self, sleep_mock, get_port_mock): + get_port_mock.side_effect = [{'status': 'DOWN'}, {'status': 'ACTIVE'}] + neutron.wait_for_port_status(self.client_mock, 'port_id', 'ACTIVE') + sleep_mock.assert_called_once() + + @mock.patch.object(neutron, '_get_port_by_uuid') + @mock.patch.object(time, 'sleep') + def test_wait_for_port_status_active_max_retry(self, sleep_mock, + get_port_mock): + get_port_mock.return_value = {'status': 'DOWN'} + self.assertRaises(exception.NetworkError, + neutron.wait_for_port_status, + self.client_mock, 'port_id', 'ACTIVE') + + @mock.patch.object(neutron, '_get_port_by_uuid') + @mock.patch.object(time, 'sleep') + def test_wait_for_port_status_down_max_retry(self, sleep_mock, + get_port_mock): + get_port_mock.return_value = {'status': 'ACTIVE'} + self.assertRaises(exception.NetworkError, + neutron.wait_for_port_status, + self.client_mock, 'port_id', 'DOWN') + + @mock.patch.object(neutron, 'wait_for_host_agent', 
autospec=True) + @mock.patch.object(neutron, 'wait_for_port_status', autospec=True) + def test_add_smartnic_port_to_network( + self, wait_port_mock, wait_agent_mock): + # Ports will be created only if pxe_enabled is True + self.node.network_interface = 'neutron' + self.node.save() + object_utils.create_test_port( + self.context, node_id=self.node.id, + uuid=uuidutils.generate_uuid(), + address='52:54:00:cf:2d:22', + pxe_enabled=False + ) + port = self.ports[0] + + local_link_connection = port.local_link_connection + local_link_connection['hostname'] = 'hostname' + port.local_link_connection = local_link_connection + port.is_smartnic = True + port.save() + + expected_body = { + 'port': { + 'network_id': self.network_uuid, + 'admin_state_up': True, + 'binding:vnic_type': 'smart-nic', + 'device_owner': 'baremetal:none', + 'binding:host_id': port.local_link_connection['hostname'], + 'device_id': self.node.uuid, + 'mac_address': port.address, + 'binding:profile': { + 'local_link_information': [port.local_link_connection] + } + } + } + + # Ensure we can create ports + self.client_mock.create_port.return_value = { + 'port': self.neutron_port} + expected = {port.uuid: self.neutron_port['id']} + with task_manager.acquire(self.context, self.node.uuid) as task: + ports = neutron.add_ports_to_network(task, self.network_uuid) + self.assertEqual(expected, ports) + self.client_mock.create_port.assert_called_once_with( + expected_body) + wait_agent_mock.assert_called_once_with( + self.client_mock, 'hostname') + wait_port_mock.assert_called_once_with( + self.client_mock, self.neutron_port['id'], 'ACTIVE') + + @mock.patch.object(neutron, 'is_smartnic_port', autospec=True) + @mock.patch.object(neutron, 'wait_for_host_agent', autospec=True) + def test_remove_neutron_smartnic_ports( + self, wait_agent_mock, is_smartnic_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + is_smartnic_mock.return_value = True + self.neutron_port['binding:host_id'] = 'hostname' + self.client_mock.list_ports.return_value = { + 'ports': [self.neutron_port]} + neutron.remove_neutron_ports(task, {'param': 'value'}) + self.client_mock.list_ports.assert_called_once_with( + **{'param': 'value'}) + self.client_mock.delete_port.assert_called_once_with( + self.neutron_port['id']) + is_smartnic_mock.assert_called_once_with(self.neutron_port) + wait_agent_mock.assert_called_once_with(self.client_mock, 'hostname') + @mock.patch.object(neutron, 'get_client', autospec=True) class TestValidateNetwork(base.TestCase): diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_pxe_utils.py ironic-12.1.0/ironic/tests/unit/common/test_pxe_utils.py --- ironic-12.0.0/ironic/tests/unit/common/test_pxe_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_pxe_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -860,13 +860,23 @@ expected_info = [{'opt_name': 'tag:!ipxe,59', 'opt_value': 'tftp://[ff80::1]/fake-bootfile', 'ip_version': ip_version}, + {'opt_name': 'tag:!ipxe6,59', + 'opt_value': 'tftp://[ff80::1]/fake-bootfile', + 'ip_version': ip_version}, {'opt_name': 'tag:ipxe,59', 'opt_value': expected_boot_script_url, + 'ip_version': ip_version}, + {'opt_name': 'tag:ipxe6,59', + 'opt_value': expected_boot_script_url, 'ip_version': ip_version}] + elif ip_version == 4: expected_info = [{'opt_name': 'tag:!ipxe,67', 'opt_value': boot_file, 'ip_version': ip_version}, + {'opt_name': 'tag:!ipxe6,67', + 'opt_value': boot_file, + 'ip_version': ip_version}, {'opt_name': '66', 'opt_value': '192.0.2.1', 
'ip_version': ip_version}, @@ -876,6 +886,9 @@ {'opt_name': 'tag:ipxe,67', 'opt_value': expected_boot_script_url, 'ip_version': ip_version}, + {'opt_name': 'tag:ipxe6,67', + 'opt_value': expected_boot_script_url, + 'ip_version': ip_version}, {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.1', 'ip_version': ip_version}] @@ -1101,6 +1114,20 @@ image_info = pxe_utils.get_image_info(self.node) self.assertEqual(expected_info, image_info) + def test__get_deploy_image_info_ipxe(self): + expected_info = {'deploy_ramdisk': + (DRV_INFO_DICT['deploy_ramdisk'], + os.path.join(CONF.deploy.http_root, + self.node.uuid, + 'deploy_ramdisk')), + 'deploy_kernel': + (DRV_INFO_DICT['deploy_kernel'], + os.path.join(CONF.deploy.http_root, + self.node.uuid, + 'deploy_kernel'))} + image_info = pxe_utils.get_image_info(self.node, ipxe_enabled=True) + self.assertEqual(expected_info, image_info) + def test__get_deploy_image_info_missing_deploy_kernel(self): del self.node.driver_info['deploy_kernel'] self.assertRaises(exception.MissingParameterValue, @@ -1740,3 +1767,29 @@ mock_pxe_clean.assert_called_once_with(task, ipxe_enabled=False) mock_unlink.assert_any_call('deploy_kernel') mock_cache.return_value.clean_up.assert_called_once_with() + + +class TFTPImageCacheTestCase(db_base.DbTestCase): + @mock.patch.object(fileutils, 'ensure_tree') + def test_with_master_path(self, mock_ensure_tree): + self.config(tftp_master_path='/fake/path', group='pxe') + self.config(image_cache_size=500, group='pxe') + self.config(image_cache_ttl=30, group='pxe') + + cache = pxe_utils.TFTPImageCache() + + mock_ensure_tree.assert_called_once_with('/fake/path') + self.assertEqual(500 * 1024 * 1024, cache._cache_size) + self.assertEqual(30 * 60, cache._cache_ttl) + + @mock.patch.object(fileutils, 'ensure_tree') + def test_without_master_path(self, mock_ensure_tree): + self.config(tftp_master_path='', group='pxe') + self.config(image_cache_size=500, group='pxe') + self.config(image_cache_ttl=30, group='pxe') + + cache = pxe_utils.TFTPImageCache() + + mock_ensure_tree.assert_not_called() + self.assertEqual(500 * 1024 * 1024, cache._cache_size) + self.assertEqual(30 * 60, cache._cache_ttl) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_raid.py ironic-12.1.0/ironic/tests/unit/common/test_raid.py --- ironic-12.0.0/ironic/tests/unit/common/test_raid.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -161,6 +161,21 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase): + def setUp(self): + super(RaidPublicMethodsTestCase, self).setUp() + self.target_raid_config = { + "logical_disks": [ + {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}, + {'size_gb': 200, 'raid_level': 5} + ]} + n = { + 'boot_interface': 'pxe', + 'deploy_interface': 'direct', + 'raid_interface': 'agent', + 'target_raid_config': self.target_raid_config, + } + self.node = obj_utils.create_test_node(self.context, **n) + def test_get_logical_disk_properties(self): with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj: schema = json.load(raid_schema_fobj) @@ -186,7 +201,7 @@ def _test_update_raid_info(self, current_config, capabilities=None): - node = obj_utils.create_test_node(self.context) + node = self.node if capabilities: properties = node.properties properties['capabilities'] = capabilities @@ -239,3 +254,37 @@ self.assertRaises(exception.InvalidParameterValue, self._test_update_raid_info, current_config) + + def test_filter_target_raid_config(self): + result = 
raid.filter_target_raid_config(self.node) + self.assertEqual(self.node.target_raid_config, result) + + def test_filter_target_raid_config_skip_root(self): + result = raid.filter_target_raid_config( + self.node, create_root_volume=False) + exp_target_raid_config = { + "logical_disks": [{'size_gb': 200, 'raid_level': 5}]} + self.assertEqual(exp_target_raid_config, result) + + def test_filter_target_raid_config_skip_nonroot(self): + result = raid.filter_target_raid_config( + self.node, create_nonroot_volumes=False) + exp_target_raid_config = { + "logical_disks": [{'size_gb': 200, + 'raid_level': 0, + 'is_root_volume': True}]} + self.assertEqual(exp_target_raid_config, result) + + def test_filter_target_raid_config_no_target_raid_config_after_skipping( + self): + self.assertRaises(exception.MissingParameterValue, + raid.filter_target_raid_config, + self.node, create_root_volume=False, + create_nonroot_volumes=False) + + def test_filter_target_raid_config_empty_target_raid_config(self): + self.node.target_raid_config = {} + self.node.save() + self.assertRaises(exception.MissingParameterValue, + raid.filter_target_raid_config, + self.node) diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_release_mappings.py ironic-12.1.0/ironic/tests/unit/common/test_release_mappings.py --- ironic-12.0.0/ironic/tests/unit/common/test_release_mappings.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_release_mappings.py 2019-03-21 20:07:40.000000000 +0000 @@ -85,7 +85,7 @@ self.assertIn('master', release_mappings.RELEASE_MAPPING) model_names = set((s.__name__ for s in models.Base.__subclasses__())) exceptions = set(['NodeTag', 'ConductorHardwareInterfaces', - 'NodeTrait', 'BIOSSetting']) + 'NodeTrait', 'BIOSSetting', 'DeployTemplateStep']) # NOTE(xek): As a rule, all models which can be changed between # releases or are sent through RPC should have their counterpart # versioned objects. 
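[Editor's note: the json_rpc server tests earlier in this diff pin the standard JSON-RPC 2.0 error codes. A sketch of the mapping those assertions encode; the dict itself is illustrative, the server raises exception classes such as ParseError and InvalidRequest rather than consulting a table.]

    # JSON-RPC 2.0 error codes asserted by the server tests above (sketch).
    JSONRPC_ERRORS = {
        -32700: 'Parse error',       # non-JSON body (test_non_json_body)
        -32600: 'Invalid request',   # malformed envelope or batch requests
        -32601: 'Method not found',  # unknown or blacklisted method names
        -32602: 'Invalid params',    # bad context type, missing argument
    }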
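[Editor's note: the wait_for_host_agent and wait_for_port_status tests above all assume the same polling contract: no sleep when the first check already matches, one sleep per retry, and exception.NetworkError once the attempts are exhausted. A minimal sketch of that pattern; the names wait_for_status, fetch_status, attempts and interval are illustrative, not ironic's actual signatures.]

    import time

    class NetworkError(Exception):
        """Stand-in for ironic.common.exception.NetworkError."""

    def wait_for_status(fetch_status, expected, attempts=10, interval=2):
        # Poll until fetch_status() returns the expected value; sleep only
        # between attempts, so a first-try match never sleeps.
        for attempt in range(attempts):
            if fetch_status() == expected:
                return
            if attempt < attempts - 1:
                time.sleep(interval)
        raise NetworkError('status did not become %s after %d attempts'
                           % (expected, attempts))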
diff -Nru ironic-12.0.0/ironic/tests/unit/common/test_rpc.py ironic-12.1.0/ironic/tests/unit/common/test_rpc.py --- ironic-12.0.0/ironic/tests/unit/common/test_rpc.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/common/test_rpc.py 2019-03-21 20:07:40.000000000 +0000 @@ -48,9 +48,24 @@ mock_get_notification, mock_json_serializer, mock_notifier) - def _test_init_globals(self, notifications_enabled, mock_get_rpc_transport, - mock_get_notification, mock_json_serializer, - mock_notifier): + @mock.patch.object(messaging, 'Notifier', autospec=True) + @mock.patch.object(messaging, 'JsonPayloadSerializer', autospec=True) + @mock.patch.object(messaging, 'get_notification_transport', autospec=True) + @mock.patch.object(messaging, 'get_rpc_transport', autospec=True) + def test_init_globals_with_custom_topics(self, mock_get_rpc_transport, + mock_get_notification, + mock_json_serializer, + mock_notifier): + self._test_init_globals( + False, mock_get_rpc_transport, mock_get_notification, + mock_json_serializer, mock_notifier, + versioned_notifications_topics=['custom_topic1', 'custom_topic2']) + + def _test_init_globals( + self, notifications_enabled, mock_get_rpc_transport, + mock_get_notification, mock_json_serializer, mock_notifier, + versioned_notifications_topics=['ironic_versioned_notifications']): + rpc.TRANSPORT = None rpc.NOTIFICATION_TRANSPORT = None rpc.SENSORS_NOTIFIER = None @@ -89,7 +104,7 @@ mock.call( rpc.NOTIFICATION_TRANSPORT, serializer=mock_request_serializer.return_value, - topics=['ironic_versioned_notifications']) + topics=versioned_notifications_topics) ] mock_notifier.assert_has_calls(notifier_calls) diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_allocations.py ironic-12.1.0/ironic/tests/unit/conductor/test_allocations.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_allocations.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_allocations.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,424 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for functionality related to allocations.""" + +import mock +import oslo_messaging as messaging +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.conductor import allocations +from ironic.conductor import manager +from ironic.conductor import task_manager +from ironic import objects +from ironic.tests.unit.conductor import mgr_utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + + +@mgr_utils.mock_record_keepalive +class AllocationTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase): + @mock.patch.object(manager.ConductorManager, '_spawn_worker', + autospec=True) + def test_create_allocation(self, mock_spawn): + # In this test we mock spawn_worker, so that the actual processing does + # not happen, and the allocation stays in the "allocating" state. 
+ allocation = obj_utils.get_test_allocation(self.context, + extra={'test': 'one'}) + self._start_service() + + mock_spawn.assert_any_call(self.service, + self.service._resume_allocations, + mock.ANY) + mock_spawn.reset_mock() + + res = self.service.create_allocation(self.context, allocation) + + self.assertEqual({'test': 'one'}, res['extra']) + self.assertEqual('allocating', res['state']) + self.assertIsNotNone(res['uuid']) + self.assertEqual(self.service.conductor.id, res['conductor_affinity']) + res = objects.Allocation.get_by_uuid(self.context, allocation['uuid']) + self.assertEqual({'test': 'one'}, res['extra']) + self.assertEqual('allocating', res['state']) + self.assertIsNotNone(res['uuid']) + self.assertEqual(self.service.conductor.id, res['conductor_affinity']) + + mock_spawn.assert_called_once_with(self.service, + allocations.do_allocate, + self.context, mock.ANY) + + def test_destroy_allocation_without_node(self): + allocation = obj_utils.create_test_allocation(self.context) + self.service.destroy_allocation(self.context, allocation) + self.assertRaises(exception.AllocationNotFound, + objects.Allocation.get_by_uuid, + self.context, allocation['uuid']) + + def test_destroy_allocation_with_node(self): + node = obj_utils.create_test_node(self.context) + allocation = obj_utils.create_test_allocation(self.context, + node_id=node['id']) + node.instance_uuid = allocation['uuid'] + node.allocation_id = allocation['id'] + node.save() + + self.service.destroy_allocation(self.context, allocation) + self.assertRaises(exception.AllocationNotFound, + objects.Allocation.get_by_uuid, + self.context, allocation['uuid']) + node = objects.Node.get_by_uuid(self.context, node['uuid']) + self.assertIsNone(node['instance_uuid']) + self.assertIsNone(node['allocation_id']) + + def test_destroy_allocation_with_active_node(self): + node = obj_utils.create_test_node(self.context, + provision_state='active') + allocation = obj_utils.create_test_allocation(self.context, + node_id=node['id']) + node.instance_uuid = allocation['uuid'] + node.allocation_id = allocation['id'] + node.save() + + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.destroy_allocation, + self.context, allocation) + # Compare true exception hidden by @messaging.expected_exceptions + self.assertEqual(exception.InvalidState, exc.exc_info[0]) + + objects.Allocation.get_by_uuid(self.context, allocation['uuid']) + node = objects.Node.get_by_uuid(self.context, node['uuid']) + self.assertEqual(allocation['uuid'], node['instance_uuid']) + self.assertEqual(allocation['id'], node['allocation_id']) + + def test_destroy_allocation_with_transient_node(self): + node = obj_utils.create_test_node(self.context, + target_provision_state='active', + provision_state='deploying') + allocation = obj_utils.create_test_allocation(self.context, + node_id=node['id']) + node.instance_uuid = allocation['uuid'] + node.allocation_id = allocation['id'] + node.save() + + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.destroy_allocation, + self.context, allocation) + # Compare true exception hidden by @messaging.expected_exceptions + self.assertEqual(exception.InvalidState, exc.exc_info[0]) + + objects.Allocation.get_by_uuid(self.context, allocation['uuid']) + node = objects.Node.get_by_uuid(self.context, node['uuid']) + self.assertEqual(allocation['uuid'], node['instance_uuid']) + self.assertEqual(allocation['id'], node['allocation_id']) + + def test_destroy_allocation_with_node_in_maintenance(self): + node = 
obj_utils.create_test_node(self.context,
+                                          provision_state='active',
+                                          maintenance=True)
+        allocation = obj_utils.create_test_allocation(self.context,
+                                                      node_id=node['id'])
+        node.instance_uuid = allocation['uuid']
+        node.allocation_id = allocation['id']
+        node.save()
+
+        self.service.destroy_allocation(self.context, allocation)
+        self.assertRaises(exception.AllocationNotFound,
+                          objects.Allocation.get_by_uuid,
+                          self.context, allocation['uuid'])
+        node = objects.Node.get_by_uuid(self.context, node['uuid'])
+        self.assertIsNone(node['instance_uuid'])
+        self.assertIsNone(node['allocation_id'])
+
+    @mock.patch.object(allocations, 'do_allocate', autospec=True)
+    def test_resume_allocations(self, mock_allocate):
+        another_conductor = obj_utils.create_test_conductor(
+            self.context, id=42, hostname='another-host')
+
+        self._start_service()
+
+        obj_utils.create_test_allocation(
+            self.context,
+            state='active',
+            conductor_affinity=self.service.conductor.id)
+        obj_utils.create_test_allocation(
+            self.context,
+            state='allocating',
+            conductor_affinity=another_conductor.id)
+        allocation = obj_utils.create_test_allocation(
+            self.context,
+            state='allocating',
+            conductor_affinity=self.service.conductor.id)
+
+        self.service._resume_allocations(self.context)
+
+        mock_allocate.assert_called_once_with(self.context, mock.ANY)
+        actual = mock_allocate.call_args[0][1]
+        self.assertEqual(allocation.uuid, actual.uuid)
+        self.assertIsInstance(actual, objects.Allocation)
+
+    @mock.patch.object(allocations, 'do_allocate', autospec=True)
+    def test_check_orphaned_allocations(self, mock_allocate):
+        alive_conductor = obj_utils.create_test_conductor(
+            self.context, id=42, hostname='alive')
+        dead_conductor = obj_utils.create_test_conductor(
+            self.context, id=43, hostname='dead')
+
+        obj_utils.create_test_allocation(
+            self.context,
+            state='allocating',
+            conductor_affinity=alive_conductor.id)
+        allocation = obj_utils.create_test_allocation(
+            self.context,
+            state='allocating',
+            conductor_affinity=dead_conductor.id)
+
+        self._start_service()
+        with mock.patch.object(self.dbapi, 'get_offline_conductors',
+                               autospec=True) as mock_conds:
+            mock_conds.return_value = [dead_conductor.id]
+            self.service._check_orphan_allocations(self.context)
+
+        mock_allocate.assert_called_once_with(self.context, mock.ANY)
+        actual = mock_allocate.call_args[0][1]
+        self.assertEqual(allocation.uuid, actual.uuid)
+        self.assertIsInstance(actual, objects.Allocation)
+
+        allocation = self.dbapi.get_allocation_by_id(allocation.id)
+        self.assertEqual(self.service.conductor.id,
+                         allocation.conductor_affinity)
+
+
+@mock.patch('time.sleep', lambda _: None)
+class DoAllocateTestCase(db_base.DbTestCase):
+    def test_success(self):
+        node = obj_utils.create_test_node(self.context,
+                                          power_state='power on',
+                                          resource_class='x-large',
+                                          provision_state='available')
+        allocation = obj_utils.create_test_allocation(self.context,
+                                                      resource_class='x-large')
+
+        allocations.do_allocate(self.context, allocation)
+
+        allocation = objects.Allocation.get_by_uuid(self.context,
+                                                    allocation['uuid'])
+        self.assertIsNone(allocation['last_error'])
+        self.assertEqual('active', allocation['state'])
+
+        node = objects.Node.get_by_uuid(self.context, node['uuid'])
+        self.assertEqual(allocation['uuid'], node['instance_uuid'])
+        self.assertEqual(allocation['id'], node['allocation_id'])
+
+    def test_with_traits(self):
+        obj_utils.create_test_node(self.context,
+                                   uuid=uuidutils.generate_uuid(),
+                                   power_state='power on',
+                                   resource_class='x-large',
+
provision_state='available') + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + power_state='power on', + resource_class='x-large', + provision_state='available') + db_utils.create_test_node_traits(['tr1', 'tr2'], node_id=node.id) + + allocation = obj_utils.create_test_allocation(self.context, + resource_class='x-large', + traits=['tr2']) + + allocations.do_allocate(self.context, allocation) + + allocation = objects.Allocation.get_by_uuid(self.context, + allocation['uuid']) + self.assertIsNone(allocation['last_error']) + self.assertEqual('active', allocation['state']) + + node = objects.Node.get_by_uuid(self.context, node['uuid']) + self.assertEqual(allocation['uuid'], node['instance_uuid']) + self.assertEqual(allocation['id'], node['allocation_id']) + self.assertEqual(allocation['traits'], ['tr2']) + + def test_with_candidates(self): + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + power_state='power on', + resource_class='x-large', + provision_state='available') + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + power_state='power on', + resource_class='x-large', + provision_state='available') + + allocation = obj_utils.create_test_allocation( + self.context, resource_class='x-large', + candidate_nodes=[node['uuid']]) + + allocations.do_allocate(self.context, allocation) + + allocation = objects.Allocation.get_by_uuid(self.context, + allocation['uuid']) + self.assertIsNone(allocation['last_error']) + self.assertEqual('active', allocation['state']) + + node = objects.Node.get_by_uuid(self.context, node['uuid']) + self.assertEqual(allocation['uuid'], node['instance_uuid']) + self.assertEqual(allocation['id'], node['allocation_id']) + self.assertEqual([node['uuid']], allocation['candidate_nodes']) + + @mock.patch.object(task_manager, 'acquire', autospec=True, + side_effect=task_manager.acquire) + def test_nodes_filtered_out(self, mock_acquire): + # Resource class does not match + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + resource_class='x-small', + power_state='power off', + provision_state='available') + # Provision state is not available + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + resource_class='x-large', + power_state='power off', + provision_state='manageable') + # Power state is undefined + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + resource_class='x-large', + power_state=None, + provision_state='available') + # Maintenance mode is on + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + maintenance=True, + resource_class='x-large', + power_state='power off', + provision_state='available') + # Already associated + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + instance_uuid=uuidutils.generate_uuid(), + resource_class='x-large', + power_state='power off', + provision_state='available') + + allocation = obj_utils.create_test_allocation(self.context, + resource_class='x-large') + allocations.do_allocate(self.context, allocation) + self.assertIn('no available nodes', allocation['last_error']) + self.assertIn('x-large', allocation['last_error']) + self.assertEqual('error', allocation['state']) + + # All nodes are filtered out on the database level. 
+        self.assertFalse(mock_acquire.called)
+
+    @mock.patch.object(task_manager, 'acquire', autospec=True,
+                       side_effect=task_manager.acquire)
+    def test_nodes_locked(self, mock_acquire):
+        self.config(node_locked_retry_attempts=2, group='conductor')
+        node1 = obj_utils.create_test_node(self.context,
+                                           uuid=uuidutils.generate_uuid(),
+                                           maintenance=False,
+                                           resource_class='x-large',
+                                           power_state='power off',
+                                           provision_state='available',
+                                           reservation='example.com')
+        node2 = obj_utils.create_test_node(self.context,
+                                           uuid=uuidutils.generate_uuid(),
+                                           resource_class='x-large',
+                                           power_state='power off',
+                                           provision_state='available',
+                                           reservation='example.com')
+
+        allocation = obj_utils.create_test_allocation(self.context,
+                                                      resource_class='x-large')
+        allocations.do_allocate(self.context, allocation)
+        self.assertIn('could not reserve any of 2', allocation['last_error'])
+        self.assertEqual('error', allocation['state'])
+
+        self.assertEqual(6, mock_acquire.call_count)
+        # NOTE(dtantsur): nodes are tried in random order by design, so we
+        # cannot directly use assert_has_calls. Check that all nodes are tried
+        # before going into retries (rather than each tried 3 times in a row).
+        nodes = [call[0][1] for call in mock_acquire.call_args_list]
+        for offset in (0, 2, 4):
+            self.assertEqual(set(nodes[offset:offset + 2]),
+                             {node1.uuid, node2.uuid})
+
+    @mock.patch.object(task_manager, 'acquire', autospec=True)
+    def test_nodes_changed_after_lock(self, mock_acquire):
+        nodes = [obj_utils.create_test_node(self.context,
+                                            uuid=uuidutils.generate_uuid(),
+                                            resource_class='x-large',
+                                            power_state='power off',
+                                            provision_state='available')
+                 for _ in range(5)]
+        for node in nodes:
+            db_utils.create_test_node_trait(trait='tr1', node_id=node.id)
+
+        # Modify nodes in-memory so that they no longer match the allocation:
+
+        # Resource class does not match
+        nodes[0].resource_class = 'x-small'
+        # Provision state is not available
+        nodes[1].provision_state = 'deploying'
+        # Maintenance mode is on
+        nodes[2].maintenance = True
+        # Already associated
+        nodes[3].instance_uuid = uuidutils.generate_uuid()
+        # Traits changed
+        nodes[4].traits.objects[:] = []
+
+        mock_acquire.side_effect = [
+            mock.MagicMock(**{'__enter__.return_value.node': node})
+            for node in nodes
+        ]
+
+        allocation = obj_utils.create_test_allocation(self.context,
+                                                      resource_class='x-large',
+                                                      traits=['tr1'])
+        allocations.do_allocate(self.context, allocation)
+        self.assertIn('all nodes were filtered out', allocation['last_error'])
+        self.assertEqual('error', allocation['state'])
+
+        # No retries for these failures.
+ self.assertEqual(5, mock_acquire.call_count) + + @mock.patch.object(task_manager, 'acquire', autospec=True, + side_effect=task_manager.acquire) + def test_nodes_candidates_do_not_match(self, mock_acquire): + obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + resource_class='x-large', + power_state='power off', + provision_state='available') + # Resource class does not match + node = obj_utils.create_test_node(self.context, + uuid=uuidutils.generate_uuid(), + power_state='power on', + resource_class='x-small', + provision_state='available') + + allocation = obj_utils.create_test_allocation( + self.context, resource_class='x-large', + candidate_nodes=[node['uuid']]) + + allocations.do_allocate(self.context, allocation) + self.assertIn('none of the requested nodes', allocation['last_error']) + self.assertIn('x-large', allocation['last_error']) + self.assertEqual('error', allocation['state']) + + # All nodes are filtered out on the database level. + self.assertFalse(mock_acquire.called) diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_manager.py ironic-12.1.0/ironic/tests/unit/conductor/test_manager.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_manager.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_manager.py 2019-03-21 20:07:40.000000000 +0000 @@ -22,6 +22,7 @@ import datetime import eventlet +from futurist import waiters import mock from oslo_config import cfg from oslo_db import exception as db_exception @@ -40,6 +41,7 @@ from ironic.common import swift from ironic.conductor import manager from ironic.conductor import notification_utils +from ironic.conductor import steps as conductor_steps from ironic.conductor import task_manager from ironic.conductor import utils as conductor_utils from ironic.db import api as dbapi @@ -625,6 +627,16 @@ node.refresh() self.assertEqual(existing_driver, node.driver) + def test_update_node_from_invalid_driver(self): + existing_driver = 'fake-hardware' + wrong_driver = 'wrong-driver' + node = obj_utils.create_test_node(self.context, driver=wrong_driver) + node.driver = existing_driver + result = self.service.update_node(self.context, node) + self.assertEqual(existing_driver, result.driver) + node.refresh() + self.assertEqual(existing_driver, node.driver) + UpdateInterfaces = namedtuple('UpdateInterfaces', ('old', 'new')) # NOTE(dtantsur): "old" interfaces here do not match the defaults, so that # we can test resetting them. 
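[Editor's note: taken together, the DoAllocateTestCase cases above (test_success, test_nodes_filtered_out, test_nodes_candidates_do_not_match, test_with_traits) encode the node-suitability rules applied during allocation. A compact sketch of that predicate using the test fixtures' attribute names; node_suits_allocation itself is illustrative, not ironic's helper, and traits are simplified to plain strings.]

    def node_suits_allocation(node, allocation):
        # Candidate list, when given, restricts the choice up front.
        if allocation.candidate_nodes and \
                node.uuid not in allocation.candidate_nodes:
            return False
        return (node.provision_state == 'available'   # not manageable etc.
                and node.power_state is not None      # power state is known
                and not node.maintenance              # maintenance mode off
                and node.instance_uuid is None        # not already associated
                and node.resource_class == allocation.resource_class
                and set(allocation.traits or []) <= set(node.traits or []))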
@@ -801,6 +813,74 @@ self.assertEqual(new_hardware, node.driver) self.assertEqual(new_interface, node.boot_interface) + def test_update_node_deleting_allocation(self): + node = obj_utils.create_test_node(self.context) + alloc = obj_utils.create_test_allocation(self.context) + # Establish cross-linking between the node and the allocation + alloc.node_id = node.id + alloc.save() + node.refresh() + self.assertEqual(alloc.id, node.allocation_id) + self.assertEqual(alloc.uuid, node.instance_uuid) + + node.instance_uuid = None + res = self.service.update_node(self.context, node) + self.assertRaises(exception.AllocationNotFound, + objects.Allocation.get_by_id, + self.context, alloc.id) + self.assertIsNone(res['instance_uuid']) + self.assertIsNone(res['allocation_id']) + + node.refresh() + self.assertIsNone(node.instance_uuid) + self.assertIsNone(node.allocation_id) + + def test_update_node_deleting_allocation_forbidden(self): + node = obj_utils.create_test_node(self.context, + provision_state='active', + maintenance=False) + alloc = obj_utils.create_test_allocation(self.context) + # Establish cross-linking between the node and the allocation + alloc.node_id = node.id + alloc.save() + node.refresh() + self.assertEqual(alloc.id, node.allocation_id) + self.assertEqual(alloc.uuid, node.instance_uuid) + + node.instance_uuid = None + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.update_node, + self.context, node) + self.assertEqual(exception.InvalidState, exc.exc_info[0]) + + node.refresh() + self.assertEqual(alloc.id, node.allocation_id) + self.assertEqual(alloc.uuid, node.instance_uuid) + + def test_update_node_deleting_allocation_in_maintenance(self): + node = obj_utils.create_test_node(self.context, + provision_state='active', + maintenance=True) + alloc = obj_utils.create_test_allocation(self.context) + # Establish cross-linking between the node and the allocation + alloc.node_id = node.id + alloc.save() + node.refresh() + self.assertEqual(alloc.id, node.allocation_id) + self.assertEqual(alloc.uuid, node.instance_uuid) + + node.instance_uuid = None + res = self.service.update_node(self.context, node) + self.assertRaises(exception.AllocationNotFound, + objects.Allocation.get_by_id, + self.context, alloc.id) + self.assertIsNone(res['instance_uuid']) + self.assertIsNone(res['allocation_id']) + + node.refresh() + self.assertIsNone(node.instance_uuid) + self.assertIsNone(node.allocation_id) + @mgr_utils.mock_record_keepalive class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase): @@ -1246,6 +1326,11 @@ mock_iwdi): self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi) + @mock.patch.object(conductor_steps, 'validate_deploy_templates') + def test_do_node_deploy_validate_template_fail(self, mock_validate, + mock_iwdi): + self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi) + def test_do_node_deploy_partial_ok(self, mock_iwdi): mock_iwdi.return_value = False self._start_service() @@ -1951,26 +2036,29 @@ mock_store.assert_called_once_with(task.node, configdrive) @mock.patch.object(manager, '_store_configdrive') - def _test__do_node_deploy_ok(self, mock_store, configdrive=None): + def _test__do_node_deploy_ok(self, mock_store, configdrive=None, + expected_configdrive=None): + expected_configdrive = expected_configdrive or configdrive self._start_service() with mock.patch.object(fake.FakeDeploy, 'deploy', autospec=True) as mock_deploy: mock_deploy.return_value = None - node = obj_utils.create_test_node( - self.context, 
driver='fake-hardware', + self.node = obj_utils.create_test_node( + self.context, driver='fake-hardware', name=None, provision_state=states.DEPLOYING, target_provision_state=states.ACTIVE) - task = task_manager.TaskManager(self.context, node.uuid) + task = task_manager.TaskManager(self.context, self.node.uuid) manager.do_node_deploy(task, self.service.conductor.id, configdrive=configdrive) - node.refresh() - self.assertEqual(states.ACTIVE, node.provision_state) - self.assertEqual(states.NOSTATE, node.target_provision_state) - self.assertIsNone(node.last_error) + self.node.refresh() + self.assertEqual(states.ACTIVE, self.node.provision_state) + self.assertEqual(states.NOSTATE, self.node.target_provision_state) + self.assertIsNone(self.node.last_error) mock_deploy.assert_called_once_with(mock.ANY, mock.ANY) if configdrive: - mock_store.assert_called_once_with(task.node, configdrive) + mock_store.assert_called_once_with(task.node, + expected_configdrive) else: self.assertFalse(mock_store.called) @@ -1981,6 +2069,48 @@ configdrive = 'foo' self._test__do_node_deploy_ok(configdrive=configdrive) + @mock.patch('openstack.baremetal.configdrive.build', autospec=True) + def test__do_node_deploy_configdrive_as_dict(self, mock_cd): + mock_cd.return_value = 'foo' + configdrive = {'user_data': 'abcd'} + self._test__do_node_deploy_ok(configdrive=configdrive, + expected_configdrive='foo') + mock_cd.assert_called_once_with({'uuid': self.node.uuid}, + network_data=None, + user_data=b'abcd') + + @mock.patch('openstack.baremetal.configdrive.build', autospec=True) + def test__do_node_deploy_configdrive_as_dict_with_meta_data(self, mock_cd): + mock_cd.return_value = 'foo' + configdrive = {'meta_data': {'uuid': uuidutils.generate_uuid(), + 'name': 'new-name', + 'hostname': 'example.com'}} + self._test__do_node_deploy_ok(configdrive=configdrive, + expected_configdrive='foo') + mock_cd.assert_called_once_with(configdrive['meta_data'], + network_data=None, + user_data=None) + + @mock.patch('openstack.baremetal.configdrive.build', autospec=True) + def test__do_node_deploy_configdrive_with_network_data(self, mock_cd): + mock_cd.return_value = 'foo' + configdrive = {'network_data': {'links': []}} + self._test__do_node_deploy_ok(configdrive=configdrive, + expected_configdrive='foo') + mock_cd.assert_called_once_with({'uuid': self.node.uuid}, + network_data={'links': []}, + user_data=None) + + @mock.patch('openstack.baremetal.configdrive.build', autospec=True) + def test__do_node_deploy_configdrive_and_user_data_as_dict(self, mock_cd): + mock_cd.return_value = 'foo' + configdrive = {'user_data': {'user': 'data'}} + self._test__do_node_deploy_ok(configdrive=configdrive, + expected_configdrive='foo') + mock_cd.assert_called_once_with({'uuid': self.node.uuid}, + network_data=None, + user_data=b'{"user": "data"}') + @mock.patch.object(swift, 'SwiftAPI') @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare') def test__do_node_deploy_configdrive_swift_error(self, mock_prepare, @@ -2109,7 +2239,7 @@ @mock.patch.object(manager, '_do_next_deploy_step', autospec=True) @mock.patch.object(manager, '_old_rest_of_do_node_deploy', autospec=True) - @mock.patch.object(conductor_utils, 'set_node_deployment_steps', + @mock.patch.object(conductor_steps, 'set_node_deployment_steps', autospec=True) def test_do_node_deploy_deprecated(self, mock_set_steps, mock_old_way, mock_deploy_step): @@ -2130,7 +2260,7 @@ @mock.patch.object(manager, '_do_next_deploy_step', autospec=True) @mock.patch.object(manager, '_old_rest_of_do_node_deploy', 
autospec=True) - @mock.patch.object(conductor_utils, 'set_node_deployment_steps', + @mock.patch.object(conductor_steps, 'set_node_deployment_steps', autospec=True) def test_do_node_deploy_steps(self, mock_set_steps, mock_old_way, mock_deploy_step): @@ -2159,7 +2289,7 @@ @mock.patch.object(manager, '_do_next_deploy_step', autospec=True) @mock.patch.object(manager, '_old_rest_of_do_node_deploy', autospec=True) - @mock.patch.object(conductor_utils, 'set_node_deployment_steps', + @mock.patch.object(conductor_steps, 'set_node_deployment_steps', autospec=True) def test_do_node_deploy_steps_old_rpc(self, mock_set_steps, mock_old_way, mock_deploy_step): @@ -2726,13 +2856,15 @@ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down') def _test__do_node_tear_down_ok(self, mock_tear_down, mock_clean, mock_unbind, mock_console, - enabled_console=False): + enabled_console=False, + with_allocation=False): # test when driver.deploy.tear_down succeeds node = obj_utils.create_test_node( self.context, driver='fake-hardware', provision_state=states.DELETING, target_provision_state=states.AVAILABLE, - instance_uuid=uuidutils.generate_uuid(), + instance_uuid=(uuidutils.generate_uuid() + if not with_allocation else None), instance_info={'foo': 'bar'}, console_enabled=enabled_console, driver_internal_info={'is_whole_disk_image': False, @@ -2742,6 +2874,12 @@ port = obj_utils.create_test_port( self.context, node_id=node.id, internal_info={'tenant_vif_port_id': 'foo'}) + if with_allocation: + alloc = obj_utils.create_test_allocation(self.context) + # Establish cross-linking between the node and the allocation + alloc.node_id = node.id + alloc.save() + node.refresh() task = task_manager.TaskManager(self.context, node.uuid) self._start_service() @@ -2753,6 +2891,7 @@ self.assertEqual(states.AVAILABLE, node.target_provision_state) self.assertIsNone(node.last_error) self.assertIsNone(node.instance_uuid) + self.assertIsNone(node.allocation_id) self.assertEqual({}, node.instance_info) self.assertNotIn('instance', node.driver_internal_info) self.assertNotIn('clean_steps', node.driver_internal_info) @@ -2766,6 +2905,10 @@ mock_console.assert_called_once_with(task) else: self.assertFalse(mock_console.called) + if with_allocation: + self.assertRaises(exception.AllocationNotFound, + objects.Allocation.get_by_id, + self.context, alloc.id) def test__do_node_tear_down_ok_without_console(self): self._test__do_node_tear_down_ok(enabled_console=False) @@ -2773,6 +2916,9 @@ def test__do_node_tear_down_ok_with_console(self): self._test__do_node_tear_down_ok(enabled_console=True) + def test__do_node_tear_down_with_allocation(self): + self._test__do_node_tear_down_ok(with_allocation=True) + @mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up') @mock.patch('ironic.conductor.manager.ConductorManager._do_node_clean') @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down') @@ -3354,7 +3500,7 @@ self.__do_node_clean_validate_fail(mock_validate, clean_steps=[]) @mock.patch.object(manager, 'LOG', autospec=True) - @mock.patch.object(conductor_utils, 'set_node_cleaning_steps', + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) @mock.patch('ironic.conductor.manager.ConductorManager.' 
'_do_next_clean_step', autospec=True) @@ -3611,7 +3757,7 @@ self.__do_node_clean_prepare_clean_wait(clean_steps=[self.deploy_raid]) @mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True) - @mock.patch.object(conductor_utils, 'set_node_cleaning_steps', + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) def __do_node_clean_steps_fail(self, mock_steps, mock_validate, clean_steps=None, invalid_exc=True): @@ -3643,7 +3789,7 @@ self.__do_node_clean_steps_fail(clean_steps=[self.deploy_raid], invalid_exc=invalid) - @mock.patch.object(conductor_utils, 'set_node_cleaning_steps', + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', autospec=True) @mock.patch('ironic.conductor.manager.ConductorManager.' '_do_next_clean_step', autospec=True) @@ -4700,6 +4846,23 @@ self.assertEqual(reason, ret['deploy']['reason']) mock_iwdi.assert_called_once_with(self.context, node.instance_info) + @mock.patch.object(images, 'is_whole_disk_image') + def test_validate_driver_interfaces_validation_fail_deploy_templates( + self, mock_iwdi): + mock_iwdi.return_value = False + node = obj_utils.create_test_node(self.context, driver='fake-hardware', + network_interface='noop') + with mock.patch( + 'ironic.conductor.steps.validate_deploy_templates' + ) as mock_validate: + reason = 'fake reason' + mock_validate.side_effect = exception.InvalidParameterValue(reason) + ret = self.service.validate_driver_interfaces(self.context, + node.uuid) + self.assertFalse(ret['deploy']['result']) + self.assertEqual(reason, ret['deploy']['reason']) + mock_iwdi.assert_called_once_with(self.context, node.instance_info) + @mock.patch.object(manager.ConductorManager, '_fail_if_in_state', autospec=True) @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor') @@ -4940,6 +5103,25 @@ node.refresh() self.assertIsNone(node.reservation) + def test_destroy_node_with_allocation(self): + # Nodes with allocations can be deleted in maintenance + node = obj_utils.create_test_node(self.context, + provision_state=states.ACTIVE, + maintenance=True) + alloc = obj_utils.create_test_allocation(self.context) + # Establish cross-linking between the node and the allocation + alloc.node_id = node.id + alloc.save() + node.refresh() + + self.service.destroy_node(self.context, node.uuid) + self.assertRaises(exception.NodeNotFound, + self.dbapi.get_node_by_uuid, + node.uuid) + self.assertRaises(exception.AllocationNotFound, + self.dbapi.get_allocation_by_id, + alloc.id) + def test_destroy_node_invalid_provision_state(self): self._start_service() node = obj_utils.create_test_node(self.context, @@ -4954,6 +5136,22 @@ node.refresh() self.assertIsNone(node.reservation) + def test_destroy_node_protected_provision_state_available(self): + CONF.set_override('allow_deleting_available_nodes', + False, group='conductor') + self._start_service() + node = obj_utils.create_test_node(self.context, + provision_state=states.AVAILABLE) + + exc = self.assertRaises(messaging.rpc.ExpectedException, + self.service.destroy_node, + self.context, node.uuid) + # Compare true exception hidden by @messaging.expected_exceptions + self.assertEqual(exception.InvalidState, exc.exc_info[0]) + # Verify reservation was released. 
+        node.refresh()
+        self.assertIsNone(node.reservation)
+
     def test_destroy_node_protected(self):
         self._start_service()
         node = obj_utils.create_test_node(self.context,
@@ -5395,8 +5593,9 @@
             expected_result = {}
         self.assertEqual(expected_result, actual_result)

+    @mock.patch.object(messaging.Notifier, 'info', autospec=True)
     @mock.patch.object(task_manager, 'acquire')
-    def test_send_sensor_task(self, acquire_mock):
+    def test_send_sensor_task(self, acquire_mock, notifier_mock):
         nodes = queue.Queue()
         for i in range(5):
             nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', '', None))
@@ -5405,6 +5604,8 @@

         task = acquire_mock.return_value.__enter__.return_value
         task.node.maintenance = False
+        task.node.driver = 'fake'
+        task.node.name = 'fake_node'
         get_sensors_data_mock = task.driver.management.get_sensors_data
         validate_mock = task.driver.management.validate
         get_sensors_data_mock.return_value = 'fake-sensor-data'
@@ -5412,6 +5613,21 @@
         self.assertEqual(5, acquire_mock.call_count)
         self.assertEqual(5, validate_mock.call_count)
         self.assertEqual(5, get_sensors_data_mock.call_count)
+        self.assertEqual(5, notifier_mock.call_count)
+        if six.PY2:
+            # Bail out on python2: matching the data structure fails because
+            # it requires the order to be consistent, but the mock records
+            # the call dictionary contents in random order, changing with
+            # every invocation. :\
+            return
+        n_call = mock.call(mock.ANY, mock.ANY, 'hardware.fake.metrics',
+                           {'event_type': 'hardware.fake.metrics.update',
+                            'node_name': 'fake_node', 'timestamp': mock.ANY,
+                            'message_id': mock.ANY,
+                            'payload': 'fake-sensor-data',
+                            'node_uuid': mock.ANY, 'instance_uuid': None})
+        notifier_mock.assert_has_calls([n_call, n_call, n_call,
+                                        n_call, n_call])

     @mock.patch.object(task_manager, 'acquire')
     def test_send_sensor_task_shutdown(self, acquire_mock):
@@ -6303,6 +6519,10 @@
         self.task.upgrade_lock.assert_called_once_with()


+@mock.patch.object(waiters, 'wait_for_all',
+                   new=mock.MagicMock(return_value=(0, 0)))
+@mock.patch.object(manager.ConductorManager, '_spawn_worker',
+                   new=lambda self, fun, *args: fun(*args))
 @mock.patch.object(manager, 'do_sync_power_state')
 @mock.patch.object(task_manager, 'acquire')
 @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
@@ -6964,7 +7184,7 @@
                     'force_persistent_boot_device', 'ipmi_protocol_version',
                     'ipmi_force_boot_device', 'deploy_forces_oob_reboot',
                     'rescue_kernel', 'rescue_ramdisk',
-                    'ipmi_disable_boot_timeout']
+                    'ipmi_disable_boot_timeout', 'ipmi_hex_kg_key']
         self._check_driver_properties("ipmi", expected)

     def test_driver_properties_snmp(self):
@@ -7027,6 +7247,92 @@
         self._check_hardware_type_properties('manual-management', expected)


+@mock.patch.object(waiters, 'wait_for_all')
+@mock.patch.object(manager.ConductorManager, '_spawn_worker')
+@mock.patch.object(manager.ConductorManager, '_sync_power_state_nodes_task')
+class ParallelPowerSyncTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
+
+    def setUp(self):
+        super(ParallelPowerSyncTestCase, self).setUp()
+        self.service = manager.ConductorManager('hostname', 'test-topic')
+
+    def test__sync_power_states_9_nodes_8_workers(
+            self, sync_mock, spawn_mock, waiter_mock):
+
+        CONF.set_override('sync_power_state_workers', 8, group='conductor')
+
+        with mock.patch.object(self.service, 'iter_nodes',
+                               new=mock.MagicMock(return_value=[[0]] * 9)):
+
+            self.service._sync_power_states(self.context)
+
+            self.assertEqual(7, spawn_mock.call_count)
+            self.assertEqual(1, sync_mock.call_count)
+            self.assertEqual(1,
waiter_mock.call_count) + + def test__sync_power_states_6_nodes_8_workers( + self, sync_mock, spawn_mock, waiter_mock): + + CONF.set_override('sync_power_state_workers', 8, group='conductor') + + with mock.patch.object(self.service, 'iter_nodes', + new=mock.MagicMock(return_value=[[0]] * 6)): + + self.service._sync_power_states(self.context) + + self.assertEqual(5, spawn_mock.call_count) + self.assertEqual(1, sync_mock.call_count) + self.assertEqual(1, waiter_mock.call_count) + + def test__sync_power_states_1_nodes_8_workers( + self, sync_mock, spawn_mock, waiter_mock): + + CONF.set_override('sync_power_state_workers', 8, group='conductor') + + with mock.patch.object(self.service, 'iter_nodes', + new=mock.MagicMock(return_value=[[0]])): + + self.service._sync_power_states(self.context) + + self.assertEqual(0, spawn_mock.call_count) + self.assertEqual(1, sync_mock.call_count) + self.assertEqual(1, waiter_mock.call_count) + + def test__sync_power_states_9_nodes_1_worker( + self, sync_mock, spawn_mock, waiter_mock): + + CONF.set_override('sync_power_state_workers', 1, group='conductor') + + with mock.patch.object(self.service, 'iter_nodes', + new=mock.MagicMock(return_value=[[0]] * 9)): + + self.service._sync_power_states(self.context) + + self.assertEqual(0, spawn_mock.call_count) + self.assertEqual(1, sync_mock.call_count) + self.assertEqual(1, waiter_mock.call_count) + + @mock.patch.object(queue, 'Queue', autospec=True) + def test__sync_power_states_node_prioritization( + self, queue_mock, sync_mock, spawn_mock, waiter_mock): + + CONF.set_override('sync_power_state_workers', 1, group='conductor') + + with mock.patch.object( + self.service, 'iter_nodes', + new=mock.MagicMock(return_value=[[0], [1], [2]]) + ), mock.patch.dict( + self.service.power_state_sync_count, + {0: 1, 1: 0, 2: 2}, clear=True): + + queue_mock.return_value.qsize.return_value = 0 + + self.service._sync_power_states(self.context) + + expected_calls = [mock.call([2]), mock.call([0]), mock.call([1])] + queue_mock.return_value.put.assert_has_calls(expected_calls) + + @mock.patch.object(task_manager, 'acquire') @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor') @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list') diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_rpcapi.py ironic-12.1.0/ironic/tests/unit/conductor/test_rpcapi.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_rpcapi.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_rpcapi.py 2019-03-21 20:07:40.000000000 +0000 @@ -168,6 +168,20 @@ self.assertEqual(rpcapi.get_conductor_for(self.fake_node_obj), 'fake-host') + def test_get_random_topic(self): + CONF.set_override('host', 'fake-host') + self.dbapi.register_conductor({'hostname': 'fake-host', 'drivers': []}) + + rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic') + expected_topic = 'fake-topic.fake-host' + self.assertEqual(expected_topic, rpcapi.get_random_topic()) + + def test_get_random_topic_no_conductors(self): + CONF.set_override('host', 'fake-host') + + rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic') + self.assertRaises(exception.TemporaryFailure, rpcapi.get_random_topic) + def _test_can_send_create_port(self, can_send): rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic') with mock.patch.object(rpcapi.client, @@ -584,3 +598,15 @@ node_id='fake-node', traits=None, version='1.44') + + def test_create_allocation(self): + self._test_rpcapi('create_allocation', + 'call', + allocation='fake-allocation', + version='1.48') 
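The ParallelPowerSyncTestCase above pins down how power-state syncing is fanned out: with N nodes and a sync_power_state_workers limit of W, min(N, W) - 1 background workers are spawned while the conductor's own thread also works the queue, and nodes with more consecutive failed syncs are queued first. A minimal sketch of that split, assuming stand-in spawn_worker/sync_task helpers rather than the real conductor internals:

import queue

def split_power_sync_work(nodes, sync_counts, max_workers,
                          spawn_worker, sync_task):
    # Queue nodes with the most consecutive failed syncs first, matching
    # test__sync_power_states_node_prioritization: counts {0: 1, 1: 0, 2: 2}
    # yield the queueing order 2, 0, 1.
    nodes_queue = queue.Queue()
    for node in sorted(nodes,
                       key=lambda n: sync_counts.get(n, 0),
                       reverse=True):
        nodes_queue.put(node)

    # One share of the work stays on the calling thread, so only
    # min(N, W) - 1 workers are spawned: 9 nodes / 8 workers -> 7 spawns,
    # 6 nodes -> 5 spawns, a single node or a single worker -> 0 spawns.
    number_of_workers = min(max_workers, len(nodes))
    futures = [spawn_worker(sync_task, nodes_queue)
               for _ in range(number_of_workers - 1)]
    sync_task(nodes_queue)  # the conductor thread processes the queue too
    return futures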
+ + def test_destroy_allocation(self): + self._test_rpcapi('destroy_allocation', + 'call', + allocation='fake-allocation', + version='1.48') diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_steps.py ironic-12.1.0/ironic/tests/unit/conductor/test_steps.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_steps.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_steps.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,724 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslo_utils import uuidutils + +from ironic.common import exception +from ironic.common import states +from ironic.conductor import steps as conductor_steps +from ironic.conductor import task_manager +from ironic import objects +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + +CONF = cfg.CONF + + +class NodeDeployStepsTestCase(db_base.DbTestCase): + def setUp(self): + super(NodeDeployStepsTestCase, self).setUp() + + self.deploy_start = { + 'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'} + self.power_one = { + 'step': 'power_one', 'priority': 40, 'interface': 'power'} + self.deploy_middle = { + 'step': 'deploy_middle', 'priority': 40, 'interface': 'deploy'} + self.deploy_end = { + 'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'} + self.power_disable = { + 'step': 'power_disable', 'priority': 0, 'interface': 'power'} + self.deploy_core = { + 'step': 'deploy', 'priority': 100, 'interface': 'deploy'} + # enabled steps + self.deploy_steps = [self.deploy_start, self.power_one, + self.deploy_middle, self.deploy_end] + # Deploy step with argsinfo. + self.deploy_raid = { + 'step': 'build_raid', 'priority': 0, 'interface': 'deploy', + 'argsinfo': {'arg1': {'description': 'desc1', 'required': True}, + 'arg2': {'description': 'desc2'}}} + self.node = obj_utils.create_test_node( + self.context, driver='fake-hardware') + + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', + autospec=True) + def test__get_deployment_steps(self, mock_mgt_steps, mock_power_steps, + mock_deploy_steps): + # Test getting deploy steps, with one driver returning None, two + # conflicting priorities, and asserting they are ordered properly. 
+ + mock_power_steps.return_value = [self.power_disable, self.power_one] + mock_deploy_steps.return_value = [ + self.deploy_start, self.deploy_middle, self.deploy_end] + + expected = self.deploy_steps + [self.power_disable] + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + steps = conductor_steps._get_deployment_steps(task, enabled=False) + + self.assertEqual(expected, steps) + mock_mgt_steps.assert_called_once_with(mock.ANY, task) + mock_power_steps.assert_called_once_with(mock.ANY, task) + mock_deploy_steps.assert_called_once_with(mock.ANY, task) + + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', + autospec=True) + def test__get_deploy_steps_unsorted(self, mock_mgt_steps, mock_power_steps, + mock_deploy_steps): + + mock_deploy_steps.return_value = [self.deploy_end, + self.deploy_start, + self.deploy_middle] + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + steps = conductor_steps._get_deployment_steps(task, enabled=False, + sort=False) + self.assertEqual(mock_deploy_steps.return_value, steps) + mock_mgt_steps.assert_called_once_with(mock.ANY, task) + mock_power_steps.assert_called_once_with(mock.ANY, task) + mock_deploy_steps.assert_called_once_with(mock.ANY, task) + + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', + autospec=True) + @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', + autospec=True) + def test__get_deployment_steps_only_enabled( + self, mock_mgt_steps, mock_power_steps, mock_deploy_steps): + # Test getting only deploy steps, with one driver returning None, two + # conflicting priorities, and asserting they are ordered properly. + # Should discard zero-priority deploy step. 
+ + mock_power_steps.return_value = [self.power_one, self.power_disable] + mock_deploy_steps.return_value = [self.deploy_end, + self.deploy_middle, + self.deploy_start] + + with task_manager.acquire( + self.context, self.node.uuid, shared=True) as task: + steps = conductor_steps._get_deployment_steps(task, enabled=True) + + self.assertEqual(self.deploy_steps, steps) + mock_mgt_steps.assert_called_once_with(mock.ANY, task) + mock_power_steps.assert_called_once_with(mock.ANY, task) + mock_deploy_steps.assert_called_once_with(mock.ANY, task) + + @mock.patch.object(objects.DeployTemplate, 'list_by_names') + def test__get_deployment_templates_no_traits(self, mock_list): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + templates = conductor_steps._get_deployment_templates(task) + self.assertEqual([], templates) + self.assertFalse(mock_list.called) + + @mock.patch.object(objects.DeployTemplate, 'list_by_names') + def test__get_deployment_templates(self, mock_list): + traits = ['CUSTOM_DT1', 'CUSTOM_DT2'] + node = obj_utils.create_test_node( + self.context, uuid=uuidutils.generate_uuid(), + instance_info={'traits': traits}) + template1 = obj_utils.get_test_deploy_template(self.context) + template2 = obj_utils.get_test_deploy_template( + self.context, name='CUSTOM_DT2', uuid=uuidutils.generate_uuid(), + steps=[{'interface': 'bios', 'step': 'apply_configuration', + 'args': {}, 'priority': 1}]) + mock_list.return_value = [template1, template2] + expected = [template1, template2] + with task_manager.acquire( + self.context, node.uuid, shared=False) as task: + templates = conductor_steps._get_deployment_templates(task) + self.assertEqual(expected, templates) + mock_list.assert_called_once_with(task.context, traits) + + def test__get_steps_from_deployment_templates(self): + template1 = obj_utils.get_test_deploy_template(self.context) + template2 = obj_utils.get_test_deploy_template( + self.context, name='CUSTOM_DT2', uuid=uuidutils.generate_uuid(), + steps=[{'interface': 'bios', 'step': 'apply_configuration', + 'args': {}, 'priority': 1}]) + step1 = template1.steps[0] + step2 = template2.steps[0] + expected = [ + { + 'interface': step1['interface'], + 'step': step1['step'], + 'args': step1['args'], + 'priority': step1['priority'], + }, + { + 'interface': step2['interface'], + 'step': step2['step'], + 'args': step2['args'], + 'priority': step2['priority'], + } + ] + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + steps = conductor_steps._get_steps_from_deployment_templates( + task, [template1, template2]) + self.assertEqual(expected, steps) + + @mock.patch.object(conductor_steps, '_get_validated_steps_from_templates', + autospec=True) + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def _test__get_all_deployment_steps(self, user_steps, driver_steps, + expected_steps, mock_steps, + mock_validated): + mock_validated.return_value = user_steps + mock_steps.return_value = driver_steps + + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + steps = conductor_steps._get_all_deployment_steps(task) + self.assertEqual(expected_steps, steps) + mock_validated.assert_called_once_with(task) + mock_steps.assert_called_once_with(task, enabled=True, sort=False) + + def test__get_all_deployment_steps_no_steps(self): + # Nothing in -> nothing out. 
+ user_steps = [] + driver_steps = [] + expected_steps = [] + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_no_user_steps(self): + # Only driver steps in -> only driver steps out. + user_steps = [] + driver_steps = self.deploy_steps + expected_steps = self.deploy_steps + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_no_driver_steps(self): + # Only user steps in -> only user steps out. + user_steps = self.deploy_steps + driver_steps = [] + expected_steps = self.deploy_steps + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_user_and_driver_steps(self): + # Driver and user steps in -> driver and user steps out. + user_steps = self.deploy_steps[:2] + driver_steps = self.deploy_steps[2:] + expected_steps = self.deploy_steps + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_disable_core_steps(self): + # User steps can disable core driver steps. + user_steps = [self.deploy_core.copy()] + user_steps[0].update({'priority': 0}) + driver_steps = [self.deploy_core] + expected_steps = [] + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_override_driver_steps(self): + # User steps override non-core driver steps. + user_steps = [step.copy() for step in self.deploy_steps[:2]] + user_steps[0].update({'priority': 200}) + user_steps[1].update({'priority': 100}) + driver_steps = self.deploy_steps + expected_steps = user_steps + self.deploy_steps[2:] + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + def test__get_all_deployment_steps_duplicate_user_steps(self): + # Duplicate user steps override non-core driver steps. + + # NOTE(mgoddard): This case is currently prevented by the API and + # conductor - the interface/step must be unique across all enabled + # steps. This test ensures that we can support this case, in case we + # choose to allow it in future. + user_steps = [self.deploy_start.copy(), self.deploy_start.copy()] + user_steps[0].update({'priority': 200}) + user_steps[1].update({'priority': 100}) + driver_steps = self.deploy_steps + # Each user invocation of the deploy_start step should be included, but + # not the default deploy_start from the driver. 
+ expected_steps = user_steps + self.deploy_steps[1:] + self._test__get_all_deployment_steps(user_steps, driver_steps, + expected_steps) + + @mock.patch.object(conductor_steps, '_get_validated_steps_from_templates', + autospec=True) + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__get_all_deployment_steps_error(self, mock_steps, mock_validated): + mock_validated.side_effect = exception.InvalidParameterValue('foo') + + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertRaises(exception.InvalidParameterValue, + conductor_steps._get_all_deployment_steps, task) + mock_validated.assert_called_once_with(task) + self.assertFalse(mock_steps.called) + + @mock.patch.object(conductor_steps, '_get_all_deployment_steps', + autospec=True) + def test_set_node_deployment_steps(self, mock_steps): + mock_steps.return_value = self.deploy_steps + + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + conductor_steps.set_node_deployment_steps(task) + self.node.refresh() + self.assertEqual(self.deploy_steps, + self.node.driver_internal_info['deploy_steps']) + self.assertEqual({}, self.node.deploy_step) + self.assertIsNone( + self.node.driver_internal_info['deploy_step_index']) + mock_steps.assert_called_once_with(task) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps(self, mock_steps): + mock_steps.return_value = self.deploy_steps + + user_steps = [{'step': 'deploy_start', 'interface': 'deploy', + 'priority': 100}, + {'step': 'power_one', 'interface': 'power', + 'priority': 200}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + result = conductor_steps._validate_user_deploy_steps(task, + user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + self.assertEqual(user_steps, result) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_no_steps(self, mock_steps): + mock_steps.return_value = self.deploy_steps + + with task_manager.acquire(self.context, self.node.uuid) as task: + conductor_steps._validate_user_deploy_steps(task, []) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_get_steps_exception(self, mock_steps): + mock_steps.side_effect = exception.InstanceDeployFailure('bad') + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaises(exception.InstanceDeployFailure, + conductor_steps._validate_user_deploy_steps, + task, []) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_not_supported(self, mock_steps): + mock_steps.return_value = self.deploy_steps + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'bad_step', 'interface': 'deploy', + 'priority': 100}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "does not support.*bad_step", + conductor_steps._validate_user_deploy_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def 
test__validate_user_deploy_steps_invalid_arg(self, mock_steps): + mock_steps.return_value = self.deploy_steps + user_steps = [{'step': 'power_one', 'interface': 'power', + 'args': {'arg1': 'val1', 'arg2': 'val2'}, + 'priority': 200}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "power_one.*unexpected.*arg1", + conductor_steps._validate_user_deploy_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_missing_required_arg(self, + mock_steps): + mock_steps.return_value = [self.power_one, self.deploy_raid] + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'build_raid', 'interface': 'deploy', + 'priority': 100}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "build_raid.*missing.*arg1", + conductor_steps._validate_user_deploy_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_disable_non_core(self, mock_steps): + # Required arguments don't apply to disabled steps. + mock_steps.return_value = [self.power_one, self.deploy_raid] + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'build_raid', 'interface': 'deploy', + 'priority': 0}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + result = conductor_steps._validate_user_deploy_steps(task, + user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + self.assertEqual(user_steps, result) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_disable_core(self, mock_steps): + mock_steps.return_value = [self.power_one, self.deploy_core] + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'deploy', 'interface': 'deploy', 'priority': 0}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + result = conductor_steps._validate_user_deploy_steps(task, + user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + self.assertEqual(user_steps, result) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_override_core(self, mock_steps): + mock_steps.return_value = [self.power_one, self.deploy_core] + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'deploy', 'interface': 'deploy', + 'priority': 200}] + + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "deploy.*is a core step", + conductor_steps._validate_user_deploy_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_deployment_steps', autospec=True) + def test__validate_user_deploy_steps_duplicates(self, mock_steps): + mock_steps.return_value = [self.power_one, self.deploy_core] + user_steps = [{'step': 'power_one', 'interface': 'power', + 'priority': 200}, + {'step': 'power_one', 'interface': 'power', + 'priority': 100}] + + with task_manager.acquire(self.context, self.node.uuid) as 
task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "Duplicate deploy steps for " + "power.power_one", + conductor_steps._validate_user_deploy_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + +class NodeCleaningStepsTestCase(db_base.DbTestCase): + def setUp(self): + super(NodeCleaningStepsTestCase, self).setUp() + + self.power_update = { + 'step': 'update_firmware', 'priority': 10, 'interface': 'power'} + self.deploy_update = { + 'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'} + self.deploy_erase = { + 'step': 'erase_disks', 'priority': 20, 'interface': 'deploy', + 'abortable': True} + # Automated cleaning should be executed in this order + self.clean_steps = [self.deploy_erase, self.power_update, + self.deploy_update] + # Manual clean step + self.deploy_raid = { + 'step': 'build_raid', 'priority': 0, 'interface': 'deploy', + 'argsinfo': {'arg1': {'description': 'desc1', 'required': True}, + 'arg2': {'description': 'desc2'}}} + + @mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps', + lambda self, task: []) + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') + @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') + def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps): + # Test getting cleaning steps, with one driver returning None, two + # conflicting priorities, and asserting they are ordered properly. + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.CLEANING, + target_provision_state=states.AVAILABLE) + + mock_power_steps.return_value = [self.power_update] + mock_deploy_steps.return_value = [self.deploy_erase, + self.deploy_update] + + with task_manager.acquire( + self.context, node.uuid, shared=False) as task: + steps = conductor_steps._get_cleaning_steps(task, enabled=False) + + self.assertEqual(self.clean_steps, steps) + + @mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps', + lambda self, task: []) + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') + @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') + def test__get_cleaning_steps_unsorted(self, mock_power_steps, + mock_deploy_steps): + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.CLEANING, + target_provision_state=states.MANAGEABLE) + + mock_deploy_steps.return_value = [self.deploy_raid, + self.deploy_update, + self.deploy_erase] + with task_manager.acquire( + self.context, node.uuid, shared=False) as task: + steps = conductor_steps._get_cleaning_steps(task, enabled=False, + sort=False) + self.assertEqual(mock_deploy_steps.return_value, steps) + + @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') + @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') + def test__get_cleaning_steps_only_enabled(self, mock_power_steps, + mock_deploy_steps): + # Test getting only cleaning steps, with one driver returning None, two + # conflicting priorities, and asserting they are ordered properly. 
+ # Should discard zero-priority (manual) clean step + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.CLEANING, + target_provision_state=states.AVAILABLE) + + mock_power_steps.return_value = [self.power_update] + mock_deploy_steps.return_value = [self.deploy_erase, + self.deploy_update, + self.deploy_raid] + + with task_manager.acquire( + self.context, node.uuid, shared=True) as task: + steps = conductor_steps._get_cleaning_steps(task, enabled=True) + + self.assertEqual(self.clean_steps, steps) + + @mock.patch.object(conductor_steps, '_validate_user_clean_steps') + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test_set_node_cleaning_steps_automated(self, mock_steps, + mock_validate_user_steps): + mock_steps.return_value = self.clean_steps + + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.CLEANING, + target_provision_state=states.AVAILABLE, + last_error=None, + clean_step=None) + + with task_manager.acquire( + self.context, node.uuid, shared=False) as task: + conductor_steps.set_node_cleaning_steps(task) + node.refresh() + self.assertEqual(self.clean_steps, + node.driver_internal_info['clean_steps']) + self.assertEqual({}, node.clean_step) + mock_steps.assert_called_once_with(task, enabled=True) + self.assertFalse(mock_validate_user_steps.called) + + @mock.patch.object(conductor_steps, '_validate_user_clean_steps') + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test_set_node_cleaning_steps_manual(self, mock_steps, + mock_validate_user_steps): + clean_steps = [self.deploy_raid] + mock_steps.return_value = self.clean_steps + mock_validate_user_steps.return_value = clean_steps + + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + provision_state=states.CLEANING, + target_provision_state=states.MANAGEABLE, + last_error=None, + clean_step=None, + driver_internal_info={'clean_steps': clean_steps}) + + with task_manager.acquire( + self.context, node.uuid, shared=False) as task: + conductor_steps.set_node_cleaning_steps(task) + node.refresh() + self.assertEqual(clean_steps, + node.driver_internal_info['clean_steps']) + self.assertEqual({}, node.clean_step) + self.assertFalse(mock_steps.called) + mock_validate_user_steps.assert_called_once_with(task, clean_steps) + + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.return_value = self.clean_steps + + user_steps = [{'step': 'update_firmware', 'interface': 'power'}, + {'step': 'erase_disks', 'interface': 'deploy'}] + + with task_manager.acquire(self.context, node.uuid) as task: + result = conductor_steps._validate_user_clean_steps(task, + user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + expected = [{'step': 'update_firmware', 'interface': 'power', + 'priority': 10, 'abortable': False}, + {'step': 'erase_disks', 'interface': 'deploy', + 'priority': 20, 'abortable': True}] + self.assertEqual(expected, result) + + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps_no_steps(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.return_value = self.clean_steps + + with task_manager.acquire(self.context, node.uuid) as task: + conductor_steps._validate_user_clean_steps(task, []) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + 
@mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps_get_steps_exception(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.side_effect = exception.NodeCleaningFailure('bad') + + with task_manager.acquire(self.context, node.uuid) as task: + self.assertRaises(exception.NodeCleaningFailure, + conductor_steps._validate_user_clean_steps, + task, []) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps_not_supported(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.return_value = [self.power_update, self.deploy_raid] + user_steps = [{'step': 'update_firmware', 'interface': 'power'}, + {'step': 'bad_step', 'interface': 'deploy'}] + + with task_manager.acquire(self.context, node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "does not support.*bad_step", + conductor_steps._validate_user_clean_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps_invalid_arg(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.return_value = self.clean_steps + user_steps = [{'step': 'update_firmware', 'interface': 'power', + 'args': {'arg1': 'val1', 'arg2': 'val2'}}, + {'step': 'erase_disks', 'interface': 'deploy'}] + + with task_manager.acquire(self.context, node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "update_firmware.*unexpected.*arg1", + conductor_steps._validate_user_clean_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + @mock.patch.object(conductor_steps, '_get_cleaning_steps') + def test__validate_user_clean_steps_missing_required_arg(self, mock_steps): + node = obj_utils.create_test_node(self.context) + mock_steps.return_value = [self.power_update, self.deploy_raid] + user_steps = [{'step': 'update_firmware', 'interface': 'power'}, + {'step': 'build_raid', 'interface': 'deploy'}] + + with task_manager.acquire(self.context, node.uuid) as task: + self.assertRaisesRegex(exception.InvalidParameterValue, + "build_raid.*missing.*arg1", + conductor_steps._validate_user_clean_steps, + task, user_steps) + mock_steps.assert_called_once_with(task, enabled=False, sort=False) + + +@mock.patch.object(conductor_steps, '_get_deployment_templates', + autospec=True) +@mock.patch.object(conductor_steps, '_get_steps_from_deployment_templates', + autospec=True) +@mock.patch.object(conductor_steps, '_validate_user_deploy_steps', + autospec=True) +class GetValidatedStepsFromTemplatesTestCase(db_base.DbTestCase): + + def setUp(self): + super(GetValidatedStepsFromTemplatesTestCase, self).setUp() + self.node = obj_utils.create_test_node(self.context, + driver='fake-hardware') + self.template = obj_utils.get_test_deploy_template(self.context) + + def test_ok(self, mock_validate, mock_steps, mock_templates): + mock_templates.return_value = [self.template] + steps = [db_utils.get_test_deploy_template_step()] + mock_steps.return_value = steps + mock_validate.return_value = steps + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + result = conductor_steps._get_validated_steps_from_templates(task) + self.assertEqual(steps, result) + mock_templates.assert_called_once_with(task) + 
mock_steps.assert_called_once_with(task, [self.template]) + mock_validate.assert_called_once_with(task, steps, mock.ANY) + + def test_invalid_parameter_value(self, mock_validate, mock_steps, + mock_templates): + mock_templates.return_value = [self.template] + mock_validate.side_effect = exception.InvalidParameterValue('fake') + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertRaises( + exception.InvalidParameterValue, + conductor_steps._get_validated_steps_from_templates, task) + + def test_instance_deploy_failure(self, mock_validate, mock_steps, + mock_templates): + mock_templates.return_value = [self.template] + mock_validate.side_effect = exception.InstanceDeployFailure('foo') + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertRaises( + exception.InstanceDeployFailure, + conductor_steps._get_validated_steps_from_templates, task) + + +@mock.patch.object(conductor_steps, '_get_validated_steps_from_templates', + autospec=True) +class ValidateDeployTemplatesTestCase(db_base.DbTestCase): + + def setUp(self): + super(ValidateDeployTemplatesTestCase, self).setUp() + self.node = obj_utils.create_test_node(self.context, + driver='fake-hardware') + + def test_ok(self, mock_validated): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + result = conductor_steps.validate_deploy_templates(task) + self.assertIsNone(result) + mock_validated.assert_called_once_with(task) + + def test_error(self, mock_validated): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + mock_validated.side_effect = exception.InvalidParameterValue('foo') + self.assertRaises(exception.InvalidParameterValue, + conductor_steps.validate_deploy_templates, task) + mock_validated.assert_called_once_with(task) diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_task_manager.py ironic-12.1.0/ironic/tests/unit/conductor/test_task_manager.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_task_manager.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_task_manager.py 2019-03-21 20:07:40.000000000 +0000 @@ -79,6 +79,24 @@ release_mock.assert_called_once_with(self.context, self.host, self.node.id) + def test_no_driver(self, get_voltgt_mock, get_volconn_mock, + get_portgroups_mock, get_ports_mock, + build_driver_mock, reserve_mock, release_mock, + node_get_mock): + reserve_mock.return_value = self.node + with task_manager.TaskManager(self.context, 'fake-node-id', + load_driver=False) as task: + self.assertEqual(self.context, task.context) + self.assertEqual(self.node, task.node) + self.assertEqual(get_ports_mock.return_value, task.ports) + self.assertEqual(get_portgroups_mock.return_value, task.portgroups) + self.assertEqual(get_volconn_mock.return_value, + task.volume_connectors) + self.assertEqual(get_voltgt_mock.return_value, task.volume_targets) + self.assertIsNone(task.driver) + self.assertFalse(task.shared) + self.assertFalse(build_driver_mock.called) + def test_excl_nested_acquire( self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock, get_ports_mock, build_driver_mock, @@ -159,6 +177,28 @@ reserve_mock.assert_has_calls(expected_calls) self.assertEqual(2, reserve_mock.call_count) + def test_excl_lock_exception_no_retries( + self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock, + get_ports_mock, build_driver_mock, + reserve_mock, release_mock, node_get_mock): + retry_attempts = 3 + 
self.config(node_locked_retry_attempts=retry_attempts, + group='conductor') + + # Fail on the first lock attempt, succeed on the second. + reserve_mock.side_effect = [exception.NodeLocked(node='foo', + host='foo'), + self.node] + + self.assertRaises(exception.NodeLocked, + task_manager.TaskManager, + self.context, + 'fake-node-id', + retry=False) + + reserve_mock.assert_called_once_with(self.context, self.host, + 'fake-node-id') + def test_excl_lock_reserve_exception( self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock, get_ports_mock, build_driver_mock, diff -Nru ironic-12.0.0/ironic/tests/unit/conductor/test_utils.py ironic-12.1.0/ironic/tests/unit/conductor/test_utils.py --- ironic-12.0.0/ironic/tests/unit/conductor/test_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/conductor/test_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -9,14 +9,18 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import time import mock from oslo_config import cfg +from oslo_utils import timeutils from oslo_utils import uuidutils +from ironic.common import boot_devices from ironic.common import boot_modes from ironic.common import exception from ironic.common import network +from ironic.common import neutron from ironic.common import states from ironic.conductor import rpcapi from ironic.conductor import task_manager @@ -963,332 +967,6 @@ self.task.process_event.assert_called_once_with('fail') -class NodeDeployStepsTestCase(db_base.DbTestCase): - def setUp(self): - super(NodeDeployStepsTestCase, self).setUp() - - self.deploy_start = { - 'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'} - self.power_one = { - 'step': 'power_one', 'priority': 40, 'interface': 'power'} - self.deploy_middle = { - 'step': 'deploy_middle', 'priority': 40, 'interface': 'deploy'} - self.deploy_end = { - 'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'} - self.power_disable = { - 'step': 'power_disable', 'priority': 0, 'interface': 'power'} - # enabled steps - self.deploy_steps = [self.deploy_start, self.power_one, - self.deploy_middle, self.deploy_end] - self.node = obj_utils.create_test_node( - self.context, driver='fake-hardware') - - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', - autospec=True) - def test__get_deployment_steps(self, mock_mgt_steps, mock_power_steps, - mock_deploy_steps): - # Test getting deploy steps, with one driver returning None, two - # conflicting priorities, and asserting they are ordered properly. 
- - mock_power_steps.return_value = [self.power_disable, self.power_one] - mock_deploy_steps.return_value = [ - self.deploy_start, self.deploy_middle, self.deploy_end] - - expected = self.deploy_steps + [self.power_disable] - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - steps = conductor_utils._get_deployment_steps(task, enabled=False) - - self.assertEqual(expected, steps) - mock_mgt_steps.assert_called_once_with(mock.ANY, task) - mock_power_steps.assert_called_once_with(mock.ANY, task) - mock_deploy_steps.assert_called_once_with(mock.ANY, task) - - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', - autospec=True) - def test__get_deploy_steps_unsorted(self, mock_mgt_steps, mock_power_steps, - mock_deploy_steps): - - mock_deploy_steps.return_value = [self.deploy_end, - self.deploy_start, - self.deploy_middle] - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - steps = conductor_utils._get_deployment_steps(task, enabled=False, - sort=False) - self.assertEqual(mock_deploy_steps.return_value, steps) - mock_mgt_steps.assert_called_once_with(mock.ANY, task) - mock_power_steps.assert_called_once_with(mock.ANY, task) - mock_deploy_steps.assert_called_once_with(mock.ANY, task) - - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakePower.get_deploy_steps', - autospec=True) - @mock.patch('ironic.drivers.modules.fake.FakeManagement.get_deploy_steps', - autospec=True) - def test__get_deployment_steps_only_enabled( - self, mock_mgt_steps, mock_power_steps, mock_deploy_steps): - # Test getting only deploy steps, with one driver returning None, two - # conflicting priorities, and asserting they are ordered properly. - # Should discard zero-priority deploy step. 
- - mock_power_steps.return_value = [self.power_one, self.power_disable] - mock_deploy_steps.return_value = [self.deploy_end, - self.deploy_middle, - self.deploy_start] - - with task_manager.acquire( - self.context, self.node.uuid, shared=True) as task: - steps = conductor_utils._get_deployment_steps(task, enabled=True) - - self.assertEqual(self.deploy_steps, steps) - mock_mgt_steps.assert_called_once_with(mock.ANY, task) - mock_power_steps.assert_called_once_with(mock.ANY, task) - mock_deploy_steps.assert_called_once_with(mock.ANY, task) - - @mock.patch.object(conductor_utils, '_get_deployment_steps', - autospec=True) - def test_set_node_deployment_steps(self, mock_steps): - mock_steps.return_value = self.deploy_steps - - with task_manager.acquire( - self.context, self.node.uuid, shared=False) as task: - conductor_utils.set_node_deployment_steps(task) - self.node.refresh() - self.assertEqual(self.deploy_steps, - self.node.driver_internal_info['deploy_steps']) - self.assertEqual({}, self.node.deploy_step) - self.assertIsNone( - self.node.driver_internal_info['deploy_step_index']) - mock_steps.assert_called_once_with(task, enabled=True) - - -class NodeCleaningStepsTestCase(db_base.DbTestCase): - def setUp(self): - super(NodeCleaningStepsTestCase, self).setUp() - - self.power_update = { - 'step': 'update_firmware', 'priority': 10, 'interface': 'power'} - self.deploy_update = { - 'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'} - self.deploy_erase = { - 'step': 'erase_disks', 'priority': 20, 'interface': 'deploy', - 'abortable': True} - # Automated cleaning should be executed in this order - self.clean_steps = [self.deploy_erase, self.power_update, - self.deploy_update] - # Manual clean step - self.deploy_raid = { - 'step': 'build_raid', 'priority': 0, 'interface': 'deploy', - 'argsinfo': {'arg1': {'description': 'desc1', 'required': True}, - 'arg2': {'description': 'desc2'}}} - - @mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps', - lambda self, task: []) - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') - @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') - def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps): - # Test getting cleaning steps, with one driver returning None, two - # conflicting priorities, and asserting they are ordered properly. 
- node = obj_utils.create_test_node( - self.context, driver='fake-hardware', - provision_state=states.CLEANING, - target_provision_state=states.AVAILABLE) - - mock_power_steps.return_value = [self.power_update] - mock_deploy_steps.return_value = [self.deploy_erase, - self.deploy_update] - - with task_manager.acquire( - self.context, node.uuid, shared=False) as task: - steps = conductor_utils._get_cleaning_steps(task, enabled=False) - - self.assertEqual(self.clean_steps, steps) - - @mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps', - lambda self, task: []) - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') - @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') - def test__get_cleaning_steps_unsorted(self, mock_power_steps, - mock_deploy_steps): - node = obj_utils.create_test_node( - self.context, driver='fake-hardware', - provision_state=states.CLEANING, - target_provision_state=states.MANAGEABLE) - - mock_deploy_steps.return_value = [self.deploy_raid, - self.deploy_update, - self.deploy_erase] - with task_manager.acquire( - self.context, node.uuid, shared=False) as task: - steps = conductor_utils._get_cleaning_steps(task, enabled=False, - sort=False) - self.assertEqual(mock_deploy_steps.return_value, steps) - - @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps') - @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps') - def test__get_cleaning_steps_only_enabled(self, mock_power_steps, - mock_deploy_steps): - # Test getting only cleaning steps, with one driver returning None, two - # conflicting priorities, and asserting they are ordered properly. - # Should discard zero-priority (manual) clean step - node = obj_utils.create_test_node( - self.context, driver='fake-hardware', - provision_state=states.CLEANING, - target_provision_state=states.AVAILABLE) - - mock_power_steps.return_value = [self.power_update] - mock_deploy_steps.return_value = [self.deploy_erase, - self.deploy_update, - self.deploy_raid] - - with task_manager.acquire( - self.context, node.uuid, shared=True) as task: - steps = conductor_utils._get_cleaning_steps(task, enabled=True) - - self.assertEqual(self.clean_steps, steps) - - @mock.patch.object(conductor_utils, '_validate_user_clean_steps') - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test_set_node_cleaning_steps_automated(self, mock_steps, - mock_validate_user_steps): - mock_steps.return_value = self.clean_steps - - node = obj_utils.create_test_node( - self.context, driver='fake-hardware', - provision_state=states.CLEANING, - target_provision_state=states.AVAILABLE, - last_error=None, - clean_step=None) - - with task_manager.acquire( - self.context, node.uuid, shared=False) as task: - conductor_utils.set_node_cleaning_steps(task) - node.refresh() - self.assertEqual(self.clean_steps, - node.driver_internal_info['clean_steps']) - self.assertEqual({}, node.clean_step) - mock_steps.assert_called_once_with(task, enabled=True) - self.assertFalse(mock_validate_user_steps.called) - - @mock.patch.object(conductor_utils, '_validate_user_clean_steps') - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test_set_node_cleaning_steps_manual(self, mock_steps, - mock_validate_user_steps): - clean_steps = [self.deploy_raid] - mock_steps.return_value = self.clean_steps - mock_validate_user_steps.return_value = clean_steps - - node = obj_utils.create_test_node( - self.context, driver='fake-hardware', - provision_state=states.CLEANING, - 
target_provision_state=states.MANAGEABLE, - last_error=None, - clean_step=None, - driver_internal_info={'clean_steps': clean_steps}) - - with task_manager.acquire( - self.context, node.uuid, shared=False) as task: - conductor_utils.set_node_cleaning_steps(task) - node.refresh() - self.assertEqual(clean_steps, - node.driver_internal_info['clean_steps']) - self.assertEqual({}, node.clean_step) - self.assertFalse(mock_steps.called) - mock_validate_user_steps.assert_called_once_with(task, clean_steps) - - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.return_value = self.clean_steps - - user_steps = [{'step': 'update_firmware', 'interface': 'power'}, - {'step': 'erase_disks', 'interface': 'deploy'}] - - with task_manager.acquire(self.context, node.uuid) as task: - result = conductor_utils._validate_user_clean_steps(task, - user_steps) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - expected = [{'step': 'update_firmware', 'interface': 'power', - 'priority': 10, 'abortable': False}, - {'step': 'erase_disks', 'interface': 'deploy', - 'priority': 20, 'abortable': True}] - self.assertEqual(expected, result) - - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps_no_steps(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.return_value = self.clean_steps - - with task_manager.acquire(self.context, node.uuid) as task: - conductor_utils._validate_user_clean_steps(task, []) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps_get_steps_exception(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.side_effect = exception.NodeCleaningFailure('bad') - - with task_manager.acquire(self.context, node.uuid) as task: - self.assertRaises(exception.NodeCleaningFailure, - conductor_utils._validate_user_clean_steps, - task, []) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps_not_supported(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.return_value = [self.power_update, self.deploy_raid] - user_steps = [{'step': 'update_firmware', 'interface': 'power'}, - {'step': 'bad_step', 'interface': 'deploy'}] - - with task_manager.acquire(self.context, node.uuid) as task: - self.assertRaisesRegex(exception.InvalidParameterValue, - "does not support.*bad_step", - conductor_utils._validate_user_clean_steps, - task, user_steps) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - @mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps_invalid_arg(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.return_value = self.clean_steps - user_steps = [{'step': 'update_firmware', 'interface': 'power', - 'args': {'arg1': 'val1', 'arg2': 'val2'}}, - {'step': 'erase_disks', 'interface': 'deploy'}] - - with task_manager.acquire(self.context, node.uuid) as task: - self.assertRaisesRegex(exception.InvalidParameterValue, - "update_firmware.*invalid.*arg1", - conductor_utils._validate_user_clean_steps, - task, user_steps) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - 
@mock.patch.object(conductor_utils, '_get_cleaning_steps') - def test__validate_user_clean_steps_missing_required_arg(self, mock_steps): - node = obj_utils.create_test_node(self.context) - mock_steps.return_value = [self.power_update, self.deploy_raid] - user_steps = [{'step': 'update_firmware', 'interface': 'power'}, - {'step': 'build_raid', 'interface': 'deploy'}] - - with task_manager.acquire(self.context, node.uuid) as task: - self.assertRaisesRegex(exception.InvalidParameterValue, - "build_raid.*missing.*arg1", - conductor_utils._validate_user_clean_steps, - task, user_steps) - mock_steps.assert_called_once_with(task, enabled=False, sort=False) - - class ErrorHandlersTestCase(tests_base.TestCase): def setUp(self): super(ErrorHandlersTestCase, self).setUp() @@ -2011,6 +1689,117 @@ mock_resume.assert_called_once_with( task, 'deploying', 'continue_node_deploy') + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) + @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on') + @mock.patch.object(conductor_utils, 'node_set_boot_device', + autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_power_on_node_if_needed_true( + self, power_action_mock, boot_device_mock, + need_power_on_mock, get_power_state_mock, time_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + need_power_on_mock.return_value = True + get_power_state_mock.return_value = states.POWER_OFF + power_state = conductor_utils.power_on_node_if_needed(task) + self.assertEqual(power_state, states.POWER_OFF) + boot_device_mock.assert_called_once_with( + task, boot_devices.BIOS, persistent=False) + power_action_mock.assert_called_once_with(task, states.POWER_ON) + + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) + @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on') + @mock.patch.object(conductor_utils, 'node_set_boot_device', + autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_power_on_node_if_needed_false_power_on( + self, power_action_mock, boot_device_mock, + need_power_on_mock, get_power_state_mock, time_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + need_power_on_mock.return_value = True + get_power_state_mock.return_value = states.POWER_ON + power_state = conductor_utils.power_on_node_if_needed(task) + self.assertIsNone(power_state) + self.assertEqual(0, boot_device_mock.call_count) + self.assertEqual(0, power_action_mock.call_count) + + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) + @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on') + @mock.patch.object(conductor_utils, 'node_set_boot_device', + autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_power_on_node_if_needed_false_no_need( + self, power_action_mock, boot_device_mock, + need_power_on_mock, get_power_state_mock, time_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + need_power_on_mock.return_value = False + get_power_state_mock.return_value = states.POWER_OFF + power_state = conductor_utils.power_on_node_if_needed(task) + self.assertIsNone(power_state) + self.assertEqual(0, boot_device_mock.call_count) + self.assertEqual(0, 
power_action_mock.call_count) + + @mock.patch.object(neutron, 'get_client', autospec=True) + @mock.patch.object(neutron, 'wait_for_host_agent', autospec=True) + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) + @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on') + @mock.patch.object(conductor_utils, 'node_set_boot_device', + autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_power_on_node_if_needed_with_smart_nic_port( + self, power_action_mock, boot_device_mock, + need_power_on_mock, get_power_state_mock, time_mock, + wait_agent_mock, get_client_mock): + llc = {'port_id': 'rep0-0', 'hostname': 'host1'} + port = obj_utils.get_test_port(self.context, node_id=self.node.id, + is_smartnic=True, + local_link_connection=llc) + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + task.ports = [port] + need_power_on_mock.return_value = True + get_power_state_mock.return_value = states.POWER_OFF + power_state = conductor_utils.power_on_node_if_needed(task) + self.assertEqual(power_state, states.POWER_OFF) + boot_device_mock.assert_called_once_with( + task, boot_devices.BIOS, persistent=False) + power_action_mock.assert_called_once_with(task, states.POWER_ON) + get_client_mock.assert_called_once_with(context=self.context) + wait_agent_mock.assert_called_once_with(mock.ANY, 'host1', + target_state='down') + + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_restore_power_state_if_needed_true( + self, power_action_mock, time_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + power_state = states.POWER_OFF + conductor_utils.restore_power_state_if_needed(task, power_state) + power_action_mock.assert_called_once_with(task, power_state) + + @mock.patch.object(time, 'sleep', autospec=True) + @mock.patch.object(conductor_utils, 'node_power_action', + autospec=True) + def test_restore_power_state_if_needed_false( + self, power_action_mock, time_mock): + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + power_state = None + conductor_utils.restore_power_state_if_needed(task, power_state) + self.assertEqual(0, power_action_mock.call_count) + class ValidateInstanceInfoTraitsTestCase(tests_base.TestCase): @@ -2027,14 +1816,14 @@ self.node.instance_info['traits'] = [] conductor_utils.validate_instance_info_traits(self.node) - def test_parse_instance_info_traits_invalid_type(self): + def test_validate_instance_info_traits_invalid_type(self): self.node.instance_info['traits'] = 'not-a-list' self.assertRaisesRegex(exception.InvalidParameterValue, 'Error parsing traits from Node', conductor_utils.validate_instance_info_traits, self.node) - def test_parse_instance_info_traits_invalid_trait_type(self): + def test_validate_instance_info_traits_invalid_trait_type(self): self.node.instance_info['traits'] = ['trait1', {}] self.assertRaisesRegex(exception.InvalidParameterValue, 'Error parsing traits from Node', @@ -2051,3 +1840,53 @@ 'Cannot specify instance traits that are not', conductor_utils.validate_instance_info_traits, self.node) + + +@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True) +class FastTrackTestCase(db_base.DbTestCase): + + def setUp(self): + super(FastTrackTestCase, self).setUp() + self.node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + 
uuid=uuidutils.generate_uuid(), + driver_internal_info={ + 'agent_last_heartbeat': str(timeutils.utcnow().isoformat())}) + self.config(fast_track=True, group='deploy') + + def test_is_fast_track(self, mock_get_power): + mock_get_power.return_value = states.POWER_ON + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertTrue(conductor_utils.is_fast_track(task)) + + def test_is_fast_track_config_false(self, mock_get_power): + self.config(fast_track=False, group='deploy') + mock_get_power.return_value = states.POWER_ON + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertFalse(conductor_utils.is_fast_track(task)) + + def test_is_fast_track_power_off_false(self, mock_get_power): + mock_get_power.return_value = states.POWER_OFF + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertFalse(conductor_utils.is_fast_track(task)) + + def test_is_fast_track_no_heartbeat(self, mock_get_power): + mock_get_power.return_value = states.POWER_ON + i_info = self.node.driver_internal_info + i_info.pop('agent_last_heartbeat') + self.node.driver_internal_info = i_info + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertFalse(conductor_utils.is_fast_track(task)) + + def test_is_fast_track_error_blocks(self, mock_get_power): + mock_get_power.return_value = states.POWER_ON + self.node.last_error = "bad things happened" + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.assertFalse(conductor_utils.is_fast_track(task)) diff -Nru ironic-12.0.0/ironic/tests/unit/db/sqlalchemy/test_migrations.py ironic-12.1.0/ironic/tests/unit/db/sqlalchemy/test_migrations.py --- ironic-12.0.0/ironic/tests/unit/db/sqlalchemy/test_migrations.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/sqlalchemy/test_migrations.py 2019-03-21 20:07:40.000000000 +0000 @@ -786,6 +786,184 @@ self.assertFalse(node['protected']) self.assertIsNone(node['protected_reason']) + def _check_f190f9d00a11(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes.c] + self.assertIn('owner', col_names) + + def _pre_upgrade_dd67b91a1981(self, engine): + data = { + 'node_uuid': uuidutils.generate_uuid(), + } + + nodes = db_utils.get_table(engine, 'nodes') + nodes.insert().execute({'uuid': data['node_uuid']}) + + return data + + def _check_dd67b91a1981(self, engine, data): + nodes = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes.c] + self.assertIn('allocation_id', col_names) + + node = nodes.select( + nodes.c.uuid == data['node_uuid']).execute().first() + self.assertIsNone(node['allocation_id']) + + allocations = db_utils.get_table(engine, 'allocations') + col_names = [column.name for column in allocations.c] + expected_names = ['id', 'uuid', 'node_id', 'created_at', 'updated_at', + 'name', 'version', 'state', 'last_error', + 'resource_class', 'traits', 'candidate_nodes', + 'extra', 'conductor_affinity'] + self.assertEqual(sorted(expected_names), sorted(col_names)) + self.assertIsInstance(allocations.c.created_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(allocations.c.updated_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(allocations.c.id.type, + sqlalchemy.types.Integer) + self.assertIsInstance(allocations.c.uuid.type, + sqlalchemy.types.String) + 
self.assertIsInstance(allocations.c.node_id.type, + sqlalchemy.types.Integer) + self.assertIsInstance(allocations.c.state.type, + sqlalchemy.types.String) + self.assertIsInstance(allocations.c.last_error.type, + sqlalchemy.types.TEXT) + self.assertIsInstance(allocations.c.resource_class.type, + sqlalchemy.types.String) + self.assertIsInstance(allocations.c.traits.type, + sqlalchemy.types.TEXT) + self.assertIsInstance(allocations.c.candidate_nodes.type, + sqlalchemy.types.TEXT) + self.assertIsInstance(allocations.c.extra.type, + sqlalchemy.types.TEXT) + self.assertIsInstance(allocations.c.conductor_affinity.type, + sqlalchemy.types.Integer) + + def _check_9cbeefa3763f(self, engine, data): + ports = db_utils.get_table(engine, 'ports') + col_names = [column.name for column in ports.c] + self.assertIn('is_smartnic', col_names) + # in some backends bool type is integer + self.assertIsInstance(ports.c.is_smartnic.type, + (sqlalchemy.types.Boolean, + sqlalchemy.types.Integer)) + + def _check_28c44432c9c3(self, engine, data): + nodes_tbl = db_utils.get_table(engine, 'nodes') + col_names = [column.name for column in nodes_tbl.c] + self.assertIn('description', col_names) + self.assertIsInstance(nodes_tbl.c.description.type, + sqlalchemy.types.TEXT) + + def _check_2aac7e0872f6(self, engine, data): + # Deploy templates. + deploy_templates = db_utils.get_table(engine, 'deploy_templates') + col_names = [column.name for column in deploy_templates.c] + expected = ['created_at', 'updated_at', 'version', + 'id', 'uuid', 'name'] + self.assertEqual(sorted(expected), sorted(col_names)) + self.assertIsInstance(deploy_templates.c.created_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(deploy_templates.c.updated_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(deploy_templates.c.version.type, + sqlalchemy.types.String) + self.assertIsInstance(deploy_templates.c.id.type, + sqlalchemy.types.Integer) + self.assertIsInstance(deploy_templates.c.uuid.type, + sqlalchemy.types.String) + self.assertIsInstance(deploy_templates.c.name.type, + sqlalchemy.types.String) + + # Insert a deploy template. + uuid = uuidutils.generate_uuid() + name = 'CUSTOM_DT1' + template = {'name': name, 'uuid': uuid} + deploy_templates.insert().execute(template) + # Query by UUID. + result = deploy_templates.select( + deploy_templates.c.uuid == uuid).execute().first() + template_id = result['id'] + self.assertEqual(name, result['name']) + # Query by name. + result = deploy_templates.select( + deploy_templates.c.name == name).execute().first() + self.assertEqual(template_id, result['id']) + # Query by ID. + result = deploy_templates.select( + deploy_templates.c.id == template_id).execute().first() + self.assertEqual(uuid, result['uuid']) + self.assertEqual(name, result['name']) + # UUID is unique. + template = {'name': 'CUSTOM_DT2', 'uuid': uuid} + self.assertRaises(db_exc.DBDuplicateEntry, + deploy_templates.insert().execute, template) + # Name is unique. + template = {'name': name, 'uuid': uuidutils.generate_uuid()} + self.assertRaises(db_exc.DBDuplicateEntry, + deploy_templates.insert().execute, template) + + # Deploy template steps. 
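+        # Each template owns zero or more step rows, linked to it via the
+        # deploy_template_id column; check the schema and a full round-trip.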
+ deploy_template_steps = db_utils.get_table(engine, + 'deploy_template_steps') + col_names = [column.name for column in deploy_template_steps.c] + expected = ['created_at', 'updated_at', 'version', + 'id', 'deploy_template_id', 'interface', 'step', 'args', + 'priority'] + self.assertEqual(sorted(expected), sorted(col_names)) + + self.assertIsInstance(deploy_template_steps.c.created_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(deploy_template_steps.c.updated_at.type, + sqlalchemy.types.DateTime) + self.assertIsInstance(deploy_template_steps.c.version.type, + sqlalchemy.types.String) + self.assertIsInstance(deploy_template_steps.c.id.type, + sqlalchemy.types.Integer) + self.assertIsInstance(deploy_template_steps.c.deploy_template_id.type, + sqlalchemy.types.Integer) + self.assertIsInstance(deploy_template_steps.c.interface.type, + sqlalchemy.types.String) + self.assertIsInstance(deploy_template_steps.c.step.type, + sqlalchemy.types.String) + self.assertIsInstance(deploy_template_steps.c.args.type, + sqlalchemy.types.Text) + self.assertIsInstance(deploy_template_steps.c.priority.type, + sqlalchemy.types.Integer) + + # Insert a deploy template step. + interface = 'raid' + step_name = 'create_configuration' + args = '{"logical_disks": []}' + priority = 10 + step = {'deploy_template_id': template_id, 'interface': interface, + 'step': step_name, 'args': args, 'priority': priority} + deploy_template_steps.insert().execute(step) + # Query by deploy template ID. + result = deploy_template_steps.select( + deploy_template_steps.c.deploy_template_id == + template_id).execute().first() + self.assertEqual(template_id, result['deploy_template_id']) + self.assertEqual(interface, result['interface']) + self.assertEqual(step_name, result['step']) + self.assertEqual(args, result['args']) + self.assertEqual(priority, result['priority']) + # Insert another step for the same template. + deploy_template_steps.insert().execute(step) + + def _check_1e15e7122cc9(self, engine, data): + # Deploy template 'extra' field. + deploy_templates = db_utils.get_table(engine, 'deploy_templates') + col_names = [column.name for column in deploy_templates.c] + expected = ['created_at', 'updated_at', 'version', + 'id', 'uuid', 'name', 'extra'] + self.assertEqual(sorted(expected), sorted(col_names)) + self.assertIsInstance(deploy_templates.c.extra.type, + sqlalchemy.types.TEXT) + def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_api.upgrade('head') diff -Nru ironic-12.0.0/ironic/tests/unit/db/test_allocations.py ironic-12.1.0/ironic/tests/unit/db/test_allocations.py --- ironic-12.0.0/ironic/tests/unit/db/test_allocations.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/test_allocations.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,281 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+"""Tests for manipulating allocations via the DB API"""
+
+from oslo_utils import uuidutils
+
+from ironic.common import exception
+from ironic.db import api as db_api
+from ironic.tests.unit.db import base
+from ironic.tests.unit.db import utils as db_utils
+
+
+class AllocationsTestCase(base.DbTestCase):
+
+    def setUp(self):
+        super(AllocationsTestCase, self).setUp()
+        self.node = db_utils.create_test_node()
+        self.allocation = db_utils.create_test_allocation(name='host1')
+
+    def test_create(self):
+        dbapi = db_api.get_instance()
+        allocation = dbapi.create_allocation({'resource_class': 'bm'})
+        self.assertIsNotNone(allocation.uuid)
+        self.assertEqual('allocating', allocation.state)
+
+    def _create_test_allocation_range(self, count, start_idx=0, **kw):
+        """Create the specified number of test allocation entries in the DB.
+
+        Uses the create_test_allocation method and returns the UUIDs of the
+        created allocations.
+
+        :param count: Specifies the number of allocations to be created
+        :param start_idx: Starting index for the generated allocation names
+        :returns: List of UUIDs of the created allocations
+
+        """
+        return [db_utils.create_test_allocation(uuid=uuidutils.generate_uuid(),
+                                                name='allocation' + str(i),
+                                                **kw).uuid
+                for i in range(start_idx, count + start_idx)]
+
+    def test_get_allocation_by_id(self):
+        res = self.dbapi.get_allocation_by_id(self.allocation.id)
+        self.assertEqual(self.allocation.uuid, res.uuid)
+
+    def test_get_allocation_by_id_that_does_not_exist(self):
+        self.assertRaises(exception.AllocationNotFound,
+                          self.dbapi.get_allocation_by_id, 99)
+
+    def test_get_allocation_by_uuid(self):
+        res = self.dbapi.get_allocation_by_uuid(self.allocation.uuid)
+        self.assertEqual(self.allocation.id, res.id)
+
+    def test_get_allocation_by_uuid_that_does_not_exist(self):
+        self.assertRaises(exception.AllocationNotFound,
+                          self.dbapi.get_allocation_by_uuid,
+                          'EEEEEEEE-EEEE-EEEE-EEEE-EEEEEEEEEEEE')
+
+    def test_get_allocation_by_name(self):
+        res = self.dbapi.get_allocation_by_name(self.allocation.name)
+        self.assertEqual(self.allocation.id, res.id)
+
+    def test_get_allocation_by_name_that_does_not_exist(self):
+        self.assertRaises(exception.AllocationNotFound,
+                          self.dbapi.get_allocation_by_name, 'testfail')
+
+    def test_get_allocation_list(self):
+        uuids = self._create_test_allocation_range(6)
+        # Also add the uuid for the allocation created in setUp()
+        uuids.append(self.allocation.uuid)
+
+        res = self.dbapi.get_allocation_list()
+        self.assertEqual(set(uuids), {r.uuid for r in res})
+
+    def test_get_allocation_list_sorted(self):
+        uuids = self._create_test_allocation_range(6)
+        # Also add the uuid for the allocation created in setUp()
+        uuids.append(self.allocation.uuid)
+
+        res = self.dbapi.get_allocation_list(sort_key='uuid')
+        res_uuids = [r.uuid for r in res]
+        self.assertEqual(sorted(uuids), res_uuids)
+
+    def test_get_allocation_list_filter_by_state(self):
+        self._create_test_allocation_range(6, state='error')
+
+        res = self.dbapi.get_allocation_list(filters={'state': 'allocating'})
+        self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+        res = self.dbapi.get_allocation_list(filters={'state': 'error'})
+        self.assertEqual(6, len(res))
+
+    def test_get_allocation_list_filter_by_node(self):
+        self._create_test_allocation_range(6)
+        self.dbapi.update_allocation(self.allocation.id,
+                                     {'node_id': self.node.id})
+
+        res = self.dbapi.get_allocation_list(
+            filters={'node_uuid': self.node.uuid})
+        self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+    def test_get_allocation_list_filter_by_rsc(self):
+        self._create_test_allocation_range(6)
+        self.dbapi.update_allocation(self.allocation.id,
+                                     {'resource_class': 'very-large'})
+
+        res = self.dbapi.get_allocation_list(
+            filters={'resource_class': 'very-large'})
+        self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+    def test_get_allocation_list_filter_by_conductor_affinity(self):
+        db_utils.create_test_conductor(id=1, hostname='host1')
+        db_utils.create_test_conductor(id=2, hostname='host2')
+        in_host1 = self._create_test_allocation_range(2, conductor_affinity=1)
+        in_host2 = self._create_test_allocation_range(2, conductor_affinity=2,
+                                                      start_idx=2)
+
+        res = self.dbapi.get_allocation_list(
+            filters={'conductor_affinity': 1})
+        self.assertEqual(set(in_host1), {r.uuid for r in res})
+
+        res = self.dbapi.get_allocation_list(
+            filters={'conductor_affinity': 'host2'})
+        self.assertEqual(set(in_host2), {r.uuid for r in res})
+
+    def test_get_allocation_list_invalid_fields(self):
+        self.assertRaises(exception.InvalidParameterValue,
+                          self.dbapi.get_allocation_list, sort_key='foo')
+        self.assertRaises(ValueError,
+                          self.dbapi.get_allocation_list,
+                          filters={'foo': 42})
+
+    def test_destroy_allocation(self):
+        self.dbapi.destroy_allocation(self.allocation.id)
+        self.assertRaises(exception.AllocationNotFound,
+                          self.dbapi.get_allocation_by_id, self.allocation.id)
+
+    def test_destroy_allocation_with_node(self):
+        self.dbapi.update_node(self.node.id,
+                               {'allocation_id': self.allocation.id,
+                                'instance_uuid': uuidutils.generate_uuid(),
+                                'instance_info': {'traits': ['foo']}})
+        self.dbapi.destroy_allocation(self.allocation.id)
+        self.assertRaises(exception.AllocationNotFound,
+                          self.dbapi.get_allocation_by_id, self.allocation.id)
+        node = self.dbapi.get_node_by_id(self.node.id)
+        self.assertIsNone(node.allocation_id)
+        self.assertIsNone(node.instance_uuid)
+        # NOTE(dtantsur): currently we do not clean up instance_info contents
+        # on deallocation. It may be changed in the future.
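+        # Hence only allocation_id and instance_uuid were reset above: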
+ self.assertEqual(node.instance_info, {'traits': ['foo']}) + + def test_destroy_allocation_that_does_not_exist(self): + self.assertRaises(exception.AllocationNotFound, + self.dbapi.destroy_allocation, 99) + + def test_destroy_allocation_uuid(self): + self.dbapi.destroy_allocation(self.allocation.uuid) + + def test_update_allocation(self): + old_name = self.allocation.name + new_name = 'newname' + self.assertNotEqual(old_name, new_name) + res = self.dbapi.update_allocation(self.allocation.id, + {'name': new_name}) + self.assertEqual(new_name, res.name) + + def test_update_allocation_uuid(self): + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.update_allocation, self.allocation.id, + {'uuid': ''}) + + def test_update_allocation_not_found(self): + id_2 = 99 + self.assertNotEqual(self.allocation.id, id_2) + self.assertRaises(exception.AllocationNotFound, + self.dbapi.update_allocation, id_2, + {'name': 'newname'}) + + def test_update_allocation_duplicated_name(self): + name1 = self.allocation.name + allocation2 = db_utils.create_test_allocation( + uuid=uuidutils.generate_uuid(), name='name2') + self.assertRaises(exception.AllocationDuplicateName, + self.dbapi.update_allocation, allocation2.id, + {'name': name1}) + + def test_update_allocation_with_node_id(self): + res = self.dbapi.update_allocation(self.allocation.id, + {'name': 'newname', + 'traits': ['foo'], + 'node_id': self.node.id}) + self.assertEqual('newname', res.name) + self.assertEqual(['foo'], res.traits) + self.assertEqual(self.node.id, res.node_id) + + node = self.dbapi.get_node_by_id(self.node.id) + self.assertEqual(res.id, node.allocation_id) + self.assertEqual(res.uuid, node.instance_uuid) + self.assertEqual(['foo'], node.instance_info['traits']) + + def test_update_allocation_node_already_associated(self): + existing_uuid = uuidutils.generate_uuid() + self.dbapi.update_node(self.node.id, {'instance_uuid': existing_uuid}) + self.assertRaises(exception.NodeAssociated, + self.dbapi.update_allocation, self.allocation.id, + {'node_id': self.node.id, 'traits': ['foo']}) + + # Make sure we do not see partial updates + allocation = self.dbapi.get_allocation_by_id(self.allocation.id) + self.assertEqual([], allocation.traits) + self.assertIsNone(allocation.node_id) + + node = self.dbapi.get_node_by_id(self.node.id) + self.assertIsNone(node.allocation_id) + self.assertEqual(existing_uuid, node.instance_uuid) + self.assertNotIn('traits', node.instance_info) + + def test_update_allocation_associated_with_another_node(self): + db_utils.create_test_node(uuid=uuidutils.generate_uuid(), + allocation_id=self.allocation.id, + instance_uuid=self.allocation.uuid) + + self.assertRaises(exception.InstanceAssociated, + self.dbapi.update_allocation, self.allocation.id, + {'node_id': self.node.id, 'traits': ['foo']}) + + # Make sure we do not see partial updates + allocation = self.dbapi.get_allocation_by_id(self.allocation.id) + self.assertEqual([], allocation.traits) + self.assertIsNone(allocation.node_id) + + node = self.dbapi.get_node_by_id(self.node.id) + self.assertIsNone(node.allocation_id) + self.assertIsNone(node.instance_uuid) + self.assertNotIn('traits', node.instance_info) + + def test_take_over_success(self): + for i in range(2): + db_utils.create_test_conductor(id=i, hostname='host-%d' % i) + allocation = db_utils.create_test_allocation(conductor_affinity=0) + + self.assertTrue(self.dbapi.take_over_allocation( + allocation.id, old_conductor_id=0, new_conductor_id=1)) + allocation = 
self.dbapi.get_allocation_by_id(allocation.id) + self.assertEqual(1, allocation.conductor_affinity) + + def test_take_over_conflict(self): + for i in range(3): + db_utils.create_test_conductor(id=i, hostname='host-%d' % i) + allocation = db_utils.create_test_allocation(conductor_affinity=2) + + self.assertFalse(self.dbapi.take_over_allocation( + allocation.id, old_conductor_id=0, new_conductor_id=1)) + allocation = self.dbapi.get_allocation_by_id(allocation.id) + # The affinity was not changed + self.assertEqual(2, allocation.conductor_affinity) + + def test_take_over_allocation_not_found(self): + self.assertRaises(exception.AllocationNotFound, + self.dbapi.take_over_allocation, 999, 0, 1) + + def test_create_allocation_duplicated_name(self): + self.assertRaises(exception.AllocationDuplicateName, + db_utils.create_test_allocation, + uuid=uuidutils.generate_uuid(), + name=self.allocation.name) + + def test_create_allocation_duplicated_uuid(self): + self.assertRaises(exception.AllocationAlreadyExists, + db_utils.create_test_allocation, + uuid=self.allocation.uuid) diff -Nru ironic-12.0.0/ironic/tests/unit/db/test_api.py ironic-12.1.0/ironic/tests/unit/db/test_api.py --- ironic-12.0.0/ironic/tests/unit/db/test_api.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/test_api.py 2019-03-21 20:07:40.000000000 +0000 @@ -56,6 +56,12 @@ self.assertIsNone(node.version) self.assertFalse(self.dbapi.check_versions()) + def test_check_versions_ignore_node(self): + node = utils.create_test_node(version=None) + node = self.dbapi.get_node_by_id(node.id) + self.assertIsNone(node.version) + self.assertTrue(self.dbapi.check_versions(ignore_models=['Node'])) + def test_check_versions_node_old(self): node = utils.create_test_node(version='1.0') node = self.dbapi.get_node_by_id(node.id) diff -Nru ironic-12.0.0/ironic/tests/unit/db/test_conductor.py ironic-12.1.0/ironic/tests/unit/db/test_conductor.py --- ironic-12.0.0/ironic/tests/unit/db/test_conductor.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/test_conductor.py 2019-03-21 20:07:40.000000000 +0000 @@ -334,6 +334,24 @@ # 61 seconds passed since last heartbeat, it's dead mock_utcnow.return_value = time_ + datetime.timedelta(seconds=61) self.assertEqual([c.hostname], self.dbapi.get_offline_conductors()) + self.assertEqual([c.id], self.dbapi.get_offline_conductors(field='id')) + + @mock.patch.object(timeutils, 'utcnow', autospec=True) + def test_get_online_conductors(self, mock_utcnow): + self.config(heartbeat_timeout=60, group='conductor') + time_ = datetime.datetime(2000, 1, 1, 0, 0) + + mock_utcnow.return_value = time_ + c = self._create_test_cdr() + + # Only 30 seconds passed since last heartbeat, it's still + # considered alive + mock_utcnow.return_value = time_ + datetime.timedelta(seconds=30) + self.assertEqual([c.hostname], self.dbapi.get_online_conductors()) + + # 61 seconds passed since last heartbeat, it's dead + mock_utcnow.return_value = time_ + datetime.timedelta(seconds=61) + self.assertEqual([], self.dbapi.get_online_conductors()) @mock.patch.object(timeutils, 'utcnow', autospec=True) def test_list_hardware_type_interfaces(self, mock_utcnow): diff -Nru ironic-12.0.0/ironic/tests/unit/db/test_deploy_templates.py ironic-12.1.0/ironic/tests/unit/db/test_deploy_templates.py --- ironic-12.0.0/ironic/tests/unit/db/test_deploy_templates.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/test_deploy_templates.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,210 @@ +# 
Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for manipulating DeployTemplates via the DB API""" + +from oslo_db import exception as db_exc +from oslo_utils import uuidutils +import six + +from ironic.common import exception +from ironic.tests.unit.db import base +from ironic.tests.unit.db import utils as db_utils + + +class DbDeployTemplateTestCase(base.DbTestCase): + + def setUp(self): + super(DbDeployTemplateTestCase, self).setUp() + self.template = db_utils.create_test_deploy_template() + + def test_create(self): + self.assertEqual('CUSTOM_DT1', self.template.name) + self.assertEqual(1, len(self.template.steps)) + step = self.template.steps[0] + self.assertEqual(self.template.id, step.deploy_template_id) + self.assertEqual('raid', step.interface) + self.assertEqual('create_configuration', step.step) + self.assertEqual({'logical_disks': []}, step.args) + self.assertEqual(10, step.priority) + self.assertEqual({}, self.template.extra) + + def test_create_no_steps(self): + uuid = uuidutils.generate_uuid() + template = db_utils.create_test_deploy_template( + uuid=uuid, name='CUSTOM_DT2', steps=[]) + self.assertEqual([], template.steps) + + def test_create_duplicate_uuid(self): + self.assertRaises(exception.DeployTemplateAlreadyExists, + db_utils.create_test_deploy_template, + uuid=self.template.uuid, name='CUSTOM_DT2') + + def test_create_duplicate_name(self): + uuid = uuidutils.generate_uuid() + self.assertRaises(exception.DeployTemplateDuplicateName, + db_utils.create_test_deploy_template, + uuid=uuid, name=self.template.name) + + def test_create_invalid_step_no_interface(self): + uuid = uuidutils.generate_uuid() + template = db_utils.get_test_deploy_template(uuid=uuid, + name='CUSTOM_DT2') + del template['steps'][0]['interface'] + self.assertRaises(db_exc.DBError, + self.dbapi.create_deploy_template, + template) + + def test_update_name(self): + values = {'name': 'CUSTOM_DT2'} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual('CUSTOM_DT2', template.name) + + def test_update_steps_replace(self): + step = {'interface': 'bios', 'step': 'apply_configuration', + 'args': {}, 'priority': 50} + values = {'steps': [step]} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual(1, len(template.steps)) + step = template.steps[0] + self.assertEqual('bios', step.interface) + self.assertEqual('apply_configuration', step.step) + self.assertEqual({}, step.args) + self.assertEqual(50, step.priority) + + def test_update_steps_add(self): + step = {'interface': 'bios', 'step': 'apply_configuration', + 'args': {}, 'priority': 50} + values = {'steps': [self.template.steps[0], step]} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual(2, len(template.steps)) + step0 = template.steps[0] + self.assertEqual(self.template.steps[0].id, step0.id) + self.assertEqual('raid', step0.interface) + self.assertEqual('create_configuration', step0.step) + self.assertEqual({'logical_disks': []}, step0.args) 
+ self.assertEqual(10, step0.priority) + step1 = template.steps[1] + self.assertNotEqual(self.template.steps[0].id, step1.id) + self.assertEqual('bios', step1.interface) + self.assertEqual('apply_configuration', step1.step) + self.assertEqual({}, step1.args) + self.assertEqual(50, step1.priority) + + def test_update_steps_replace_args(self): + step = self.template.steps[0] + step['args'] = {'foo': 'bar'} + values = {'steps': [step]} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual(1, len(template.steps)) + step = template.steps[0] + self.assertEqual({'foo': 'bar'}, step.args) + + def test_update_steps_remove_all(self): + values = {'steps': []} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual([], template.steps) + + def test_update_extra(self): + values = {'extra': {'foo': 'bar'}} + template = self.dbapi.update_deploy_template(self.template.id, values) + self.assertEqual({'foo': 'bar'}, template.extra) + + def test_update_duplicate_name(self): + uuid = uuidutils.generate_uuid() + template2 = db_utils.create_test_deploy_template(uuid=uuid, + name='CUSTOM_DT2') + values = {'name': self.template.name} + self.assertRaises(exception.DeployTemplateDuplicateName, + self.dbapi.update_deploy_template, template2.id, + values) + + def test_update_not_found(self): + self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.update_deploy_template, 123, {}) + + def test_update_uuid_not_allowed(self): + uuid = uuidutils.generate_uuid() + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.update_deploy_template, + self.template.id, {'uuid': uuid}) + + def test_destroy(self): + self.dbapi.destroy_deploy_template(self.template.id) + # Attempt to retrieve the template to verify it is gone. + self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.get_deploy_template_by_id, + self.template.id) + # Ensure that the destroy_deploy_template returns the + # expected exception. 
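+        # (A second destroy raises rather than passing silently.)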
+ self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.destroy_deploy_template, + self.template.id) + + def test_get_deploy_template_by_id(self): + res = self.dbapi.get_deploy_template_by_id(self.template.id) + self.assertEqual(self.template.id, res.id) + self.assertEqual(self.template.name, res.name) + self.assertEqual(1, len(res.steps)) + self.assertEqual(self.template.id, res.steps[0].deploy_template_id) + self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.get_deploy_template_by_id, -1) + + def test_get_deploy_template_by_uuid(self): + res = self.dbapi.get_deploy_template_by_uuid(self.template.uuid) + self.assertEqual(self.template.id, res.id) + invalid_uuid = uuidutils.generate_uuid() + self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.get_deploy_template_by_uuid, invalid_uuid) + + def test_get_deploy_template_by_name(self): + res = self.dbapi.get_deploy_template_by_name(self.template.name) + self.assertEqual(self.template.id, res.id) + self.assertRaises(exception.DeployTemplateNotFound, + self.dbapi.get_deploy_template_by_name, 'bogus') + + def _template_list_preparation(self): + uuids = [six.text_type(self.template.uuid)] + for i in range(1, 3): + template = db_utils.create_test_deploy_template( + uuid=uuidutils.generate_uuid(), + name='CUSTOM_DT%d' % (i + 1)) + uuids.append(six.text_type(template.uuid)) + return uuids + + def test_get_deploy_template_list(self): + uuids = self._template_list_preparation() + res = self.dbapi.get_deploy_template_list() + res_uuids = [r.uuid for r in res] + six.assertCountEqual(self, uuids, res_uuids) + + def test_get_deploy_template_list_sorted(self): + uuids = self._template_list_preparation() + res = self.dbapi.get_deploy_template_list(sort_key='uuid') + res_uuids = [r.uuid for r in res] + self.assertEqual(sorted(uuids), res_uuids) + + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.get_deploy_template_list, sort_key='foo') + + def test_get_deploy_template_list_by_names(self): + self._template_list_preparation() + names = ['CUSTOM_DT2', 'CUSTOM_DT3'] + res = self.dbapi.get_deploy_template_list_by_names(names=names) + res_names = [r.name for r in res] + six.assertCountEqual(self, names, res_names) + + def test_get_deploy_template_list_by_names_no_match(self): + self._template_list_preparation() + names = ['CUSTOM_FOO'] + res = self.dbapi.get_deploy_template_list_by_names(names=names) + self.assertEqual([], res) diff -Nru ironic-12.0.0/ironic/tests/unit/db/test_nodes.py ironic-12.1.0/ironic/tests/unit/db/test_nodes.py --- ironic-12.0.0/ironic/tests/unit/db/test_nodes.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/test_nodes.py 2019-03-21 20:07:40.000000000 +0000 @@ -273,6 +273,19 @@ states.INSPECTING}) self.assertEqual([node2.id], [r[0] for r in res]) + def test_get_nodeinfo_list_description(self): + node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + description='Hello') + node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + description='World!') + res = self.dbapi.get_nodeinfo_list( + filters={'description_contains': 'Hello'}) + self.assertEqual([node1.id], [r[0] for r in res]) + + res = self.dbapi.get_nodeinfo_list(filters={'description_contains': + 'World!'}) + self.assertEqual([node2.id], [r[0] for r in res]) + def test_get_node_list(self): uuids = [] for i in range(1, 6): @@ -302,7 +315,8 @@ maintenance=True, fault='boom', resource_class='foo', - conductor_group='group1') + conductor_group='group1', + power_state='power on') res = 
self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']}) self.assertEqual([node1.id], [r.id for r in res]) @@ -355,6 +369,18 @@ res = self.dbapi.get_node_list(filters={'uuid': node1.uuid}) self.assertEqual([node1.id], [r.id for r in res]) + uuids = [uuidutils.generate_uuid(), + node1.uuid, + uuidutils.generate_uuid()] + res = self.dbapi.get_node_list(filters={'uuid_in': uuids}) + self.assertEqual([node1.id], [r.id for r in res]) + + res = self.dbapi.get_node_list(filters={'with_power_state': True}) + self.assertEqual([node2.id], [r.id for r in res]) + + res = self.dbapi.get_node_list(filters={'with_power_state': False}) + self.assertEqual([node1.id], [r.id for r in res]) + # ensure unknown filters explode filters = {'bad_filter': 'foo'} self.assertRaisesRegex(ValueError, @@ -369,6 +395,19 @@ self.dbapi.get_node_list, filters=filters) + def test_get_node_list_description(self): + node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + description='Hello') + node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + description='World!') + res = self.dbapi.get_node_list(filters={ + 'description_contains': 'Hello'}) + self.assertEqual([node1.id], [r.id for r in res]) + + res = self.dbapi.get_node_list(filters={ + 'description_contains': 'World!'}) + self.assertEqual([node2.id], [r.id for r in res]) + def test_get_node_list_chassis_not_found(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.get_node_list, @@ -519,6 +558,15 @@ self.assertRaises(exception.NodeNotFound, self.dbapi.node_trait_exists, node.id, trait.trait) + def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self): + node = utils.create_test_node() + + allocation = utils.create_test_allocation(node_id=node.id) + + self.dbapi.destroy_node(node.uuid) + self.assertRaises(exception.AllocationNotFound, + self.dbapi.get_allocation_by_id, allocation.id) + def test_update_node(self): node = utils.create_test_node() @@ -805,3 +853,38 @@ 'Multiple nodes', self.dbapi.get_node_by_port_addresses, addresses) + + def test_check_node_list(self): + node1 = utils.create_test_node(uuid=uuidutils.generate_uuid()) + node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + name='node_2') + node3 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + name='node_3') + + mapping = self.dbapi.check_node_list([node1.uuid, node2.name, + node3.uuid]) + self.assertEqual({node1.uuid: node1.uuid, + node2.name: node2.uuid, + node3.uuid: node3.uuid}, + mapping) + + def test_check_node_list_non_existing(self): + node1 = utils.create_test_node(uuid=uuidutils.generate_uuid()) + node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(), + name='node_2') + uuid = uuidutils.generate_uuid() + + exc = self.assertRaises(exception.NodeNotFound, + self.dbapi.check_node_list, + [node1.uuid, uuid, 'could-be-a-name', + node2.name]) + self.assertIn(uuid, str(exc)) + self.assertIn('could-be-a-name', str(exc)) + + def test_check_node_list_impossible(self): + node1 = utils.create_test_node(uuid=uuidutils.generate_uuid()) + + exc = self.assertRaises(exception.NodeNotFound, + self.dbapi.check_node_list, + [node1.uuid, 'this/cannot/be/a/name']) + self.assertIn('this/cannot/be/a/name', str(exc)) diff -Nru ironic-12.0.0/ironic/tests/unit/db/utils.py ironic-12.1.0/ironic/tests/unit/db/utils.py --- ironic-12.0.0/ironic/tests/unit/db/utils.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/db/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -16,13 +16,16 @@ from oslo_utils import timeutils +from 
oslo_utils import uuidutils from ironic.common import states from ironic.db import api as db_api from ironic.drivers import base as drivers_base +from ironic.objects import allocation from ironic.objects import bios from ironic.objects import chassis from ironic.objects import conductor +from ironic.objects import deploy_template from ironic.objects import node from ironic.objects import port from ironic.objects import portgroup @@ -218,6 +221,9 @@ 'protected': kw.get('protected', False), 'protected_reason': kw.get('protected_reason', None), 'conductor': kw.get('conductor'), + 'owner': kw.get('owner', None), + 'allocation_id': kw.get('allocation_id'), + 'description': kw.get('description'), } for iface in drivers_base.ALL_INTERFACES: @@ -265,6 +271,7 @@ 'pxe_enabled': kw.get('pxe_enabled', True), 'internal_info': kw.get('internal_info', {"bar": "buzz"}), 'physical_network': kw.get('physical_network'), + 'is_smartnic': kw.get('is_smartnic', False), } @@ -587,3 +594,89 @@ {'name': 'hyperthread', 'value': 'enabled'}, {'name': 'numlock', 'value': 'off'} ] + + +def get_test_allocation(**kw): + return { + 'candidate_nodes': kw.get('candidate_nodes', []), + 'conductor_affinity': kw.get('conductor_affinity'), + 'created_at': kw.get('created_at'), + 'extra': kw.get('extra', {}), + 'id': kw.get('id', 42), + 'last_error': kw.get('last_error'), + 'name': kw.get('name'), + 'node_id': kw.get('node_id'), + 'resource_class': kw.get('resource_class', 'baremetal'), + 'state': kw.get('state', 'allocating'), + 'traits': kw.get('traits', []), + 'updated_at': kw.get('updated_at'), + 'uuid': kw.get('uuid', uuidutils.generate_uuid()), + 'version': kw.get('version', allocation.Allocation.VERSION), + } + + +def create_test_allocation(**kw): + allocation = get_test_allocation(**kw) + if 'id' not in kw: + del allocation['id'] + dbapi = db_api.get_instance() + return dbapi.create_allocation(allocation) + + +def get_test_deploy_template(**kw): + default_uuid = uuidutils.generate_uuid() + return { + 'version': kw.get('version', deploy_template.DeployTemplate.VERSION), + 'created_at': kw.get('created_at'), + 'updated_at': kw.get('updated_at'), + 'id': kw.get('id', 234), + 'name': kw.get('name', u'CUSTOM_DT1'), + 'uuid': kw.get('uuid', default_uuid), + 'steps': kw.get('steps', [get_test_deploy_template_step( + deploy_template_id=kw.get('id', 234))]), + 'extra': kw.get('extra', {}), + } + + +def get_test_deploy_template_step(**kw): + return { + 'created_at': kw.get('created_at'), + 'updated_at': kw.get('updated_at'), + 'id': kw.get('id', 345), + 'deploy_template_id': kw.get('deploy_template_id', 234), + 'interface': kw.get('interface', 'raid'), + 'step': kw.get('step', 'create_configuration'), + 'args': kw.get('args', {'logical_disks': []}), + 'priority': kw.get('priority', 10), + } + + +def create_test_deploy_template(**kw): + """Create a deployment template in the DB and return DeployTemplate model. + + :param kw: kwargs with overriding values for the deploy template. + :returns: Test DeployTemplate DB object. + """ + template = get_test_deploy_template(**kw) + dbapi = db_api.get_instance() + # Let DB generate an ID if one isn't specified explicitly. 
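+    # The same applies to the canned step IDs: unless the caller passed
+    # explicit steps, strip them so the DB can assign fresh ones.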
+ if 'id' not in kw: + del template['id'] + if 'steps' not in kw: + for step in template['steps']: + del step['id'] + del step['deploy_template_id'] + else: + for kw_step, template_step in zip(kw['steps'], template['steps']): + if 'id' not in kw_step: + del template_step['id'] + return dbapi.create_deploy_template(template) + + +def get_test_ibmc_info(): + return { + "ibmc_address": "https://example.com", + "ibmc_username": "username", + "ibmc_password": "password", + "verify_ca": False, + } diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ansible/test_deploy.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ansible/test_deploy.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ansible/test_deploy.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ansible/test_deploy.py 2019-03-21 20:07:40.000000000 +0000 @@ -10,6 +10,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json + from ironic_lib import utils as irlib_utils import mock from oslo_concurrency import processutils @@ -18,11 +20,13 @@ from ironic.common import exception from ironic.common import states from ironic.common import utils as com_utils +from ironic.conductor import steps from ironic.conductor import task_manager from ironic.conductor import utils from ironic.drivers.modules.ansible import deploy as ansible_deploy from ironic.drivers.modules import deploy_utils from ironic.drivers.modules import fake +from ironic.drivers.modules.network import flat as flat_network from ironic.drivers.modules import pxe from ironic.tests.unit.db import base as db_base from ironic.tests.unit.objects import utils as object_utils @@ -161,6 +165,62 @@ '/path/to/playbooks/inventory', '-e', '{"ironic": {"foo": "bar"}}', '--private-key=/path/to/key', '-vvvv') + @mock.patch.object(com_utils, 'execute', return_value=('out', 'err'), + autospec=True) + def test__run_playbook_ansible_interpreter_python3(self, execute_mock): + self.config(group='ansible', playbooks_path='/path/to/playbooks') + self.config(group='ansible', config_file_path='/path/to/config') + self.config(group='ansible', verbosity=3) + self.config(group='ansible', + default_python_interpreter='/usr/bin/python3') + self.config(group='ansible', ansible_extra_args='--timeout=100') + extra_vars = {'foo': 'bar'} + + ansible_deploy._run_playbook(self.node, 'deploy', + extra_vars, '/path/to/key', + tags=['spam'], notags=['ham']) + + execute_mock.assert_called_once_with( + 'env', 'ANSIBLE_CONFIG=/path/to/config', + 'ansible-playbook', '/path/to/playbooks/deploy', '-i', + '/path/to/playbooks/inventory', '-e', + mock.ANY, '--tags=spam', '--skip-tags=ham', + '--private-key=/path/to/key', '-vvv', '--timeout=100') + + all_vars = execute_mock.call_args[0][7] + self.assertEqual({"ansible_python_interpreter": "/usr/bin/python3", + "ironic": {"foo": "bar"}}, + json.loads(all_vars)) + + @mock.patch.object(com_utils, 'execute', return_value=('out', 'err'), + autospec=True) + def test__run_playbook_ansible_interpreter_override(self, execute_mock): + self.config(group='ansible', playbooks_path='/path/to/playbooks') + self.config(group='ansible', config_file_path='/path/to/config') + self.config(group='ansible', verbosity=3) + self.config(group='ansible', + default_python_interpreter='/usr/bin/python3') + self.config(group='ansible', ansible_extra_args='--timeout=100') + self.node.driver_info['ansible_python_interpreter'] = ( + '/usr/bin/python4') + extra_vars = {'foo': 'bar'} + + 
ansible_deploy._run_playbook(self.node, 'deploy', + extra_vars, '/path/to/key', + tags=['spam'], notags=['ham']) + + execute_mock.assert_called_once_with( + 'env', 'ANSIBLE_CONFIG=/path/to/config', + 'ansible-playbook', '/path/to/playbooks/deploy', '-i', + '/path/to/playbooks/inventory', '-e', + mock.ANY, '--tags=spam', '--skip-tags=ham', + '--private-key=/path/to/key', '-vvv', '--timeout=100') + + all_vars = execute_mock.call_args[0][7] + self.assertEqual({"ansible_python_interpreter": "/usr/bin/python4", + "ironic": {"foo": "bar"}}, + json.loads(all_vars)) + @mock.patch.object(com_utils, 'execute', side_effect=processutils.ProcessExecutionError( description='VIKINGS!'), @@ -256,6 +316,16 @@ self.assertEqual(2, ansible_deploy._calculate_memory_req(task)) image_mock.assert_called_once_with(task.context, 'fake-image') + def test__get_python_interpreter(self): + self.config(group='ansible', + default_python_interpreter='/usr/bin/python3') + self.node.driver_info['ansible_python_interpreter'] = ( + '/usr/bin/python4') + + python_interpreter = ansible_deploy._get_python_interpreter(self.node) + + self.assertEqual('/usr/bin/python4', python_interpreter) + def test__get_configdrive_path(self): self.config(tempdir='/path/to/tmpdir') self.assertEqual('/path/to/tmpdir/spam.cndrive', @@ -665,7 +735,7 @@ self.assertFalse(log_mock.info.called) @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True) - @mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True) @mock.patch.object(utils, 'node_power_action', autospec=True) @mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options', return_value={'op1': 'test1'}, autospec=True) @@ -695,7 +765,7 @@ self.assertFalse(run_playbook_mock.called) self.assertEqual(states.CLEANWAIT, state) - @mock.patch.object(utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True) def test_prepare_cleaning_callback_no_steps(self, set_node_cleaning_steps): with task_manager.acquire(self.context, self.node.uuid) as task: @@ -776,7 +846,10 @@ prepare_vars_mock.return_value = _vars driver_internal_info = self.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = True + instance_info = self.node.instance_info + del instance_info['root_mb'] self.node.driver_internal_info = driver_internal_info + self.node.instance_info = instance_info self.node.extra = {'ham': 'spam'} self.node.save() @@ -792,11 +865,13 @@ run_playbook_mock.assert_called_once_with( task.node, 'test_pl', ironic_nodes, 'test_k') + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) @mock.patch.object(fake.FakePower, 'get_power_state', return_value=states.POWER_OFF) @mock.patch.object(utils, 'node_power_action', autospec=True) - def test_reboot_and_finish_deploy_force_reboot(self, power_action_mock, - get_pow_state_mock): + def test_reboot_and_finish_deploy_force_reboot( + self, power_action_mock, get_pow_state_mock, + power_on_node_if_needed_mock): d_info = self.node.driver_info d_info['deploy_forces_oob_reboot'] = True self.node.driver_info = d_info @@ -806,6 +881,7 @@ self.node.provision_state = states.DEPLOYING self.node.save() + power_on_node_if_needed_mock.return_value = None with task_manager.acquire(self.context, self.node.uuid) as task: with mock.patch.object(task.driver, 'network') as net_mock: self.driver.reboot_and_finish_deploy(task) @@ -819,11 +895,12 @@ power_action_mock.call_args_list) 
get_pow_state_mock.assert_not_called() + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True) @mock.patch.object(utils, 'node_power_action', autospec=True) - def test_reboot_and_finish_deploy_soft_poweroff_retry(self, - power_action_mock, - run_playbook_mock): + def test_reboot_and_finish_deploy_soft_poweroff_retry( + self, power_action_mock, run_playbook_mock, + power_on_node_if_needed_mock): self.config(group='ansible', post_deploy_get_power_state_retry_interval=0) self.config(group='ansible', @@ -834,6 +911,7 @@ self.node.driver_internal_info = di_info self.node.save() + power_on_node_if_needed_mock.return_value = None with task_manager.acquire(self.context, self.node.uuid) as task: with mock.patch.object(task.driver, 'network') as net_mock: with mock.patch.object(task.driver.power, @@ -916,3 +994,141 @@ task) task.driver.boot.clean_up_ramdisk.assert_called_once_with( task) + + @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True) + @mock.patch.object(utils, 'power_on_node_if_needed') + @mock.patch.object(utils, 'node_power_action', autospec=True) + def test_tear_down_with_smartnic_port( + self, power_mock, power_on_node_if_needed_mock, + restore_power_state_mock): + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + driver_return = self.driver.tear_down(task) + power_mock.assert_called_once_with(task, states.POWER_OFF) + self.assertEqual(driver_return, states.DELETED) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + @mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network', + autospec=True) + @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True) + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(utils, 'node_power_action', autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy', + autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + def test_prepare_with_smartnic_port( + self, pxe_prepare_ramdisk_mock, + build_instance_info_mock, build_options_mock, + power_action_mock, power_on_node_if_needed_mock, + restore_power_state_mock, net_mock): + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.node.provision_state = states.DEPLOYING + build_instance_info_mock.return_value = {'test': 'test'} + build_options_mock.return_value = {'op1': 'test1'} + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.driver.prepare(task) + power_action_mock.assert_called_once_with( + task, states.POWER_OFF) + build_instance_info_mock.assert_called_once_with(task) + build_options_mock.assert_called_once_with(task.node) + pxe_prepare_ramdisk_mock.assert_called_once_with( + task, {'op1': 'test1'}) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + self.node.refresh() + self.assertEqual('test', self.node.instance_info['test']) + + @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True) + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(ansible_deploy, '_run_playbook', autospec=True) + @mock.patch.object(steps, 
'set_node_cleaning_steps', autospec=True) + @mock.patch.object(utils, 'node_power_action', autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + def test_prepare_cleaning_with_smartnic_port( + self, prepare_ramdisk_mock, build_options_mock, power_action_mock, + set_node_cleaning_steps, run_playbook_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + step = {'priority': 10, 'interface': 'deploy', + 'step': 'erase_devices', 'tags': ['clean']} + driver_internal_info = dict(DRIVER_INTERNAL_INFO) + driver_internal_info['clean_steps'] = [step] + self.node.driver_internal_info = driver_internal_info + self.node.save() + + with task_manager.acquire(self.context, self.node.uuid) as task: + task.driver.network.add_cleaning_network = mock.Mock() + build_options_mock.return_value = {'op1': 'test1'} + power_on_node_if_needed_mock.return_value = states.POWER_OFF + state = self.driver.prepare_cleaning(task) + set_node_cleaning_steps.assert_called_once_with(task) + task.driver.network.add_cleaning_network.assert_called_once_with( + task) + build_options_mock.assert_called_once_with(task.node) + prepare_ramdisk_mock.assert_called_once_with( + task, {'op1': 'test1'}) + power_action_mock.assert_called_once_with(task, states.REBOOT) + self.assertFalse(run_playbook_mock.called) + self.assertEqual(states.CLEANWAIT, state) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True) + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(utils, 'node_power_action', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk') + def test_tear_down_cleaning_with_smartnic_port( + self, clean_ramdisk_mock, power_action_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + task.driver.network.remove_cleaning_network = mock.Mock() + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.driver.tear_down_cleaning(task) + power_action_mock.assert_called_once_with(task, states.POWER_OFF) + clean_ramdisk_mock.assert_called_once_with(task) + (task.driver.network.remove_cleaning_network + .assert_called_once_with(task)) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + @mock.patch.object(flat_network.FlatNetwork, 'remove_provisioning_network', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'configure_tenant_networks', + autospec=True) + @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True) + @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', + return_value=states.POWER_OFF) + @mock.patch.object(utils, 'node_power_action', autospec=True) + def test_reboot_and_finish_deploy_with_smartnic_port( + self, power_action_mock, get_pow_state_mock, + power_on_node_if_needed_mock, restore_power_state_mock, + configure_tenant_networks_mock, remove_provisioning_network_mock): + d_info = self.node.driver_info + d_info['deploy_forces_oob_reboot'] = True + self.node.driver_info = d_info + self.node.save() + self.config(group='ansible', + post_deploy_get_power_state_retry_interval=0) + self.node.provision_state = states.DEPLOYING + self.node.save() + 
power_on_node_if_needed_mock.return_value = states.POWER_OFF + with task_manager.acquire(self.context, self.node.uuid) as task: + self.driver.reboot_and_finish_deploy(task) + expected_power_calls = [((task, states.POWER_OFF),), + ((task, states.POWER_ON),)] + self.assertEqual( + expected_power_calls, power_action_mock.call_args_list) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + get_pow_state_mock.assert_not_called() diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_common.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_common.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_common.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_common.py 2019-03-21 20:07:40.000000000 +0000 @@ -40,31 +40,6 @@ self.assertEqual(INFO_DICT['drac_username'], info['drac_username']) self.assertEqual(INFO_DICT['drac_password'], info['drac_password']) - @mock.patch.object(drac_common.LOG, 'warning') - def test_parse_driver_info_drac_host(self, mock_log): - driver_info = db_utils.get_test_drac_info() - driver_info['drac_host'] = '4.5.6.7' - driver_info.pop('drac_address') - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=driver_info) - info = drac_common.parse_driver_info(node) - self.assertEqual('4.5.6.7', info['drac_address']) - self.assertNotIn('drac_host', info) - self.assertTrue(mock_log.called) - - @mock.patch.object(drac_common.LOG, 'warning') - def test_parse_driver_info_drac_host_and_drac_address(self, mock_log): - driver_info = db_utils.get_test_drac_info() - driver_info['drac_host'] = '4.5.6.7' - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=driver_info) - info = drac_common.parse_driver_info(node) - self.assertEqual('4.5.6.7', driver_info['drac_host']) - self.assertEqual(driver_info['drac_address'], info['drac_address']) - self.assertTrue(mock_log.called) - def test_parse_driver_info_missing_host(self): node = obj_utils.create_test_node(self.context, driver='idrac', diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_inspect.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_inspect.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_inspect.py 2019-03-21 20:07:40.000000000 +0000 @@ -159,7 +159,8 @@ 'memory_mb': 32768, 'local_gb': 1116, 'cpus': 18, - 'cpu_arch': 'x86_64'} + 'cpu_arch': 'x86_64', + 'capabilities': 'boot_mode:uefi'} mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.list_memory.return_value = self.memory @@ -188,6 +189,7 @@ mock_client.list_cpus.return_value = self.cpus mock_client.list_virtual_disks.side_effect = ( drac_exceptions.BaseClientException('boom')) + mock_client.list_bios_settings.return_value = self.bios_boot_settings with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -203,7 +205,8 @@ 'memory_mb': 32768, 'local_gb': 279, 'cpus': 18, - 'cpu_arch': 'x86_64'} + 'cpu_arch': 'x86_64', + 'capabilities': 'boot_mode:uefi'} mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.list_memory.return_value = self.memory @@ -234,6 +237,7 @@ mock_client.list_virtual_disks.return_value = [] mock_client.list_physical_disks.return_value = self.physical_disks mock_client.list_nics.return_value = 
self.nics + mock_client.list_bios_settings.return_value = self.uefi_boot_settings with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -249,7 +253,8 @@ 'memory_mb': 32768, 'local_gb': 1116, 'cpus': 18, - 'cpu_arch': 'x86_64'} + 'cpu_arch': 'x86_64', + 'capabilities': 'boot_mode:uefi'} mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.list_memory.return_value = self.memory diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_job.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_job.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_job.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_job.py 2019-03-21 20:07:40.000000000 +0000 @@ -43,9 +43,9 @@ 'start_time': '00000101000000', 'until_time': 'TIME_NA', 'message': 'Job in progress', - 'state': 'Running', + 'status': 'Running', 'percent_complete': 34} - self.job = test_utils.dict_to_namedtuple(values=self.job_dict) + self.job = test_utils.make_job(self.job_dict) def test_get_job(self, mock_get_drac_client): mock_client = mock.Mock() @@ -127,9 +127,9 @@ 'start_time': '00000101000000', 'until_time': 'TIME_NA', 'message': 'Job in progress', - 'state': 'Running', + 'status': 'Running', 'percent_complete': 34} - self.job = test_utils.dict_to_namedtuple(values=self.job_dict) + self.job = test_utils.make_job(self.job_dict) def test_list_unfinished_jobs(self, mock_get_drac_client): mock_client = mock.Mock() diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py 2019-03-21 20:07:40.000000000 +0000 @@ -42,7 +42,7 @@ 'start_time': '00000101000000', 'until_time': 'TIME_NA', 'message': 'Job in progress', - 'state': 'Running', + 'status': 'Running', 'percent_complete': 34} self.virtual_disk = { 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', @@ -51,8 +51,8 @@ 'controller': 'RAID.Integrated.1-1', 'raid_level': '1', 'size_mb': 571776, - 'state': 'ok', - 'raid_state': 'online', + 'status': 'ok', + 'raid_status': 'online', 'span_depth': 1, 'span_length': 2, 'pending_operations': None @@ -153,7 +153,7 @@ # mock task task = mock.Mock(node=self.node, context=self.context) # mock dracclient.get_job - self.job['state'] = 'Completed' + self.job['status'] = 'Completed' mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.get_job.return_value = test_utils.dict_to_namedtuple( @@ -183,7 +183,7 @@ # mock task task = mock.Mock(node=self.node, context=self.context) # mock dracclient.get_job - self.job['state'] = 'Failed' + self.job['status'] = 'Failed' self.job['message'] = 'boom' mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client @@ -222,7 +222,7 @@ # mock task task = mock.Mock(node=self.node, context=self.context) # mock dracclient.get_job - self.job['state'] = 'Completed' + self.job['status'] = 'Completed' mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client mock_client.get_job.return_value = test_utils.dict_to_namedtuple( @@ -261,7 +261,7 @@ # mock task task = mock.Mock(node=self.node, context=self.context) # mock dracclient.get_job - self.job['state'] = 'Completed' + self.job['status'] = 'Completed' mock_client = mock.Mock() 
mock_get_drac_client.return_value = mock_client mock_client.get_job.return_value = test_utils.dict_to_namedtuple( @@ -302,9 +302,9 @@ # mock task task = mock.Mock(node=self.node, context=self.context) # mock dracclient.get_job - self.job['state'] = 'Completed' + self.job['status'] = 'Completed' failed_job = self.job.copy() - failed_job['state'] = 'Failed' + failed_job['status'] = 'Failed' failed_job['message'] = 'boom' mock_client = mock.Mock() mock_get_drac_client.return_value = mock_client diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_raid.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_raid.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/test_raid.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/test_raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -45,9 +45,11 @@ 'description': 'Integrated RAID Controller 1', 'manufacturer': 'DELL', 'model': 'PERC H710 Mini', - 'firmware_version': '21.3.0-0009'} - self.raid_controller = test_utils.dict_to_namedtuple( - values=raid_controller_dict) + 'primary_status': 'ok', + 'firmware_version': '21.3.0-0009', + 'bus': '1'} + self.raid_controller = test_utils.make_raid_controller( + raid_controller_dict) virtual_disk_dict = { 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', @@ -56,13 +58,13 @@ 'controller': 'RAID.Integrated.1-1', 'raid_level': '1', 'size_mb': 571776, - 'state': 'ok', - 'raid_state': 'online', + 'status': 'ok', + 'raid_status': 'online', 'span_depth': 1, 'span_length': 2, - 'pending_operations': None} - self.virtual_disk = test_utils.dict_to_namedtuple( - values=virtual_disk_dict) + 'pending_operations': None, + 'physical_disks': []} + self.virtual_disk = test_utils.make_virtual_disk(virtual_disk_dict) physical_disk_dict = { 'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', @@ -77,10 +79,11 @@ 'free_size_mb': 571776, 'serial_number': 'S0M3EY2Z', 'firmware_version': 'LS0A', - 'state': 'ok', - 'raid_state': 'ready'} - self.physical_disk = test_utils.dict_to_namedtuple( - values=physical_disk_dict) + 'status': 'ok', + 'raid_status': 'ready', + 'sas_address': '500056B37789ABE3', + 'device_protocol': None} + self.physical_disk = test_utils.make_physical_disk(physical_disk_dict) def test_list_raid_controllers(self, mock_get_drac_client): mock_client = mock.Mock() @@ -287,8 +290,10 @@ 'free_size_mb': 571776, 'serial_number': 'S0M3EY2Z', 'firmware_version': 'LS0A', - 'state': 'ok', - 'raid_state': 'ready'} + 'status': 'ok', + 'raid_status': 'ready', + 'sas_address': '500056B37789ABE3', + 'device_protocol': None} self.physical_disks = [] for i in range(8): @@ -330,8 +335,7 @@ physical_disks = [] for disk in self.physical_disks: - physical_disks.append( - test_utils.dict_to_namedtuple(values=disk)) + physical_disks.append(test_utils.make_physical_disk(disk)) return physical_disks @@ -539,8 +543,10 @@ 'free_size_mb': 571776, 'serial_number': 'S0M3EY2Z', 'firmware_version': 'LS0A', - 'state': 'ok', - 'raid_state': 'ready'} + 'status': 'ok', + 'raid_status': 'ready', + 'sas_address': '500056B37789ABE3', + 'device_protocol': None} self.physical_disks = [] for i in range(8): @@ -582,8 +588,7 @@ physical_disks = [] for disk in self.physical_disks: - physical_disks.append( - test_utils.dict_to_namedtuple(values=disk)) + physical_disks.append(test_utils.make_physical_disk(disk)) return physical_disks @@ -1300,13 +1305,14 @@ 'controller': 'RAID.Integrated.1-1', 'raid_level': '1', 'size_mb': 571776, - 'state': 'ok', - 'raid_state': 'online', + 'status': 'ok', 
+ 'raid_status': 'online', 'span_depth': 1, 'span_length': 2, - 'pending_operations': None} + 'pending_operations': None, + 'physical_disks': []} mock_list_virtual_disks.return_value = [ - test_utils.dict_to_namedtuple(values=virtual_disk_dict)] + test_utils.make_virtual_disk(virtual_disk_dict)] mock_commit_config.return_value = '42' with task_manager.acquire(self.context, self.node.uuid, @@ -1359,13 +1365,14 @@ 'controller': 'RAID.Integrated.1-1', 'raid_level': '1', 'size_mb': 571776, - 'state': 'ok', - 'raid_state': 'online', + 'status': 'ok', + 'raid_status': 'online', 'span_depth': 1, 'span_length': 2, - 'pending_operations': None} + 'pending_operations': None, + 'physical_disks': []} mock_list_virtual_disks.return_value = [ - test_utils.dict_to_namedtuple(values=virtual_disk_dict)] + test_utils.make_virtual_disk(virtual_disk_dict)] expected_logical_disk = {'id': 'Disk.Virtual.0:RAID.Integrated.1-1', 'size_gb': 558, 'raid_level': '1', diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/utils.py ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/utils.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/drac/utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/drac/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -13,12 +13,17 @@ import collections +from oslo_utils import importutils + from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils INFO_DICT = db_utils.get_test_drac_info() +dracclient_job = importutils.try_import('dracclient.resources.job') +dracclient_raid = importutils.try_import('dracclient.resources.raid') + class BaseDracTest(db_base.DbTestCase): def setUp(self): @@ -38,13 +43,20 @@ setattr(self, key, dictionary[key]) -def dict_to_namedtuple(name='GenericNamedTuple', values=None): +def dict_to_namedtuple(name='GenericNamedTuple', values=None, + tuple_class=None): """Converts a dict to a collections.namedtuple""" if values is None: values = {} - return collections.namedtuple(name, list(values))(**values) + if tuple_class is None: + tuple_class = collections.namedtuple(name, list(values)) + else: + # Support different versions of the driver as fields change. 
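+        # tuple_class here is a concrete dracclient namedtuple class; the
+        # comprehension below rebuilds the kwargs from the fields that class
+        # declares, defaulting any key the test fixture does not supply to
+        # None, so fixtures stay valid as python-dracclient adds fields.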
+ values = {field: values.get(field) for field in tuple_class._fields} + + return tuple_class(**values) def dict_of_object(data): @@ -55,3 +67,27 @@ dict_obj = DictToObj(v) data[k] = dict_obj return data + + +def make_job(job_dict): + tuple_class = dracclient_job.Job if dracclient_job else None + return dict_to_namedtuple(values=job_dict, + tuple_class=tuple_class) + + +def make_raid_controller(raid_controller_dict): + tuple_class = dracclient_raid.RAIDController if dracclient_raid else None + return dict_to_namedtuple(values=raid_controller_dict, + tuple_class=tuple_class) + + +def make_virtual_disk(virtual_disk_dict): + tuple_class = dracclient_raid.VirtualDisk if dracclient_raid else None + return dict_to_namedtuple(values=virtual_disk_dict, + tuple_class=tuple_class) + + +def make_physical_disk(physical_disk_dict): + tuple_class = dracclient_raid.PhysicalDisk if dracclient_raid else None + return dict_to_namedtuple(values=physical_disk_dict, + tuple_class=tuple_class) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/base.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/base.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/base.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/base.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,42 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test base class for iBMC Driver.""" + +import mock + +from ironic.drivers.modules.ibmc import utils +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + + +class IBMCTestCase(db_base.DbTestCase): + + def setUp(self): + super(IBMCTestCase, self).setUp() + self.driver_info = db_utils.get_test_ibmc_info() + self.config(enabled_hardware_types=['ibmc'], + enabled_power_interfaces=['ibmc'], + enabled_management_interfaces=['ibmc'], + enabled_vendor_interfaces=['ibmc']) + self.node = obj_utils.create_test_node( + self.context, driver='ibmc', driver_info=self.driver_info) + self.ibmc = utils.parse_driver_info(self.node) + + @staticmethod + def mock_ibmc_conn(ibmc_client_connect): + conn = mock.Mock(system=mock.PropertyMock()) + conn.__enter__ = mock.Mock(return_value=conn) + conn.__exit__ = mock.Mock(return_value=None) + ibmc_client_connect.return_value = conn + return conn diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_management.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_management.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_management.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_management.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,276 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Test class for iBMC Management interface.""" + +import itertools + +import mock +from oslo_utils import importutils + +from ironic.common import boot_devices +from ironic.common import boot_modes +from ironic.common import exception +from ironic.conductor import task_manager +from ironic.drivers.modules.ibmc import mappings +from ironic.drivers.modules.ibmc import utils +from ironic.tests.unit.drivers.modules.ibmc import base + +constants = importutils.try_import('ibmc_client.constants') +ibmc_client = importutils.try_import('ibmc_client') +ibmc_error = importutils.try_import('ibmc_client.exceptions') + + +class IBMCManagementTestCase(base.IBMCTestCase): + + def test_get_properties(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + properties = task.driver.get_properties() + for prop in utils.COMMON_PROPERTIES: + self.assertIn(prop, properties) + + @mock.patch.object(utils, 'parse_driver_info', autospec=True) + def test_validate(self, mock_parse_driver_info): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + task.driver.management.validate(task) + mock_parse_driver_info.assert_called_once_with(task.node) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_get_supported_boot_devices(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock return value + _supported_boot_devices = list(mappings.GET_BOOT_DEVICE_MAP) + conn.system.get.return_value = mock.Mock( + boot_source_override=mock.Mock( + supported_boot_devices=_supported_boot_devices + ) + ) + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + supported_boot_devices = ( + task.driver.management.get_supported_boot_devices(task)) + connect_ibmc.assert_called_once_with(**self.ibmc) + expect = sorted(list(mappings.GET_BOOT_DEVICE_MAP.values())) + self.assertEqual(expect, sorted(supported_boot_devices)) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_boot_device(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock return value + conn.system.set_boot_source.return_value = None + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + device_mapping = [ + (boot_devices.PXE, constants.BOOT_SOURCE_TARGET_PXE), + (boot_devices.DISK, constants.BOOT_SOURCE_TARGET_HDD), + (boot_devices.CDROM, constants.BOOT_SOURCE_TARGET_CD), + (boot_devices.BIOS, + constants.BOOT_SOURCE_TARGET_BIOS_SETUP), + ('floppy', constants.BOOT_SOURCE_TARGET_FLOPPY), + ] + + persistent_mapping = [ + (True, constants.BOOT_SOURCE_ENABLED_CONTINUOUS), + (False, constants.BOOT_SOURCE_ENABLED_ONCE) + ] + + data_source = list(itertools.product(device_mapping, + persistent_mapping)) + for (device, persistent) in data_source: + task.driver.management.set_boot_device( + task, device[0], persistent=persistent[0]) + connect_ibmc.assert_called_once_with(**self.ibmc) + conn.system.set_boot_source.assert_called_once_with( + device[1], + enabled=persistent[1]) + # Reset mocks + connect_ibmc.reset_mock() + conn.system.set_boot_source.reset_mock() + + 
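(The test_set_boot_device case above crosses every boot device with both persistence values via itertools.product and resets the mock between iterations, so assert_called_once_with stays meaningful for each combination. Below is a minimal, self-contained sketch of that pattern; the names set_boot_source and the device strings are illustrative stand-ins, not ironic's or ibmc_client's actual API.)

import itertools
import unittest
from unittest import mock


class CrossProductPatternTest(unittest.TestCase):
    def test_all_combinations(self):
        client = mock.Mock()
        # Illustrative (ironic_value, client_value) pairs, not real constants.
        devices = [('pxe', 'Pxe'), ('disk', 'Hdd')]
        persistence = [(True, 'Continuous'), (False, 'Once')]
        for (device, persistent) in itertools.product(devices, persistence):
            client.set_boot_source(device[1], enabled=persistent[1])
            client.set_boot_source.assert_called_once_with(
                device[1], enabled=persistent[1])
            # Reset so the next combination starts from a clean call count.
            client.set_boot_source.reset_mock()


if __name__ == '__main__':
    unittest.main()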
@mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_boot_device_fail(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock return value + conn.system.set_boot_source.side_effect = ( + ibmc_error.IBMCClientError + ) + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaisesRegex( + exception.IBMCError, 'set iBMC boot device', + task.driver.management.set_boot_device, task, + boot_devices.PXE) + connect_ibmc.assert_called_once_with(**self.ibmc) + conn.system.set_boot_source.assert_called_once_with( + constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_ONCE) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_get_boot_device(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock return value + conn.system.get.return_value = mock.Mock( + boot_source_override=mock.Mock( + target=constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS + ) + ) + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + result_boot_device = task.driver.management.get_boot_device(task) + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + expected = {'boot_device': boot_devices.PXE, + 'persistent': True} + self.assertEqual(expected, result_boot_device) + + def test_get_supported_boot_modes(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + supported_boot_modes = ( + task.driver.management.get_supported_boot_modes(task)) + self.assertEqual(list(mappings.SET_BOOT_MODE_MAP), + supported_boot_modes) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_boot_mode(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock system boot source override return value + conn.system.get.return_value = mock.Mock( + boot_source_override=mock.Mock( + target=constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS + ) + ) + conn.system.set_boot_source.return_value = None + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + expected_values = [ + (boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS), + (boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI) + ] + + for ironic_boot_mode, ibmc_boot_mode in expected_values: + task.driver.management.set_boot_mode(task, + mode=ironic_boot_mode) + + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + + conn.system.set_boot_source.assert_called_once_with( + constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS, + mode=ibmc_boot_mode) + + # Reset + connect_ibmc.reset_mock() + conn.system.set_boot_source.reset_mock() + conn.system.get.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_boot_mode_fail(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock system boot source override return value + conn.system.get.return_value = mock.Mock( + boot_source_override=mock.Mock( + target=constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS + ) + ) + conn.system.set_boot_source.side_effect = ( + ibmc_error.IBMCClientError + ) + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + expected_values = [ + (boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS), + (boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI) + ] + + for ironic_boot_mode, 
ibmc_boot_mode in expected_values: + self.assertRaisesRegex( + exception.IBMCError, 'set iBMC boot mode', + task.driver.management.set_boot_mode, task, + ironic_boot_mode) + + conn.system.set_boot_source.assert_called_once_with( + constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS, + mode=ibmc_boot_mode) + + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + + # Reset + connect_ibmc.reset_mock() + conn.system.set_boot_source.reset_mock() + conn.system.get.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_get_boot_mode(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock system boot source override return value + conn.system.get.return_value = mock.Mock( + boot_source_override=mock.Mock( + target=constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS, + mode=constants.BOOT_SOURCE_MODE_BIOS, + ) + ) + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + response = task.driver.management.get_boot_mode(task) + + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + + expected = boot_modes.LEGACY_BIOS + self.assertEqual(expected, response) + + def test_get_sensors_data(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + self.assertRaises(NotImplementedError, + task.driver.management.get_sensors_data, task) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_inject_nmi(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock system boot source override return value + conn.system.reset.return_value = None + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + task.driver.management.inject_nmi(task) + + connect_ibmc.assert_called_once_with(**self.ibmc) + conn.system.reset.assert_called_once_with(constants.RESET_NMI) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_inject_nmi_fail(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + # mock system boot source override return value + conn.system.reset.side_effect = ( + ibmc_error.IBMCClientError + ) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.assertRaisesRegex( + exception.IBMCError, 'inject iBMC NMI', + task.driver.management.inject_nmi, task) + + connect_ibmc.assert_called_once_with(**self.ibmc) + conn.system.reset.assert_called_once_with(constants.RESET_NMI) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_power.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_power.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_power.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_power.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,284 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
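(Most failure cases in the management tests above follow one shape: the client call is forced to raise via side_effect, and the test asserts that the driver surfaces its own exception type with a recognizable message while still making exactly the expected client call. A standalone sketch of that shape, with invented names - ClientError, DriverError, inject_nmi - rather than the real ibmc_client API:)

import unittest
from unittest import mock


class ClientError(Exception):
    pass


class DriverError(Exception):
    pass


def inject_nmi(client):
    # Stand-in for the driver method under test: it wraps the low-level
    # client failure in the driver's own exception type.
    try:
        client.reset('Nmi')
    except ClientError as exc:
        raise DriverError('inject NMI failed: %s' % exc)


class ErrorPathTest(unittest.TestCase):
    def test_inject_nmi_fail(self):
        client = mock.Mock()
        client.reset.side_effect = ClientError('boom')
        self.assertRaisesRegex(DriverError, 'inject NMI', inject_nmi, client)
        # The failing call must still have been attempted exactly once.
        client.reset.assert_called_once_with('Nmi')


if __name__ == '__main__':
    unittest.main()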
+"""Test class for iBMC Power interface.""" + +import mock +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common import states +from ironic.conductor import task_manager +from ironic.drivers.modules.ibmc import mappings +from ironic.drivers.modules.ibmc import utils +from ironic.tests.unit.drivers.modules.ibmc import base + +constants = importutils.try_import('ibmc_client.constants') +ibmc_client = importutils.try_import('ibmc_client') +ibmc_error = importutils.try_import('ibmc_client.exceptions') + + +@mock.patch('eventlet.greenthread.sleep', lambda _t: None) +class IBMCPowerTestCase(base.IBMCTestCase): + + def test_get_properties(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + properties = task.driver.get_properties() + for prop in utils.COMMON_PROPERTIES: + self.assertIn(prop, properties) + + @mock.patch.object(utils, 'parse_driver_info', autospec=True) + def test_validate(self, mock_parse_driver_info): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + task.driver.power.validate(task) + mock_parse_driver_info.assert_called_once_with(task.node) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_get_power_state(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + expected_values = mappings.GET_POWER_STATE_MAP + for current, expected in expected_values.items(): + # Mock + conn.system.get.return_value = mock.Mock( + power_state=current + ) + + # Asserts + self.assertEqual(expected, + task.driver.power.get_power_state(task)) + + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + + # Reset Mock + conn.system.get.reset_mock() + connect_ibmc.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_power_state(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + state_mapping = mappings.SET_POWER_STATE_MAP + for (expect_state, reset_type) in state_mapping.items(): + if expect_state in (states.POWER_OFF, states.SOFT_POWER_OFF): + final = constants.SYSTEM_POWER_STATE_OFF + transient = constants.SYSTEM_POWER_STATE_ON + else: + final = constants.SYSTEM_POWER_STATE_ON + transient = constants.SYSTEM_POWER_STATE_OFF + + # Mocks + mock_system_get_results = ( + [mock.Mock(power_state=transient)] * 3 + + [mock.Mock(power_state=final)]) + conn.system.get.side_effect = mock_system_get_results + + task.driver.power.set_power_state(task, expect_state) + + # Asserts + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(reset_type) + self.assertEqual(4, conn.system.get.call_count) + + # Reset Mocks + # TODO(Qianbiao.NG) why reset_mock does not reset call_count + connect_ibmc.reset_mock() + conn.system.get.reset_mock() + conn.system.reset.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_power_state_not_reached(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.config(power_state_change_timeout=2, group='conductor') + + state_mapping = mappings.SET_POWER_STATE_MAP + for (expect_state, reset_type) in state_mapping.items(): + if expect_state in (states.POWER_OFF, states.SOFT_POWER_OFF): + final = constants.SYSTEM_POWER_STATE_OFF + transient 
= constants.SYSTEM_POWER_STATE_ON + else: + final = constants.SYSTEM_POWER_STATE_ON + transient = constants.SYSTEM_POWER_STATE_OFF + + # Mocks + mock_system_get_results = ( + [mock.Mock(power_state=transient)] * 5 + + [mock.Mock(power_state=final)]) + conn.system.get.side_effect = mock_system_get_results + + self.assertRaises(exception.PowerStateFailure, + task.driver.power.set_power_state, + task, expect_state) + + # Asserts + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(reset_type) + + # Reset Mocks + connect_ibmc.reset_mock() + conn.system.get.reset_mock() + conn.system.reset.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_power_state_fail(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + + # Mocks + conn.system.reset.side_effect = ( + ibmc_error.IBMCClientError + ) + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + # Asserts + self.assertRaisesRegex( + exception.IBMCError, 'set iBMC power state', + task.driver.power.set_power_state, task, states.POWER_ON) + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(constants.RESET_ON) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_set_power_state_timeout(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.config(power_state_change_timeout=2, group='conductor') + + # Mocks + conn.system.get.side_effect = ( + [mock.Mock(power_state=constants.SYSTEM_POWER_STATE_OFF)] * 3 + ) + + # Asserts + self.assertRaisesRegex( + exception.PowerStateFailure, + 'Failed to set node power state to power on', + task.driver.power.set_power_state, task, states.POWER_ON) + + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(constants.RESET_ON) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_reboot(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.config(power_state_change_timeout=2, group='conductor') + expected_values = [ + (constants.SYSTEM_POWER_STATE_OFF, constants.RESET_ON), + (constants.SYSTEM_POWER_STATE_ON, + constants.RESET_FORCE_RESTART) + ] + + # for (expect_state, reset_type) in state_mapping.items(): + for current, reset_type in expected_values: + mock_system_get_results = [ + # Initial state + mock.Mock(power_state=current), + # Transient state - powering off + mock.Mock(power_state=constants.SYSTEM_POWER_STATE_OFF), + # Final state - down powering off + mock.Mock(power_state=constants.SYSTEM_POWER_STATE_ON) + ] + conn.system.get.side_effect = mock_system_get_results + + task.driver.power.reboot(task) + + # Asserts + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(reset_type) + + # Reset Mocks + connect_ibmc.reset_mock() + conn.system.get.reset_mock() + conn.system.reset.reset_mock() + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_reboot_not_reached(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.config(power_state_change_timeout=2, group='conductor') + + # Mocks + conn.system.get.return_value = mock.Mock( + power_state=constants.SYSTEM_POWER_STATE_OFF) + self.assertRaisesRegex( + exception.PowerStateFailure, + 'Failed to 
set node power state to power on', + task.driver.power.reboot, task) + + # Asserts + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with(constants.RESET_ON) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_reboot_fail(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + + # Mocks + conn.system.reset.side_effect = ( + ibmc_error.IBMCClientError + ) + conn.system.get.return_value = mock.Mock( + power_state=constants.SYSTEM_POWER_STATE_ON + ) + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + # Asserts + self.assertRaisesRegex( + exception.IBMCError, 'reboot iBMC', + task.driver.power.reboot, task) + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.get.assert_called_once() + conn.system.reset.assert_called_once_with( + constants.RESET_FORCE_RESTART) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_reboot_timeout(self, connect_ibmc): + conn = self.mock_ibmc_conn(connect_ibmc) + + # Mocks + conn.system.get.side_effect = [mock.Mock( + power_state=constants.SYSTEM_POWER_STATE_OFF + )] * 5 + + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + self.config(power_state_change_timeout=2, group='conductor') + + # Asserts + self.assertRaisesRegex( + exception.PowerStateFailure, + 'Failed to set node power state to power on', + task.driver.power.reboot, task) + + # Asserts + connect_ibmc.assert_called_with(**self.ibmc) + conn.system.reset.assert_called_once_with( + constants.RESET_ON) + + def test_get_supported_power_states(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + supported_power_states = ( + task.driver.power.get_supported_power_states(task)) + self.assertEqual(sorted(list(mappings.SET_POWER_STATE_MAP)), + sorted(supported_power_states)) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_utils.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_utils.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,172 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
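(The power tests above simulate a node passing through transient states by giving conn.system.get a side_effect list: each poll consumes the next canned state, so the test can assert both the final outcome and the number of polls. A self-contained sketch of the same technique, under assumed names - wait_for_state is illustrative, not ironic's helper:)

import unittest
from unittest import mock


def wait_for_state(system, target, max_polls=10):
    # Poll until the system reports the target power state or we give up.
    for _ in range(max_polls):
        if system.get().power_state == target:
            return True
    return False


class TransitionTest(unittest.TestCase):
    def test_settles_after_transient_states(self):
        system = mock.Mock()
        # Three transient 'Off' polls, then the final 'On' state.
        system.get.side_effect = (
            [mock.Mock(power_state='Off')] * 3
            + [mock.Mock(power_state='On')])
        self.assertTrue(wait_for_state(system, 'On'))
        self.assertEqual(4, system.get.call_count)

    def test_timeout_when_state_never_reached(self):
        system = mock.Mock()
        # return_value (not a list) keeps yielding the same stuck state.
        system.get.return_value = mock.Mock(power_state='Off')
        self.assertFalse(wait_for_state(system, 'On', max_polls=3))


if __name__ == '__main__':
    unittest.main()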
+"""Test class for iBMC Driver common utils.""" + +import copy +import os + +import mock +from oslo_utils import importutils + +from ironic.common import exception +from ironic.conductor import task_manager +from ironic.drivers.modules.ibmc import utils +from ironic.tests.unit.drivers.modules.ibmc import base + +constants = importutils.try_import('ibmc_client.constants') +ibmc_client = importutils.try_import('ibmc_client') +ibmc_error = importutils.try_import('ibmc_client.exceptions') + + +class IBMCUtilsTestCase(base.IBMCTestCase): + + def setUp(self): + super(IBMCUtilsTestCase, self).setUp() + # Redfish specific configurations + self.config(connection_attempts=2, group='ibmc') + self.parsed_driver_info = { + 'address': 'https://example.com', + 'username': 'username', + 'password': 'password', + 'verify_ca': True, + } + + def test_parse_driver_info(self): + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + + def test_parse_driver_info_default_scheme(self): + self.node.driver_info['ibmc_address'] = 'example.com' + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + + def test_parse_driver_info_default_scheme_with_port(self): + self.node.driver_info['ibmc_address'] = 'example.com:42' + self.parsed_driver_info['address'] = 'https://example.com:42' + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + + def test_parse_driver_info_missing_info(self): + for prop in utils.REQUIRED_PROPERTIES: + self.node.driver_info = self.driver_info.copy() + self.node.driver_info.pop(prop) + self.assertRaises(exception.MissingParameterValue, + utils.parse_driver_info, self.node) + + def test_parse_driver_info_invalid_address(self): + for value in ['/banana!', '#location', '?search=hello']: + self.node.driver_info['ibmc_address'] = value + self.assertRaisesRegex(exception.InvalidParameterValue, + 'Invalid iBMC address', + utils.parse_driver_info, self.node) + + @mock.patch.object(os.path, 'exists', autospec=True) + def test_parse_driver_info_path_verify_ca(self, + mock_isdir): + mock_isdir.return_value = True + fake_path = '/path/to/a/valid/CA' + self.node.driver_info['ibmc_verify_ca'] = fake_path + self.parsed_driver_info['verify_ca'] = fake_path + + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + mock_isdir.assert_called_once_with(fake_path) + + @mock.patch.object(os.path, 'exists', autospec=True) + def test_parse_driver_info_valid_capath(self, mock_isfile): + mock_isfile.return_value = True + fake_path = '/path/to/a/valid/CA.pem' + self.node.driver_info['ibmc_verify_ca'] = fake_path + self.parsed_driver_info['verify_ca'] = fake_path + + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + mock_isfile.assert_called_once_with(fake_path) + + def test_parse_driver_info_invalid_value_verify_ca(self): + # Integers are not supported + self.node.driver_info['ibmc_verify_ca'] = 123456 + self.assertRaisesRegex(exception.InvalidParameterValue, + 'Invalid value type', + utils.parse_driver_info, self.node) + + def test_parse_driver_info_valid_string_value_verify_ca(self): + for value in ('0', 'f', 'false', 'off', 'n', 'no'): + self.node.driver_info['ibmc_verify_ca'] = value + response = utils.parse_driver_info(self.node) + parsed_driver_info = copy.deepcopy(self.parsed_driver_info) + parsed_driver_info['verify_ca'] = False + self.assertEqual(parsed_driver_info, response) 
+ + for value in ('1', 't', 'true', 'on', 'y', 'yes'): + self.node.driver_info['ibmc_verify_ca'] = value + response = utils.parse_driver_info(self.node) + self.assertEqual(self.parsed_driver_info, response) + + def test_parse_driver_info_invalid_string_value_verify_ca(self): + for value in ('xyz', '*', '!123', '123'): + self.node.driver_info['ibmc_verify_ca'] = value + self.assertRaisesRegex(exception.InvalidParameterValue, + 'The value should be a Boolean', + utils.parse_driver_info, self.node) + + def test_revert_dictionary(self): + data = { + "key1": "value1", + "key2": "value2" + } + + revert = utils.revert_dictionary(data) + self.assertEqual({ + "value1": "key1", + "value2": "key2" + }, revert) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_handle_ibmc_exception_retry(self, connect_ibmc): + + @utils.handle_ibmc_exception('get IBMC system') + def get_ibmc_system(_task): + driver_info = utils.parse_driver_info(_task.node) + with ibmc_client.connect(**driver_info) as _conn: + return _conn.system.get() + + conn = self.mock_ibmc_conn(connect_ibmc) + # Mocks + conn.system.get.side_effect = [ + ibmc_error.ConnectionError(url=self.ibmc['address'], + error='Failed to connect to host'), + mock.PropertyMock( + boot_source_override=mock.PropertyMock( + target=constants.BOOT_SOURCE_TARGET_PXE, + enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS + ) + ) + ] + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + system = get_ibmc_system(task) + + # Asserts + self.assertEqual(constants.BOOT_SOURCE_TARGET_PXE, + system.boot_source_override.target) + self.assertEqual(constants.BOOT_SOURCE_ENABLED_CONTINUOUS, + system.boot_source_override.enabled) + + # 1 failed, 1 succeed + connect_ibmc.assert_called_with(**self.ibmc) + self.assertEqual(2, connect_ibmc.call_count) + + # 1 failed, 1 succeed + self.assertEqual(2, conn.system.get.call_count) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,60 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
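(test_handle_ibmc_exception_retry above verifies that a decorated call which fails once with a connection error is retried and succeeds on the second attempt, asserting the doubled call count. A standalone sketch of that retry-then-assert pattern, with a simplified retry_once decorator standing in for ironic's handle_ibmc_exception:)

import functools
import unittest
from unittest import mock


class ConnectionError_(Exception):
    # Trailing underscore to avoid shadowing the builtin ConnectionError.
    pass


def retry_once(func):
    # Simplified stand-in: retry exactly once on a connection failure.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ConnectionError_:
            return func(*args, **kwargs)
    return wrapper


class RetryTest(unittest.TestCase):
    def test_retries_after_connection_error(self):
        client = mock.Mock()
        # First call fails, second call returns the payload.
        client.get.side_effect = [ConnectionError_('refused'), 'system']

        @retry_once
        def get_system():
            return client.get()

        self.assertEqual('system', get_system())
        # 1 failed + 1 succeeded, mirroring the call_count assertions above.
        self.assertEqual(2, client.get.call_count)


if __name__ == '__main__':
    unittest.main()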
+"""Test class for iBMC vendor interface.""" + +import mock +from oslo_utils import importutils + +from ironic.conductor import task_manager +from ironic.drivers.modules.ibmc import utils +from ironic.tests.unit.drivers.modules.ibmc import base + +ibmc_client = importutils.try_import('ibmc_client') + + +@mock.patch('eventlet.greenthread.sleep', lambda _t: None) +class IBMCVendorTestCase(base.IBMCTestCase): + + def setUp(self): + super(IBMCVendorTestCase, self).setUp() + + def test_get_properties(self): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + properties = task.driver.get_properties() + for prop in utils.COMMON_PROPERTIES: + self.assertIn(prop, properties) + + @mock.patch.object(utils, 'parse_driver_info', autospec=True) + def test_validate(self, mock_parse_driver_info): + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + task.driver.power.validate(task) + mock_parse_driver_info.assert_called_once_with(task.node) + + @mock.patch.object(ibmc_client, 'connect', autospec=True) + def test_list_boot_type_order(self, connect_ibmc): + # Mocks + conn = self.mock_ibmc_conn(connect_ibmc) + boot_up_seq = ['Pxe', 'Hdd', 'Others', 'Cd'] + conn.system.get.return_value = mock.Mock( + boot_sequence=['Pxe', 'Hdd', 'Others', 'Cd'] + ) + + expected = {'boot_up_sequence': boot_up_seq} + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + seq = task.driver.vendor.boot_up_seq(task) + conn.system.get.assert_called_once() + connect_ibmc.assert_called_once_with(**self.ibmc) + self.assertEqual(expected, seq) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_bios.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_bios.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_bios.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_bios.py 2019-03-21 20:07:40.000000000 +0000 @@ -21,6 +21,10 @@ from ironic.common import exception from ironic.conductor import task_manager +from ironic.conductor import utils as manager_utils +from ironic.drivers.modules import deploy_utils +from ironic.drivers.modules.ilo import bios as ilo_bios +from ironic.drivers.modules.ilo import boot as ilo_boot from ironic.drivers.modules.ilo import common as ilo_common from ironic import objects from ironic.tests.unit.db import utils as db_utils @@ -48,145 +52,364 @@ task.driver.bios.validate(task) mock_drvinfo.assert_called_once_with(task.node) - @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, - autospec=True) - def _test_ilo_error(self, error_type, - test_methods_not_called, method_details, ilo_mock): - error_dict = { - "missing_parameter": exception.MissingParameterValue, - "invalid_parameter": exception.InvalidParameterValue - } - - exc = error_dict.get(error_type)('error') - ilo_mock.side_effect = exc + def _test_ilo_error(self, exc_cls, + test_methods_not_called, + test_methods_called, + method_details, exception_mock): + exception_mock.side_effect = exc_cls('error') method = method_details.get("name") args = method_details.get("args") self.assertRaises(exception.NodeCleaningFailure, method, *args) for test_method in test_methods_not_called: - eval("ilo_mock.return_value.%s.assert_not_called()" % ( - test_method)) + test_method.assert_not_called() + for called_method in test_methods_called: + called_method["name"].assert_called_once_with( + *called_method["args"]) + @mock.patch.object(ilo_bios.IloBIOS, 'cache_bios_settings', + autospec=True) + 
@mock.patch.object(ilo_bios.IloBIOS, '_execute_post_boot_bios_step', + autospec=True) + @mock.patch.object(ilo_bios.IloBIOS, '_execute_pre_boot_bios_step', + autospec=True) + def test_apply_configuration_pre_boot(self, exe_pre_boot_mock, + exe_post_boot_mock, + cache_settings_mock): + settings = [ + { + "name": "SET_A", "value": "VAL_A", + }, + { + "name": "SET_B", "value": "VAL_B", + }, + { + "name": "SET_C", "value": "VAL_C", + }, + { + "name": "SET_D", "value": "VAL_D", + } + ] + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + driver_internal_info = task.node.driver_internal_info + driver_internal_info.pop('apply_bios', None) + task.node.driver_internal_info = driver_internal_info + task.node.save() + actual_settings = {'SET_A': 'VAL_A', 'SET_B': 'VAL_B', + 'SET_C': 'VAL_C', 'SET_D': 'VAL_D'} + task.driver.bios.apply_configuration(task, settings) + + exe_pre_boot_mock.assert_called_once_with( + task.driver.bios, task, 'apply_configuration', actual_settings) + self.assertFalse(exe_post_boot_mock.called) + cache_settings_mock.assert_called_once_with(task.driver.bios, task) + + @mock.patch.object(ilo_bios.IloBIOS, 'cache_bios_settings', + autospec=True) + @mock.patch.object(ilo_bios.IloBIOS, '_execute_post_boot_bios_step', + autospec=True) + @mock.patch.object(ilo_bios.IloBIOS, '_execute_pre_boot_bios_step', + autospec=True) + def test_apply_configuration_post_boot(self, exe_pre_boot_mock, + exe_post_boot_mock, + cache_settings_mock): + settings = [ + { + "name": "SET_A", "value": "VAL_A", + }, + { + "name": "SET_B", "value": "VAL_B", + }, + { + "name": "SET_C", "value": "VAL_C", + }, + { + "name": "SET_D", "value": "VAL_D", + } + ] + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + driver_internal_info = task.node.driver_internal_info + driver_internal_info['apply_bios'] = True + task.node.driver_internal_info = driver_internal_info + task.node.save() + task.driver.bios.apply_configuration(task, settings) + + exe_post_boot_mock.assert_called_once_with( + task.driver.bios, task, 'apply_configuration') + self.assertFalse(exe_pre_boot_mock.called) + cache_settings_mock.assert_called_once_with(task.driver.bios, task) + + @mock.patch.object(ilo_boot.IloVirtualMediaBoot, 'prepare_ramdisk', + spec_set=True, autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', spec_set=True, + autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', spec_set=True, + autospec=True) @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True) - def test_apply_configuration(self, get_ilo_object_mock): + def test__execute_pre_boot_bios_step_apply_configuration( + self, get_ilo_object_mock, build_agent_mock, + node_power_mock, prepare_mock): + with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: ilo_object_mock = get_ilo_object_mock.return_value - data = [ - { - "name": "SET_A", "value": "VAL_A", - }, - { - "name": "SET_B", "value": "VAL_B", - }, - { - "name": "SET_C", "value": "VAL_C", - }, - { - "name": "SET_D", "value": "VAL_D", - } - ] - task.driver.bios.apply_configuration(task, data) - expected = { + data = { "SET_A": "VAL_A", "SET_B": "VAL_B", "SET_C": "VAL_C", "SET_D": "VAL_D" } - ilo_object_mock.set_bios_settings.assert_called_once_with(expected) + step = 'apply_configuration' + task.driver.bios._execute_pre_boot_bios_step(task, step, data) + driver_info = task.node.driver_internal_info + self.assertTrue( + all(x in driver_info for x in ( + 'apply_bios', 
'cleaning_reboot', + 'skip_current_clean_step'))) + ilo_object_mock.set_bios_settings.assert_called_once_with(data) + self.assertFalse(ilo_object_mock.reset_bios_to_default.called) + build_agent_mock.assert_called_once_with(task.node) + self.assertTrue(prepare_mock.called) + self.assertTrue(node_power_mock.called) + + @mock.patch.object(ilo_boot.IloVirtualMediaBoot, 'prepare_ramdisk', + spec_set=True, autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', spec_set=True, + autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_pre_boot_bios_step_factory_reset( + self, get_ilo_object_mock, build_agent_mock, + node_power_mock, prepare_mock): - def test_apply_configuration_missing_parameter(self): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mdobj = { - "name": task.driver.bios.apply_configuration, - "args": (task, []) + ilo_object_mock = get_ilo_object_mock.return_value + data = { + "SET_A": "VAL_A", + "SET_B": "VAL_B", + "SET_C": "VAL_C", + "SET_D": "VAL_D" } - self._test_ilo_error("missing_parameter", ["set_bios_settings"], - mdobj) + step = 'factory_reset' + task.driver.bios._execute_pre_boot_bios_step(task, step, data) + driver_info = task.node.driver_internal_info + self.assertTrue( + all(x in driver_info for x in ( + 'reset_bios', 'cleaning_reboot', + 'skip_current_clean_step'))) + ilo_object_mock.reset_bios_to_default.assert_called_once_with() + self.assertFalse(ilo_object_mock.set_bios_settings.called) + build_agent_mock.assert_called_once_with(task.node) + self.assertTrue(prepare_mock.called) + self.assertTrue(node_power_mock.called) + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_pre_boot_bios_step_invalid( + self, get_ilo_object_mock): - def test_apply_configuration_invalid_parameter(self): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mdobj = { - "name": task.driver.bios.apply_configuration, - "args": (task, []) + data = { + "SET_A": "VAL_A", + "SET_B": "VAL_B", + "SET_C": "VAL_C", + "SET_D": "VAL_D" } - self._test_ilo_error("invalid_parameter", ["set_bios_settings"], - mdobj) + step = 'invalid_step' + self.assertRaises(exception.NodeCleaningFailure, + task.driver.bios._execute_pre_boot_bios_step, + task, step, data) @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True) - def test_apply_configuration_with_ilo_error(self, get_ilo_object_mock): + def test__execute_pre_boot_bios_step_iloobj_failed( + self, get_ilo_object_mock): + with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - ilo_object_mock = get_ilo_object_mock.return_value - data = [ - { - "name": "SET_A", "value": "VAL_A", - }, - { - "name": "SET_B", "value": "VAL_B", - }, - ] - exc = ilo_error.IloError('error') - ilo_object_mock.set_bios_settings.side_effect = exc + data = { + "SET_A": "VAL_A", + "SET_B": "VAL_B", + "SET_C": "VAL_C", + "SET_D": "VAL_D" + } + get_ilo_object_mock.side_effect = exception.MissingParameterValue( + 'err') + step = 'apply_configuration' self.assertRaises(exception.NodeCleaningFailure, - task.driver.bios.apply_configuration, - task, data) + task.driver.bios._execute_pre_boot_bios_step, + task, step, data) @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True) - def test_factory_reset(self, get_ilo_object_mock): + def 
test__execute_pre_boot_bios_step_set_bios_failed( + self, get_ilo_object_mock): + with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: + data = { + "SET_A": "VAL_A", + "SET_B": "VAL_B", + "SET_C": "VAL_C", + "SET_D": "VAL_D" + } ilo_object_mock = get_ilo_object_mock.return_value - task.driver.bios.factory_reset(task) - ilo_object_mock.reset_bios_to_default.assert_called_once_with() + ilo_object_mock.set_bios_settings.side_effect = ilo_error.IloError( + 'err') + step = 'apply_configuration' + self.assertRaises(exception.NodeCleaningFailure, + task.driver.bios._execute_pre_boot_bios_step, + task, step, data) + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_pre_boot_bios_step_reset_bios_failed( + self, get_ilo_object_mock): - def test_factory_reset_missing_parameter(self): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mdobj = { - "name": task.driver.bios.factory_reset, - "args": (task,) + data = { + "SET_A": "VAL_A", + "SET_B": "VAL_B", + "SET_C": "VAL_C", + "SET_D": "VAL_D" } - self._test_ilo_error("missing_parameter", - ["reset_bios_to_default"], mdobj) + ilo_object_mock = get_ilo_object_mock.return_value + ilo_object_mock.reset_bios_to_default.side_effect = ( + ilo_error.IloError('err')) + step = 'factory_reset' + self.assertRaises(exception.NodeCleaningFailure, + task.driver.bios._execute_pre_boot_bios_step, + task, step, data) + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_post_boot_bios_step_apply_configuration( + self, get_ilo_object_mock): - def test_factory_reset_invalid_parameter(self): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: - mdobj = { - "name": task.driver.bios.factory_reset, - "args": (task,) - } - self._test_ilo_error("invalid_parameter", - ["reset_bios_to_default"], mdobj) + driver_info = task.node.driver_internal_info + driver_info.update({'apply_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() + ilo_object_mock = get_ilo_object_mock.return_value + step = 'apply_configuration' + task.driver.bios._execute_post_boot_bios_step(task, step) + driver_info = task.node.driver_internal_info + self.assertTrue('apply_bios' not in driver_info) + ilo_object_mock.get_bios_settings_result.assert_called_once_with() @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True) - def test_factory_reset_with_ilo_error(self, get_ilo_object_mock): + def test__execute_post_boot_bios_step_factory_reset( + self, get_ilo_object_mock): + with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: + driver_info = task.node.driver_internal_info + driver_info.update({'reset_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() ilo_object_mock = get_ilo_object_mock.return_value - exc = ilo_error.IloError('error') - ilo_object_mock.reset_bios_to_default.side_effect = exc + step = 'factory_reset' + task.driver.bios._execute_post_boot_bios_step(task, step) + driver_info = task.node.driver_internal_info + self.assertTrue('reset_bios' not in driver_info) + ilo_object_mock.get_bios_settings_result.assert_called_once_with() + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_post_boot_bios_step_invalid( + self, get_ilo_object_mock): + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + driver_info = task.node.driver_internal_info + 
driver_info.update({'apply_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() + step = 'invalid_step' + self.assertRaises(exception.NodeCleaningFailure, + task.driver.bios._execute_post_boot_bios_step, + task, step) + self.assertTrue( + 'apply_bios' not in task.node.driver_internal_info) + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_post_boot_bios_step_iloobj_failed( + self, get_ilo_object_mock): + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + driver_info = task.node.driver_internal_info + driver_info.update({'apply_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() + get_ilo_object_mock.side_effect = exception.MissingParameterValue( + 'err') + step = 'apply_configuration' self.assertRaises(exception.NodeCleaningFailure, - task.driver.bios.factory_reset, task) + task.driver.bios._execute_post_boot_bios_step, + task, step) + self.assertTrue( + 'apply_bios' not in task.node.driver_internal_info) + + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test__execute_post_boot_bios_get_settings_error( + self, get_ilo_object_mock): + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + driver_info = task.node.driver_internal_info + driver_info.update({'apply_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() + ilo_object_mock = get_ilo_object_mock.return_value + + step = 'apply_configuration' + mdobj = { + "name": task.driver.bios._execute_post_boot_bios_step, + "args": (task, step,) + } + + self._test_ilo_error(ilo_error.IloCommandNotSupportedError, + [], + [], mdobj, + ilo_object_mock.get_bios_settings_result) + self.assertTrue( + 'apply_bios' not in task.node.driver_internal_info) @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True) - def test_factory_reset_with_unknown_error(self, get_ilo_object_mock): + def test__execute_post_boot_bios_get_settings_failed( + self, get_ilo_object_mock): + with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: + driver_info = task.node.driver_internal_info + driver_info.update({'reset_bios': True}) + task.node.driver_internal_info = driver_info + task.node.save() ilo_object_mock = get_ilo_object_mock.return_value - exc = ilo_error.IloCommandNotSupportedError('error') - ilo_object_mock.reset_bios_to_default.side_effect = exc + ilo_object_mock.get_bios_settings_result.return_value = ( + {'status': 'failed', 'message': 'Some data'}) + step = 'factory_reset' self.assertRaises(exception.NodeCleaningFailure, - task.driver.bios.factory_reset, task) + task.driver.bios._execute_post_boot_bios_step, + task, step) + self.assertTrue( + 'reset_bios' not in task.node.driver_internal_info) @mock.patch.object(objects.BIOSSettingList, 'create') @mock.patch.object(objects.BIOSSettingList, 'save') @@ -205,7 +428,7 @@ "SET_D": True } - ilo_object_mock.get_pending_bios_settings.return_value = settings + ilo_object_mock.get_current_bios_settings.return_value = settings expected_bios_settings = [ {"name": "SET_A", "value": True}, {"name": "SET_B", "value": True}, @@ -230,7 +453,7 @@ ) sync_node_mock.return_value = all_settings task.driver.bios.cache_bios_settings(task) - ilo_object_mock.get_pending_bios_settings.assert_called_once_with() + ilo_object_mock.get_current_bios_settings.assert_called_once_with() actual_arg = sorted(sync_node_mock.call_args[0][2], key=lambda x: x.get("name")) 
expected_arg = sorted(expected_bios_settings, @@ -244,25 +467,29 @@ delete_mock.assert_called_once_with( self.context, task.node.id, del_names) - def test_cache_bios_settings_missing_parameter(self): + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_cache_bios_settings_missing_parameter(self, get_ilo_object_mock): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: mdobj = { "name": task.driver.bios.cache_bios_settings, "args": (task,) } - self._test_ilo_error("missing_parameter", - ["get_pending_bios_settings"], mdobj) + self._test_ilo_error(exception.MissingParameterValue, + [], + [], mdobj, get_ilo_object_mock) - def test_cache_bios_settings_invalid_parameter(self): + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_cache_bios_settings_invalid_parameter(self, get_ilo_object_mock): with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: mdobj = { "name": task.driver.bios.cache_bios_settings, "args": (task,) } - self._test_ilo_error("invalid_parameter", - ["get_pending_bios_settings"], mdobj) + self._test_ilo_error(exception.InvalidParameterValue, + [], + [], mdobj, get_ilo_object_mock) @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) def test_cache_bios_settings_with_ilo_error(self, get_ilo_object_mock): @@ -270,10 +497,15 @@ with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: ilo_object_mock = get_ilo_object_mock.return_value - exc = ilo_error.IloError('error') - ilo_object_mock.get_pending_bios_settings.side_effect = exc - self.assertRaises(exception.NodeCleaningFailure, - task.driver.bios.cache_bios_settings, task) + mdobj = { + "name": task.driver.bios.cache_bios_settings, + "args": (task,) + } + self._test_ilo_error(ilo_error.IloError, + [], + [], + mdobj, + ilo_object_mock.get_current_bios_settings) @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) def test_cache_bios_settings_with_unknown_error(self, get_ilo_object_mock): @@ -281,7 +513,13 @@ with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: ilo_object_mock = get_ilo_object_mock.return_value - exc = ilo_error.IloCommandNotSupportedError('error') - ilo_object_mock.get_pending_bios_settings.side_effect = exc - self.assertRaises(exception.NodeCleaningFailure, - task.driver.bios.cache_bios_settings, task) + + mdobj = { + "name": task.driver.bios.cache_bios_settings, + "args": (task,) + } + self._test_ilo_error(ilo_error.IloCommandNotSupportedError, + [], + [], + mdobj, + ilo_object_mock.get_current_bios_settings) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_boot.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_boot.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_boot.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_boot.py 2019-03-21 20:07:40.000000000 +0000 @@ -204,14 +204,12 @@ task.context, 'image-uuid', ['boot_iso', 'kernel_id', 'ramdisk_id']) boot_object_name_mock.assert_called_once_with(task.node) - create_boot_iso_mock.assert_called_once_with(task.context, - 'tmpfile', - 'kernel_uuid', - 'ramdisk_uuid', - 'deploy_iso_uuid', - 'root-uuid', - 'kernel-params', - 'uefi') + create_boot_iso_mock.assert_called_once_with( + task.context, 'tmpfile', 'kernel_uuid', 'ramdisk_uuid', + deploy_iso_href='deploy_iso_uuid', + root_uuid='root-uuid', + kernel_params='kernel-params', + boot_mode='uefi') swift_obj_mock.create_object.assert_called_once_with('ilo-cont', 
'abcdef', 'tmpfile') @@ -273,14 +271,12 @@ task.context, 'image-uuid', ['boot_iso', 'kernel_id', 'ramdisk_id']) boot_object_name_mock.assert_called_once_with(task.node) - create_boot_iso_mock.assert_called_once_with(task.context, - 'tmpfile', - kernel_href, - ramdisk_href, - 'deploy_iso_uuid', - 'root-uuid', - 'kernel-params', - 'uefi') + create_boot_iso_mock.assert_called_once_with( + task.context, 'tmpfile', kernel_href, ramdisk_href, + deploy_iso_href='deploy_iso_uuid', + root_uuid='root-uuid', + kernel_params='kernel-params', + boot_mode='uefi') boot_iso_expected = 'http://10.10.1.30/httpboot/new_boot_iso' self.assertEqual(boot_iso_expected, boot_iso_actual) copy_file_mock.assert_called_once_with(fileobj_mock.name, @@ -336,14 +332,12 @@ task.context, 'image-uuid', ['boot_iso', 'kernel_id', 'ramdisk_id']) boot_object_name_mock.assert_called_once_with(task.node) - create_boot_iso_mock.assert_called_once_with(task.context, - 'tmpfile', - kernel_href, - ramdisk_href, - 'deploy_iso_uuid', - 'root-uuid', - 'kernel-params', - 'uefi') + create_boot_iso_mock.assert_called_once_with( + task.context, 'tmpfile', kernel_href, ramdisk_href, + deploy_iso_href='deploy_iso_uuid', + root_uuid='root-uuid', + kernel_params='kernel-params', + boot_mode='uefi') boot_iso_expected = 'http://10.10.1.30/httpboot/abcdef' self.assertEqual(boot_iso_expected, boot_iso_actual) copy_file_mock.assert_called_once_with(fileobj_mock.name, diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_inspect.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_inspect.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_inspect.py 2019-03-21 20:07:40.000000000 +0000 @@ -68,7 +68,7 @@ properties = {'memory_mb': '512', 'local_gb': '10', 'cpus': '1', 'cpu_arch': 'x86_64'} macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'} - capabilities = '' + capabilities = {} result = {'properties': properties, 'macs': macs} get_essential_mock.return_value = result get_capabilities_mock.return_value = capabilities @@ -106,7 +106,7 @@ properties = {'memory_mb': '512', 'local_gb': 0, 'cpus': '1', 'cpu_arch': 'x86_64'} macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'} - capabilities = '' + capabilities = {} result = {'properties': properties, 'macs': macs} get_essential_mock.return_value = result get_capabilities_mock.return_value = capabilities @@ -129,6 +129,90 @@ ilo_object_mock) create_port_mock.assert_called_once_with(task, macs) + @mock.patch.object(ilo_inspect.LOG, 'warning', + spec_set=True, autospec=True) + @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True, + autospec=True) + @mock.patch.object(inspect_utils, 'create_ports_if_not_exist', + spec_set=True, autospec=True) + @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True, + autospec=True) + @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test_inspect_ok_gen8(self, get_ilo_object_mock, + power_mock, + get_essential_mock, + create_port_mock, + get_capabilities_mock, + log_mock): + ilo_object_mock = get_ilo_object_mock.return_value + properties = {'memory_mb': '512', 'local_gb': 10, + 'cpus': '1', 'cpu_arch': 'x86_64'} + macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'} + capabilities = {'server_model': 'Gen8'} + result = 
{'properties': properties, 'macs': macs} + get_essential_mock.return_value = result + get_capabilities_mock.return_value = capabilities + power_mock.return_value = states.POWER_ON + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + expected_properties = {'memory_mb': '512', 'local_gb': 10, + 'cpus': '1', 'cpu_arch': 'x86_64', + 'capabilities': 'server_model:Gen8'} + task.driver.inspect.inspect_hardware(task) + self.assertEqual(expected_properties, task.node.properties) + power_mock.assert_called_once_with(mock.ANY, task) + get_essential_mock.assert_called_once_with(task.node, + ilo_object_mock) + self.assertTrue(log_mock.called) + get_capabilities_mock.assert_called_once_with(task.node, + ilo_object_mock) + create_port_mock.assert_called_once_with(task, macs) + + @mock.patch.object(ilo_inspect.LOG, 'warning', + spec_set=True, autospec=True) + @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True, + autospec=True) + @mock.patch.object(inspect_utils, 'create_ports_if_not_exist', + spec_set=True, autospec=True) + @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True, + autospec=True) + @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True, + autospec=True) + @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, + autospec=True) + def test_inspect_ok_gen10(self, get_ilo_object_mock, + power_mock, + get_essential_mock, + create_port_mock, + get_capabilities_mock, + log_mock): + ilo_object_mock = get_ilo_object_mock.return_value + properties = {'memory_mb': '512', 'local_gb': 10, + 'cpus': '1', 'cpu_arch': 'x86_64'} + macs = {'NIC.LOM.1.1': 'aa:aa:aa:aa:aa:aa'} + capabilities = {'server_model': 'Gen10'} + result = {'properties': properties, 'macs': macs} + get_essential_mock.return_value = result + get_capabilities_mock.return_value = capabilities + power_mock.return_value = states.POWER_ON + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + expected_properties = {'memory_mb': '512', 'local_gb': 10, + 'cpus': '1', 'cpu_arch': 'x86_64', + 'capabilities': 'server_model:Gen10'} + task.driver.inspect.inspect_hardware(task) + self.assertEqual(expected_properties, task.node.properties) + power_mock.assert_called_once_with(mock.ANY, task) + get_essential_mock.assert_called_once_with(task.node, + ilo_object_mock) + self.assertFalse(log_mock.called) + get_capabilities_mock.assert_called_once_with(task.node, + ilo_object_mock) + create_port_mock.assert_called_once_with(task, macs) + @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True, autospec=True) @mock.patch.object(inspect_utils, 'create_ports_if_not_exist', @@ -151,7 +235,7 @@ properties = {'memory_mb': '512', 'local_gb': '10', 'cpus': '1', 'cpu_arch': 'x86_64'} macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'} - capabilities = '' + capabilities = {} result = {'properties': properties, 'macs': macs} get_essential_mock.return_value = result get_capabilities_mock.return_value = capabilities diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_raid.py ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_raid.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/ilo/test_raid.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/ilo/test_raid.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,342 @@ +# Copyright 2018 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Test class for Raid Interface used by iLO5.""" + +import mock +from oslo_utils import importutils + +from ironic.common import exception +from ironic.common import raid +from ironic.common import states +from ironic.conductor import task_manager +from ironic.conductor import utils as manager_utils +from ironic.drivers.modules import deploy_utils +from ironic.drivers.modules.ilo import common as ilo_common +from ironic.drivers.modules.ilo import raid as ilo_raid +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + +ilo_error = importutils.try_import('proliantutils.exception') + +INFO_DICT = db_utils.get_test_ilo_info() + + +class Ilo5RAIDTestCase(db_base.DbTestCase): + + def setUp(self): + super(Ilo5RAIDTestCase, self).setUp() + self.driver = mock.Mock(raid=ilo_raid.Ilo5RAID()) + self.target_raid_config = { + "logical_disks": [ + {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}, + {'size_gb': 200, 'raid_level': 5} + ]} + self.clean_step = {'step': 'create_configuration', + 'interface': 'raid'} + n = { + 'driver': 'ilo5', + 'driver_info': INFO_DICT, + 'target_raid_config': self.target_raid_config, + 'clean_step': self.clean_step, + } + self.config(enabled_hardware_types=['ilo5'], + enabled_boot_interfaces=['ilo-virtual-media'], + enabled_console_interfaces=['ilo'], + enabled_deploy_interfaces=['iscsi'], + enabled_inspect_interfaces=['ilo'], + enabled_management_interfaces=['ilo'], + enabled_power_interfaces=['ilo'], + enabled_raid_interfaces=['ilo5']) + self.node = obj_utils.create_test_node(self.context, **n) + + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test__prepare_for_read_raid_create_raid( + self, mock_reboot, mock_build_opt): + with task_manager.acquire(self.context, self.node.uuid) as task: + mock_build_opt.return_value = [] + task.driver.raid._prepare_for_read_raid(task, 'create_raid') + self.assertTrue( + task.node.driver_internal_info.get( + 'ilo_raid_create_in_progress')) + self.assertTrue( + task.node.driver_internal_info.get( + 'cleaning_reboot')) + self.assertFalse( + task.node.driver_internal_info.get( + 'skip_current_clean_step')) + mock_reboot.assert_called_once_with(task, states.REBOOT) + + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test__prepare_for_read_raid_delete_raid( + self, mock_reboot, mock_build_opt): + with task_manager.acquire(self.context, self.node.uuid) as task: + mock_build_opt.return_value = [] + task.driver.raid._prepare_for_read_raid(task, 'delete_raid') + self.assertTrue( + task.node.driver_internal_info.get( + 'ilo_raid_delete_in_progress')) + self.assertTrue( + task.node.driver_internal_info.get( + 'cleaning_reboot')) + self.assertEqual( + task.node.driver_internal_info.get( + 'skip_current_clean_step'), False) + mock_reboot.assert_called_once_with(task, 
states.REBOOT) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid') + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration( + self, ilo_mock, filter_target_raid_config_mock, prepare_raid_mock): + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + filter_target_raid_config_mock.return_value = ( + self.target_raid_config) + result = task.driver.raid.create_configuration(task) + prepare_raid_mock.assert_called_once_with(task, 'create_raid') + (ilo_mock_object.create_raid_configuration. + assert_called_once_with(self.target_raid_config)) + self.assertEqual(states.CLEANWAIT, result) + + @mock.patch.object(raid, 'update_raid_info', autospec=True) + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_with_read_raid( + self, ilo_mock, filter_target_raid_config_mock, update_raid_mock): + raid_conf = {u'logical_disks': + [{u'size_gb': 89, + u'physical_disks': [u'5I:1:1'], + u'raid_level': u'0', + u'root_device_hint': {u'wwn': u'0x600508b1001c7e87'}, + u'controller': u'Smart Array P822 in Slot 1', + u'volume_name': u'0006EB7BPDVTF0BRH5L0EAEDDA'}] + } + ilo_mock_object = ilo_mock.return_value + self.node.driver_internal_info = {'ilo_raid_create_in_progress': True} + self.node.save() + with task_manager.acquire(self.context, self.node.uuid) as task: + filter_target_raid_config_mock.return_value = ( + self.target_raid_config) + ilo_mock_object.read_raid_configuration.return_value = raid_conf + task.driver.raid.create_configuration(task) + update_raid_mock.assert_called_once_with(task.node, raid_conf) + self.assertNotIn('ilo_raid_create_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_with_read_raid_failed( + self, ilo_mock, filter_target_raid_config_mock): + raid_conf = {u'logical_disks': []} + self.node.driver_internal_info = {'ilo_raid_create_in_progress': True} + self.node.save() + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + filter_target_raid_config_mock.return_value = ( + self.target_raid_config) + ilo_mock_object.read_raid_configuration.return_value = raid_conf + self.assertRaises(exception.NodeCleaningFailure, + task.driver.raid.create_configuration, task) + self.assertNotIn('ilo_raid_create_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_empty_target_raid_config( + self, ilo_mock, filter_target_raid_config_mock): + self.node.target_raid_config = {} + self.node.save() + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + msg = "Node %s has no target RAID configuration" % self.node.uuid + filter_target_raid_config_mock.side_effect = ( + exception.MissingParameterValue(msg)) + 
self.assertRaises(exception.MissingParameterValue, + task.driver.raid.create_configuration, task) + self.assertFalse(ilo_mock_object.create_raid_configuration.called) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid') + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_skip_root( + self, ilo_mock, filter_target_raid_config_mock, + prepare_raid_mock): + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + exp_target_raid_config = { + "logical_disks": [ + {'size_gb': 200, 'raid_level': 5} + ]} + filter_target_raid_config_mock.return_value = ( + exp_target_raid_config) + result = task.driver.raid.create_configuration( + task, create_root_volume=False) + (ilo_mock_object.create_raid_configuration. + assert_called_once_with(exp_target_raid_config)) + self.assertEqual(states.CLEANWAIT, result) + prepare_raid_mock.assert_called_once_with(task, 'create_raid') + self.assertEqual( + exp_target_raid_config, + task.node.driver_internal_info['target_raid_config']) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid') + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_skip_non_root( + self, ilo_mock, filter_target_raid_config_mock, prepare_raid_mock): + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + exp_target_raid_config = { + "logical_disks": [ + {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True} + ]} + filter_target_raid_config_mock.return_value = ( + exp_target_raid_config) + result = task.driver.raid.create_configuration( + task, create_nonroot_volumes=False) + (ilo_mock_object.create_raid_configuration. 
+ assert_called_once_with(exp_target_raid_config)) + prepare_raid_mock.assert_called_once_with(task, 'create_raid') + self.assertEqual(states.CLEANWAIT, result) + self.assertEqual( + exp_target_raid_config, + task.node.driver_internal_info['target_raid_config']) + + @mock.patch.object(raid, 'filter_target_raid_config') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_skip_root_skip_non_root( + self, ilo_mock, filter_target_raid_config_mock): + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + msg = "Node %s has no target RAID configuration" % self.node.uuid + filter_target_raid_config_mock.side_effect = ( + exception.MissingParameterValue(msg)) + self.assertRaises( + exception.MissingParameterValue, + task.driver.raid.create_configuration, + task, False, False) + self.assertFalse(ilo_mock_object.create_raid_configuration.called) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_set_clean_failed') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_create_configuration_ilo_error(self, ilo_mock, + set_clean_failed_mock): + ilo_mock_object = ilo_mock.return_value + exc = ilo_error.IloError('error') + ilo_mock_object.create_raid_configuration.side_effect = exc + with task_manager.acquire(self.context, self.node.uuid) as task: + task.driver.raid.create_configuration( + task, create_nonroot_volumes=False) + set_clean_failed_mock.assert_called_once_with( + task, + 'Failed to create raid configuration ' + 'on node %s' % self.node.uuid, exc) + self.assertNotIn('ilo_raid_create_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_delete_configuration(self, ilo_mock, prepare_raid_mock): + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + result = task.driver.raid.delete_configuration(task) + self.assertEqual(states.CLEANWAIT, result) + ilo_mock_object.delete_raid_configuration.assert_called_once_with() + prepare_raid_mock.assert_called_once_with(task, 'delete_raid') + + @mock.patch.object(ilo_raid.LOG, 'info', spec_set=True, + autospec=True) + @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_delete_configuration_no_logical_drive( + self, ilo_mock, prepare_raid_mock, log_mock): + ilo_mock_object = ilo_mock.return_value + exc = ilo_error.IloLogicalDriveNotFoundError('No logical drive found') + with task_manager.acquire(self.context, self.node.uuid) as task: + ilo_mock_object.delete_raid_configuration.side_effect = exc + task.driver.raid.delete_configuration(task) + self.assertTrue(log_mock.called) + + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_delete_configuration_with_read_raid(self, ilo_mock): + raid_conf = {u'logical_disks': []} + self.node.driver_internal_info = {'ilo_raid_delete_in_progress': True} + self.node.save() + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + ilo_mock_object.read_raid_configuration.return_value = raid_conf + task.driver.raid.delete_configuration(task) + self.assertEqual(self.node.raid_config, {}) + 
self.assertNotIn('ilo_raid_delete_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_delete_configuration_with_read_raid_failed(self, ilo_mock): + raid_conf = {u'logical_disks': [{'size_gb': 200, + 'raid_level': 0, + 'is_root_volume': True}]} + self.node.driver_internal_info = {'ilo_raid_delete_in_progress': True} + self.node.save() + ilo_mock_object = ilo_mock.return_value + with task_manager.acquire(self.context, self.node.uuid) as task: + ilo_mock_object.read_raid_configuration.return_value = raid_conf + self.assertRaises(exception.NodeCleaningFailure, + task.driver.raid.delete_configuration, task) + self.assertNotIn('ilo_raid_delete_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + + @mock.patch.object(ilo_raid.Ilo5RAID, '_set_clean_failed') + @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True) + def test_delete_configuration_ilo_error(self, ilo_mock, + set_clean_failed_mock): + ilo_mock_object = ilo_mock.return_value + exc = ilo_error.IloError('error') + ilo_mock_object.delete_raid_configuration.side_effect = exc + with task_manager.acquire(self.context, self.node.uuid) as task: + task.driver.raid.delete_configuration(task) + ilo_mock_object.delete_raid_configuration.assert_called_once_with() + self.assertNotIn('ilo_raid_delete_in_progress', + task.node.driver_internal_info) + self.assertNotIn('cleaning_reboot', + task.node.driver_internal_info) + self.assertNotIn('skip_current_clean_step', + task.node.driver_internal_info) + set_clean_failed_mock.assert_called_once_with( + task, + 'Failed to delete raid configuration ' + 'on node %s' % self.node.uuid, exc) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/irmc/test_boot.py ironic-12.1.0/ironic/tests/unit/drivers/modules/irmc/test_boot.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/irmc/test_boot.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/irmc/test_boot.py 2019-03-21 20:07:40.000000000 +0000 @@ -498,8 +498,9 @@ '/remote_image_share_root/' "boot-%s.iso" % self.node.uuid, 'kernel_uuid', 'ramdisk_uuid', - '02f9d414-2ce0-4cf5-b48f-dbc1bf678f55', - 'root-uuid', 'kernel-params', 'uefi') + deploy_iso_href='02f9d414-2ce0-4cf5-b48f-dbc1bf678f55', + root_uuid='root-uuid', kernel_params='kernel-params', + boot_mode='uefi') task.node.refresh() self.assertEqual("boot-%s.iso" % self.node.uuid, task.node.driver_internal_info['irmc_boot_iso']) @@ -680,7 +681,7 @@ _get_iso_name_mock.assert_has_calls( [mock.call(task.node, label='deploy'), mock.call(task.node, label='rescue')]) - self.assertTrue(_remove_share_file_mock.call_count, 3) + self.assertEqual(3, _remove_share_file_mock.call_count) _remove_share_file_mock.assert_has_calls( [mock.call(_get_floppy_image_name_mock(task.node)), mock.call(_get_iso_name_mock(task.node, label='deploy')), diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/irmc/test_inspect.py ironic-12.1.0/ironic/tests/unit/drivers/modules/irmc/test_inspect.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/irmc/test_inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/irmc/test_inspect.py 2019-03-21 20:07:40.000000000 +0000 @@ 
-348,7 +348,7 @@ _inspect_hardware_mock.assert_called_once_with(task.node, existing_traits) - self.assertTrue(port_mock.call_count, 2) + self.assertEqual(2, port_mock.call_count) task.node.refresh() self.assertEqual(inspected_props, task.node.properties) self.assertEqual(states.MANAGEABLE, result) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/network/test_common.py ironic-12.1.0/ironic/tests/unit/drivers/modules/network/test_common.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/network/test_common.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/network/test_common.py 2019-03-21 20:07:40.000000000 +0000 @@ -430,6 +430,26 @@ common.plug_port_to_tenant_network, task, self.port) + @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) + @mock.patch.object(neutron_common, 'wait_for_port_status', autospec=True) + @mock.patch.object(neutron_common, 'get_client', autospec=True) + def test_plug_port_to_tenant_network_smartnic_port( + self, mock_gc, wait_port_mock, wait_agent_mock): + nclient = mock.MagicMock() + mock_gc.return_value = nclient + local_link_connection = self.port.local_link_connection + local_link_connection['hostname'] = 'hostname' + self.port.local_link_connection = local_link_connection + self.port.internal_info = {common.TENANT_VIF_KEY: self.vif_id} + self.port.is_smartnic = True + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + common.plug_port_to_tenant_network(task, self.port) + wait_agent_mock.assert_called_once_with( + nclient, 'hostname') + wait_port_mock.assert_called_once_with( + nclient, self.vif_id, 'ACTIVE') + class TestVifPortIDMixin(db_base.DbTestCase): diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/network/test_neutron.py ironic-12.1.0/ironic/tests/unit/drivers/modules/network/test_neutron.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/network/test_neutron.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/network/test_neutron.py 2019-03-21 20:07:40.000000000 +0000 @@ -545,6 +545,24 @@ mock_unbind_port.assert_called_once_with( self.port.extra['vif_port_id'], context=task.context) + @mock.patch.object(neutron_common, 'get_client') + @mock.patch.object(neutron_common, 'wait_for_host_agent') + @mock.patch.object(neutron_common, 'unbind_neutron_port') + def test_unconfigure_tenant_networks_smartnic( + self, mock_unbind_port, wait_agent_mock, client_mock): + nclient = mock.MagicMock() + client_mock.return_value = nclient + local_link_connection = self.port.local_link_connection + local_link_connection['hostname'] = 'hostname' + self.port.local_link_connection = local_link_connection + self.port.is_smartnic = True + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.interface.unconfigure_tenant_networks(task) + mock_unbind_port.assert_called_once_with( + self.port.extra['vif_port_id'], context=task.context) + wait_agent_mock.assert_called_once_with(nclient, 'hostname') + def test_configure_tenant_networks_no_ports_for_node(self): n = utils.create_test_node(self.context, network_interface='neutron', uuid=uuidutils.generate_uuid()) @@ -571,10 +589,11 @@ self.assertIn('No neutron ports or portgroups are associated with', log_mock.error.call_args[0][0]) + @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron_common, 'get_client') @mock.patch.object(neutron, 'LOG') def 
test_configure_tenant_networks_multiple_ports_one_vif_id( - self, log_mock, client_mock): + self, log_mock, client_mock, wait_agent_mock): expected_body = { 'port': { 'binding:vnic_type': 'baremetal', @@ -592,8 +611,10 @@ upd_mock.assert_called_once_with(self.port.extra['vif_port_id'], expected_body) + @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron_common, 'get_client') - def test_configure_tenant_networks_update_fail(self, client_mock): + def test_configure_tenant_networks_update_fail(self, client_mock, + wait_agent_mock): client = client_mock.return_value client.update_port.side_effect = neutron_exceptions.ConnectionFailed( reason='meow') @@ -603,8 +624,10 @@ self.interface.configure_tenant_networks, task) client_mock.assert_called_once_with(context=task.context) + @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron_common, 'get_client') - def _test_configure_tenant_networks(self, client_mock, is_client_id=False, + def _test_configure_tenant_networks(self, client_mock, wait_agent_mock, + is_client_id=False, vif_int_info=False): upd_mock = mock.Mock() client_mock.return_value.update_port = upd_mock @@ -687,11 +710,12 @@ self.node.save() self._test_configure_tenant_networks(is_client_id=True) + @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True) @mock.patch.object(neutron_common, 'get_client', autospec=True) @mock.patch.object(neutron_common, 'get_local_group_information', autospec=True) def test_configure_tenant_networks_with_portgroups( - self, glgi_mock, client_mock): + self, glgi_mock, client_mock, wait_agent_mock): pg = utils.create_test_portgroup( self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32', extra={'vif_port_id': uuidutils.generate_uuid()}) @@ -745,3 +769,13 @@ [mock.call(self.port.extra['vif_port_id'], call1_body), mock.call(pg.extra['vif_port_id'], call2_body)] ) + + def test_need_power_on_true(self): + self.port.is_smartnic = True + self.port.save() + with task_manager.acquire(self.context, self.node.id) as task: + self.assertTrue(self.interface.need_power_on(task)) + + def test_need_power_on_false(self): + with task_manager.acquire(self.context, self.node.id) as task: + self.assertFalse(self.interface.need_power_on(task)) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/redfish/test_inspect.py ironic-12.1.0/ironic/tests/unit/drivers/modules/redfish/test_inspect.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/redfish/test_inspect.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/redfish/test_inspect.py 2019-03-21 20:07:44.000000000 +0000 @@ -49,6 +49,8 @@ system_mock.reset() + system_mock.boot.mode = 'uefi' + system_mock.memory_summary.size_gib = 2 system_mock.processors.summary = '8', 'MIPS' @@ -58,8 +60,9 @@ system_mock.storage.volumes_sizes_bytes = ( 2 * units.Gi, units.Gi * 4, units.Gi * 6) - system_mock.ethernet_interfaces.eth_summary = { - '1': '00:11:22:33:44:55', '2': '66:77:88:99:AA:BB' + system_mock.ethernet_interfaces.summary = { + '00:11:22:33:44:55': sushy.STATE_ENABLED, + '66:77:88:99:AA:BB': sushy.STATE_DISABLED, } return system_mock @@ -84,17 +87,17 @@ def test_inspect_hardware_ok(self, mock_create_ports_if_not_exist, mock_get_system): expected_properties = { + 'capabilities': 'boot_mode:uefi', 'cpu_arch': 'mips', 'cpus': '8', 'local_gb': '3', 'memory_mb': '2048' } - system = self.init_system_mock(mock_get_system.return_value) + 
self.init_system_mock(mock_get_system.return_value) with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: task.driver.inspect.inspect_hardware(task) - mock_create_ports_if_not_exist.assert_called_once_with( - task, system.ethernet_interfaces.eth_summary) + self.assertEqual(1, mock_create_ports_if_not_exist.call_count) mock_get_system.assert_called_once_with(task.node) self.assertEqual(expected_properties, task.node.properties) @@ -102,6 +105,7 @@ def test_inspect_hardware_fail_missing_cpu(self, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) system_mock.processors.summary = None, None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -113,10 +117,12 @@ def test_inspect_hardware_ignore_missing_cpu(self, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) system_mock.processors.summary = None, None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: expected_properties = { + 'capabilities': 'boot_mode:uefi', 'cpu_arch': 'x86_64', 'cpus': '8', 'local_gb': '3', 'memory_mb': '2048' } @@ -124,28 +130,18 @@ self.assertEqual(expected_properties, task.node.properties) @mock.patch.object(redfish_utils, 'get_system', autospec=True) - def test_inspect_hardware_fail_missing_local_gb(self, mock_get_system): - system_mock = self.init_system_mock(mock_get_system.return_value) - system_mock.simple_storage.disks_sizes_bytes = None - system_mock.storage.volumes_sizes_bytes = None - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - task.node.properties.pop('local_gb') - self.assertRaises(exception.HardwareInspectionFailure, - task.driver.inspect.inspect_hardware, task) - - @mock.patch.object(redfish_utils, 'get_system', autospec=True) def test_inspect_hardware_ignore_missing_local_gb(self, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) system_mock.simple_storage.disks_sizes_bytes = None system_mock.storage.volumes_sizes_bytes = None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: expected_properties = { + 'capabilities': 'boot_mode:uefi', 'cpu_arch': 'mips', 'cpus': '8', - 'local_gb': '10', 'memory_mb': '2048' + 'local_gb': '0', 'memory_mb': '2048' } task.driver.inspect.inspect_hardware(task) self.assertEqual(expected_properties, task.node.properties) @@ -154,6 +150,7 @@ def test_inspect_hardware_fail_missing_memory_mb(self, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) system_mock.memory_summary.size_gib = None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: @@ -165,10 +162,12 @@ def test_inspect_hardware_ignore_missing_memory_mb(self, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) system_mock.memory_summary.size_gib = None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: expected_properties = { + 'capabilities': 'boot_mode:uefi', 'cpu_arch': 'mips', 'cpus': '8', 'local_gb': '3', 'memory_mb': '4096' } @@ -181,9 +180,42 @@ def test_inspect_hardware_ignore_missing_nics( self, mock_create_ports_if_not_exist, mock_get_system): system_mock = self.init_system_mock(mock_get_system.return_value) - system_mock.ethernet_interfaces.eth_summary = None + 
system_mock.ethernet_interfaces.summary = None + system_mock.boot.mode = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: task.driver.inspect.inspect_hardware(task) self.assertFalse(mock_create_ports_if_not_exist.called) + + @mock.patch.object(redfish_utils, 'get_system', autospec=True) + def test_inspect_hardware_preserve_boot_mode(self, mock_get_system): + system_mock = self.init_system_mock(mock_get_system.return_value) + system_mock.boot.mode = 'uefi' + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + task.node.properties = { + 'capabilities': 'boot_mode:bios' + } + expected_properties = { + 'capabilities': 'boot_mode:bios', + 'cpu_arch': 'mips', 'cpus': '8', + 'local_gb': '3', 'memory_mb': '2048' + } + task.driver.inspect.inspect_hardware(task) + self.assertEqual(expected_properties, task.node.properties) + + @mock.patch.object(redfish_utils, 'get_system', autospec=True) + def test_inspect_hardware_ignore_missing_boot_mode(self, mock_get_system): + system_mock = self.init_system_mock(mock_get_system.return_value) + system_mock.boot.mode = None + + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + expected_properties = { + 'cpu_arch': 'mips', 'cpus': '8', + 'local_gb': '3', 'memory_mb': '2048' + } + task.driver.inspect.inspect_hardware(task) + self.assertEqual(expected_properties, task.node.properties) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_agent_base_vendor.py 2019-03-21 20:07:40.000000000 +0000 @@ -22,6 +22,7 @@ from ironic.common import boot_devices from ironic.common import exception from ironic.common import states +from ironic.conductor import steps as conductor_steps from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.drivers import base as drivers_base @@ -213,7 +214,8 @@ @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) @mock.patch.object(agent_base_vendor.HeartbeatMixin, 'refresh_clean_steps', autospec=True) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', + autospec=True) @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', autospec=True) def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps, @@ -234,7 +236,8 @@ @mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True) @mock.patch.object(agent_base_vendor.HeartbeatMixin, 'refresh_clean_steps', autospec=True) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', + autospec=True) @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', autospec=True) def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps, @@ -355,8 +358,29 @@ task.node.driver_internal_info['agent_url']) self.assertEqual('3.2.0', task.node.driver_internal_info['agent_version']) + self.assertIsNotNone( + task.node.driver_internal_info['agent_last_heartbeat']) mock_touch.assert_called_once_with(mock.ANY) + def test_heartbeat_records_fast_track(self): + self.config(fast_track=True, group='deploy') + for provision_state in [states.ENROLL, 
states.MANAGEABLE, + states.AVAILABLE]: + self.node.driver_internal_info = {} + self.node.provision_state = provision_state + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + self.deploy.heartbeat(task, 'http://127.0.0.1:8080', '3.2.0') + self.assertEqual('http://127.0.0.1:8080', + task.node.driver_internal_info['agent_url']) + self.assertEqual('3.2.0', + task.node.driver_internal_info[ + 'agent_version']) + self.assertIsNotNone( + task.node.driver_internal_info['agent_last_heartbeat']) + self.assertEqual(provision_state, task.node.provision_state) + class AgentRescueTests(AgentDeployMixinBaseTest): @@ -424,8 +448,38 @@ self.deploy._finalize_rescue, task) mock_finalize_rescue.assert_called_once_with(task.node) + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(agent.AgentRescue, 'clean_up', + spec_set=True, autospec=True) + @mock.patch.object(agent_client.AgentClient, 'finalize_rescue', + spec=types.FunctionType) + def test__finalize_rescue_with_smartnic_port( + self, mock_finalize_rescue, mock_clean_up, + power_on_node_if_needed_mock, restore_power_state_mock): + node = self.node + node.provision_state = states.RESCUEWAIT + node.save() + mock_finalize_rescue.return_value = {'command_status': 'SUCCEEDED'} + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + task.driver.network.configure_tenant_networks = mock.Mock() + task.process_event = mock.Mock() + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.deploy._finalize_rescue(task) + mock_finalize_rescue.assert_called_once_with(task.node) + task.process_event.assert_has_calls([mock.call('resume'), + mock.call('done')]) + mock_clean_up.assert_called_once_with(mock.ANY, task) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + class AgentDeployMixinTest(AgentDeployMixinBaseTest): + @mock.patch.object(manager_utils, 'power_on_node_if_needed') @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', autospec=True) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @@ -437,7 +491,8 @@ spec=types.FunctionType) def test_reboot_and_finish_deploy( self, power_off_mock, get_power_state_mock, - node_power_action_mock, collect_mock, resume_mock): + node_power_action_mock, collect_mock, resume_mock, + power_on_node_if_needed_mock): cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent') self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE @@ -448,6 +503,8 @@ shared=True) as task: get_power_state_mock.side_effect = [states.POWER_ON, states.POWER_OFF] + + power_on_node_if_needed_mock.return_value = None self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(2, get_power_state_mock.call_count) @@ -458,6 +515,8 @@ collect_mock.assert_called_once_with(task.node) resume_mock.assert_called_once_with(task) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', autospec=True) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @@ -469,7 +528,8 @@ spec=types.FunctionType) def test_reboot_and_finish_deploy_deprecated( self, power_off_mock, get_power_state_mock, - node_power_action_mock, 
collect_mock, resume_mock): + node_power_action_mock, collect_mock, resume_mock, + power_on_node_if_needed_mock): # TODO(rloo): no deploy steps; delete this when we remove support # for handling no deploy steps. cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent') @@ -481,6 +541,7 @@ shared=True) as task: get_power_state_mock.side_effect = [states.POWER_ON, states.POWER_OFF] + power_on_node_if_needed_mock.return_value = None self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(2, get_power_state_mock.call_count) @@ -491,6 +552,8 @@ collect_mock.assert_called_once_with(task.node) self.assertFalse(resume_mock.called) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @@ -505,12 +568,14 @@ def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete( self, configure_tenant_net_mock, remove_provisioning_net_mock, power_off_mock, get_power_state_mock, - node_power_action_mock, mock_collect): + node_power_action_mock, mock_collect, + power_on_node_if_needed_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_ON self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) @@ -554,6 +619,7 @@ self.assertEqual(states.NOSTATE, task.node.target_provision_state) self.assertFalse(mock_collect.called) + @mock.patch.object(manager_utils, 'power_on_node_if_needed') @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @@ -568,13 +634,14 @@ def test_reboot_and_finish_deploy_get_power_state_fails( self, configure_tenant_net_mock, remove_provisioning_net_mock, power_off_mock, get_power_state_mock, node_power_action_mock, - mock_collect): + mock_collect, power_on_node_if_needed_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: get_power_state_mock.side_effect = RuntimeError("boom") + power_on_node_if_needed_mock.return_value = None self.deploy.reboot_and_finish_deploy(task) power_off_mock.assert_called_once_with(task.node) self.assertEqual(7, get_power_state_mock.call_count) @@ -588,6 +655,8 @@ self.assertEqual(states.NOSTATE, task.node.target_provision_state) self.assertFalse(mock_collect.called) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @@ -602,11 +671,12 @@ def test_reboot_and_finish_deploy_configure_tenant_network_exception( self, configure_tenant_net_mock, remove_provisioning_net_mock, power_off_mock, get_power_state_mock, node_power_action_mock, - mock_collect): + mock_collect, power_on_node_if_needed_mock): self.node.network_interface = 'neutron' self.node.provision_state = states.DEPLOYING self.node.target_provision_state 
= states.ACTIVE self.node.save() + power_on_node_if_needed_mock.return_value = None with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: configure_tenant_net_mock.side_effect = exception.NetworkError( @@ -649,6 +719,8 @@ self.assertEqual(states.ACTIVE, task.node.target_provision_state) mock_collect.assert_called_once_with(task.node) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) @mock.patch.object(time, 'sleep', lambda seconds: None) @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @@ -663,12 +735,14 @@ def test_reboot_and_finish_deploy_power_on_fails( self, configure_tenant_net_mock, remove_provisioning_net_mock, power_off_mock, get_power_state_mock, - node_power_action_mock, mock_collect): + node_power_action_mock, mock_collect, + power_on_node_if_needed_mock): self.node.provision_state = states.DEPLOYING self.node.target_provision_state = states.ACTIVE self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=True) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_ON node_power_action_mock.side_effect = [None, RuntimeError("boom")] @@ -761,7 +835,7 @@ task.node.driver_internal_info['is_whole_disk_image'] = False self.deploy.configure_local_boot(task, root_uuid='some-root-uuid') try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', efi_system_part_uuid=None, prep_boot_part_uuid=None) @@ -779,7 +853,7 @@ self.deploy.configure_local_boot(task, root_uuid='some-root-uuid', prep_boot_part_uuid='fake-prep') try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', efi_system_part_uuid=None, prep_boot_part_uuid='fake-prep') @@ -798,7 +872,7 @@ task, root_uuid='some-root-uuid', efi_system_part_uuid='efi-system-part-uuid') try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) install_bootloader_mock.assert_called_once_with( mock.ANY, task.node, root_uuid='some-root-uuid', efi_system_part_uuid='efi-system-part-uuid', @@ -814,7 +888,7 @@ self.deploy.configure_local_boot(task) self.assertFalse(install_bootloader_mock.called) try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) @mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True) @mock.patch.object(agent_client.AgentClient, 'install_bootloader', @@ -827,7 +901,52 @@ self.deploy.configure_local_boot(task) self.assertFalse(install_bootloader_mock.called) try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) + + @mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True) + @mock.patch.object(agent_client.AgentClient, 'install_bootloader', + autospec=True) + def test_configure_local_boot_enforce_persistent_boot_device_default( + self, install_bootloader_mock, try_set_boot_device_mock): + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + driver_info = task.node.driver_info + driver_info['force_persistent_boot_device'] = 'Default' + 
task.node.driver_info = driver_info + self.deploy.configure_local_boot(task) + self.assertFalse(install_bootloader_mock.called) + try_set_boot_device_mock.assert_called_once_with( + task, boot_devices.DISK, persistent=True) + + @mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True) + @mock.patch.object(agent_client.AgentClient, 'install_bootloader', + autospec=True) + def test_configure_local_boot_enforce_persistent_boot_device_always( + self, install_bootloader_mock, try_set_boot_device_mock): + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + driver_info = task.node.driver_info + driver_info['force_persistent_boot_device'] = 'Always' + task.node.driver_info = driver_info + self.deploy.configure_local_boot(task) + self.assertFalse(install_bootloader_mock.called) + try_set_boot_device_mock.assert_called_once_with( + task, boot_devices.DISK, persistent=True) + + @mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True) + @mock.patch.object(agent_client.AgentClient, 'install_bootloader', + autospec=True) + def test_configure_local_boot_enforce_persistent_boot_device_never( + self, install_bootloader_mock, try_set_boot_device_mock): + with task_manager.acquire(self.context, self.node['uuid'], + shared=False) as task: + driver_info = task.node.driver_info + driver_info['force_persistent_boot_device'] = 'Never' + task.node.driver_info = driver_info + self.deploy.configure_local_boot(task) + self.assertFalse(install_bootloader_mock.called) + try_set_boot_device_mock.assert_called_once_with( + task, boot_devices.DISK, persistent=False) @mock.patch.object(agent_client.AgentClient, 'collect_system_logs', autospec=True) @@ -878,7 +997,7 @@ mock.ANY, task.node, root_uuid='some-root-uuid', efi_system_part_uuid=None, prep_boot_part_uuid=None) try_set_boot_device_mock.assert_called_once_with( - task, boot_devices.DISK) + task, boot_devices.DISK, persistent=True) collect_logs_mock.assert_called_once_with(mock.ANY, task.node) self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) @@ -1235,7 +1354,8 @@ self.deploy.continue_cleaning(task) error_mock.assert_called_once_with(task, mock.ANY) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', + autospec=True) @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', autospec=True) @mock.patch.object(agent_base_vendor.AgentDeployMixin, @@ -1274,7 +1394,8 @@ self._test_continue_cleaning_clean_version_mismatch(manual=True) @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True) + @mock.patch.object(conductor_steps, 'set_node_cleaning_steps', + autospec=True) @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', autospec=True) @mock.patch.object(agent_base_vendor.AgentDeployMixin, @@ -1398,6 +1519,46 @@ hook_returned = agent_base_vendor._get_post_clean_step_hook(self.node) self.assertIsNone(hook_returned) + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed') + @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', + autospec=True) + @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True) + @mock.patch.object(time, 'sleep', lambda seconds: None) + @mock.patch.object(manager_utils, 
'node_power_action', autospec=True) + @mock.patch.object(fake.FakePower, 'get_power_state', + spec=types.FunctionType) + @mock.patch.object(agent_client.AgentClient, 'power_off', + spec=types.FunctionType) + def test_reboot_and_finish_deploy_with_smartnic_port( + self, power_off_mock, get_power_state_mock, + node_power_action_mock, collect_mock, resume_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent') + self.node.provision_state = states.DEPLOYING + self.node.target_provision_state = states.ACTIVE + self.node.deploy_step = { + 'step': 'deploy', 'priority': 50, 'interface': 'deploy'} + self.node.save() + with task_manager.acquire(self.context, self.node.uuid, + shared=True) as task: + get_power_state_mock.side_effect = [states.POWER_ON, + states.POWER_OFF] + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.deploy.reboot_and_finish_deploy(task) + power_off_mock.assert_called_once_with(task.node) + self.assertEqual(2, get_power_state_mock.call_count) + node_power_action_mock.assert_called_once_with( + task, states.POWER_ON) + self.assertEqual(states.DEPLOYING, task.node.provision_state) + self.assertEqual(states.ACTIVE, task.node.target_provision_state) + collect_mock.assert_called_once_with(task.node) + resume_mock.assert_called_once_with(task) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + class TestRefreshCleanSteps(AgentDeployMixinBaseTest): diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_agent.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_agent.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_agent.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_agent.py 2019-03-21 20:07:40.000000000 +0000 @@ -337,6 +337,39 @@ self.assertIsNone(driver_return) self.assertTrue(mock_pxe_instance.called) + @mock.patch.object(agent_client.AgentClient, 'prepare_image', + autospec=True) + @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) + @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) + def test_deploy_fast_track(self, power_mock, mock_pxe_instance, + mock_is_fast_track, prepare_image_mock): + mock_is_fast_track.return_value = True + self.node.target_provision_state = states.ACTIVE + self.node.provision_state = states.DEPLOYING + test_temp_url = 'http://image' + expected_image_info = { + 'urls': [test_temp_url], + 'id': 'fake-image', + 'node_uuid': self.node.uuid, + 'checksum': 'checksum', + 'disk_format': 'qcow2', + 'container_format': 'bare', + 'stream_raw_images': CONF.agent.stream_raw_images, + } + self.node.save() + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + self.driver.deploy(task) + self.assertFalse(power_mock.called) + self.assertFalse(mock_pxe_instance.called) + task.node.refresh() + prepare_image_mock.assert_called_with(mock.ANY, task.node, + expected_image_info) + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) + self.assertEqual(states.ACTIVE, + task.node.target_provision_state) + @mock.patch.object(noop_storage.NoopStorage, 'detach_volumes', autospec=True) @mock.patch.object(flat_network.FlatNetwork, @@ -778,6 +811,52 @@ build_options_mock.assert_not_called() pxe_prepare_ramdisk_mock.assert_not_called() + @mock.patch('ironic.conductor.utils.is_fast_track', 
autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes', + autospec=True) + @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info') + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + @mock.patch.object(deploy_utils, 'build_agent_options') + @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy') + @mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'validate', + spec_set=True, autospec=True) + def test_prepare_fast_track( + self, validate_net_mock, + unconfigure_tenant_net_mock, add_provisioning_net_mock, + build_instance_info_mock, build_options_mock, + pxe_prepare_ramdisk_mock, storage_driver_info_mock, + storage_attach_volumes_mock, is_fast_track_mock): + # TODO(TheJulia): We should revisit this test. Smartnic + # support didn't wire in tightly on testing for power in + # these tests, and largely fast_track impacts power operations. + node = self.node + node.network_interface = 'flat' + node.save() + is_fast_track_mock.return_value = True + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.node.provision_state = states.DEPLOYING + build_options_mock.return_value = {'a': 'b'} + self.driver.prepare(task) + storage_driver_info_mock.assert_called_once_with(task) + validate_net_mock.assert_called_once_with(mock.ANY, task) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) + unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task) + self.assertTrue(storage_attach_volumes_mock.called) + self.assertTrue(build_instance_info_mock.called) + # TODO(TheJulia): We should likely consider executing the + # next two methods at some point in order to facilitate + # continuity. While not explicitly required for this feature + # to work, reboots as part of deployment would need the ramdisk + # present and ready. 
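The TODO just above is the crux of the fast-track feature: when `is_fast_track(task)` reports a live, heartbeating agent, `prepare()` deliberately skips building agent boot options and network-booting the ramdisk, which is why the test asserts that both mocks were never called. A minimal sketch of that control flow follows; it is an editor's illustration, not the patched code, and every helper is injected as a parameter since only `is_fast_track` is named by the patch itself:

def prepare(task, is_fast_track, build_options, prepare_ramdisk):
    """Sketch: deploy-interface prepare() with a fast-track shortcut.

    is_fast_track(task) -> bool: True when an agent ramdisk is already
    running on the node, so no reboot into the ramdisk is needed. The
    other callables stand in for deploy_utils.build_agent_options and
    the boot interface's prepare_ramdisk.
    """
    if task.node.provision_state == 'deploying':
        if is_fast_track(task):
            # Agent already up: skip option building and the reboot.
            return
        options = build_options(task.node)   # e.g. agent API URL
        prepare_ramdisk(task, options)       # network-boot the agent

Under that shape, asserting `build_options` and `prepare_ramdisk` were not called is exactly what `test_prepare_fast_track` does below.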
+ self.assertFalse(build_options_mock.called) + self.assertFalse(pxe_prepare_ramdisk_mock.called) + @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider') @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp') @mock.patch.object(pxe.PXEBoot, 'clean_up_instance') @@ -1001,6 +1080,8 @@ self.assertEqual(states.ACTIVE, task.node.target_provision_state) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(deploy_utils, 'remove_http_instance_symlink', autospec=True) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @@ -1018,7 +1099,8 @@ def test_reboot_to_instance(self, check_deploy_mock, prepare_instance_mock, power_off_mock, get_power_state_mock, node_power_action_mock, - uuid_mock, log_mock, remove_symlink_mock): + uuid_mock, log_mock, remove_symlink_mock, + power_on_node_if_needed_mock): self.config(manage_agent_boot=True, group='agent') self.config(image_download_source='http', group='agent') check_deploy_mock.return_value = None @@ -1029,6 +1111,7 @@ with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: get_power_state_mock.return_value = states.POWER_OFF + power_on_node_if_needed_mock.return_value = None task.node.driver_internal_info['is_whole_disk_image'] = True task.driver.deploy.reboot_to_instance(task) check_deploy_mock.assert_called_once_with(mock.ANY, task.node) @@ -1048,6 +1131,8 @@ self.assertEqual(states.NOSTATE, task.node.target_provision_state) self.assertTrue(remove_symlink_mock.called) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result', @@ -1061,13 +1146,10 @@ autospec=True) @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin' '.check_deploy_success', autospec=True) - def test_reboot_to_instance_no_manage_agent_boot(self, check_deploy_mock, - prepare_instance_mock, - power_off_mock, - get_power_state_mock, - node_power_action_mock, - uuid_mock, bootdev_mock, - log_mock): + def test_reboot_to_instance_no_manage_agent_boot( + self, check_deploy_mock, prepare_instance_mock, power_off_mock, + get_power_state_mock, node_power_action_mock, uuid_mock, + bootdev_mock, log_mock, power_on_node_if_needed_mock): self.config(manage_agent_boot=False, group='agent') check_deploy_mock.return_value = None uuid_mock.return_value = None @@ -1076,6 +1158,7 @@ self.node.save() with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_OFF task.node.driver_internal_info['is_whole_disk_image'] = True task.driver.deploy.reboot_to_instance(task) @@ -1093,6 +1176,8 @@ self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy', autospec=True) @@ -1113,7 +1198,8 @@ get_power_state_mock, node_power_action_mock, uuid_mock, boot_mode_mock, - log_mock): + log_mock, + power_on_node_if_needed_mock): check_deploy_mock.return_value = None uuid_mock.return_value = 'root_uuid' self.node.provision_state = states.DEPLOYWAIT @@ -1122,6 +1208,7 @@ 
boot_mode_mock.return_value = 'bios' with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_OFF driver_internal_info = task.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = False @@ -1146,6 +1233,8 @@ self.assertEqual(states.ACTIVE, task.node.provision_state) self.assertEqual(states.NOSTATE, task.node.target_provision_state) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy', autospec=True) @@ -1163,7 +1252,8 @@ def test_reboot_to_instance_partition_localboot_ppc64( self, check_deploy_mock, prepare_instance_mock, power_off_mock, get_power_state_mock, - node_power_action_mock, uuid_mock, boot_mode_mock, log_mock): + node_power_action_mock, uuid_mock, boot_mode_mock, log_mock, + power_on_node_if_needed_mock): check_deploy_mock.return_value = None uuid_mock.side_effect = ['root_uuid', 'prep_boot_part_uuid'] self.node.provision_state = states.DEPLOYWAIT @@ -1172,6 +1262,7 @@ with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_OFF driver_internal_info = task.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = False @@ -1238,6 +1329,8 @@ self.assertEqual(states.DEPLOYFAIL, task.node.provision_state) self.assertEqual(states.ACTIVE, task.node.target_provision_state) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True) @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy', autospec=True) @@ -1258,7 +1351,8 @@ get_power_state_mock, node_power_action_mock, uuid_mock, boot_mode_mock, - log_mock): + log_mock, + power_on_node_if_needed_mock): check_deploy_mock.return_value = None uuid_mock.side_effect = ['root_uuid', 'efi_uuid'] self.node.provision_state = states.DEPLOYWAIT @@ -1267,6 +1361,7 @@ with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = None get_power_state_mock.return_value = states.POWER_OFF driver_internal_info = task.node.driver_internal_info driver_internal_info['is_whole_disk_image'] = False @@ -1367,6 +1462,125 @@ 'command_status': 'RUNNING'}] self.assertFalse(task.driver.deploy.deploy_is_done(task)) + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes', + autospec=True) + @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info') + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + @mock.patch.object(deploy_utils, 'build_agent_options') + @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy') + @mock.patch.object(flat_network.FlatNetwork, + 'add_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'validate', + spec_set=True, autospec=True) + def test_prepare_with_smartnic_port( + self, validate_net_mock, + unconfigure_tenant_net_mock, add_provisioning_net_mock, + build_instance_info_mock, 
build_options_mock, + pxe_prepare_ramdisk_mock, storage_driver_info_mock, + storage_attach_volumes_mock, power_on_node_if_needed_mock, + restore_power_state_mock): + node = self.node + node.network_interface = 'flat' + node.save() + add_provisioning_net_mock.return_value = None + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.node.provision_state = states.DEPLOYING + build_instance_info_mock.return_value = {'foo': 'bar'} + build_options_mock.return_value = {'a': 'b'} + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.driver.prepare(task) + storage_driver_info_mock.assert_called_once_with(task) + validate_net_mock.assert_called_once_with(mock.ANY, task) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) + unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task) + storage_attach_volumes_mock.assert_called_once_with( + task.driver.storage, task) + build_instance_info_mock.assert_called_once_with(task) + build_options_mock.assert_called_once_with(task.node) + pxe_prepare_ramdisk_mock.assert_called_once_with( + task, {'a': 'b'}) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + self.node.refresh() + self.assertEqual('bar', self.node.instance_info['foo']) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'detach_volumes', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'remove_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) + def test_tear_down_with_smartnic_port( + self, power_mock, unconfigure_tenant_nets_mock, + remove_provisioning_net_mock, storage_detach_volumes_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + object_utils.create_test_volume_target( + self.context, node_id=self.node.id) + node = self.node + node.network_interface = 'flat' + node.save() + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + driver_return = self.driver.tear_down(task) + power_mock.assert_called_once_with(task, states.POWER_OFF) + self.assertEqual(driver_return, states.DELETED) + unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY, + task) + remove_provisioning_net_mock.assert_called_once_with(mock.ANY, + task) + storage_detach_volumes_mock.assert_called_once_with( + task.driver.storage, task) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + # Verify no volumes exist for new task instances. 
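# (The re-acquired task below performs that volume check.) The
# smartnic tests in this block all share one power-management
# pattern; a minimal sketch, assuming the conductor helpers behave
# as they are mocked here:
#
#     previous = manager_utils.power_on_node_if_needed(task)
#     try:
#         ...  # network wiring that needs the smartnic powered on
#     finally:
#         manager_utils.restore_power_state_if_needed(task, previous)
#
# power_on_node_if_needed() hands back the prior power state (or
# None when no power-up was needed), which the restore helper then
# reapplies once the network operations are done.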
+ with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + self.assertEqual(0, len(task.volume_targets)) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'should_write_image', + autospec=True) + def test_deploy_storage_should_write_image_false_with_smartnic_port( + self, mock_write, mock_pxe_instance, + power_on_node_if_needed_mock, restore_power_state_mock): + mock_write.return_value = False + self.node.provision_state = states.DEPLOYING + self.node.deploy_step = { + 'step': 'deploy', 'priority': 50, 'interface': 'deploy'} + self.node.save() + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + driver_return = self.driver.deploy(task) + self.assertIsNone(driver_return) + self.assertTrue(mock_pxe_instance.called) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + class AgentRAIDTestCase(db_base.DbTestCase): @@ -1406,12 +1620,15 @@ self.assertEqual(0, ret[0]['priority']) self.assertEqual(0, ret[1]['priority']) + @mock.patch.object(raid, 'filter_target_raid_config') @mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True) - def test_create_configuration(self, execute_mock): + def test_create_configuration(self, execute_mock, + filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: execute_mock.return_value = states.CLEANWAIT - + filter_target_raid_config_mock.return_value = ( + self.target_raid_config) return_value = task.driver.raid.create_configuration(task) self.assertEqual(states.CLEANWAIT, return_value) @@ -1420,65 +1637,76 @@ task.node.driver_internal_info['target_raid_config']) execute_mock.assert_called_once_with(task, self.clean_step) + @mock.patch.object(raid, 'filter_target_raid_config') @mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True) - def test_create_configuration_skip_root(self, execute_mock): + def test_create_configuration_skip_root(self, execute_mock, + filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: execute_mock.return_value = states.CLEANWAIT - - return_value = task.driver.raid.create_configuration( - task, create_root_volume=False) - - self.assertEqual(states.CLEANWAIT, return_value) - execute_mock.assert_called_once_with(task, self.clean_step) exp_target_raid_config = { "logical_disks": [ {'size_gb': 200, 'raid_level': 5} ]} + filter_target_raid_config_mock.return_value = ( + exp_target_raid_config) + return_value = task.driver.raid.create_configuration( + task, create_root_volume=False) + self.assertEqual(states.CLEANWAIT, return_value) + execute_mock.assert_called_once_with(task, self.clean_step) self.assertEqual( exp_target_raid_config, task.node.driver_internal_info['target_raid_config']) + @mock.patch.object(raid, 'filter_target_raid_config') @mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True) - def test_create_configuration_skip_nonroot(self, execute_mock): + def test_create_configuration_skip_nonroot(self, execute_mock, + filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: execute_mock.return_value = 
states.CLEANWAIT - - return_value = task.driver.raid.create_configuration( - task, create_nonroot_volumes=False) - - self.assertEqual(states.CLEANWAIT, return_value) - execute_mock.assert_called_once_with(task, self.clean_step) exp_target_raid_config = { "logical_disks": [ {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}, ]} + filter_target_raid_config_mock.return_value = ( + exp_target_raid_config) + return_value = task.driver.raid.create_configuration( + task, create_nonroot_volumes=False) + self.assertEqual(states.CLEANWAIT, return_value) + execute_mock.assert_called_once_with(task, self.clean_step) self.assertEqual( exp_target_raid_config, task.node.driver_internal_info['target_raid_config']) + @mock.patch.object(raid, 'filter_target_raid_config') @mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True) def test_create_configuration_no_target_raid_config_after_skipping( - self, execute_mock): + self, execute_mock, filter_target_raid_config_mock): with task_manager.acquire(self.context, self.node.uuid) as task: + msg = "Node %s has no target RAID configuration" % self.node.uuid + filter_target_raid_config_mock.side_effect = ( + exception.MissingParameterValue(msg)) self.assertRaises( exception.MissingParameterValue, task.driver.raid.create_configuration, task, create_root_volume=False, create_nonroot_volumes=False) - self.assertFalse(execute_mock.called) + @mock.patch.object(raid, 'filter_target_raid_config') @mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True) def test_create_configuration_empty_target_raid_config( - self, execute_mock): + self, execute_mock, filter_target_raid_config_mock): execute_mock.return_value = states.CLEANING self.node.target_raid_config = {} self.node.save() with task_manager.acquire(self.context, self.node.uuid) as task: + msg = "Node %s has no target RAID configuration" % self.node.uuid + filter_target_raid_config_mock.side_effect = ( + exception.MissingParameterValue(msg)) self.assertRaises(exception.MissingParameterValue, task.driver.raid.create_configuration, task) @@ -1793,3 +2021,94 @@ self.assertNotIn('rescue_password', task.node.instance_info) self.assertFalse(mock_clean_ramdisk.called) mock_remove_rescue_net.assert_called_once_with(mock.ANY, task) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'add_rescuing_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(fake.FakeBoot, 'prepare_ramdisk', autospec=True) + @mock.patch.object(fake.FakeBoot, 'clean_up_instance', autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test_agent_rescue_with_smartnic_port( + self, mock_node_power_action, mock_build_agent_opts, + mock_clean_up_instance, mock_prepare_ramdisk, + mock_unconf_tenant_net, mock_add_rescue_net, + power_on_node_if_needed_mock, restore_power_state_mock): + self.config(manage_agent_boot=True, group='agent') + mock_build_agent_opts.return_value = {'ipa-api-url': 'fake-api'} + with task_manager.acquire(self.context, self.node.uuid) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + result = task.driver.rescue.rescue(task) + mock_node_power_action.assert_has_calls( + [mock.call(task, 
states.POWER_OFF), + mock.call(task, states.POWER_ON)]) + mock_clean_up_instance.assert_called_once_with(mock.ANY, task) + mock_unconf_tenant_net.assert_called_once_with(mock.ANY, task) + mock_add_rescue_net.assert_called_once_with(mock.ANY, task) + mock_build_agent_opts.assert_called_once_with(task.node) + mock_prepare_ramdisk.assert_called_once_with( + mock.ANY, task, {'ipa-api-url': 'fake-api'}) + self.assertEqual(states.RESCUEWAIT, result) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'remove_rescuing_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'configure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(fake.FakeBoot, 'prepare_instance', autospec=True) + @mock.patch.object(fake.FakeBoot, 'clean_up_ramdisk', autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test_agent_unrescue_with_smartnic_port( + self, mock_node_power_action, mock_clean_ramdisk, + mock_prepare_instance, mock_conf_tenant_net, + mock_remove_rescue_net, power_on_node_if_needed_mock, + restore_power_state_mock): + self.config(manage_agent_boot=True, group='agent') + with task_manager.acquire(self.context, self.node.uuid) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + result = task.driver.rescue.unrescue(task) + mock_node_power_action.assert_has_calls( + [mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_ON)]) + mock_clean_ramdisk.assert_called_once_with( + mock.ANY, task) + mock_remove_rescue_net.assert_called_once_with(mock.ANY, task) + mock_conf_tenant_net.assert_called_once_with(mock.ANY, task) + mock_prepare_instance.assert_called_once_with(mock.ANY, task) + self.assertEqual(states.ACTIVE, result) + self.assertEqual(2, power_on_node_if_needed_mock.call_count) + restore_power_state_mock.assert_has_calls( + [mock.call(task, states.POWER_OFF), + mock.call(task, states.POWER_OFF)]) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'remove_rescuing_network', + spec_set=True, autospec=True) + @mock.patch.object(fake.FakeBoot, 'clean_up_ramdisk', autospec=True) + def test_agent_rescue_clean_up_smartnic( + self, mock_clean_ramdisk, mock_remove_rescue_net, + power_on_node_if_needed_mock, restore_power_state_mock): + with task_manager.acquire(self.context, self.node.uuid) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + task.driver.rescue.clean_up(task) + self.assertNotIn('rescue_password', task.node.instance_info) + mock_clean_ramdisk.assert_called_once_with( + mock.ANY, task) + mock_remove_rescue_net.assert_called_once_with(mock.ANY, task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_console_utils.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_console_utils.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_console_utils.py 2018-12-19 10:02:33.000000000 +0000 +++
ironic-12.1.0/ironic/tests/unit/drivers/modules/test_console_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -467,13 +467,15 @@ def test_start_socat_console_check_arg_bind_addr_default_ipv4(self): self.config(my_ip='10.0.0.1') args = self._test_start_socat_console_check_arg() - self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr' % + self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr,fork,' + 'max-children=1' % self.info['port'], args) def test_start_socat_console_check_arg_bind_addr_ipv4(self): self.config(socat_address='10.0.0.1', group='console') args = self._test_start_socat_console_check_arg() - self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr' % + self.assertIn('TCP4-LISTEN:%s,bind=10.0.0.1,reuseaddr,fork,' + 'max-children=1' % self.info['port'], args) @mock.patch.object(os.path, 'exists', autospec=True) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_deploy_utils.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_deploy_utils.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_deploy_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_deploy_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -1113,6 +1113,14 @@ result = utils.get_pxe_config_template(self.node) self.assertEqual('bios-template', result) + def test_get_pxe_config_template_per_node(self): + node = obj_utils.create_test_node( + self.context, driver='fake-hardware', + driver_info={"pxe_template": "fake-template"}, + ) + result = utils.get_pxe_config_template(node) + self.assertEqual('fake-template', result) + @mock.patch('time.sleep', lambda sec: None) class OtherFunctionTestCase(db_base.DbTestCase): @@ -1691,6 +1699,7 @@ self.assertEqual(8, task.node.driver_internal_info[ 'disk_erasure_concurrency']) + @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True) @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) @mock.patch.object(utils, 'build_agent_options', autospec=True) @@ -1699,15 +1708,19 @@ def _test_prepare_inband_cleaning( self, add_cleaning_network_mock, build_options_mock, power_mock, prepare_ramdisk_mock, - manage_boot=True): + is_fast_track_mock, manage_boot=True, fast_track=False): build_options_mock.return_value = {'a': 'b'} + is_fast_track_mock.return_value = fast_track with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: self.assertEqual( states.CLEANWAIT, utils.prepare_inband_cleaning(task, manage_boot=manage_boot)) add_cleaning_network_mock.assert_called_once_with(task) - power_mock.assert_called_once_with(task, states.REBOOT) + if not fast_track: + power_mock.assert_called_once_with(task, states.REBOOT) + else: + self.assertFalse(power_mock.called) self.assertEqual(1, task.node.driver_internal_info[ 'agent_erase_devices_iterations']) self.assertIs(True, task.node.driver_internal_info[ @@ -1726,17 +1739,26 @@ def test_prepare_inband_cleaning_manage_boot_false(self): self._test_prepare_inband_cleaning(manage_boot=False) + def test_prepare_inband_cleaning_fast_track(self): + self._test_prepare_inband_cleaning(fast_track=True) + + @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True) @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True) @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.' 
'remove_cleaning_network') @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) def _test_tear_down_inband_cleaning( self, power_mock, remove_cleaning_network_mock, - clean_up_ramdisk_mock, manage_boot=True): + clean_up_ramdisk_mock, is_fast_track_mock, + manage_boot=True, fast_track=False): + is_fast_track_mock.return_value = fast_track with task_manager.acquire( self.context, self.node.uuid, shared=False) as task: utils.tear_down_inband_cleaning(task, manage_boot=manage_boot) - power_mock.assert_called_once_with(task, states.POWER_OFF) + if not fast_track: + power_mock.assert_called_once_with(task, states.POWER_OFF) + else: + self.assertFalse(power_mock.called) remove_cleaning_network_mock.assert_called_once_with(task) if manage_boot: clean_up_ramdisk_mock.assert_called_once_with( @@ -1750,6 +1772,9 @@ def test_tear_down_inband_cleaning_manage_boot_false(self): self._test_tear_down_inband_cleaning(manage_boot=False) + def test_tear_down_inband_cleaning_fast_track(self): + self._test_tear_down_inband_cleaning(fast_track=True) + def test_build_agent_options_conf(self): self.config(api_url='https://api-url', group='conductor') options = utils.build_agent_options(self.node) @@ -1947,9 +1972,10 @@ driver_internal_info=DRV_INTERNAL_INFO_DICT, ) inst_info = utils.get_image_instance_info(node) - self.assertRaises(exception.InvalidParameterValue, - utils.validate_image_properties, self.context, - inst_info, ['kernel', 'ramdisk']) + self.assertRaisesRegex(exception.InvalidParameterValue, + 'HTTPError', + utils.validate_image_properties, self.context, + inst_info, ['kernel', 'ramdisk']) class ValidateParametersTestCase(db_base.DbTestCase): @@ -2259,17 +2285,26 @@ ) instance_info = utils.parse_instance_info(node) self.assertIsNotNone(instance_info['image_source']) - self.assertIsNotNone(instance_info['root_mb']) - self.assertEqual(0, instance_info['swap_mb']) - self.assertEqual(0, instance_info['ephemeral_mb']) + self.assertNotIn('root_mb', instance_info) + self.assertNotIn('ephemeral_mb', instance_info) + self.assertNotIn('swap_mb', instance_info) self.assertIsNone(instance_info['configdrive']) def test_parse_instance_info_whole_disk_image_missing_root(self): + driver_internal_info = dict(DRV_INTERNAL_INFO_DICT) + driver_internal_info['is_whole_disk_image'] = True info = dict(INST_INFO_DICT) del info['root_gb'] - node = obj_utils.create_test_node(self.context, instance_info=info) - self.assertRaises(exception.InvalidParameterValue, - utils.parse_instance_info, node) + node = obj_utils.create_test_node( + self.context, instance_info=info, + driver_internal_info=driver_internal_info + ) + + instance_info = utils.parse_instance_info(node) + self.assertIsNotNone(instance_info['image_source']) + self.assertNotIn('root_mb', instance_info) + self.assertNotIn('ephemeral_mb', instance_info) + self.assertNotIn('swap_mb', instance_info) class TestBuildInstanceInfoForDeploy(db_base.DbTestCase): diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_inspect_utils.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_inspect_utils.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_inspect_utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_inspect_utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -15,6 +15,7 @@ import mock +from oslo_utils import importutils from ironic.common import exception from ironic.conductor import task_manager @@ -23,6 +24,8 @@ from ironic.tests.unit.db import base as db_base from 
ironic.tests.unit.objects import utils as obj_utils +sushy = importutils.try_import('sushy') + @mock.patch('time.sleep', lambda sec: None) class InspectFunctionTestCase(db_base.DbTestCase): @@ -63,3 +66,22 @@ shared=False) as task: utils.create_ports_if_not_exist(task, macs) self.assertEqual(2, log_mock.call_count) + + @mock.patch.object(utils.LOG, 'info', spec_set=True, autospec=True) + @mock.patch.object(objects, 'Port', spec_set=True, autospec=True) + def test_create_ports_if_not_exist_attempts_port_creation_blindly( + self, port_mock, log_info_mock): + macs = {'aa:bb:cc:dd:ee:ff': sushy.STATE_ENABLED, + 'aa:bb:aa:aa:aa:aa': sushy.STATE_DISABLED} + node_id = self.node.id + port_dict1 = {'address': 'aa:bb:cc:dd:ee:ff', 'node_id': node_id} + port_dict2 = {'address': 'aa:bb:aa:aa:aa:aa', 'node_id': node_id} + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + utils.create_ports_if_not_exist( + task, macs, get_mac_address=lambda x: x[0]) + self.assertTrue(log_info_mock.called) + expected_calls = [mock.call(task.context, **port_dict1), + mock.call(task.context, **port_dict2)] + port_mock.assert_has_calls(expected_calls, any_order=True) + self.assertEqual(2, port_mock.return_value.create.call_count) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_ipmitool.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_ipmitool.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_ipmitool.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_ipmitool.py 2019-03-21 20:07:40.000000000 +0000 @@ -779,6 +779,20 @@ self.assertRaises(exception.InvalidParameterValue, ipmi._parse_driver_info, node) + def test__parse_driver_info_ipmi_hex_kg_key(self): + info = dict(INFO_DICT) + info['ipmi_hex_kg_key'] = 'A115023E08E23F7F8DC4BB443A1A75F160763A43' + node = obj_utils.get_test_node(self.context, driver_info=info) + ret = ipmi._parse_driver_info(node) + self.assertEqual(info['ipmi_hex_kg_key'], ret['hex_kg_key']) + + def test__parse_driver_info_ipmi_hex_kg_key_odd_chars(self): + info = dict(INFO_DICT) + info['ipmi_hex_kg_key'] = 'A115023E08E23F7F8DC4BB443A1A75F160763A4' + node = obj_utils.get_test_node(self.context, driver_info=info) + self.assertRaises(exception.InvalidParameterValue, + ipmi._parse_driver_info, node) + def test__parse_driver_info_ipmi_port_valid(self): info = dict(INFO_DICT) info['ipmi_port'] = '623' @@ -822,6 +836,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -847,6 +862,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ], [ @@ -855,6 +871,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'D', 'E', 'F', ]] @@ -884,6 +901,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ], [ @@ -892,6 +910,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'D', 'E', 'F', ]] @@ -923,6 +942,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ], [ @@ -931,6 +951,7 @@ '-H', '127.127.127.127', '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', 
awesome_password_filename, 'D', 'E', 'F', ]] @@ -959,6 +980,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -982,6 +1004,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-R', '12', '-N', '5', '-f', awesome_password_filename, @@ -1017,6 +1040,7 @@ '-I', 'lanplus', '-H', self.info['address'], '-L', self.info['priv_level'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1040,6 +1064,7 @@ '-I', 'lanplus', '-H', self.info['address'], '-L', self.info['priv_level'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1066,6 +1091,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1093,6 +1119,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1127,6 +1154,7 @@ '-T', info['transit_address'], '-b', info['target_channel'], '-t', info['target_address'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1164,6 +1192,7 @@ '-m', info['local_address'], '-b', info['target_channel'], '-t', info['target_address'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1187,6 +1216,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1213,6 +1243,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1236,6 +1267,7 @@ '-L', self.info['priv_level'], '-p', '1623', '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1260,6 +1292,7 @@ '-H', self.info['address'], '-L', self.info['priv_level'], '-U', self.info['username'], + '-v', '-f', awesome_password_filename, 'A', 'B', 'C', ] @@ -1922,15 +1955,34 @@ mock.call(self.info, "chassis bootdev pxe")] mock_exec.assert_has_calls(mock_calls) + @mock.patch.object(driver_utils, 'force_persistent_boot', autospec=True) + @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) + def test_management_interface_no_force_set_boot_device(self, + mock_exec, + mock_force_boot): + mock_exec.return_value = [None, None] + + with task_manager.acquire(self.context, self.node.uuid) as task: + driver_info = task.node.driver_info + driver_info['ipmi_force_boot_device'] = 'False' + task.node.driver_info = driver_info + self.info['force_boot_device'] = 'False' + self.management.set_boot_device(task, boot_devices.PXE) + + mock_calls = [mock.call(self.info, "raw 0x00 0x08 0x03 0x08"), + mock.call(self.info, "chassis bootdev pxe")] + mock_exec.assert_has_calls(mock_calls) + self.assertFalse(mock_force_boot.called) + @mock.patch.object(ipmi, '_exec_ipmitool', autospec=True) def test_management_interface_force_set_boot_device_ok(self, mock_exec): mock_exec.return_value = [None, None] with task_manager.acquire(self.context, self.node.uuid) as task: driver_info = task.node.driver_info - driver_info['ipmi_force_boot_device'] = True + driver_info['ipmi_force_boot_device'] = 'True' task.node.driver_info = driver_info - self.info['force_boot_device'] = True + self.info['force_boot_device'] = 'True' self.management.set_boot_device(task, boot_devices.PXE) task.node.refresh() self.assertIs( @@ -1948,9 +2000,9 @@ with task_manager.acquire(self.context, 
self.node.uuid) as task: driver_info = task.node.driver_info - driver_info['ipmi_force_boot_device'] = True + driver_info['ipmi_force_boot_device'] = 'True' task.node.driver_info = driver_info - self.info['force_boot_device'] = True + self.info['force_boot_device'] = 'True' self.management.set_boot_device(task, boot_devices.PXE, True) self.assertEqual( boot_devices.PXE, @@ -2117,7 +2169,7 @@ def test_get_force_boot_device_persistent(self): with task_manager.acquire(self.context, self.node.uuid) as task: - task.node.driver_info['ipmi_force_boot_device'] = True + task.node.driver_info['ipmi_force_boot_device'] = 'True' task.node.driver_internal_info['persistent_boot_device'] = 'pxe' bootdev = self.management.get_boot_device(task) self.assertEqual('pxe', bootdev['boot_device']) @@ -2442,8 +2494,8 @@ driver_info = ipmi._parse_driver_info(task.node) ipmi_cmd = self.console._get_ipmi_cmd(driver_info, 'pw_file') expected_ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool " - "-H %(address)s -I lanplus -U %(user)s " - "-f pw_file" % + "-I lanplus -H %(address)s -L ADMINISTRATOR " + "-U %(user)s -f pw_file -v" % {'uid': os.getuid(), 'gid': os.getgid(), 'address': driver_info['address'], 'user': driver_info['username']}) @@ -2456,8 +2508,8 @@ driver_info['username'] = None ipmi_cmd = self.console._get_ipmi_cmd(driver_info, 'pw_file') expected_ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool " - "-H %(address)s -I lanplus " - "-f pw_file" % + "-I lanplus -H %(address)s -L ADMINISTRATOR " + "-f pw_file -v" % {'uid': os.getuid(), 'gid': os.getgid(), 'address': driver_info['address']}) self.assertEqual(expected_ipmi_cmd, ipmi_cmd) @@ -2589,8 +2641,9 @@ self.node.uuid) as task: driver_info = ipmi._parse_driver_info(task.node) ipmi_cmd = self.console._get_ipmi_cmd(driver_info, 'pw_file') - expected_ipmi_cmd = ("ipmitool -H %(address)s -I lanplus " - "-U %(user)s -f pw_file" % + expected_ipmi_cmd = ("ipmitool -I lanplus -H %(address)s " + "-L ADMINISTRATOR -U %(user)s " + "-f pw_file -v" % {'address': driver_info['address'], 'user': driver_info['username']}) self.assertEqual(expected_ipmi_cmd, ipmi_cmd) @@ -2601,8 +2654,9 @@ driver_info = ipmi._parse_driver_info(task.node) driver_info['username'] = None ipmi_cmd = self.console._get_ipmi_cmd(driver_info, 'pw_file') - expected_ipmi_cmd = ("ipmitool -H %(address)s -I lanplus " - "-f pw_file" % + expected_ipmi_cmd = ("ipmitool -I lanplus -H %(address)s " + "-L ADMINISTRATOR " + "-f pw_file -v" % {'address': driver_info['address']}) self.assertEqual(expected_ipmi_cmd, ipmi_cmd) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_ipxe.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_ipxe.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_ipxe.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_ipxe.py 2019-03-21 20:07:40.000000000 +0000 @@ -233,7 +233,8 @@ ipxe_use_swift=False, whole_disk_image=False, mode='deploy', - node_boot_mode=None): + node_boot_mode=None, + persistent=False): mock_build_pxe.return_value = {} kernel_label = '%s_kernel' % mode ramdisk_label = '%s_ramdisk' % mode @@ -260,13 +261,14 @@ dhcp_opts = pxe_utils.dhcp_options_for_instance( task, ipxe_enabled=True) task.driver.boot.prepare_ramdisk(task, {'foo': 'bar'}) - mock_deploy_img_info.assert_called_once_with(task.node, mode=mode) + mock_deploy_img_info.assert_called_once_with(task.node, mode=mode, + ipxe_enabled=True) provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts) if self.node.provision_state == states.DEPLOYING: 
get_boot_mode_mock.assert_called_once_with(task) set_boot_device_mock.assert_called_once_with(task, boot_devices.PXE, - persistent=False) + persistent=persistent) if ipxe_use_swift: if whole_disk_image: self.assertFalse(mock_cache_r_k.called) @@ -306,6 +308,80 @@ self.node.save() self._test_prepare_ramdisk() + def test_prepare_ramdisk_force_persistent_boot_device_true(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'True' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_bool_true(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = True + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_sloppy_true(self): + for value in ['true', 't', '1', 'on', 'y', 'YES']: + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = value + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_false(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'False' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk() + + def test_prepare_ramdisk_force_persistent_boot_device_bool_false(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = False + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + + def test_prepare_ramdisk_force_persistent_boot_device_sloppy_false(self): + for value in ['false', 'f', '0', 'off', 'n', 'NO', 'yxz']: + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = value + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk() + + def test_prepare_ramdisk_force_persistent_boot_device_default(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Default' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + + def test_prepare_ramdisk_force_persistent_boot_device_always(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Always' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_never(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Never' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + def test_prepare_ramdisk_rescue(self): self.node.provision_state = states.RESCUING self.node.save() @@ -735,25 +811,6 @@ set_boot_device_mock.assert_called_once_with(task, boot_devices.DISK, persistent=True) - - @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) - 
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True) - def test_is_force_persistent_boot_device_enabled( - self, clean_up_pxe_config_mock, set_boot_device_mock): - with task_manager.acquire(self.context, self.node.uuid) as task: - instance_info = task.node.instance_info - instance_info['capabilities'] = {'boot_option': 'local'} - task.node.instance_info = instance_info - task.node.save() - task.driver.boot.prepare_instance(task) - clean_up_pxe_config_mock.assert_called_once_with( - task, ipxe_enabled=True) - driver_info = task.node.driver_info - driver_info['force_persistent _boot_device'] = True - task.node.driver_info = driver_info - set_boot_device_mock.assert_called_once_with(task, - boot_devices.DISK, - persistent=True) @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py 2019-03-21 20:07:40.000000000 +0000 @@ -129,6 +129,18 @@ self.assertFalse(get_image_mb_mock.called) @mock.patch.object(disk_utils, 'get_image_mb', autospec=True) + def test_check_image_size_whole_disk_image_no_root(self, + get_image_mb_mock): + get_image_mb_mock.return_value = 1025 + with task_manager.acquire(self.context, self.node.uuid, + shared=False) as task: + del task.node.instance_info['root_gb'] + task.node.driver_internal_info['is_whole_disk_image'] = True + # No error for whole disk images + iscsi_deploy.check_image_size(task) + self.assertFalse(get_image_mb_mock.called) + + @mock.patch.object(disk_utils, 'get_image_mb', autospec=True) def test_check_image_size_fails(self, get_image_mb_mock): get_image_mb_mock.return_value = 1025 with task_manager.acquire(self.context, self.node.uuid, @@ -452,6 +464,18 @@ self.assertEqual('target-iqn', ret_val['iqn']) self.assertEqual('My configdrive', ret_val['configdrive']) + def test_get_deploy_info_whole_disk_image_no_root(self): + instance_info = self.node.instance_info + instance_info['configdrive'] = 'My configdrive' + del instance_info['root_gb'] + self.node.instance_info = instance_info + self.node.driver_internal_info['is_whole_disk_image'] = True + kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn'} + ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs) + self.assertEqual('1.1.1.1', ret_val['address']) + self.assertEqual('target-iqn', ret_val['iqn']) + self.assertEqual('My configdrive', ret_val['configdrive']) + @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True) def test_do_agent_iscsi_deploy_okay(self, continue_deploy_mock): agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient) @@ -710,6 +734,53 @@ task.driver.storage, task) self.assertEqual(2, storage_should_write_mock.call_count) + @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes', + autospec=True) + @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info') + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk') + @mock.patch.object(deploy_utils, 'build_agent_options') + @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy') + @mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network', + spec_set=True, autospec=True) + 
@mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'validate', + spec_set=True, autospec=True) + def test_prepare_fast_track( + self, validate_net_mock, + unconfigure_tenant_net_mock, add_provisioning_net_mock, + build_instance_info_mock, build_options_mock, + pxe_prepare_ramdisk_mock, storage_driver_info_mock, + storage_attach_volumes_mock, is_fast_track_mock): + # TODO(TheJulia): We should revisit this test. Smartnic + # support didn't wire in tightly on testing for power in + # these tests, and largely fast_track impacts power operations. + node = self.node + node.network_interface = 'flat' + node.save() + is_fast_track_mock.return_value = True + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.node.provision_state = states.DEPLOYING + build_options_mock.return_value = {'a': 'b'} + task.driver.deploy.prepare(task) + storage_driver_info_mock.assert_called_once_with(task) + # NOTE: Validate is the primary difference between agent/iscsi + self.assertFalse(validate_net_mock.called) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) + unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task) + self.assertTrue(storage_attach_volumes_mock.called) + self.assertFalse(build_instance_info_mock.called) + # TODO(TheJulia): We should likely consider executing the + # next two methods at some point in order to facilitate + # continuity. While not explicitly required for this feature + # to work, reboots as part of deployment would need the ramdisk + # present and ready. + self.assertFalse(build_options_mock.called) + self.assertFalse(pxe_prepare_ramdisk_mock.called) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True) @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True) @@ -763,6 +834,36 @@ self.assertEqual(2, mock_node_power_action.call_count) self.assertEqual(states.DEPLOYING, task.node.provision_state) + @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True) + @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True) + @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'continue_deploy', + autospec=True) + @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True) + @mock.patch('ironic.conductor.utils.node_power_action', autospec=True) + def test_deploy_fast_track(self, power_mock, mock_pxe_instance, + mock_is_fast_track, continue_deploy_mock, + cache_image_mock, check_image_size_mock): + mock_is_fast_track.return_value = True + self.node.target_provision_state = states.ACTIVE + self.node.provision_state = states.DEPLOYING + i_info = self.node.driver_internal_info + i_info['agent_url'] = 'http://1.2.3.4:1234' + self.node.driver_internal_info = i_info + self.node.save() + with task_manager.acquire( + self.context, self.node['uuid'], shared=False) as task: + task.driver.deploy.deploy(task) + self.assertFalse(power_mock.called) + self.assertFalse(mock_pxe_instance.called) + task.node.refresh() + self.assertEqual(states.DEPLOYWAIT, task.node.provision_state) + self.assertEqual(states.ACTIVE, + task.node.target_provision_state) + cache_image_mock.assert_called_with(mock.ANY, task.node) + check_image_size_mock.assert_called_with(task) + continue_deploy_mock.assert_called_with(mock.ANY, task) + 
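# The fast-track deploy path asserted above can be reduced to roughly
# the following (a hypothetical sketch of ISCSIDeploy.deploy(); the
# names mirror the mocks in this test, the real code may differ):
#
#     def deploy(self, task):
#         if manager_utils.is_fast_track(task):
#             # The agent from an earlier step is still running at
#             # driver_internal_info['agent_url']: skip the power
#             # action and PXE prep and go straight to image transfer.
#             deploy_utils.cache_instance_image(task.context, task.node)
#             iscsi_deploy.check_image_size(task)
#             return self.continue_deploy(task)
#         ...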
@mock.patch.object(noop_storage.NoopStorage, 'detach_volumes', autospec=True) @mock.patch.object(flat_network.FlatNetwork, @@ -943,6 +1044,124 @@ set_boot_device_mock.assert_called_once_with( mock.ANY, task, device=boot_devices.DISK, persistent=True) + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'attach_volumes', + autospec=True) + @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info', + autospec=True) + @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True) + @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True) + @mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + def test_prepare_node_deploying_with_smartnic_port( + self, unconfigure_tenant_net_mock, add_provisioning_net_mock, + mock_prepare_ramdisk, mock_agent_options, + storage_driver_info_mock, storage_attach_volumes_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + mock_agent_options.return_value = {'c': 'd'} + with task_manager.acquire(self.context, self.node.uuid) as task: + task.node.provision_state = states.DEPLOYING + power_on_node_if_needed_mock.return_value = states.POWER_OFF + task.driver.deploy.prepare(task) + mock_agent_options.assert_called_once_with(task.node) + mock_prepare_ramdisk.assert_called_once_with( + task.driver.boot, task, {'c': 'd'}) + add_provisioning_net_mock.assert_called_once_with(mock.ANY, task) + unconfigure_tenant_net_mock.assert_called_once_with(mock.ANY, task) + storage_driver_info_mock.assert_called_once_with(task) + storage_attach_volumes_mock.assert_called_once_with( + task.driver.storage, task) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'detach_volumes', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'remove_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'unconfigure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + def test_tear_down_with_smartnic_port( + self, node_power_action_mock, unconfigure_tenant_nets_mock, + remove_provisioning_net_mock, storage_detach_volumes_mock, + power_on_node_if_needed_mock, restore_power_state_mock): + obj_utils.create_test_volume_target( + self.context, node_id=self.node.id) + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + state = task.driver.deploy.tear_down(task) + self.assertEqual(state, states.DELETED) + node_power_action_mock.assert_called_once_with( + task, states.POWER_OFF) + unconfigure_tenant_nets_mock.assert_called_once_with( + mock.ANY, task) + remove_provisioning_net_mock.assert_called_once_with( + mock.ANY, task) + storage_detach_volumes_mock.assert_called_once_with( + task.driver.storage, task) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, 
states.POWER_OFF) + # Verify no volumes exist for new task instances. + with task_manager.acquire(self.context, + self.node.uuid, shared=False) as task: + self.assertEqual(0, len(task.volume_targets)) + + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True) + @mock.patch.object(noop_storage.NoopStorage, 'should_write_image', + autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'configure_tenant_networks', + spec_set=True, autospec=True) + @mock.patch.object(flat_network.FlatNetwork, + 'remove_provisioning_network', + spec_set=True, autospec=True) + @mock.patch.object(pxe.PXEBoot, + 'prepare_instance', + spec_set=True, autospec=True) + @mock.patch.object(manager_utils, 'node_power_action', autospec=True) + @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True) + @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True) + def test_deploy_storage_check_write_image_false_with_smartnic_port( + self, mock_cache_instance_image, mock_check_image_size, + mock_node_power_action, mock_prepare_instance, + mock_remove_network, mock_tenant_network, mock_write, + power_on_node_if_needed_mock, restore_power_state_mock): + mock_write.return_value = False + self.node.provision_state = states.DEPLOYING + self.node.deploy_step = { + 'step': 'deploy', 'priority': 50, 'interface': 'deploy'} + self.node.save() + with task_manager.acquire( + self.context, self.node.uuid, shared=False) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + ret = task.driver.deploy.deploy(task) + self.assertIsNone(ret) + self.assertFalse(mock_cache_instance_image.called) + self.assertFalse(mock_check_image_size.called) + mock_remove_network.assert_called_once_with(mock.ANY, task) + mock_tenant_network.assert_called_once_with(mock.ANY, task) + mock_prepare_instance.assert_called_once_with(mock.ANY, task) + self.assertEqual(2, mock_node_power_action.call_count) + self.assertEqual(states.DEPLOYING, task.node.provision_state) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + # Cleanup of iscsi_deploy with pxe boot interface class CleanUpFullFlowTestCase(db_base.DbTestCase): diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/test_pxe.py ironic-12.1.0/ironic/tests/unit/drivers/modules/test_pxe.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/test_pxe.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/test_pxe.py 2019-03-21 20:07:40.000000000 +0000 @@ -231,7 +231,8 @@ ipxe_use_swift=False, whole_disk_image=False, mode='deploy', - node_boot_mode=None): + node_boot_mode=None, + persistent=False): mock_build_pxe.return_value = {} kernel_label = '%s_kernel' % mode ramdisk_label = '%s_ramdisk' % mode @@ -264,7 +265,7 @@ get_boot_mode_mock.assert_called_once_with(task) set_boot_device_mock.assert_called_once_with(task, boot_devices.PXE, - persistent=False) + persistent=persistent) if ipxe_use_swift: if whole_disk_image: self.assertFalse(mock_cache_r_k.called) @@ -305,6 +306,80 @@ self.node.save() self._test_prepare_ramdisk() + def test_prepare_ramdisk_force_persistent_boot_device_true(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'True' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def 
test_prepare_ramdisk_force_persistent_boot_device_bool_true(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = True + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_sloppy_true(self): + for value in ['true', 't', '1', 'on', 'y', 'YES']: + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = value + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_false(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'False' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk() + + def test_prepare_ramdisk_force_persistent_boot_device_bool_false(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = False + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + + def test_prepare_ramdisk_force_persistent_boot_device_sloppy_false(self): + for value in ['false', 'f', '0', 'off', 'n', 'NO', 'yxz']: + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = value + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk() + + def test_prepare_ramdisk_force_persistent_boot_device_default(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Default' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + + def test_prepare_ramdisk_force_persistent_boot_device_always(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Always' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=True) + + def test_prepare_ramdisk_force_persistent_boot_device_never(self): + self.node.provision_state = states.DEPLOYING + driver_info = self.node.driver_info + driver_info['force_persistent_boot_device'] = 'Never' + self.node.driver_info = driver_info + self.node.save() + self._test_prepare_ramdisk(persistent=False) + def test_prepare_ramdisk_rescue(self): self.node.provision_state = states.RESCUING self.node.save() @@ -740,25 +815,6 @@ @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True) - def test_is_force_persistent_boot_device_enabled( - self, clean_up_pxe_config_mock, set_boot_device_mock): - with task_manager.acquire(self.context, self.node.uuid) as task: - instance_info = task.node.instance_info - instance_info['capabilities'] = {'boot_option': 'local'} - task.node.instance_info = instance_info - task.node.save() - task.driver.boot.prepare_instance(task) - clean_up_pxe_config_mock.assert_called_once_with( - task, ipxe_enabled=CONF.pxe.ipxe_enabled) - driver_info = task.node.driver_info - driver_info['force_persistent _boot_device'] = True - task.node.driver_info = driver_info - set_boot_device_mock.assert_called_once_with(task, - 
boot_devices.DISK, - persistent=True) - - @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True) - @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True) def test_prepare_instance_localboot_active(self, clean_up_pxe_config_mock, set_boot_device_mock): self.node.provision_state = states.ACTIVE @@ -1014,6 +1070,44 @@ task.driver.deploy.validate(task) mock_validate.assert_called_once_with(mock.ANY, task) + @mock.patch.object(manager_utils, 'restore_power_state_if_needed', + autospec=True) + @mock.patch.object(manager_utils, 'power_on_node_if_needed', + autospec=True) + @mock.patch.object(pxe.LOG, 'warning', autospec=True) + @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True) + @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True) + @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True) + @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True) + def test_deploy_with_smartnic_port( + self, mock_image_info, mock_cache, + mock_dhcp_factory, mock_switch_config, mock_warning, + power_on_node_if_needed_mock, restore_power_state_mock): + image_info = {'kernel': ('', '/path/to/kernel'), + 'ramdisk': ('', '/path/to/ramdisk')} + mock_image_info.return_value = image_info + i_info = self.node.instance_info + i_info.update({'capabilities': {'boot_option': 'ramdisk'}}) + self.node.instance_info = i_info + self.node.save() + with task_manager.acquire(self.context, self.node.uuid) as task: + power_on_node_if_needed_mock.return_value = states.POWER_OFF + self.assertIsNone(task.driver.deploy.deploy(task)) + mock_image_info.assert_called_once_with(task) + mock_cache.assert_called_once_with( + task, image_info, ipxe_enabled=CONF.pxe.ipxe_enabled) + self.assertFalse(mock_warning.called) + power_on_node_if_needed_mock.assert_called_once_with(task) + restore_power_state_mock.assert_called_once_with( + task, states.POWER_OFF) + i_info['configdrive'] = 'meow' + self.node.instance_info = i_info + self.node.save() + mock_warning.reset_mock() + with task_manager.acquire(self.context, self.node.uuid) as task: + self.assertIsNone(task.driver.deploy.deploy(task)) + self.assertTrue(mock_warning.called) + class PXEValidateRescueTestCase(db_base.DbTestCase): diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/xclarity/test_management.py ironic-12.1.0/ironic/tests/unit/drivers/modules/xclarity/test_management.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/xclarity/test_management.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/xclarity/test_management.py 2019-03-21 20:07:40.000000000 +0000 @@ -118,3 +118,40 @@ exception.XClarityError, task.driver.management.get_boot_device, task) + + def test_get_boot_device_current_none(self, mock_xc_client): + with task_manager.acquire(self.context, self.node.uuid) as task: + reference = {'boot_device': None, 'persistent': None} + mock_xc_client.return_value.get_node_all_boot_info.return_value = \ + { + 'bootOrder': { + 'bootOrderList': [{ + 'fakeBootOrderDevices': [] + }] + } + } + expected_boot_device = task.driver.management.get_boot_device( + task=task) + self.assertEqual(reference, expected_boot_device) + + def test_get_boot_device_primary_none(self, mock_xc_client): + with task_manager.acquire(self.context, self.node.uuid) as task: + reference = {'boot_device': None, 'persistent': None} + mock_xc_client.return_value.get_node_all_boot_info.return_value = \ + { + 'bootOrder': { + 'bootOrderList': [ + { + 'bootType': 'SingleUse', + 
'CurrentBootOrderDevices': [] + }, + { + 'bootType': 'Permanent', + 'CurrentBootOrderDevices': [] + }, + ] + } + } + expected_boot_device = task.driver.management.get_boot_device( + task=task) + self.assertEqual(reference, expected_boot_device) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/modules/xclarity/test_power.py ironic-12.1.0/ironic/tests/unit/drivers/modules/xclarity/test_power.py --- ironic-12.0.0/ironic/tests/unit/drivers/modules/xclarity/test_power.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/modules/xclarity/test_power.py 2019-03-21 20:07:40.000000000 +0000 @@ -72,7 +72,9 @@ result = power.XClarityPower.get_power_state(task) self.assertEqual(STATE_POWER_ON, result) - def test_get_power_state_fail(self, mock_xc_client): + @mock.patch.object(common, 'translate_xclarity_power_state', + spec_set=True, autospec=True) + def test_get_power_state_fail(self, mock_translate_state, mock_xc_client): with task_manager.acquire(self.context, self.node.uuid) as task: xclarity_client_exceptions.XClarityError = Exception sys.modules['xclarity_client.exceptions'] = ( @@ -85,6 +87,7 @@ self.assertRaises(exception.XClarityError, task.driver.power.get_power_state, task) + self.assertFalse(mock_translate_state.called) @mock.patch.object(power.LOG, 'warning') @mock.patch.object(power.XClarityPower, 'get_power_state', diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/test_base.py ironic-12.1.0/ironic/tests/unit/drivers/test_base.py --- ironic-12.0.0/ironic/tests/unit/drivers/test_base.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/test_base.py 2019-03-21 20:07:40.000000000 +0000 @@ -344,6 +344,111 @@ method_args_mock.assert_called_once_with(task_mock, **args) +class DeployStepDecoratorTestCase(base.TestCase): + + def setUp(self): + super(DeployStepDecoratorTestCase, self).setUp() + method_mock = mock.MagicMock() + del method_mock._is_deploy_step + del method_mock._deploy_step_priority + del method_mock._deploy_step_argsinfo + self.method = method_mock + + def test_deploy_step_priority_only(self): + d = driver_base.deploy_step(priority=10) + d(self.method) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(10, self.method._deploy_step_priority) + self.assertIsNone(self.method._deploy_step_argsinfo) + + def test_deploy_step_all_args(self): + argsinfo = {'arg1': {'description': 'desc1', + 'required': True}} + d = driver_base.deploy_step(priority=0, argsinfo=argsinfo) + d(self.method) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(0, self.method._deploy_step_priority) + self.assertEqual(argsinfo, self.method._deploy_step_argsinfo) + + def test_deploy_step_bad_priority(self): + d = driver_base.deploy_step(priority='hi') + self.assertRaisesRegex(exception.InvalidParameterValue, 'priority', + d, self.method) + self.assertTrue(self.method._is_deploy_step) + self.assertFalse(hasattr(self.method, '_deploy_step_priority')) + self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo')) + + @mock.patch.object(driver_base, '_validate_argsinfo', spec_set=True, + autospec=True) + def test_deploy_step_bad_argsinfo(self, mock_valid): + mock_valid.side_effect = exception.InvalidParameterValue('bad') + d = driver_base.deploy_step(priority=0, argsinfo=100) + self.assertRaises(exception.InvalidParameterValue, d, self.method) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(0, self.method._deploy_step_priority) + self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo')) + + +class 
DeployAndCleanStepDecoratorTestCase(base.TestCase): + + def setUp(self): + super(DeployAndCleanStepDecoratorTestCase, self).setUp() + method_mock = mock.MagicMock() + del method_mock._is_deploy_step + del method_mock._deploy_step_priority + del method_mock._deploy_step_argsinfo + del method_mock._is_clean_step + del method_mock._clean_step_priority + del method_mock._clean_step_abortable + del method_mock._clean_step_argsinfo + self.method = method_mock + + def test_deploy_and_clean_step_priority_only(self): + dd = driver_base.deploy_step(priority=10) + dc = driver_base.clean_step(priority=11) + dd(dc(self.method)) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(10, self.method._deploy_step_priority) + self.assertIsNone(self.method._deploy_step_argsinfo) + self.assertTrue(self.method._is_clean_step) + self.assertEqual(11, self.method._clean_step_priority) + self.assertFalse(self.method._clean_step_abortable) + self.assertIsNone(self.method._clean_step_argsinfo) + + def test_deploy_and_clean_step_all_args(self): + dargsinfo = {'arg1': {'description': 'desc1', + 'required': True}} + cargsinfo = {'arg2': {'description': 'desc2', + 'required': False}} + dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo) + dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo) + dd(dc(self.method)) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(0, self.method._deploy_step_priority) + self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo) + self.assertTrue(self.method._is_clean_step) + self.assertEqual(0, self.method._clean_step_priority) + self.assertFalse(self.method._clean_step_abortable) + self.assertEqual(cargsinfo, self.method._clean_step_argsinfo) + + def test_clean_and_deploy_step_all_args(self): + # Opposite ordering, should make no difference. 
+ dargsinfo = {'arg1': {'description': 'desc1', + 'required': True}} + cargsinfo = {'arg2': {'description': 'desc2', + 'required': False}} + dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo) + dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo) + dc(dd(self.method)) + self.assertTrue(self.method._is_deploy_step) + self.assertEqual(0, self.method._deploy_step_priority) + self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo) + self.assertTrue(self.method._is_clean_step) + self.assertEqual(0, self.method._clean_step_priority) + self.assertFalse(self.method._clean_step_abortable) + self.assertEqual(cargsinfo, self.method._clean_step_argsinfo) + + class DeployStepTestCase(base.TestCase): def test_get_and_execute_deploy_steps(self): # Create a fake Driver class, create some deploy steps, make sure @@ -467,7 +572,9 @@ class MyRAIDInterface(driver_base.RAIDInterface): - def create_configuration(self, task): + def create_configuration(self, task, + create_root_volume=True, + create_nonroot_volumes=True): pass def delete_configuration(self, task): @@ -539,9 +646,11 @@ def validate(self, task): pass + @driver_base.cache_bios_settings def apply_configuration(self, task, settings): return "return_value_apply_configuration" + @driver_base.cache_bios_settings def factory_reset(self, task): return "return_value_factory_reset" diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/test_ibmc.py ironic-12.1.0/ironic/tests/unit/drivers/test_ibmc.py --- ironic-12.0.0/ironic/tests/unit/drivers/test_ibmc.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/test_ibmc.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,47 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
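# A minimal sketch of the deploy_step decorator contract that the
# DeployStepDecoratorTestCase above pins down: the function is tagged with
# _is_deploy_step before priority/argsinfo are validated, so a bad value
# leaves the tag set but the corresponding attribute unset. Apart from the
# attribute names, the 'priority' error message and _validate_argsinfo (all
# taken from the tests), the body is an assumption, not ironic's actual code.
from ironic.common import exception

def _validate_argsinfo(argsinfo):  # stand-in for driver_base's own helper
    if not isinstance(argsinfo, dict):
        raise exception.InvalidParameterValue('argsinfo must be a dictionary')

def deploy_step(priority, argsinfo=None):
    def decorator(func):
        func._is_deploy_step = True
        if not isinstance(priority, int):
            raise exception.InvalidParameterValue(
                '"priority" must be an integer value instead of %r'
                % priority)
        func._deploy_step_priority = priority
        if argsinfo is not None:
            # Raises InvalidParameterValue on malformed argsinfo.
            _validate_argsinfo(argsinfo)
        func._deploy_step_argsinfo = argsinfo
        return func
    return decorator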
+ +# Version 1.0.0 + +from ironic.conductor import task_manager +from ironic.drivers.modules.ibmc import management as ibmc_mgmt +from ironic.drivers.modules.ibmc import power as ibmc_power +from ironic.drivers.modules.ibmc import vendor as ibmc_vendor +from ironic.drivers.modules import iscsi_deploy +from ironic.drivers.modules import noop +from ironic.drivers.modules import pxe +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.objects import utils as obj_utils + + +class IBMCHardwareTestCase(db_base.DbTestCase): + + def setUp(self): + super(IBMCHardwareTestCase, self).setUp() + self.config(enabled_hardware_types=['ibmc'], + enabled_power_interfaces=['ibmc'], + enabled_management_interfaces=['ibmc'], + enabled_vendor_interfaces=['ibmc']) + + def test_default_interfaces(self): + node = obj_utils.create_test_node(self.context, driver='ibmc') + with task_manager.acquire(self.context, node.id) as task: + self.assertIsInstance(task.driver.management, + ibmc_mgmt.IBMCManagement) + self.assertIsInstance(task.driver.power, + ibmc_power.IBMCPower) + self.assertIsInstance(task.driver.boot, pxe.PXEBoot) + self.assertIsInstance(task.driver.deploy, iscsi_deploy.ISCSIDeploy) + self.assertIsInstance(task.driver.console, noop.NoConsole) + self.assertIsInstance(task.driver.raid, noop.NoRAID) + self.assertIsInstance(task.driver.vendor, ibmc_vendor.IBMCVendor) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/test_ilo.py ironic-12.1.0/ironic/tests/unit/drivers/test_ilo.py --- ironic-12.0.0/ironic/tests/unit/drivers/test_ilo.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/test_ilo.py 2019-03-21 20:07:40.000000000 +0000 @@ -19,6 +19,7 @@ from ironic.conductor import task_manager from ironic.drivers import ilo from ironic.drivers.modules import agent +from ironic.drivers.modules.ilo import raid from ironic.drivers.modules import inspector from ironic.drivers.modules import iscsi_deploy from ironic.drivers.modules import noop @@ -165,3 +166,47 @@ agent.AgentDeploy) self.assertIsInstance(task.driver.raid, agent.AgentRAID) + + +class Ilo5HardwareTestCase(db_base.DbTestCase): + + def setUp(self): + super(Ilo5HardwareTestCase, self).setUp() + self.config(enabled_hardware_types=['ilo5'], + enabled_boot_interfaces=['ilo-virtual-media', 'ilo-pxe'], + enabled_console_interfaces=['ilo'], + enabled_deploy_interfaces=['iscsi', 'direct'], + enabled_inspect_interfaces=['ilo'], + enabled_management_interfaces=['ilo'], + enabled_power_interfaces=['ilo'], + enabled_raid_interfaces=['ilo5'], + enabled_rescue_interfaces=['no-rescue', 'agent'], + enabled_vendor_interfaces=['ilo', 'no-vendor']) + + def test_default_interfaces(self): + node = obj_utils.create_test_node(self.context, driver='ilo5') + with task_manager.acquire(self.context, node.id) as task: + self.assertIsInstance(task.driver.raid, raid.Ilo5RAID) + + def test_override_with_no_raid(self): + self.config(enabled_raid_interfaces=['no-raid', 'ilo5']) + node = obj_utils.create_test_node(self.context, driver='ilo5', + raid_interface='no-raid') + with task_manager.acquire(self.context, node.id) as task: + self.assertIsInstance(task.driver.raid, noop.NoRAID) + self.assertIsInstance(task.driver.boot, + ilo.boot.IloVirtualMediaBoot) + self.assertIsInstance(task.driver.console, + ilo.console.IloConsoleInterface) + self.assertIsInstance(task.driver.deploy, + iscsi_deploy.ISCSIDeploy) + self.assertIsInstance(task.driver.inspect, + ilo.inspect.IloInspect) + self.assertIsInstance(task.driver.management, + 
ilo.management.IloManagement) + self.assertIsInstance(task.driver.power, + ilo.power.IloPower) + self.assertIsInstance(task.driver.rescue, + noop.NoRescue) + self.assertIsInstance(task.driver.vendor, + ilo.vendor.VendorPassthru) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/third_party_driver_mock_specs.py ironic-12.1.0/ironic/tests/unit/drivers/third_party_driver_mock_specs.py --- ironic-12.0.0/ironic/tests/unit/drivers/third_party_driver_mock_specs.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/third_party_driver_mock_specs.py 2019-03-21 20:07:40.000000000 +0000 @@ -142,6 +142,9 @@ 'PROCESSOR_ARCH_ARM', 'PROCESSOR_ARCH_MIPS', 'PROCESSOR_ARCH_OEM', + 'STATE_ENABLED', + 'STATE_DISABLED', + 'STATE_ABSENT', ) SUSHY_AUTH_SPEC = ( @@ -167,3 +170,10 @@ 'STATE_POWER_OFF', 'STATE_POWER_ON', ) + +# python-ibmcclient +IBMCCLIENT_SPEC = ( + 'connect', + 'exceptions', + 'constants', +) diff -Nru ironic-12.0.0/ironic/tests/unit/drivers/third_party_driver_mocks.py ironic-12.1.0/ironic/tests/unit/drivers/third_party_driver_mocks.py --- ironic-12.0.0/ironic/tests/unit/drivers/third_party_driver_mocks.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/drivers/third_party_driver_mocks.py 2019-03-21 20:07:40.000000000 +0000 @@ -26,6 +26,7 @@ - pysnmp - scciclient - python-dracclient +- python-ibmcclient """ import sys @@ -56,6 +57,8 @@ sys.modules['proliantutils.utils'] = proliantutils.utils proliantutils.utils.process_firmware_image = mock.MagicMock() proliantutils.exception.IloError = type('IloError', (Exception,), {}) + proliantutils.exception.IloLogicalDriveNotFoundError = ( + type('IloLogicalDriveNotFoundError', (Exception,), {})) command_exception = type('IloCommandNotSupportedError', (Exception,), {}) proliantutils.exception.IloCommandNotSupportedError = command_exception proliantutils.exception.IloCommandNotSupportedInBiosError = type( @@ -224,6 +227,9 @@ PROCESSOR_ARCH_ARM='ARM', PROCESSOR_ARCH_MIPS='MIPS', PROCESSOR_ARCH_OEM='OEM-defined', + STATE_ENABLED='enabled', + STATE_DISABLED='disabled', + STATE_ABSENT='absent', ) sys.modules['sushy'] = sushy @@ -258,3 +264,45 @@ xclarity_client.exceptions.XClarityException = type('XClarityException', (Exception,), {}) sys.modules['xclarity_client.models'] = xclarity_client.models + + +# python-ibmcclient mocks for HUAWEI rack server driver +ibmc_client = importutils.try_import('ibmc_client') +if not ibmc_client: + ibmc_client = mock.MagicMock(spec_set=mock_specs.IBMCCLIENT_SPEC) + sys.modules['ibmc_client'] = ibmc_client + + # Mock iBMC client exceptions + exceptions = mock.MagicMock() + exceptions.ConnectionError = ( + type('ConnectionError', (MockKwargsException,), {})) + exceptions.IBMCClientError = ( + type('IBMCClientError', (MockKwargsException,), {})) + sys.modules['ibmc_client.exceptions'] = exceptions + + # Mock iBMC client constants + constants = mock.MagicMock( + SYSTEM_POWER_STATE_ON='On', + SYSTEM_POWER_STATE_OFF='Off', + BOOT_SOURCE_TARGET_NONE='None', + BOOT_SOURCE_TARGET_PXE='Pxe', + BOOT_SOURCE_TARGET_FLOPPY='Floppy', + BOOT_SOURCE_TARGET_CD='Cd', + BOOT_SOURCE_TARGET_HDD='Hdd', + BOOT_SOURCE_TARGET_BIOS_SETUP='BiosSetup', + BOOT_SOURCE_MODE_BIOS='Legacy', + BOOT_SOURCE_MODE_UEFI='UEFI', + BOOT_SOURCE_ENABLED_ONCE='Once', + BOOT_SOURCE_ENABLED_CONTINUOUS='Continuous', + BOOT_SOURCE_ENABLED_DISABLED='Disabled', + RESET_NMI='Nmi', + RESET_ON='On', + RESET_FORCE_OFF='ForceOff', + RESET_GRACEFUL_SHUTDOWN='GracefulShutdown', + RESET_FORCE_RESTART='ForceRestart',
RESET_FORCE_POWER_CYCLE='ForcePowerCycle') + sys.modules['ibmc_client.constants'] = constants + + if 'ironic.drivers.modules.ibmc' in sys.modules: + six.moves.reload_module( + sys.modules['ironic.drivers.modules.ibmc']) diff -Nru ironic-12.0.0/ironic/tests/unit/objects/test_allocation.py ironic-12.1.0/ironic/tests/unit/objects/test_allocation.py --- ironic-12.0.0/ironic/tests/unit/objects/test_allocation.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/test_allocation.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,144 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +import mock +from testtools import matchers + +from ironic.common import exception +from ironic import objects +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + + +class TestAllocationObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn): + + def setUp(self): + super(TestAllocationObject, self).setUp() + self.fake_allocation = db_utils.get_test_allocation(name='host1') + + def test_get_by_id(self): + allocation_id = self.fake_allocation['id'] + with mock.patch.object(self.dbapi, 'get_allocation_by_id', + autospec=True) as mock_get_allocation: + mock_get_allocation.return_value = self.fake_allocation + + allocation = objects.Allocation.get(self.context, allocation_id) + + mock_get_allocation.assert_called_once_with(allocation_id) + self.assertEqual(self.context, allocation._context) + + def test_get_by_uuid(self): + uuid = self.fake_allocation['uuid'] + with mock.patch.object(self.dbapi, 'get_allocation_by_uuid', + autospec=True) as mock_get_allocation: + mock_get_allocation.return_value = self.fake_allocation + + allocation = objects.Allocation.get(self.context, uuid) + + mock_get_allocation.assert_called_once_with(uuid) + self.assertEqual(self.context, allocation._context) + + def test_get_by_name(self): + name = self.fake_allocation['name'] + with mock.patch.object(self.dbapi, 'get_allocation_by_name', + autospec=True) as mock_get_allocation: + mock_get_allocation.return_value = self.fake_allocation + allocation = objects.Allocation.get(self.context, name) + + mock_get_allocation.assert_called_once_with(name) + self.assertEqual(self.context, allocation._context) + + def test_get_bad_id_and_uuid_and_name(self): + self.assertRaises(exception.InvalidIdentity, + objects.Allocation.get, + self.context, + 'not:a_name_or_uuid') + + def test_create(self): + allocation = objects.Allocation(self.context, **self.fake_allocation) + with mock.patch.object(self.dbapi, 'create_allocation', + autospec=True) as mock_create_allocation: + mock_create_allocation.return_value = ( + db_utils.get_test_allocation()) + + allocation.create() + + args, _kwargs = mock_create_allocation.call_args + self.assertEqual(objects.Allocation.VERSION, args[0]['version']) + + def test_save(self): + uuid = self.fake_allocation['uuid'] + test_time = datetime.datetime(2000, 1, 1, 0, 0) + with 
mock.patch.object(self.dbapi, 'get_allocation_by_uuid', + autospec=True) as mock_get_allocation: + mock_get_allocation.return_value = self.fake_allocation + with mock.patch.object(self.dbapi, 'update_allocation', + autospec=True) as mock_update_allocation: + mock_update_allocation.return_value = ( + db_utils.get_test_allocation(name='newname', + updated_at=test_time)) + p = objects.Allocation.get_by_uuid(self.context, uuid) + p.name = 'newname' + p.save() + + mock_get_allocation.assert_called_once_with(uuid) + mock_update_allocation.assert_called_once_with( + uuid, {'version': objects.Allocation.VERSION, + 'name': 'newname'}) + self.assertEqual(self.context, p._context) + res_updated_at = (p.updated_at).replace(tzinfo=None) + self.assertEqual(test_time, res_updated_at) + + def test_refresh(self): + uuid = self.fake_allocation['uuid'] + returns = [self.fake_allocation, + db_utils.get_test_allocation(name='newname')] + expected = [mock.call(uuid), mock.call(uuid)] + with mock.patch.object(self.dbapi, 'get_allocation_by_uuid', + side_effect=returns, + autospec=True) as mock_get_allocation: + p = objects.Allocation.get_by_uuid(self.context, uuid) + self.assertEqual(self.fake_allocation['name'], p.name) + p.refresh() + self.assertEqual('newname', p.name) + + self.assertEqual(expected, mock_get_allocation.call_args_list) + self.assertEqual(self.context, p._context) + + def test_save_after_refresh(self): + # Ensure that it's possible to do object.save() after object.refresh() + db_allocation = db_utils.create_test_allocation() + p = objects.Allocation.get_by_uuid(self.context, db_allocation.uuid) + p_copy = objects.Allocation.get_by_uuid(self.context, + db_allocation.uuid) + p.name = 'newname' + p.save() + p_copy.refresh() + p_copy.name = 'newname2' + # Ensure this passes and an exception is not generated + p_copy.save() + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_allocation_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_allocation] + allocations = objects.Allocation.list(self.context) + self.assertThat(allocations, matchers.HasLength(1)) + self.assertIsInstance(allocations[0], objects.Allocation) + self.assertEqual(self.context, allocations[0]._context) + + def test_payload_schemas(self): + self._check_payload_schemas(objects.allocation, + objects.Allocation.fields) diff -Nru ironic-12.0.0/ironic/tests/unit/objects/test_deploy_template.py ironic-12.1.0/ironic/tests/unit/objects/test_deploy_template.py --- ironic-12.0.0/ironic/tests/unit/objects/test_deploy_template.py 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/test_deploy_template.py 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,159 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
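# A minimal sketch of the identifier dispatch exercised by the
# Allocation.get() tests above: an integer id, a UUID and a logical name each
# route to a dedicated lookup, and anything else raises InvalidIdentity. The
# helper predicates shown are common oslo/ironic utilities; their use here is
# an assumption about the implementation, the tests only fix the behaviour.
from oslo_utils import strutils
from oslo_utils import uuidutils

from ironic.common import exception
from ironic.common import utils


class Allocation(object):  # stand-in for objects.Allocation
    @classmethod
    def get(cls, context, allocation_ident):
        if strutils.is_int_like(allocation_ident):
            return cls.get_by_id(context, allocation_ident)
        elif uuidutils.is_uuid_like(allocation_ident):
            return cls.get_by_uuid(context, allocation_ident)
        elif utils.is_valid_logical_name(allocation_ident):
            return cls.get_by_name(context, allocation_ident)
        raise exception.InvalidIdentity(identity=allocation_ident)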
+ +import mock + +from ironic.common import context +from ironic.db import api as dbapi +from ironic import objects +from ironic.tests.unit.db import base as db_base +from ironic.tests.unit.db import utils as db_utils +from ironic.tests.unit.objects import utils as obj_utils + + +class TestDeployTemplateObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn): + + def setUp(self): + super(TestDeployTemplateObject, self).setUp() + self.ctxt = context.get_admin_context() + self.fake_template = db_utils.get_test_deploy_template() + + @mock.patch.object(dbapi.IMPL, 'create_deploy_template', autospec=True) + def test_create(self, mock_create): + template = objects.DeployTemplate(context=self.context, + **self.fake_template) + + mock_create.return_value = db_utils.get_test_deploy_template() + + template.create() + + args, _kwargs = mock_create.call_args + self.assertEqual(objects.DeployTemplate.VERSION, args[0]['version']) + self.assertEqual(1, mock_create.call_count) + + self.assertEqual(self.fake_template['name'], template.name) + self.assertEqual(self.fake_template['steps'], template.steps) + self.assertEqual(self.fake_template['extra'], template.extra) + + @mock.patch.object(dbapi.IMPL, 'update_deploy_template', autospec=True) + def test_save(self, mock_update): + template = objects.DeployTemplate(context=self.context, + **self.fake_template) + template.obj_reset_changes() + + mock_update.return_value = db_utils.get_test_deploy_template( + name='CUSTOM_DT2') + + template.name = 'CUSTOM_DT2' + template.save() + + mock_update.assert_called_once_with( + self.fake_template['uuid'], + {'name': 'CUSTOM_DT2', 'version': objects.DeployTemplate.VERSION}) + + self.assertEqual('CUSTOM_DT2', template.name) + + @mock.patch.object(dbapi.IMPL, 'destroy_deploy_template', autospec=True) + def test_destroy(self, mock_destroy): + template = objects.DeployTemplate(context=self.context, + id=self.fake_template['id']) + + template.destroy() + + mock_destroy.assert_called_once_with(self.fake_template['id']) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_by_id', autospec=True) + def test_get_by_id(self, mock_get): + mock_get.return_value = self.fake_template + + template = objects.DeployTemplate.get_by_id( + self.context, self.fake_template['id']) + + mock_get.assert_called_once_with(self.fake_template['id']) + self.assertEqual(self.fake_template['name'], template.name) + self.assertEqual(self.fake_template['uuid'], template.uuid) + self.assertEqual(self.fake_template['steps'], template.steps) + self.assertEqual(self.fake_template['extra'], template.extra) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_by_uuid', + autospec=True) + def test_get_by_uuid(self, mock_get): + mock_get.return_value = self.fake_template + + template = objects.DeployTemplate.get_by_uuid( + self.context, self.fake_template['uuid']) + + mock_get.assert_called_once_with(self.fake_template['uuid']) + self.assertEqual(self.fake_template['name'], template.name) + self.assertEqual(self.fake_template['uuid'], template.uuid) + self.assertEqual(self.fake_template['steps'], template.steps) + self.assertEqual(self.fake_template['extra'], template.extra) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_by_name', + autospec=True) + def test_get_by_name(self, mock_get): + mock_get.return_value = self.fake_template + + template = objects.DeployTemplate.get_by_name( + self.context, self.fake_template['name']) + + mock_get.assert_called_once_with(self.fake_template['name']) + self.assertEqual(self.fake_template['name'], template.name) + self.assertEqual(self.fake_template['uuid'], template.uuid) +
self.assertEqual(self.fake_template['steps'], template.steps) + self.assertEqual(self.fake_template['extra'], template.extra) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_list', autospec=True) + def test_list(self, mock_list): + mock_list.return_value = [self.fake_template] + + templates = objects.DeployTemplate.list(self.context) + + mock_list.assert_called_once_with(limit=None, marker=None, + sort_dir=None, sort_key=None) + self.assertEqual(1, len(templates)) + self.assertEqual(self.fake_template['name'], templates[0].name) + self.assertEqual(self.fake_template['uuid'], templates[0].uuid) + self.assertEqual(self.fake_template['steps'], templates[0].steps) + self.assertEqual(self.fake_template['extra'], templates[0].extra) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_list_by_names', + autospec=True) + def test_list_by_names(self, mock_list): + mock_list.return_value = [self.fake_template] + + names = [self.fake_template['name']] + templates = objects.DeployTemplate.list_by_names(self.context, names) + + mock_list.assert_called_once_with(names) + self.assertEqual(1, len(templates)) + self.assertEqual(self.fake_template['name'], templates[0].name) + self.assertEqual(self.fake_template['uuid'], templates[0].uuid) + self.assertEqual(self.fake_template['steps'], templates[0].steps) + self.assertEqual(self.fake_template['extra'], templates[0].extra) + + @mock.patch.object(dbapi.IMPL, 'get_deploy_template_by_uuid', + autospec=True) + def test_refresh(self, mock_get): + uuid = self.fake_template['uuid'] + mock_get.side_effect = [dict(self.fake_template), + dict(self.fake_template, name='CUSTOM_DT2')] + + template = objects.DeployTemplate.get_by_uuid(self.context, uuid) + + self.assertEqual(self.fake_template['name'], template.name) + + template.refresh() + + self.assertEqual('CUSTOM_DT2', template.name) + expected = [mock.call(uuid), mock.call(uuid)] + self.assertEqual(expected, mock_get.call_args_list) + self.assertEqual(self.context, template._context) diff -Nru ironic-12.0.0/ironic/tests/unit/objects/test_node.py ironic-12.1.0/ironic/tests/unit/objects/test_node.py --- ironic-12.0.0/ironic/tests/unit/objects/test_node.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/test_node.py 2019-03-21 20:07:40.000000000 +0000 @@ -775,9 +775,7 @@ delattr(node, 'protected') delattr(node, 'protected_reason') node.obj_reset_changes() - node._convert_to_version("1.29") - self.assertFalse(node.protected) self.assertIsNone(node.protected_reason) self.assertEqual({'protected': False, 'protected_reason': None}, @@ -829,6 +827,190 @@ self.assertEqual({'protected': False, 'protected_reason': None}, node.obj_get_changes()) + def test_owner_supported_missing(self): + # owner_interface not set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + delattr(node, 'owner') + node.obj_reset_changes() + node._convert_to_version("1.30") + self.assertIsNone(node.owner) + self.assertEqual({'owner': None}, + node.obj_get_changes()) + + def test_owner_supported_set(self): + # owner set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.owner = "Sure, there is an owner" + node.obj_reset_changes() + node._convert_to_version("1.30") + self.assertEqual("Sure, there is an owner", node.owner) + self.assertEqual({}, node.obj_get_changes()) + + def test_owner_unsupported_missing(self): + # owner not set, no change required. 
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + delattr(node, 'owner') + node.obj_reset_changes() + node._convert_to_version("1.29") + self.assertNotIn('owner', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_owner_unsupported_set_remove(self): + # owner set, should be removed. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.owner = "magic" + node.obj_reset_changes() + node._convert_to_version("1.29") + self.assertNotIn('owner', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_owner_unsupported_set_no_remove_non_default(self): + # owner set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.owner = "magic" + node.obj_reset_changes() + node._convert_to_version("1.29", False) + self.assertIsNone(node.owner) + self.assertEqual({'owner': None}, + node.obj_get_changes()) + + def test_owner_unsupported_set_no_remove_default(self): + # owner set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.owner = None + node.obj_reset_changes() + node._convert_to_version("1.29", False) + self.assertIsNone(node.owner) + self.assertEqual({}, node.obj_get_changes()) + + def test_allocation_id_supported_missing(self): + # allocation_id_interface not set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + delattr(node, 'allocation_id') + node.obj_reset_changes() + node._convert_to_version("1.31") + self.assertIsNone(node.allocation_id) + self.assertEqual({'allocation_id': None}, + node.obj_get_changes()) + + def test_allocation_id_supported_set(self): + # allocation_id set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.allocation_id = 42 + node.obj_reset_changes() + node._convert_to_version("1.31") + self.assertEqual(42, node.allocation_id) + self.assertEqual({}, node.obj_get_changes()) + + def test_allocation_id_unsupported_missing(self): + # allocation_id not set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + delattr(node, 'allocation_id') + node.obj_reset_changes() + node._convert_to_version("1.30") + self.assertNotIn('allocation_id', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_allocation_id_unsupported_set_remove(self): + # allocation_id set, should be removed. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.allocation_id = 42 + node.obj_reset_changes() + node._convert_to_version("1.30") + self.assertNotIn('allocation_id', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_allocation_id_unsupported_set_no_remove_non_default(self): + # allocation_id set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.allocation_id = 42 + node.obj_reset_changes() + node._convert_to_version("1.30", False) + self.assertIsNone(node.allocation_id) + self.assertEqual({'allocation_id': None}, + node.obj_get_changes()) + + def test_allocation_id_unsupported_set_no_remove_default(self): + # allocation_id set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.allocation_id = None + node.obj_reset_changes() + node._convert_to_version("1.30", False) + self.assertIsNone(node.allocation_id) + self.assertEqual({}, node.obj_get_changes()) + + def test_description_supported_missing(self): + # description not set, should be set to default. 
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + delattr(node, 'description') + node.obj_reset_changes() + node._convert_to_version("1.32") + self.assertIsNone(node.description) + self.assertEqual({'description': None}, + node.obj_get_changes()) + + def test_description_supported_set(self): + # description set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.description = "Useful information relates to this node" + node.obj_reset_changes() + node._convert_to_version("1.32") + self.assertEqual("Useful information relates to this node", + node.description) + self.assertEqual({}, node.obj_get_changes()) + + def test_description_unsupported_missing(self): + # description not set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + delattr(node, 'description') + node.obj_reset_changes() + node._convert_to_version("1.31") + self.assertNotIn('description', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_description_unsupported_set_remove(self): + # description set, should be removed. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.description = "Useful piece" + node.obj_reset_changes() + node._convert_to_version("1.31") + self.assertNotIn('description', node) + self.assertEqual({}, node.obj_get_changes()) + + def test_description_unsupported_set_no_remove_non_default(self): + # description set, should be set to default. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.description = "Useful piece" + node.obj_reset_changes() + node._convert_to_version("1.31", False) + self.assertIsNone(node.description) + self.assertEqual({'description': None}, + node.obj_get_changes()) + + def test_description_unsupported_set_no_remove_default(self): + # description set, no change required. + node = obj_utils.get_test_node(self.ctxt, **self.fake_node) + + node.description = None + node.obj_reset_changes() + node._convert_to_version("1.31", False) + self.assertIsNone(node.description) + self.assertEqual({}, node.obj_get_changes()) + class TestNodePayloads(db_base.DbTestCase): @@ -886,6 +1068,7 @@ self.assertEqual(self.node.traits.get_trait_names(), payload.traits) self.assertEqual(self.node.updated_at, payload.updated_at) self.assertEqual(self.node.uuid, payload.uuid) + self.assertEqual(self.node.owner, payload.owner) def test_node_payload(self): payload = objects.NodePayload(self.node) diff -Nru ironic-12.0.0/ironic/tests/unit/objects/test_objects.py ironic-12.1.0/ironic/tests/unit/objects/test_objects.py --- ironic-12.0.0/ironic/tests/unit/objects/test_objects.py 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/test_objects.py 2019-03-21 20:07:40.000000000 +0000 @@ -677,31 +677,31 @@ # version bump. It is an MD5 hash of the object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. 
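# The expected_object_fingerprints dict that follows is consumed by a hash
# check built on oslo.versionedobjects; the test doing so looks essentially
# like this sketch (method shown out of its test class for brevity):
from oslo_versionedobjects import fixture

def test_object_version_check(self):
    classes = base.IronicObjectRegistry.obj_classes()
    checker = fixture.ObjectVersionChecker(obj_classes=classes)
    # test_hashes() returns the expected/actual difference; empty dicts on
    # both sides mean no object changed without a version bump.
    expected, actual = checker.test_hashes(expected_object_fingerprints)
    self.assertEqual(expected, actual,
                     "Some objects fields or remotable methods changed "
                     "without a version bump")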
expected_object_fingerprints = { - 'Node': '1.29-7af860bb4017751104558139c52a1327', + 'Node': '1.32-525750e76f07b62142ed5297334b7832', 'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6', 'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905', - 'Port': '1.8-898a47921f4a1f53fcdddd4eeb179e0b', + 'Port': '1.9-0cb9202a4ec442e8c0d87a324155eaaf', 'Portgroup': '1.4-71923a81a86743b313b190f5c675e258', 'Conductor': '1.3-d3f53e853b4d58cae5bfbd9a8341af4a', 'EventType': '1.1-aa2ba1afd38553e3880c267404e8d370', 'NotificationPublisher': '1.0-51a09397d6c0687771fb5be9a999605d', - 'NodePayload': '1.11-f323602c2e9c3edbf2a5567eca087ff5', + 'NodePayload': '1.13-18a34d461ef7d5dbc1c3e5a55fcb867a', 'NodeSetPowerStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeSetPowerStatePayload': '1.11-b61e66ef9d100a2cc564d16b12810855', + 'NodeSetPowerStatePayload': '1.13-4f96e52568e058e3fd6ffc9b0cf15764', 'NodeCorrectedPowerStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeCorrectedPowerStatePayload': '1.11-e6e32a38ca655509802ac3c6d8bc17f6', + 'NodeCorrectedPowerStatePayload': '1.13-929af354e7c3474520ce6162ee794717', 'NodeSetProvisionStateNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeSetProvisionStatePayload': '1.11-d13cb3472eea163de5b0723a08e95d2c', + 'NodeSetProvisionStatePayload': '1.13-fa15d2954961d8edcaba9d737a1cad91', 'VolumeConnector': '1.0-3e0252c0ab6e6b9d158d09238a577d97', 'VolumeTarget': '1.0-0b10d663d8dae675900b2c7548f76f5e', 'ChassisCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', 'ChassisCRUDPayload': '1.0-dce63895d8186279a7dd577cffccb202', 'NodeCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', - 'NodeCRUDPayload': '1.9-c5e57432274371f7fe32f269519033cf', + 'NodeCRUDPayload': '1.11-f1c6a6b099e8e28f55378c448c033de0', 'PortCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', - 'PortCRUDPayload': '1.2-233d259df442eb15cc584fae1fe81504', + 'PortCRUDPayload': '1.3-21235916ed54a91b2a122f59571194e7', 'NodeMaintenanceNotification': '1.0-59acc533c11d306f149846f922739c15', 'NodeConsoleNotification': '1.0-59acc533c11d306f149846f922739c15', 'PortgroupCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', @@ -714,6 +714,12 @@ 'TraitList': '1.0-33a2e1bb91ad4082f9f63429b77c1244', 'BIOSSetting': '1.0-fd4a791dc2139a7cc21cefbbaedfd9e7', 'BIOSSettingList': '1.0-33a2e1bb91ad4082f9f63429b77c1244', + 'Allocation': '1.0-25ebf609743cd3f332a4f80fcb818102', + 'AllocationCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', + 'AllocationCRUDPayload': '1.0-a82389d019f37cfe54b50049f73911b3', + 'DeployTemplate': '1.1-4e30c8e9098595e359bb907f095bf1a9', + 'DeployTemplateCRUDNotification': '1.0-59acc533c11d306f149846f922739c15', + 'DeployTemplateCRUDPayload': '1.0-200857e7e715f58a5b6d6b700ab73a3b', } diff -Nru ironic-12.0.0/ironic/tests/unit/objects/test_port.py ironic-12.1.0/ironic/tests/unit/objects/test_port.py --- ironic-12.0.0/ironic/tests/unit/objects/test_port.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/test_port.py 2019-03-21 20:07:40.000000000 +0000 @@ -165,6 +165,20 @@ def test_payload_schemas(self): self._check_payload_schemas(objects.port, objects.Port.fields) + @mock.patch.object(obj_base.IronicObject, 'supports_version', + spec_set=types.FunctionType) + def test_supports_is_smartnic_supported(self, mock_sv): + mock_sv.return_value = True + self.assertTrue(objects.Port.supports_is_smartnic()) + mock_sv.assert_called_once_with((1, 9)) + + @mock.patch.object(obj_base.IronicObject, 'supports_version', + 
spec_set=types.FunctionType) + def test_supports_is_smartnic_unsupported(self, mock_sv): + mock_sv.return_value = False + self.assertFalse(objects.Port.supports_is_smartnic()) + mock_sv.assert_called_once_with((1, 9)) + class TestConvertToVersion(db_base.DbTestCase): @@ -255,6 +269,72 @@ # no change self.assertEqual(vif2, port.internal_info['tenant_vif_port_id']) + def test_is_smartnic_unsupported(self): + port = objects.Port(self.context, **self.fake_port) + port._convert_to_version("1.8") + self.assertNotIn('is_smartnic', port) + + def test_is_smartnic_supported(self): + port = objects.Port(self.context, **self.fake_port) + port._convert_to_version("1.9") + self.assertIn('is_smartnic', port) + + def test_is_smartnic_supported_missing(self): + # is_smartnic is not set, should be set to default. + port = objects.Port(self.context, **self.fake_port) + delattr(port, 'is_smartnic') + port.obj_reset_changes() + port._convert_to_version("1.9") + self.assertFalse(port.is_smartnic) + self.assertIn('is_smartnic', port.obj_get_changes()) + self.assertFalse(port.obj_get_changes()['is_smartnic']) + + def test_is_smartnic_supported_set(self): + # is_smartnic is set, no change required. + port = objects.Port(self.context, **self.fake_port) + port.is_smartnic = True + port.obj_reset_changes() + port._convert_to_version("1.9") + self.assertTrue(port.is_smartnic) + self.assertNotIn('is_smartnic', port.obj_get_changes()) + + def test_is_smartnic_unsupported_missing(self): + # is_smartnic is not set, no change required. + port = objects.Port(self.context, **self.fake_port) + delattr(port, 'is_smartnic') + port.obj_reset_changes() + port._convert_to_version("1.8") + self.assertNotIn('is_smartnic', port) + self.assertNotIn('is_smartnic', port.obj_get_changes()) + + def test_is_smartnic_unsupported_set_remove(self): + # is_smartnic is set, should be removed. + port = objects.Port(self.context, **self.fake_port) + port.is_smartnic = False + port.obj_reset_changes() + port._convert_to_version("1.8") + self.assertNotIn('is_smartnic', port) + self.assertNotIn('is_smartnic', port.obj_get_changes()) + + def test_is_smartnic_unsupported_set_no_remove_non_default(self): + # is_smartnic is set, should be set to default. + port = objects.Port(self.context, **self.fake_port) + port.is_smartnic = True + port.obj_reset_changes() + port._convert_to_version("1.8", False) + self.assertFalse(port.is_smartnic) + self.assertIn('is_smartnic', port.obj_get_changes()) + self.assertFalse(port.obj_get_changes()['is_smartnic']) + + def test_is_smartnic_unsupported_set_no_remove_default(self): + # is_smartnic is set, no change required. 
+ port = objects.Port(self.context, **self.fake_port) + port.is_smartnic = False + port.obj_reset_changes() + port._convert_to_version("1.8", False) + self.assertFalse(port.is_smartnic) + self.assertNotIn('is_smartnic', port.obj_get_changes()) + class TestMigrateVifPortId(db_base.DbTestCase): @@ -278,9 +358,10 @@ total, done = objects.port.migrate_vif_port_id(self.context, 0) self.assertEqual(3, total) self.assertEqual(3, done) - mock_get_not_versions.assert_called_once_with('Port', ['1.8']) + mock_get_not_versions.assert_called_once_with('Port', ['1.8', + '1.9']) calls = 3 * [ - mock.call(mock.ANY, '1.8', remove_unavailable_fields=False), + mock.call(mock.ANY, '1.9', remove_unavailable_fields=False), ] self.assertEqual(calls, mock_convert.call_args_list) @@ -292,8 +373,9 @@ total, done = objects.port.migrate_vif_port_id(self.context, 1) self.assertEqual(3, total) self.assertEqual(1, done) - mock_get_not_versions.assert_called_once_with('Port', ['1.8']) + mock_get_not_versions.assert_called_once_with('Port', ['1.8', + '1.9']) calls = [ - mock.call(mock.ANY, '1.8', remove_unavailable_fields=False), + mock.call(mock.ANY, '1.9', remove_unavailable_fields=False), ] self.assertEqual(calls, mock_convert.call_args_list) diff -Nru ironic-12.0.0/ironic/tests/unit/objects/utils.py ironic-12.1.0/ironic/tests/unit/objects/utils.py --- ironic-12.0.0/ironic/tests/unit/objects/utils.py 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/ironic/tests/unit/objects/utils.py 2019-03-21 20:07:40.000000000 +0000 @@ -265,6 +265,72 @@ return conductor + +def get_test_allocation(ctxt, **kw): + """Return an Allocation object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + kw['object_type'] = 'allocation' + get_db_allocation_checked = check_keyword_arguments( + db_utils.get_test_allocation) + db_allocation = get_db_allocation_checked(**kw) + + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_allocation['id'] + allocation = objects.Allocation(ctxt) + for key in db_allocation: + setattr(allocation, key, db_allocation[key]) + return allocation + + +def create_test_allocation(ctxt, **kw): + """Create and return a test allocation object. + + Create an allocation in the DB and return an Allocation object with + appropriate attributes. + """ + allocation = get_test_allocation(ctxt, **kw) + allocation.create() + return allocation + + +def get_test_deploy_template(ctxt, **kw): + """Return a DeployTemplate object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + db_template = db_utils.get_test_deploy_template(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_template['id'] + if 'steps' not in kw: + for step in db_template['steps']: + del step['id'] + del step['deploy_template_id'] + else: + for kw_step, template_step in zip(kw['steps'], db_template['steps']): + if 'id' not in kw_step and 'id' in template_step: + del template_step['id'] + template = objects.DeployTemplate(ctxt) + for key in db_template: + setattr(template, key, db_template[key]) + return template + + +def create_test_deploy_template(ctxt, **kw): + """Create and return a test deploy template object. + + Create a deploy template in the DB and return a DeployTemplate object + with appropriate attributes.
+ """ + template = get_test_deploy_template(ctxt, **kw) + template.create() + return template + + def get_payloads_with_schemas(from_module): """Get the Payload classes with SCHEMAs defined. diff -Nru ironic-12.0.0/ironic.egg-info/entry_points.txt ironic-12.1.0/ironic.egg-info/entry_points.txt --- ironic-12.0.0/ironic.egg-info/entry_points.txt 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/ironic.egg-info/entry_points.txt 2019-03-21 20:09:08.000000000 +0000 @@ -54,6 +54,7 @@ [ironic.hardware.interfaces.management] cimc = ironic.drivers.modules.cimc.management:CIMCManagement fake = ironic.drivers.modules.fake:FakeManagement +ibmc = ironic.drivers.modules.ibmc.management:IBMCManagement idrac = ironic.drivers.modules.drac.management:DracManagement ilo = ironic.drivers.modules.ilo.management:IloManagement ipmitool = ironic.drivers.modules.ipmitool:IPMIManagement @@ -71,6 +72,7 @@ [ironic.hardware.interfaces.power] cimc = ironic.drivers.modules.cimc.power:Power fake = ironic.drivers.modules.fake:FakePower +ibmc = ironic.drivers.modules.ibmc.power:IBMCPower idrac = ironic.drivers.modules.drac.power:DracPower ilo = ironic.drivers.modules.ilo.power:IloPower ipmitool = ironic.drivers.modules.ipmitool:IPMIPower @@ -84,6 +86,7 @@ agent = ironic.drivers.modules.agent:AgentRAID fake = ironic.drivers.modules.fake:FakeRAID idrac = ironic.drivers.modules.drac.raid:DracRAID +ilo5 = ironic.drivers.modules.ilo.raid:Ilo5RAID irmc = ironic.drivers.modules.irmc.raid:IRMCRAID no-raid = ironic.drivers.modules.noop:NoRAID @@ -100,6 +103,7 @@ [ironic.hardware.interfaces.vendor] fake = ironic.drivers.modules.fake:FakeVendorB +ibmc = ironic.drivers.modules.ibmc.vendor:IBMCVendor idrac = ironic.drivers.modules.drac.vendor_passthru:DracVendorPassthru ilo = ironic.drivers.modules.ilo.vendor:VendorPassthru ipmitool = ironic.drivers.modules.ipmitool:VendorPassthru @@ -109,8 +113,10 @@ cisco-ucs-managed = ironic.drivers.cisco_ucs:CiscoUCSManaged cisco-ucs-standalone = ironic.drivers.cisco_ucs:CiscoUCSStandalone fake-hardware = ironic.drivers.fake_hardware:FakeHardware +ibmc = ironic.drivers.ibmc:IBMCHardware idrac = ironic.drivers.drac:IDRACHardware ilo = ironic.drivers.ilo:IloHardware +ilo5 = ironic.drivers.ilo:Ilo5Hardware ipmi = ironic.drivers.ipmi:IPMIHardware irmc = ironic.drivers.irmc:IRMCHardware manual-management = ironic.drivers.generic:ManualManagementHardware diff -Nru ironic-12.0.0/ironic.egg-info/pbr.json ironic-12.1.0/ironic.egg-info/pbr.json --- ironic-12.0.0/ironic.egg-info/pbr.json 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/ironic.egg-info/pbr.json 2019-03-21 20:09:08.000000000 +0000 @@ -1 +1 @@ -{"git_version": "be8d81b", "is_release": true} \ No newline at end of file +{"git_version": "d4d1a0132", "is_release": true} \ No newline at end of file diff -Nru ironic-12.0.0/ironic.egg-info/PKG-INFO ironic-12.1.0/ironic.egg-info/PKG-INFO --- ironic-12.0.0/ironic.egg-info/PKG-INFO 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/ironic.egg-info/PKG-INFO 2019-03-21 20:09:08.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: ironic -Version: 12.0.0 +Version: 12.1.0 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack diff -Nru ironic-12.0.0/ironic.egg-info/requires.txt ironic-12.1.0/ironic.egg-info/requires.txt --- ironic-12.0.0/ironic.egg-info/requires.txt 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/ironic.egg-info/requires.txt 2019-03-21 20:09:08.000000000 +0000 @@ -12,7 +12,6 @@ python-swiftclient>=3.2.0 
pytz>=2013.6 stevedore>=1.20.0 -pysendfile>=2.0.0 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.context>=2.19.2 @@ -34,7 +33,7 @@ rfc3986>=0.3.1 six>=1.10.0 jsonpatch!=1.20,>=1.16 -WSME>=0.8.0 +WSME>=0.9.3 Jinja2>=2.10 keystonemiddleware>=4.17.0 oslo.messaging>=5.29.0 @@ -44,3 +43,7 @@ psutil>=3.2.2 futurist>=1.2.0 tooz>=1.58.0 +openstacksdk>=0.25.0 + +[:(sys_platform!='win32')] +pysendfile>=2.0.0 diff -Nru ironic-12.0.0/ironic.egg-info/SOURCES.txt ironic-12.1.0/ironic.egg-info/SOURCES.txt --- ironic-12.0.0/ironic.egg-info/SOURCES.txt 2018-12-19 10:03:56.000000000 +0000 +++ ironic-12.1.0/ironic.egg-info/SOURCES.txt 2019-03-21 20:09:08.000000000 +0000 @@ -15,10 +15,14 @@ test-requirements.txt tox.ini api-ref/regenerate-samples.sh +api-ref/source/baremetal-api-v1-allocation.inc api-ref/source/baremetal-api-v1-chassis.inc +api-ref/source/baremetal-api-v1-conductors.inc +api-ref/source/baremetal-api-v1-deploy-templates.inc api-ref/source/baremetal-api-v1-driver-passthru.inc api-ref/source/baremetal-api-v1-drivers.inc api-ref/source/baremetal-api-v1-misc.inc +api-ref/source/baremetal-api-v1-node-allocation.inc api-ref/source/baremetal-api-v1-node-management.inc api-ref/source/baremetal-api-v1-node-passthru.inc api-ref/source/baremetal-api-v1-nodes-bios.inc @@ -36,6 +40,11 @@ api-ref/source/conf.py api-ref/source/index.rst api-ref/source/parameters.yaml +api-ref/source/samples/allocation-create-request-2.json +api-ref/source/samples/allocation-create-request.json +api-ref/source/samples/allocation-create-response.json +api-ref/source/samples/allocation-show-response.json +api-ref/source/samples/allocations-list-response.json api-ref/source/samples/api-root-response.json api-ref/source/samples/api-v1-root-response.json api-ref/source/samples/chassis-create-request.json @@ -44,6 +53,16 @@ api-ref/source/samples/chassis-show-response.json api-ref/source/samples/chassis-update-request.json api-ref/source/samples/chassis-update-response.json +api-ref/source/samples/conductor-list-details-response.json +api-ref/source/samples/conductor-list-response.json +api-ref/source/samples/conductor-show-response.json +api-ref/source/samples/deploy-template-create-request.json +api-ref/source/samples/deploy-template-create-response.json +api-ref/source/samples/deploy-template-detail-response.json +api-ref/source/samples/deploy-template-list-response.json +api-ref/source/samples/deploy-template-show-response.json +api-ref/source/samples/deploy-template-update-request.json +api-ref/source/samples/deploy-template-update-response.json api-ref/source/samples/driver-get-response.json api-ref/source/samples/driver-logical-disk-properties-response.json api-ref/source/samples/driver-property-response.json @@ -155,6 +174,7 @@ doc/source/admin/inspection.rst doc/source/admin/metrics.rst doc/source/admin/multitenancy.rst +doc/source/admin/node-deployment.rst doc/source/admin/notifications.rst doc/source/admin/portgroups.rst doc/source/admin/radosgw.rst @@ -167,6 +187,7 @@ doc/source/admin/upgrade-to-hardware-types.rst doc/source/admin/drivers/ansible.rst doc/source/admin/drivers/cimc.rst +doc/source/admin/drivers/ibmc.rst doc/source/admin/drivers/idrac.rst doc/source/admin/drivers/ilo.rst doc/source/admin/drivers/ipa.rst @@ -210,6 +231,7 @@ doc/source/contributor/webapi.rst doc/source/images/conceptual_architecture.png doc/source/images/deployment_architecture_2.png +doc/source/images/ironic_standalone_with_ibmc_driver.svg doc/source/images/logical_architecture.png doc/source/images/sample_trace.svg 
doc/source/images/sample_trace_details.svg @@ -291,11 +313,14 @@ ironic/api/controllers/root.py ironic/api/controllers/version.py ironic/api/controllers/v1/__init__.py +ironic/api/controllers/v1/allocation.py ironic/api/controllers/v1/bios.py ironic/api/controllers/v1/chassis.py ironic/api/controllers/v1/collection.py ironic/api/controllers/v1/conductor.py +ironic/api/controllers/v1/deploy_template.py ironic/api/controllers/v1/driver.py +ironic/api/controllers/v1/event.py ironic/api/controllers/v1/node.py ironic/api/controllers/v1/notification_utils.py ironic/api/controllers/v1/port.py @@ -354,11 +379,16 @@ ironic/common/glance_service/service_utils.py ironic/common/glance_service/v2/__init__.py ironic/common/glance_service/v2/image_service.py +ironic/common/json_rpc/__init__.py +ironic/common/json_rpc/client.py +ironic/common/json_rpc/server.py ironic/conductor/__init__.py +ironic/conductor/allocations.py ironic/conductor/base_manager.py ironic/conductor/manager.py ironic/conductor/notification_utils.py ironic/conductor/rpcapi.py +ironic/conductor/steps.py ironic/conductor/task_manager.py ironic/conductor/utils.py ironic/conf/__init__.py @@ -378,11 +408,13 @@ ironic/conf/drac.py ironic/conf/glance.py ironic/conf/healthcheck.py +ironic/conf/ibmc.py ironic/conf/ilo.py ironic/conf/inspector.py ironic/conf/ipmi.py ironic/conf/irmc.py ironic/conf/iscsi.py +ironic/conf/json_rpc.py ironic/conf/metrics.py ironic/conf/metrics_statsd.py ironic/conf/neutron.py @@ -407,11 +439,14 @@ ironic/db/sqlalchemy/alembic/versions/10b163d4481e_add_port_portgroup_internal_info.py ironic/db/sqlalchemy/alembic/versions/1a59178ebdf6_add_volume_targets_table.py ironic/db/sqlalchemy/alembic/versions/1d6951876d68_add_storage_interface_db_field_and_.py +ironic/db/sqlalchemy/alembic/versions/1e15e7122cc9_add_extra_column_to_deploy_templates.py ironic/db/sqlalchemy/alembic/versions/1e1d5ace7dc6_add_inspection_started_at_and_.py ironic/db/sqlalchemy/alembic/versions/21b331f883ef_add_provision_updated_at.py ironic/db/sqlalchemy/alembic/versions/2353895ecfae_add_conductor_hardware_interfaces_table.py ironic/db/sqlalchemy/alembic/versions/242cc6a923b3_add_node_maintenance_reason.py ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py +ironic/db/sqlalchemy/alembic/versions/28c44432c9c3_add_node_description.py +ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py ironic/db/sqlalchemy/alembic/versions/2d13bc3d6bba_add_bios_config_and_interface.py ironic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py ironic/db/sqlalchemy/alembic/versions/31baaf680d2b_add_node_instance_info.py @@ -433,6 +468,7 @@ ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py ironic/db/sqlalchemy/alembic/versions/868cb606a74a_add_version_field_in_base_class.py ironic/db/sqlalchemy/alembic/versions/93706939026c_add_node_protected_field.py +ironic/db/sqlalchemy/alembic/versions/9cbeefa3763f_add_port_is_smartnic.py ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py ironic/db/sqlalchemy/alembic/versions/b9117ac17882_add_node_deploy_step.py ironic/db/sqlalchemy/alembic/versions/bb59b63f55a_add_node_driver_internal_info.py @@ -442,8 +478,10 @@ ironic/db/sqlalchemy/alembic/versions/daa1ba02d98_add_volume_connectors_table.py ironic/db/sqlalchemy/alembic/versions/dbefd6bdaa2c_add_default_column_to_.py ironic/db/sqlalchemy/alembic/versions/dd34e1f1303b_add_resource_class_to_node.py 
+ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py ironic/db/sqlalchemy/alembic/versions/e294876e8028_add_node_network_interface.py ironic/db/sqlalchemy/alembic/versions/e918ff30eb42_resize_column_nodes_instance_info.py +ironic/db/sqlalchemy/alembic/versions/f190f9d00a11_add_node_owner.py ironic/db/sqlalchemy/alembic/versions/f6fdb920c182_set_pxe_enabled_true.py ironic/db/sqlalchemy/alembic/versions/fb3f10dd262e_add_fault_to_node_table.py ironic/dhcp/__init__.py @@ -457,6 +495,7 @@ ironic/drivers/fake_hardware.py ironic/drivers/generic.py ironic/drivers/hardware_type.py +ironic/drivers/ibmc.py ironic/drivers/ilo.py ironic/drivers/ipmi.py ironic/drivers/irmc.py @@ -537,6 +576,12 @@ ironic/drivers/modules/drac/power.py ironic/drivers/modules/drac/raid.py ironic/drivers/modules/drac/vendor_passthru.py +ironic/drivers/modules/ibmc/__init__.py +ironic/drivers/modules/ibmc/management.py +ironic/drivers/modules/ibmc/mappings.py +ironic/drivers/modules/ibmc/power.py +ironic/drivers/modules/ibmc/utils.py +ironic/drivers/modules/ibmc/vendor.py ironic/drivers/modules/ilo/__init__.py ironic/drivers/modules/ilo/bios.py ironic/drivers/modules/ilo/boot.py @@ -546,6 +591,7 @@ ironic/drivers/modules/ilo/inspect.py ironic/drivers/modules/ilo/management.py ironic/drivers/modules/ilo/power.py +ironic/drivers/modules/ilo/raid.py ironic/drivers/modules/ilo/vendor.py ironic/drivers/modules/irmc/__init__.py ironic/drivers/modules/irmc/bios.py @@ -581,10 +627,12 @@ ironic/hacking/__init__.py ironic/hacking/checks.py ironic/objects/__init__.py +ironic/objects/allocation.py ironic/objects/base.py ironic/objects/bios.py ironic/objects/chassis.py ironic/objects/conductor.py +ironic/objects/deploy_template.py ironic/objects/fields.py ironic/objects/indirection.py ironic/objects/node.py @@ -594,6 +642,7 @@ ironic/objects/trait.py ironic/objects/volume_connector.py ironic/objects/volume_target.py +ironic/releasenotes/notes/add-protection-for-available-nodes-25f163d69782ef63.yaml ironic/tests/__init__.py ironic/tests/base.py ironic/tests/functional/__init__.py @@ -615,9 +664,12 @@ ironic/tests/unit/api/controllers/__init__.py ironic/tests/unit/api/controllers/test_base.py ironic/tests/unit/api/controllers/v1/__init__.py +ironic/tests/unit/api/controllers/v1/test_allocation.py ironic/tests/unit/api/controllers/v1/test_chassis.py ironic/tests/unit/api/controllers/v1/test_conductor.py +ironic/tests/unit/api/controllers/v1/test_deploy_template.py ironic/tests/unit/api/controllers/v1/test_driver.py +ironic/tests/unit/api/controllers/v1/test_event.py ironic/tests/unit/api/controllers/v1/test_expose.py ironic/tests/unit/api/controllers/v1/test_node.py ironic/tests/unit/api/controllers/v1/test_notification_utils.py @@ -645,6 +697,7 @@ ironic/tests/unit/common/test_hash_ring.py ironic/tests/unit/common/test_image_service.py ironic/tests/unit/common/test_images.py +ironic/tests/unit/common/test_json_rpc.py ironic/tests/unit/common/test_keystone.py ironic/tests/unit/common/test_network.py ironic/tests/unit/common/test_neutron.py @@ -660,20 +713,24 @@ ironic/tests/unit/common/test_wsgi_service.py ironic/tests/unit/conductor/__init__.py ironic/tests/unit/conductor/mgr_utils.py +ironic/tests/unit/conductor/test_allocations.py ironic/tests/unit/conductor/test_base_manager.py ironic/tests/unit/conductor/test_manager.py ironic/tests/unit/conductor/test_notification_utils.py ironic/tests/unit/conductor/test_rpcapi.py +ironic/tests/unit/conductor/test_steps.py ironic/tests/unit/conductor/test_task_manager.py 
ironic/tests/unit/conductor/test_utils.py ironic/tests/unit/conf/__init__.py ironic/tests/unit/conf/test_auth.py ironic/tests/unit/db/__init__.py ironic/tests/unit/db/base.py +ironic/tests/unit/db/test_allocations.py ironic/tests/unit/db/test_api.py ironic/tests/unit/db/test_bios_settings.py ironic/tests/unit/db/test_chassis.py ironic/tests/unit/db/test_conductor.py +ironic/tests/unit/db/test_deploy_templates.py ironic/tests/unit/db/test_node_tags.py ironic/tests/unit/db/test_node_traits.py ironic/tests/unit/db/test_nodes.py @@ -704,6 +761,7 @@ ironic/tests/unit/drivers/test_drac.py ironic/tests/unit/drivers/test_fake_hardware.py ironic/tests/unit/drivers/test_generic.py +ironic/tests/unit/drivers/test_ibmc.py ironic/tests/unit/drivers/test_ilo.py ironic/tests/unit/drivers/test_ipmi.py ironic/tests/unit/drivers/test_irmc.py @@ -746,6 +804,12 @@ ironic/tests/unit/drivers/modules/drac/test_power.py ironic/tests/unit/drivers/modules/drac/test_raid.py ironic/tests/unit/drivers/modules/drac/utils.py +ironic/tests/unit/drivers/modules/ibmc/__init__.py +ironic/tests/unit/drivers/modules/ibmc/base.py +ironic/tests/unit/drivers/modules/ibmc/test_management.py +ironic/tests/unit/drivers/modules/ibmc/test_power.py +ironic/tests/unit/drivers/modules/ibmc/test_utils.py +ironic/tests/unit/drivers/modules/ibmc/test_vendor.py ironic/tests/unit/drivers/modules/ilo/__init__.py ironic/tests/unit/drivers/modules/ilo/test_bios.py ironic/tests/unit/drivers/modules/ilo/test_boot.py @@ -755,6 +819,7 @@ ironic/tests/unit/drivers/modules/ilo/test_inspect.py ironic/tests/unit/drivers/modules/ilo/test_management.py ironic/tests/unit/drivers/modules/ilo/test_power.py +ironic/tests/unit/drivers/modules/ilo/test_raid.py ironic/tests/unit/drivers/modules/ilo/test_vendor.py ironic/tests/unit/drivers/modules/irmc/__init__.py ironic/tests/unit/drivers/modules/irmc/fake_sensors_data_ng.xml @@ -790,9 +855,11 @@ ironic/tests/unit/drivers/modules/xclarity/test_management.py ironic/tests/unit/drivers/modules/xclarity/test_power.py ironic/tests/unit/objects/__init__.py +ironic/tests/unit/objects/test_allocation.py ironic/tests/unit/objects/test_bios.py ironic/tests/unit/objects/test_chassis.py ironic/tests/unit/objects/test_conductor.py +ironic/tests/unit/objects/test_deploy_template.py ironic/tests/unit/objects/test_fields.py ironic/tests/unit/objects/test_node.py ironic/tests/unit/objects/test_notification.py @@ -819,7 +886,9 @@ releasenotes/notes/add-agent-erase-fallback-b07613a7042fe236.yaml releasenotes/notes/add-agent-iboot-0a4b5471c6ace461.yaml releasenotes/notes/add-agent-proxy-support-790e629634ca2eb7.yaml +releasenotes/notes/add-ansible-python-interpreter-2035e0f23d407aaf.yaml releasenotes/notes/add-boot-from-volume-support-9f64208f083d0691.yaml +releasenotes/notes/add-boot-mode-redfish-inspect-48e2b27ef022932a.yaml releasenotes/notes/add-chassis_uuid-removal-possibility-8b06341a91f7c676.yaml releasenotes/notes/add-choice-to-some-options-9fb327c48e6bfda1.yaml releasenotes/notes/add-cisco-ucs-hardware-types-ee597ff0416f158f.yaml @@ -836,12 +905,18 @@ releasenotes/notes/add-more-retryable-ipmitool-errors-1c9351a89ff0ec1a.yaml releasenotes/notes/add-node-bios-9c1c3d442e8acdac.yaml releasenotes/notes/add-node-boot-mode-control-9761d4bcbd8c3a0d.yaml +releasenotes/notes/add-node-description-790097704f45af91.yaml releasenotes/notes/add-node-resource-class-c31e26df4196293e.yaml releasenotes/notes/add-notifications-97b6c79c18b48073.yaml releasenotes/notes/add-oneview-driver-96088bf470b16c34.yaml 
+releasenotes/notes/add-option-persistent-boot-device-139cf280fb66f4f7.yaml +releasenotes/notes/add-owner-information-52e153faf570747e.yaml +releasenotes/notes/add-parallel-power-syncs-b099d66e80aab616.yaml releasenotes/notes/add-port-advanced-net-fields-55465091f019d962.yaml releasenotes/notes/add-port-internal-info-b7e02889416570f7.yaml +releasenotes/notes/add-port-is-smartnic-4ce6974c8fe2732d.yaml releasenotes/notes/add-prep-partition-support-d808849795906e64.yaml +releasenotes/notes/add-pxe-per-node-526fd79df17efda8.yaml releasenotes/notes/add-pxe-support-for-petitboot-50d1fe4e7da4bfba.yaml releasenotes/notes/add-redfish-auth-type-5fe78071b528e53b.yaml releasenotes/notes/add-redfish-boot-mode-support-2f1a2568e71c65d0.yaml @@ -857,6 +932,7 @@ releasenotes/notes/add-ssl-support-4547801eedba5942.yaml releasenotes/notes/add-storage-interface-d4e64224804207fc.yaml releasenotes/notes/add-support-for-no-poweroff-on-failure-86e43b3e39043990.yaml +releasenotes/notes/add-support-for-smart-nic-0fc5b10ba6772f7f.yaml releasenotes/notes/add-timeout-parameter-to-power-methods-5f632c936497685e.yaml releasenotes/notes/add-tooz-dep-85c56c74733a222d.yaml releasenotes/notes/add-validate-rescue-2202e8ce9a174ece.yaml @@ -884,6 +960,7 @@ releasenotes/notes/agent-takeover-60f27cef21ebfb48.yaml releasenotes/notes/agent-wol-driver-4116f64907d0db9c.yaml releasenotes/notes/agent_partition_image-48a03700f41a3980.yaml +releasenotes/notes/allocation-api-6ac2d262689f5f59.yaml releasenotes/notes/allow-pxelinux-config-folder-to-be-defined-da0ddd397d58dcc8.yaml releasenotes/notes/allow-set-interface-to-node-in-available-bd6f695620c2d77f.yaml releasenotes/notes/allow-to-attach-vif-to-active-node-55963be2ec269043.yaml @@ -900,6 +977,7 @@ releasenotes/notes/bmc_reset-warm-9396ac444cafd734.yaml releasenotes/notes/boot-from-url-98d21670e726c518.yaml releasenotes/notes/boot-ipxe-inc-workaround-548e10d1d6616752.yaml +releasenotes/notes/broken-driver-update-fc5303340080ef04.yaml releasenotes/notes/bug-1506657-3bcb4ef46623124d.yaml releasenotes/notes/bug-1518374-decd73fd82c2eb94.yaml releasenotes/notes/bug-1548086-ed88646061b88faf.yaml @@ -922,12 +1000,17 @@ releasenotes/notes/bug-2002062-959b865ced05b746.yaml releasenotes/notes/bug-2002093-9fcb3613d2daeced.yaml releasenotes/notes/bug-2003972-dae9b7d0f6180339.yaml +releasenotes/notes/bug-2004947-e5f27e11b8f9c96d.yaml +releasenotes/notes/build-configdrive-5b3b9095824faf4e.yaml +releasenotes/notes/build-iso-from-esp-d156036aa8ef85fb.yaml +releasenotes/notes/build-uefi-only-iso-ce6bcb0da578d1d6.yaml releasenotes/notes/build_instance_info-c7e3f12426b48965.yaml releasenotes/notes/catch-third-party-driver-validate-exceptions-94ed2a91c50d2d8e.yaml releasenotes/notes/change-ramdisk-log-filename-142b10d0b02a5ca6.yaml releasenotes/notes/change-updated-at-object-field-a74466f7c4541072.yaml releasenotes/notes/check-dynamic-allocation-enabled-e94f3b8963b114d0.yaml releasenotes/notes/check-for-whole-disk-image-uefi-3bf2146588de2423.yaml +releasenotes/notes/check_obj_versions-e86d897df673e833.yaml releasenotes/notes/check_protocol_for_ironic_api-32f35c93a140d3ae.yaml releasenotes/notes/classic-drivers-deprecation-de464065187d4c14.yaml releasenotes/notes/clean-nodes-stuck-in-cleaning-on-startup-443823ea4f937965.yaml @@ -957,13 +1040,16 @@ releasenotes/notes/default_boot_option-f22c01f976bc2de7.yaml releasenotes/notes/dell-boss-raid1-ec33e5b9c59d4021.yaml releasenotes/notes/deny-too-long-chassis-description-0690d6f67ed002d5.yaml +releasenotes/notes/deploy-templates-5df3368df862631c.yaml 
releasenotes/notes/deploy_steps-243b341cf742f7cc.yaml releasenotes/notes/deprecate-agent-passthru-67d1e2cf25b30a30.yaml +releasenotes/notes/deprecate-cisco-drivers-3ae79a24b76ff963.yaml releasenotes/notes/deprecate-clustered-compute-manager-3dd68557446bcc5c.yaml releasenotes/notes/deprecate-dhcp-update-mac-address-f12a4959432c8e20.yaml releasenotes/notes/deprecate-elilo-2beca4800f475426.yaml releasenotes/notes/deprecate-glance-url-scheme-ceff3008cf9cf590.yaml releasenotes/notes/deprecate-global-region-4dbea91de71ebf59.yaml +releasenotes/notes/deprecate-hash-distribution-replicas-ef0626ccc592b70e.yaml releasenotes/notes/deprecate-inspector-enabled-901fd9c9426046c7.yaml releasenotes/notes/deprecate-oneview-drivers-5a487e1940bcbbc6.yaml releasenotes/notes/deprecate-support-for-glance-v1-8b194e6b20cbfebb.yaml @@ -1003,6 +1089,7 @@ releasenotes/notes/fail-when-vif-port-id-is-missing-7640669f9d9e705d.yaml releasenotes/notes/fake-noop-bebc43983eb801d1.yaml releasenotes/notes/fake_soft_power-32683a848a989fc2.yaml +releasenotes/notes/fast-track-deployment-f09a8b921b3aae36.yaml releasenotes/notes/fix-agent-clean-up-9a25deb85bc53d9b.yaml releasenotes/notes/fix-agent-ilo-temp-image-cleanup-711429d0e67807ae.yaml releasenotes/notes/fix-api-access-logs-68b9ca4f411f339c.yaml @@ -1015,10 +1102,12 @@ releasenotes/notes/fix-clean-steps-not-running-0d065cb022bc0419.yaml releasenotes/notes/fix-cleaning-spawn-error-60b60281f3be51c2.yaml releasenotes/notes/fix-cleaning-with-traits-3a54faa70d594fd0.yaml +releasenotes/notes/fix-conductor-list-raise-131ac76719b74032.yaml releasenotes/notes/fix-cpu-count-8904a4e1a24456f4.yaml releasenotes/notes/fix-cve-2016-4985-b62abae577025365.yaml releasenotes/notes/fix-dir-permissions-bc56e83a651bbdb0.yaml releasenotes/notes/fix-disk-identifier-overwrite-42b33a5a0f7742d8.yaml +releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml releasenotes/notes/fix-get-boot-device-not-persistent-de6159d8d2b60656.yaml releasenotes/notes/fix-get-deploy-info-port.yaml releasenotes/notes/fix-ilo-drivers-log-message-c3c64c1ca0a0bca8.yaml @@ -1050,10 +1139,12 @@ releasenotes/notes/fix-socat-command-afc840284446870a.yaml releasenotes/notes/fix-swift-ssl-options-d93d653dcd404960.yaml releasenotes/notes/fix-sync-power-state-last-error-65fa42bad8e38c3b.yaml +releasenotes/notes/fix-tftp-master-path-config-77face94f5db9af7.yaml releasenotes/notes/fix-updating-node-driver-to-classic-16b0d5ba47e74d10.yaml releasenotes/notes/fix-url-collisions-43abfc8364ca34e7.yaml releasenotes/notes/fix-vif-detach-fca221f1a1c0e9fa.yaml releasenotes/notes/fix-virtualbox-localboot-not-working-558a3dec72b5116b.yaml +releasenotes/notes/fix-xclarity-management-defect-ec5af0cc6d1045d9.yaml releasenotes/notes/fix_deploy_validation_resp_code-ed93627d1b0dfa94.yaml releasenotes/notes/flag_always_reboot-62468a7058b58823.yaml releasenotes/notes/force-out-hung-ipmitool-process-519c7567bcbaa882.yaml @@ -1069,9 +1160,12 @@ releasenotes/notes/hexraw-support-removed-8e8fa07595a629f4.yaml releasenotes/notes/html-errors-27579342e7e8183b.yaml releasenotes/notes/hw-ifaces-periodics-af8c9b93ecca9fcd.yaml +releasenotes/notes/ibmc-driver-45fcf9f50ebf0193.yaml +releasenotes/notes/idrac-advance-python-dracclient-version-01c6ef671670ffb3.yaml releasenotes/notes/idrac-hardware-type-54383960af3459d0.yaml releasenotes/notes/idrac-no-vendor-911904dd69457826.yaml releasenotes/notes/idrac-uefi-boot-mode-86f4694b4247a1ca.yaml +releasenotes/notes/ilo-async-bios-clean-steps-15e49545ba818997.yaml 
releasenotes/notes/ilo-automated-cleaning-fails-14ee438de3dd8690.yaml releasenotes/notes/ilo-bios-settings-bc91524c459a4fd9.yaml releasenotes/notes/ilo-boot-from-iscsi-volume-41e8d510979c5037.yaml @@ -1087,6 +1181,7 @@ releasenotes/notes/ilo-soft-power-operations-eaef33a3ff56b047.yaml releasenotes/notes/ilo-update-proliantutils-version-fd41a7c2a27be735.yaml releasenotes/notes/ilo-vendor-e8d299ae13388184.yaml +releasenotes/notes/ilo5-oob-raid-a0eac60f7d77a4fc.yaml releasenotes/notes/image-no-data-c281f638d3dedfb2.yaml releasenotes/notes/implement-policy-in-code-cbb0216ef5f8224f.yaml releasenotes/notes/improve-conductor-shutdown-42687d8b9dac4054.yaml @@ -1101,8 +1196,10 @@ releasenotes/notes/ipa-command-retries-and-timeout-29b0be3f2c21328c.yaml releasenotes/notes/ipa-streams-raw-images-1010327b0dad763c.yaml releasenotes/notes/ipmi-cmd-for-ipmi-consoles-2e1104f22df3efcd.yaml +releasenotes/notes/ipmi-console-port-ec6348df4eee6746.yaml releasenotes/notes/ipmi-disable-timeout-option-e730362007f9bedd.yaml releasenotes/notes/ipmi-noop-mgmt-8fad89dc2b4665b8.yaml +releasenotes/notes/ipmi_hex_kg_key-8f6caabe5b7d7a9b.yaml releasenotes/notes/ipminative-bootdev-uefi-954a0dd825bcef97.yaml releasenotes/notes/ipmitool-bootdev-persistent-uefi-b1181a3c82343c8f.yaml releasenotes/notes/ipmitool-vendor-3f0f52240ebbe489.yaml @@ -1113,6 +1210,7 @@ releasenotes/notes/ipxe-dhcp-b799bc326cd2529a.yaml releasenotes/notes/ipxe-uefi-f5be11c7b0606a84.yaml releasenotes/notes/ipxe-use-swift-5ccf490daab809cc.yaml +releasenotes/notes/ipxe-with-dhcpv6-2bc7bd7f53a70f51.yaml releasenotes/notes/ipxe_retry_on_failure-e71fc6b3e9a5be3b.yaml releasenotes/notes/ipxe_timeout_parameter-03fc3c76c520fac2.yaml releasenotes/notes/irmc-add-clean-step-reset-bios-config-a8bed625670b7fdf.yaml @@ -1132,6 +1230,8 @@ releasenotes/notes/iscsi-inband-cleaning-bff87aac16e5d488.yaml releasenotes/notes/iscsi-optional-cpu-arch-ebf6a90dde34172c.yaml releasenotes/notes/iscsi-whole-disk-cd464d589d029b01.yaml +releasenotes/notes/issue-conntrack-bionic-7483671771cf2e82.yaml +releasenotes/notes/json-rpc-0edc429696aca6f9.yaml releasenotes/notes/keystone-auth-3155762c524e44df.yaml releasenotes/notes/keystoneauth-adapter-opts-ca4f68f568e6cf6f.yaml releasenotes/notes/keystoneauth-config-1baa45a0a2dd93b4.yaml @@ -1141,11 +1241,13 @@ releasenotes/notes/lookup-heartbeat-f9772521d12a0549.yaml releasenotes/notes/lookup-ignore-malformed-macs-09e7e909f3a134a3.yaml releasenotes/notes/make-terminal-session-timeout-configurable-b2365b7699b0f98b.yaml +releasenotes/notes/make-versioned-notifications-topics-configurable-18d70d573c27809e.yaml releasenotes/notes/manual-abort-d3d8985a5de7376a.yaml releasenotes/notes/manual-clean-4cc2437be1aea69a.yaml releasenotes/notes/mask-configdrive-contents-77fc557d6bc63b2b.yaml releasenotes/notes/mask-ssh-creds-54ab7b2656578d2e.yaml releasenotes/notes/messaging-log-level-5f870ea69db53d26.yaml +releasenotes/notes/metrics-notifier-information-17858c8e27c795d7.yaml releasenotes/notes/migrate-to-pysnmp-hlapi-477075b5e69cc5bc.yaml releasenotes/notes/migrate_to_hardware_types-0c85c6707c4f296d.yaml releasenotes/notes/migrate_vif_port_id-5e1496638240933d.yaml @@ -1220,8 +1322,10 @@ releasenotes/notes/portgroup-crud-notifications-91204635528972b2.yaml releasenotes/notes/power-fault-recovery-6e22f0114ceee203.yaml releasenotes/notes/poweroff-after-10-tries-c592506f02c167c0.yaml +releasenotes/notes/prelude-to-the-stein-f25b6073b6d1c598.yaml releasenotes/notes/proliantutils_version_update-b6e5ff0e496215a5.yaml 
releasenotes/notes/protected-650acb2c8a387e17.yaml +releasenotes/notes/provide_mountpoint-58cfd25b6dd4cfde.yaml releasenotes/notes/pxe-enabled-ports-check-c1736215dce76e97.yaml releasenotes/notes/pxe-snmp-driver-supported-9c559c6182c6ec4b.yaml releasenotes/notes/pxe-takeover-d8f14bcb60e5b121.yaml @@ -1249,6 +1353,7 @@ releasenotes/notes/remove-deprecated-deploy-erase-devices-iterations-55680ab95cbce3e9.yaml releasenotes/notes/remove-deprecated-dhcp-provider-method-89926a8f0f4793a4.yaml releasenotes/notes/remove-deprecated-dhcp-provider-methods-582742f3000be3c7.yaml +releasenotes/notes/remove-deprecated-drac_host-865be09c6e8fcb90.yaml releasenotes/notes/remove-deprecated-ilo-clean-priority-erase-devices-bb3073da562ed41d.yaml releasenotes/notes/remove-deprecated-option-names-6d5d53cc70dd2d49.yaml releasenotes/notes/remove-discoverd-group-03eaf75e9f94d7be.yaml @@ -1293,6 +1398,7 @@ releasenotes/notes/scciclient-0.4.0-6f01c0f0a5c39062.yaml releasenotes/notes/security_groups-b57a5d6c30c2fae4.yaml releasenotes/notes/server_profile_template_uri-c79e4f15cc20a1cf.yaml +releasenotes/notes/set-boot-mode-4c42b3fd0b5f5b37.yaml releasenotes/notes/setting_provisioning_cleaning_network-fb60caa1cf59cdcf.yaml releasenotes/notes/shellinabox-locking-fix-2fae2a451a8a489a.yaml releasenotes/notes/shred-final-overwrite-with-zeros-50b5ba5b19c0da27.yaml @@ -1303,6 +1409,7 @@ releasenotes/notes/snmp-outlet-validate-ffbe8e6687172efc.yaml releasenotes/notes/snmp-reboot-delay-d18ee3f6c6fc0998.yaml releasenotes/notes/socat-address-conf-5cf043fabb10bd76.yaml +releasenotes/notes/socat-respawn-de9e8805c820a7ac.yaml releasenotes/notes/soft-power-operations-oneview-e7ac054668235998.yaml releasenotes/notes/soft-reboot-poweroff-9fdb0a4306dd668d.yaml releasenotes/notes/sort_key_allowed_field-091f8eeedd0a2ace.yaml @@ -1311,10 +1418,12 @@ releasenotes/notes/story-2002600-return-503-if-no-conductors-online-ead1512628182ec4.yaml releasenotes/notes/story-2002637-4825d60b096e475b.yaml releasenotes/notes/story-2004266-4725d327900850bf.yaml +releasenotes/notes/story-2004444-f540d9bbc3532ad0.yaml releasenotes/notes/streaming-partition-images-d58fe619658b066e.yaml releasenotes/notes/sum-based-update-firmware-manual-clean-step-e69ade488060cf27.yaml releasenotes/notes/support-root-device-hints-with-operators-96cf34fa37b5b2e8.yaml releasenotes/notes/tempest_plugin_removal-009f9ce8456b16fe.yaml +releasenotes/notes/type-error-str-6826c53d7e5e1243.yaml releasenotes/notes/uefi-first-prepare-e7fa1e2a78b4af99.yaml releasenotes/notes/uefi-grub2-by-default-6b797a9e690d2dd5.yaml releasenotes/notes/undeprecate-xclarity-4f4752017e8310e7.yaml @@ -1337,6 +1446,7 @@ releasenotes/notes/vif-detach-locking-fix-7be66f8150e19819.yaml releasenotes/notes/vif-detach-locking-fix-revert-3961d47fe419460a.yaml releasenotes/notes/volume-connector-and-target-api-dd172f121ab3af8e.yaml +releasenotes/notes/whole-disk-root-gb-9132e5a354e6cb9d.yaml releasenotes/notes/wipe-disk-before-deployment-0a8b9cede4a659e9.yaml releasenotes/notes/wsgi-applications-5d36cf2a8885a56d.yaml releasenotes/notes/wwn-extension-root-device-hints-de40ca1444ba4888.yaml diff -Nru ironic-12.0.0/lower-constraints.txt ironic-12.1.0/lower-constraints.txt --- ironic-12.0.0/lower-constraints.txt 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/lower-constraints.txt 2019-03-21 20:07:40.000000000 +0000 @@ -61,7 +61,7 @@ netaddr==0.7.19 netifaces==0.10.6 openstackdocstheme==1.18.1 -openstacksdk==0.12.0 +openstacksdk==0.25.0 os-api-ref==1.4.0 os-client-config==1.29.0 os-service-types==1.2.0 @@ -98,7 
+98,7 @@ Pillow==5.0.0 prettytable==0.7.2 psutil==3.2.2 -psycopg2==2.6.2 +psycopg2==2.7.3 pycadf==2.7.0 pycodestyle==2.3.1 pycparser==2.18 @@ -163,4 +163,4 @@ WebOb==1.7.1 WebTest==2.0.27 wrapt==1.10.11 -WSME==0.8.0 +WSME==0.9.3 diff -Nru ironic-12.0.0/PKG-INFO ironic-12.1.0/PKG-INFO --- ironic-12.0.0/PKG-INFO 2018-12-19 10:03:57.000000000 +0000 +++ ironic-12.1.0/PKG-INFO 2019-03-21 20:09:08.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: ironic -Version: 12.0.0 +Version: 12.1.0 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack diff -Nru ironic-12.0.0/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml ironic-12.1.0/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml --- ironic-12.0.0/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/playbooks/legacy/grenade-dsvm-ironic-multinode-multitenant/run.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -51,6 +51,13 @@ - shell: cmd: | + + # Precreate brbm so that it is created before neutron services are started, as they fail if it + # is not present + DEBIAN_FRONTEND=noninteractive sudo -E apt-get --option Dpkg::Options::=--force-confold --assume-yes install openvswitch-switch + sudo systemctl restart openvswitch-switch + sudo ovs-vsctl -- --may-exist add-br brbm + cat << 'EOF' >> ironic-extra-vars export PROJECTS="openstack-dev/grenade $PROJECTS" export DEVSTACK_GATE_GRENADE=pullup @@ -69,13 +76,14 @@ export PROJECTS="openstack/networking-generic-switch $PROJECTS" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin networking-generic-switch git://git.openstack.org/openstack/networking-generic-switch" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_USE_LINK_LOCAL=True" + export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVS_BRIDGE_MAPPINGS=mynetwork:brbm,public:br_ironic_vxlan" export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVS_PHYSICAL_BRIDGE=brbm" export DEVSTACK_LOCAL_CONFIG+=$'\n'"PHYSICAL_NETWORK=mynetwork" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_PROVISION_NETWORK_NAME=ironic-provision" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_PROVISION_SUBNET_GATEWAY=10.0.5.1" export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_PLUGIN=ml2" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_USE_PROVIDERNET_FOR_PUBLIC=False" + export DEVSTACK_LOCAL_CONFIG+=$'\n'"PUBLIC_BRIDGE=br_ironic_vxlan" export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_TENANT_VLANS=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_ML2_TENANT_NETWORK_TYPE=vlan" export DEVSTACK_LOCAL_CONFIG+=$'\n'"TENANT_VLAN_RANGE=100:150" diff -Nru ironic-12.0.0/releasenotes/notes/add-ansible-python-interpreter-2035e0f23d407aaf.yaml ironic-12.1.0/releasenotes/notes/add-ansible-python-interpreter-2035e0f23d407aaf.yaml --- ironic-12.0.0/releasenotes/notes/add-ansible-python-interpreter-2035e0f23d407aaf.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-ansible-python-interpreter-2035e0f23d407aaf.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,15 @@ +--- +features: + - Adds option ``[ansible]default_python_interpreter`` to choose + the python interpreter that ansible uses on managed machines. + By default, ansible uses ``/usr/bin/python`` as the interpreter, making + the assumption that that path is always present on remote managed systems. + This might not always be the case, for example in custom-built + images or Python 3 native distributions.
+ With this option the operator can set the absolute + path of the python interpreter on the remote machines, for example + ``/usr/bin/python3``. + The same interpreter will be used in all operations that use the + ansible deploy interface. + It is also possible to override the value set in the configuration for a + node by passing ``ansible_python_interpreter`` in its ``driver_info``. \ No newline at end of file diff -Nru ironic-12.0.0/releasenotes/notes/add-boot-mode-redfish-inspect-48e2b27ef022932a.yaml ironic-12.1.0/releasenotes/notes/add-boot-mode-redfish-inspect-48e2b27ef022932a.yaml --- ironic-12.0.0/releasenotes/notes/add-boot-mode-redfish-inspect-48e2b27ef022932a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-boot-mode-redfish-inspect-48e2b27ef022932a.yaml 2019-03-21 20:07:44.000000000 +0000 @@ -0,0 +1,6 @@ +--- +features: + - | + Adds the currently used boot mode to the node's ``properties/capabilities`` + when the ``redfish`` inspect interface runs. This change aligns + with the in-band ``inspector`` behavior. diff -Nru ironic-12.0.0/releasenotes/notes/add-node-description-790097704f45af91.yaml ironic-12.1.0/releasenotes/notes/add-node-description-790097704f45af91.yaml --- ironic-12.0.0/releasenotes/notes/add-node-description-790097704f45af91.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-node-description-790097704f45af91.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +features: + - | + Adds a ``description`` field to the node object to enable operators to + store any information related to the node. The field holds up to 4096 + UTF-8 characters. diff -Nru ironic-12.0.0/releasenotes/notes/add-option-persistent-boot-device-139cf280fb66f4f7.yaml ironic-12.1.0/releasenotes/notes/add-option-persistent-boot-device-139cf280fb66f4f7.yaml --- ironic-12.0.0/releasenotes/notes/add-option-persistent-boot-device-139cf280fb66f4f7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-option-persistent-boot-device-139cf280fb66f4f7.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,17 @@ +--- +features: + - | + Adds the capability to control the persistence of boot order changes during + instance deployment via (i)PXE on a per-node level. The option + 'force_persistent_boot_device' in the node's driver info for the (i)PXE + drivers is extended to allow the values 'Default' (make all changes + but the last one upon deployment non-persistent), 'Always' (make all + changes persistent), and 'Never' (make all boot order changes + non-persistent). +deprecations: + - | + The values 'True'/'False' for the option 'force_persistent_boot_device' + in the node's driver info for the (i)PXE drivers are deprecated and + support for them may be removed in a future release. The former default + value 'False' is replaced by the new value 'Default', and the value 'True' + by 'Always'. diff -Nru ironic-12.0.0/releasenotes/notes/add-owner-information-52e153faf570747e.yaml ironic-12.1.0/releasenotes/notes/add-owner-information-52e153faf570747e.yaml --- ironic-12.0.0/releasenotes/notes/add-owner-information-52e153faf570747e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-owner-information-52e153faf570747e.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +--- +features: + - | + Adds API version 1.50, which allows storing an ``owner`` field + on node objects.
This is intended either for the storage of human-parsable + information or the storage of a tenant UUID which could be leveraged + in a future version of the Bare Metal as a Service API. diff -Nru ironic-12.0.0/releasenotes/notes/add-parallel-power-syncs-b099d66e80aab616.yaml ironic-12.1.0/releasenotes/notes/add-parallel-power-syncs-b099d66e80aab616.yaml --- ironic-12.0.0/releasenotes/notes/add-parallel-power-syncs-b099d66e80aab616.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-parallel-power-syncs-b099d66e80aab616.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Parallelizes periodic power sync calls by running up to the number of + workers set by ``[conductor]/sync_power_state_workers`` simultaneously. + The default is to run up to ``8`` workers. This change should let + larger-scale setups run power syncs more frequently and makes the whole + power sync procedure more resilient to slow or dead BMCs. diff -Nru ironic-12.0.0/releasenotes/notes/add-port-is-smartnic-4ce6974c8fe2732d.yaml ironic-12.1.0/releasenotes/notes/add-port-is-smartnic-4ce6974c8fe2732d.yaml --- ironic-12.0.0/releasenotes/notes/add-port-is-smartnic-4ce6974c8fe2732d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-port-is-smartnic-4ce6974c8fe2732d.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,21 @@ +--- +features: + - | + Adds an ``is_smartnic`` field to the port object in REST API version + 1.53. + + The ``is_smartnic`` field indicates whether this port is a Smart NIC port; + it is False by default. This field may be set by the operator to use bare + metal nodes with Smart NICs as ironic nodes. + + The REST API endpoints related to ports provide support for the + ``is_smartnic`` field. The `ironic admin documentation + `_ + provides information on how to configure and use Smart NIC ports. +upgrade: + - | + Adds an ``is_smartnic`` field to the port object in REST API version + 1.53. + + Upgrading to this release will set ``is_smartnic`` to False for all + ports. diff -Nru ironic-12.0.0/releasenotes/notes/add-pxe-per-node-526fd79df17efda8.yaml ironic-12.1.0/releasenotes/notes/add-pxe-per-node-526fd79df17efda8.yaml --- ironic-12.0.0/releasenotes/notes/add-pxe-per-node-526fd79df17efda8.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-pxe-per-node-526fd79df17efda8.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Adds a new field ``pxe_template`` that can be set in a node's + ``driver_info``. It specifies the path to a custom PXE boot template. + If present, this template is read and takes priority over the per-arch + and general PXE templates. + diff -Nru ironic-12.0.0/releasenotes/notes/add-support-for-smart-nic-0fc5b10ba6772f7f.yaml ironic-12.1.0/releasenotes/notes/add-support-for-smart-nic-0fc5b10ba6772f7f.yaml --- ironic-12.0.0/releasenotes/notes/add-support-for-smart-nic-0fc5b10ba6772f7f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/add-support-for-smart-nic-0fc5b10ba6772f7f.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Adds the deployment workflow changes necessary to support + the use of Smart NICs in the ``ansible``, ``direct``, ``iscsi`` and + ``ramdisk`` deployment interfaces. Networking service integration for + this functionality is not anticipated until the Train release of the + Networking service.
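Both the ``is_smartnic`` port field and the per-node ``pxe_template`` setting above surface through the ordinary REST API, so a plain HTTP client is enough to try them. The following is a minimal sketch using Python's ``requests`` library; the endpoint URL, token, node UUID, MAC address and template path are illustrative assumptions, and the version header pins API 1.53, where ``is_smartnic`` was introduced:

    import requests

    IRONIC = "http://127.0.0.1:6385"               # assumed API endpoint
    NODE = "6d85703a-565d-469a-96ce-30b6de53079d"  # assumed node UUID
    HEADERS = {
        "X-Auth-Token": "secret-token",            # assumed valid token
        "X-OpenStack-Ironic-API-Version": "1.53",  # is_smartnic needs >= 1.53
    }

    # Create a port flagged as a Smart NIC (the field defaults to False).
    resp = requests.post(
        IRONIC + "/v1/ports",
        headers=HEADERS,
        json={
            "node_uuid": NODE,
            "address": "52:54:00:12:34:56",        # assumed MAC address
            "is_smartnic": True,
        },
    )
    resp.raise_for_status()

    # Point the same node at a custom PXE template via its driver_info,
    # using a standard JSON PATCH request.
    resp = requests.patch(
        IRONIC + "/v1/nodes/" + NODE,
        headers=HEADERS,
        json=[{"op": "add",
               "path": "/driver_info/pxe_template",
               "value": "/etc/ironic/my-pxe.template"}],  # assumed path
    )
    resp.raise_for_status()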
diff -Nru ironic-12.0.0/releasenotes/notes/allocation-api-6ac2d262689f5f59.yaml ironic-12.1.0/releasenotes/notes/allocation-api-6ac2d262689f5f59.yaml --- ironic-12.0.0/releasenotes/notes/allocation-api-6ac2d262689f5f59.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/allocation-api-6ac2d262689f5f59.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,10 @@ +--- +features: + - | + Introduces the allocation API. This API allows finding and reserving a node + by its resource class, traits and an optional list of candidate nodes. + Introduces new API endpoints: + + * ``GET/POST /v1/allocations`` + * ``GET/DELETE /v1/allocations/`` + * ``GET/DELETE /v1/nodes//allocation`` diff -Nru ironic-12.0.0/releasenotes/notes/broken-driver-update-fc5303340080ef04.yaml ironic-12.1.0/releasenotes/notes/broken-driver-update-fc5303340080ef04.yaml --- ironic-12.0.0/releasenotes/notes/broken-driver-update-fc5303340080ef04.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/broken-driver-update-fc5303340080ef04.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a bug in the node update code that could cause nodes + to become impossible to update if their driver is no longer available. diff -Nru ironic-12.0.0/releasenotes/notes/bug-2004947-e5f27e11b8f9c96d.yaml ironic-12.1.0/releasenotes/notes/bug-2004947-e5f27e11b8f9c96d.yaml --- ironic-12.0.0/releasenotes/notes/bug-2004947-e5f27e11b8f9c96d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/bug-2004947-e5f27e11b8f9c96d.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where setting the ``conductor_group`` for a node was + unintentionally case-sensitive, so that the request could fail if the + case did not match between the conductor configuration and the API request. diff -Nru ironic-12.0.0/releasenotes/notes/build-configdrive-5b3b9095824faf4e.yaml ironic-12.1.0/releasenotes/notes/build-configdrive-5b3b9095824faf4e.yaml --- ironic-12.0.0/releasenotes/notes/build-configdrive-5b3b9095824faf4e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/build-configdrive-5b3b9095824faf4e.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,9 @@ +--- +features: + - | + Adds support for building config drives. Starting with API version 1.56, + the ``configdrive`` parameter of ``/v1/nodes//states/provision`` can + be a JSON object with optional keys ``meta_data`` (JSON object), + ``network_data`` (JSON object) and ``user_data`` (JSON object, array or + string). See `story 2005083 + `_ for more details. diff -Nru ironic-12.0.0/releasenotes/notes/build-iso-from-esp-d156036aa8ef85fb.yaml ironic-12.1.0/releasenotes/notes/build-iso-from-esp-d156036aa8ef85fb.yaml --- ironic-12.0.0/releasenotes/notes/build-iso-from-esp-d156036aa8ef85fb.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/build-iso-from-esp-d156036aa8ef85fb.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,12 @@ +--- +features: + - | + Allows the user to supply an EFI system partition image to ironic, for + building UEFI-bootable ISO images, in the form of a local file, a UUID + or a URI reference. The new ``[conductor]esp_image`` option can be used + to configure ironic to use a local file. +fixes: + - | + Makes ironic build a UEFI-only bootable ISO image (when asked to + build a UEFI-bootable image) rather than a hybrid + BIOS/UEFI-bootable ISO.
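Since the allocation note above only lists the endpoints, here is a minimal sketch of how a client might drive them, again with ``requests``. The endpoint, token, resource class and trait are assumptions; the polling loop reflects the note's point that allocation is asynchronous, and the version header pins API 1.52, where allocations were introduced:

    import time

    import requests

    IRONIC = "http://127.0.0.1:6385"               # assumed API endpoint
    HEADERS = {
        "X-Auth-Token": "secret-token",            # assumed valid token
        "X-OpenStack-Ironic-API-Version": "1.52",  # allocations need >= 1.52
    }

    # Ask ironic to find and reserve a node of the given resource class
    # that has all of the requested traits.
    resp = requests.post(
        IRONIC + "/v1/allocations",
        headers=HEADERS,
        json={
            "resource_class": "baremetal",         # assumed resource class
            "traits": ["CUSTOM_GPU"],              # assumed trait
        },
    )
    resp.raise_for_status()
    allocation = resp.json()

    # The process is asynchronous: the allocation starts in the
    # "allocating" state and eventually becomes "active" or "error".
    while allocation["state"] == "allocating":
        time.sleep(1)
        allocation = requests.get(
            IRONIC + "/v1/allocations/" + allocation["uuid"],
            headers=HEADERS,
        ).json()

    print(allocation["state"], allocation.get("node_uuid"))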
diff -Nru ironic-12.0.0/releasenotes/notes/build-uefi-only-iso-ce6bcb0da578d1d6.yaml ironic-12.1.0/releasenotes/notes/build-uefi-only-iso-ce6bcb0da578d1d6.yaml --- ironic-12.0.0/releasenotes/notes/build-uefi-only-iso-ce6bcb0da578d1d6.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/build-uefi-only-iso-ce6bcb0da578d1d6.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +--- +other: + - | + The Bare Metal service now builds a UEFI-only bootable ISO image (when + asked to build a UEFI-bootable image) rather than a hybrid + BIOS/UEFI-bootable ISO. + diff -Nru ironic-12.0.0/releasenotes/notes/check_obj_versions-e86d897df673e833.yaml ironic-12.1.0/releasenotes/notes/check_obj_versions-e86d897df673e833.yaml --- ironic-12.0.0/releasenotes/notes/check_obj_versions-e86d897df673e833.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/check_obj_versions-e86d897df673e833.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Adds a check to the ``ironic-status upgrade check`` command that verifies + the compatibility of the object versions with this release of ironic. diff -Nru ironic-12.0.0/releasenotes/notes/deploy-templates-5df3368df862631c.yaml ironic-12.1.0/releasenotes/notes/deploy-templates-5df3368df862631c.yaml --- ironic-12.0.0/releasenotes/notes/deploy-templates-5df3368df862631c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/deploy-templates-5df3368df862631c.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,15 @@ +--- +features: + - | + Adds the deploy templates API. Deploy templates can be used to customise + the node deployment process, each specifying a list of deploy steps to + execute with configurable priority and arguments. + + Introduces the following new API endpoints, available from Bare Metal API + version 1.55: + + * ``GET /v1/deploy_templates`` + * ``GET /v1/deploy_templates/`` + * ``POST /v1/deploy_templates`` + * ``PATCH /v1/deploy_templates/`` + * ``DELETE /v1/deploy_templates/`` diff -Nru ironic-12.0.0/releasenotes/notes/deprecate-cisco-drivers-3ae79a24b76ff963.yaml ironic-12.1.0/releasenotes/notes/deprecate-cisco-drivers-3ae79a24b76ff963.yaml --- ironic-12.0.0/releasenotes/notes/deprecate-cisco-drivers-3ae79a24b76ff963.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/deprecate-cisco-drivers-3ae79a24b76ff963.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,11 @@ +--- +deprecations: + - | + The Cisco ``cisco-ucs-managed`` and ``cisco-ucs-standalone`` drivers have + been deprecated due to a lack of reporting third-party CI and of vendor + maintenance of the driver code. In their present state, these drivers + would have been removed as part of the eventual removal of support + for Python 2. These drivers should be anticipated to be removed prior to + the final Train release of the Bare Metal service. More information + can be found + `here `_. diff -Nru ironic-12.0.0/releasenotes/notes/deprecate-hash-distribution-replicas-ef0626ccc592b70e.yaml ironic-12.1.0/releasenotes/notes/deprecate-hash-distribution-replicas-ef0626ccc592b70e.yaml --- ironic-12.0.0/releasenotes/notes/deprecate-hash-distribution-replicas-ef0626ccc592b70e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/deprecate-hash-distribution-replicas-ef0626ccc592b70e.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +deprecations: + - | + The "hash_distribution_replicas" configuration option is now deprecated.
+ If specified in the config file, a warning is logged. diff -Nru ironic-12.0.0/releasenotes/notes/fast-track-deployment-f09a8b921b3aae36.yaml ironic-12.1.0/releasenotes/notes/fast-track-deployment-f09a8b921b3aae36.yaml --- ironic-12.0.0/releasenotes/notes/fast-track-deployment-f09a8b921b3aae36.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/fast-track-deployment-f09a8b921b3aae36.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,16 @@ +--- +features: + - | + Adds a new feature called `fast-track` which allows an operator to + optionally configure the Bare Metal API service and the Bare Metal + conductor service to permit lookup and heartbeat for nodes that are + in the process of being enrolled and created. + + These nodes can be left online from a process such as discovery. + If ironic-python-agent has communicated with the Bare Metal service + API endpoint within the last `300` seconds, the setup steps that + are normally involved in preparing to launch a ramdisk on the node + are skipped, along with power operations, enabling a bare metal node to + undergo discovery through to deployment with a single power cycle. + Fast-track functionality may be enabled through the ``[deploy]fast_track`` + option. diff -Nru ironic-12.0.0/releasenotes/notes/fix-conductor-list-raise-131ac76719b74032.yaml ironic-12.1.0/releasenotes/notes/fix-conductor-list-raise-131ac76719b74032.yaml --- ironic-12.0.0/releasenotes/notes/fix-conductor-list-raise-131ac76719b74032.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/fix-conductor-list-raise-131ac76719b74032.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where listing nodes by conductor fails if any of the nodes + has an invalid hardware type, which may happen when a conductor is + out of service. diff -Nru ironic-12.0.0/releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml ironic-12.1.0/releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml --- ironic-12.0.0/releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/fix-drac-job-state-8c5422bbeaf15226.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue in the ``idrac`` RAID interface seen when creating RAID + configurations using ``python-dracclient`` version ``2.0.0`` or higher. diff -Nru ironic-12.0.0/releasenotes/notes/fix-tftp-master-path-config-77face94f5db9af7.yaml ironic-12.1.0/releasenotes/notes/fix-tftp-master-path-config-77face94f5db9af7.yaml --- ironic-12.0.0/releasenotes/notes/fix-tftp-master-path-config-77face94f5db9af7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/fix-tftp-master-path-config-77face94f5db9af7.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes an issue where the master TFTP image cache could not be disabled. + The configuration option ``[pxe]/tftp_master_path`` may now be set to + the empty string to disable the cache. For more information, see + story `2004608 `_.
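For the deploy templates API introduced a few notes above, the create request pairs a trait-style template name with a list of deploy steps. Below is a hedged sketch of such a request at API version 1.55; the endpoint, token, template name and the BIOS step with its setting values are illustrative assumptions, not values mandated by this release:

    import requests

    IRONIC = "http://127.0.0.1:6385"               # assumed API endpoint
    HEADERS = {
        "X-Auth-Token": "secret-token",            # assumed valid token
        "X-OpenStack-Ironic-API-Version": "1.55",  # deploy templates >= 1.55
    }

    # A deploy template whose steps run during deployment of nodes
    # matching the trait-style template name.
    resp = requests.post(
        IRONIC + "/v1/deploy_templates",
        headers=HEADERS,
        json={
            "name": "CUSTOM_HYPERTHREADING_ON",    # assumed template name
            "steps": [
                {
                    "interface": "bios",
                    "step": "apply_configuration",  # assumed deploy step
                    "args": {
                        "settings": [
                            # Assumed vendor-specific BIOS setting.
                            {"name": "LogicalProc", "value": "Enabled"},
                        ],
                    },
                    "priority": 150,
                },
            ],
        },
    )
    resp.raise_for_status()
    print(resp.json()["uuid"])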
diff -Nru ironic-12.0.0/releasenotes/notes/fix-xclarity-management-defect-ec5af0cc6d1045d9.yaml ironic-12.1.0/releasenotes/notes/fix-xclarity-management-defect-ec5af0cc6d1045d9.yaml --- ironic-12.0.0/releasenotes/notes/fix-xclarity-management-defect-ec5af0cc6d1045d9.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/fix-xclarity-management-defect-ec5af0cc6d1045d9.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixes an issue where the ``xclarity`` management interface failed to get + the boot order. Now the driver correctly gets the boot device, and this + has been verified in the third-party CI. See story + `2004576 `_ for + details. diff -Nru ironic-12.0.0/releasenotes/notes/ibmc-driver-45fcf9f50ebf0193.yaml ironic-12.1.0/releasenotes/notes/ibmc-driver-45fcf9f50ebf0193.yaml --- ironic-12.0.0/releasenotes/notes/ibmc-driver-45fcf9f50ebf0193.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ibmc-driver-45fcf9f50ebf0193.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,10 @@ +--- +features: + - | + Adds a new hardware type ``ibmc`` for HUAWEI 2288H V5, CH121 V5 series + servers. This hardware type supports PXE-based boot using HUAWEI iBMC + RESTful APIs. The following driver interfaces are supported: + + * management: ``ibmc`` + * power: ``ibmc`` + * vendor: ``ibmc`` diff -Nru ironic-12.0.0/releasenotes/notes/idrac-advance-python-dracclient-version-01c6ef671670ffb3.yaml ironic-12.1.0/releasenotes/notes/idrac-advance-python-dracclient-version-01c6ef671670ffb3.yaml --- ironic-12.0.0/releasenotes/notes/idrac-advance-python-dracclient-version-01c6ef671670ffb3.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/idrac-advance-python-dracclient-version-01c6ef671670ffb3.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Advances the required ``python-dracclient`` version to 1.5.0 or later. + That version is required by the fix for the ``idrac`` hardware type's + `bug 2004340 `_. diff -Nru ironic-12.0.0/releasenotes/notes/ilo5-oob-raid-a0eac60f7d77a4fc.yaml ironic-12.1.0/releasenotes/notes/ilo5-oob-raid-a0eac60f7d77a4fc.yaml --- ironic-12.0.0/releasenotes/notes/ilo5-oob-raid-a0eac60f7d77a4fc.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ilo5-oob-raid-a0eac60f7d77a4fc.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,12 @@ +--- +features: + - Adds the new hardware type ``ilo5``. In addition to all other hardware + interfaces the ``ilo`` hardware type supports, it has one new RAID + interface ``ilo5``. + - Adds functionality to perform out-of-band RAID operations for iLO5-based + HPE ProLiant servers. +upgrade: + - The ``create_raid_configuration``, ``delete_raid_configuration`` and + ``read_raid_configuration`` interfaces of the 'proliantutils' library have + been enhanced to support out-of-band RAID operations for the ``ilo5`` + hardware type. To leverage this feature, the 'proliantutils' library needs + to be upgraded to version '2.7.0'. diff -Nru ironic-12.0.0/releasenotes/notes/ilo-async-bios-clean-steps-15e49545ba818997.yaml ironic-12.1.0/releasenotes/notes/ilo-async-bios-clean-steps-15e49545ba818997.yaml --- ironic-12.0.0/releasenotes/notes/ilo-async-bios-clean-steps-15e49545ba818997.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ilo-async-bios-clean-steps-15e49545ba818997.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + Makes all ``ilo`` driver BIOS interface clean steps asynchronous.
This is + required to ensure the settings on the baremetal node are consistent with + the settings stored in the database, irrespective of the node's clean step + status. Refer to bug `2004066 + `_ for details. diff -Nru ironic-12.0.0/releasenotes/notes/ipmi-console-port-ec6348df4eee6746.yaml ironic-12.1.0/releasenotes/notes/ipmi-console-port-ec6348df4eee6746.yaml --- ironic-12.0.0/releasenotes/notes/ipmi-console-port-ec6348df4eee6746.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ipmi-console-port-ec6348df4eee6746.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes the IPMI console implementation to respect all supported IPMI + ``driver_info`` and configuration options, particularly ``ipmi_port``. diff -Nru ironic-12.0.0/releasenotes/notes/ipmi_hex_kg_key-8f6caabe5b7d7a9b.yaml ironic-12.1.0/releasenotes/notes/ipmi_hex_kg_key-8f6caabe5b7d7a9b.yaml --- ironic-12.0.0/releasenotes/notes/ipmi_hex_kg_key-8f6caabe5b7d7a9b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ipmi_hex_kg_key-8f6caabe5b7d7a9b.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +--- +features: + - | + Adds a new property ``ipmi_hex_kg_key`` for the ipmi-based interfaces. + The property enables the user to set the Kg key for IPMIv2 authentication + in hexadecimal format. This value is provided to ``ipmitool`` as + the -y argument. diff -Nru ironic-12.0.0/releasenotes/notes/ipxe-with-dhcpv6-2bc7bd7f53a70f51.yaml ironic-12.1.0/releasenotes/notes/ipxe-with-dhcpv6-2bc7bd7f53a70f51.yaml --- ironic-12.0.0/releasenotes/notes/ipxe-with-dhcpv6-2bc7bd7f53a70f51.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/ipxe-with-dhcpv6-2bc7bd7f53a70f51.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixes an issue where hosts booting via ``iPXE`` + would fail with an error indicating that no configuration was found for + networks where IPv6 is in use. This has been remedied through a minor + addition to the Networking service in the Stein development cycle. For + more information please see `story 2004502 `_. diff -Nru ironic-12.0.0/releasenotes/notes/issue-conntrack-bionic-7483671771cf2e82.yaml ironic-12.1.0/releasenotes/notes/issue-conntrack-bionic-7483671771cf2e82.yaml --- ironic-12.0.0/releasenotes/notes/issue-conntrack-bionic-7483671771cf2e82.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/issue-conntrack-bionic-7483671771cf2e82.yaml 2019-03-21 20:07:44.000000000 +0000 @@ -0,0 +1,13 @@ +--- +issues: + - | + As a good security practice [0], ``nf_conntrack_helper`` is disabled + in Ubuntu Bionic. + This causes an issue when using the ``pxe`` boot interface with the PXE + environment, breaking some of the Ironic CI tests, since Ironic needs + conntrack for TFTP traffic. + It's still possible to use Ironic with PXE on Ubuntu Xenial, and it's also + possible to use Ironic with PXE on Ubuntu Bionic using a workaround based + on custom firewall rules as shown in [0].
+ + [0] https://home.regit.org/netfilter-en/secure-use-of-helpers/ diff -Nru ironic-12.0.0/releasenotes/notes/json-rpc-0edc429696aca6f9.yaml ironic-12.1.0/releasenotes/notes/json-rpc-0edc429696aca6f9.yaml --- ironic-12.0.0/releasenotes/notes/json-rpc-0edc429696aca6f9.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/json-rpc-0edc429696aca6f9.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Adds the ability to use JSON RPC for communication between API and + conductor services. To use it, set the new ``rpc_transport`` configuration + option to ``json-rpc`` and configure the credentials and the ``host_ip`` + in the ``json_rpc`` section. Hostnames of all conductors must be + resolvable for this implementation to work. diff -Nru ironic-12.0.0/releasenotes/notes/make-versioned-notifications-topics-configurable-18d70d573c27809e.yaml ironic-12.1.0/releasenotes/notes/make-versioned-notifications-topics-configurable-18d70d573c27809e.yaml --- ironic-12.0.0/releasenotes/notes/make-versioned-notifications-topics-configurable-18d70d573c27809e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/make-versioned-notifications-topics-configurable-18d70d573c27809e.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +features: + - | + Adds a ``[DEFAULT]/versioned_notifications_topics`` configuration option. + This enables operators to configure the topics used for versioned + notifications. diff -Nru ironic-12.0.0/releasenotes/notes/metrics-notifier-information-17858c8e27c795d7.yaml ironic-12.1.0/releasenotes/notes/metrics-notifier-information-17858c8e27c795d7.yaml --- ironic-12.0.0/releasenotes/notes/metrics-notifier-information-17858c8e27c795d7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/metrics-notifier-information-17858c8e27c795d7.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,13 @@ +--- +features: + - | + Notification events for metrics data now contain a ``node_name`` + field to assist operators in relating metrics data transmitted + by the conductor service. +fixes: + - | + Notification event types now include the hardware type name string as + opposed to a static string of "ipmi". This allows event processors and + operators to understand what the actual notification event data source + is, as opposed to having to rely upon fingerprints of the data to make + such determinations. diff -Nru ironic-12.0.0/releasenotes/notes/prelude-to-the-stein-f25b6073b6d1c598.yaml ironic-12.1.0/releasenotes/notes/prelude-to-the-stein-f25b6073b6d1c598.yaml --- ironic-12.0.0/releasenotes/notes/prelude-to-the-stein-f25b6073b6d1c598.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/prelude-to-the-stein-f25b6073b6d1c598.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,32 @@ +--- +prelude: | + The Bare Metal as a Service team joyfully announces our OpenStack Stein + release of ironic 12.1.0. While no steins or speakers were harmed during + the development of this release, we might have suffered some hearing + damage after we learned that we could increase the volume well + past eleven! + + Notable items include: + + * Increased parallelism of power synchronization to improve overall + conductor efficiency. + * API fields to support node ``description`` and ``owner`` values. + * HPE iLO ``ilo5`` and Huawei ``ibmc`` hardware types. + * Allocations API interface to enable operators to find and select + bare metal nodes for deployment.
+ * JSON-RPC can now be used for ``ironic-api`` to ``ironic-conductor`` + communication as opposed to using an AMQP messaging provider. + * Support for customizable PXE templates and streamlined deployment + sequences. + * Initial support for the definition of "deployment templates" to + enable operators to define and match customized deployment sequences. + * Initial work for supporting SmartNIC configuration is included; + however, the required Networking service changes are not anticipated + until sometime during the Train development cycle. + * And numerous bug fixes, including ones for IPv6 and IPMI. + + This release includes the changes in ironic's ``12.0.0`` release, which + was also published during the Stein development cycle and includes a number + of improvements for Bare Metal infrastructure operators. More about our + earlier Stein release can be found in our + `release notes `_. diff -Nru ironic-12.0.0/releasenotes/notes/provide_mountpoint-58cfd25b6dd4cfde.yaml ironic-12.1.0/releasenotes/notes/provide_mountpoint-58cfd25b6dd4cfde.yaml --- ironic-12.0.0/releasenotes/notes/provide_mountpoint-58cfd25b6dd4cfde.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/provide_mountpoint-58cfd25b6dd4cfde.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes a bug where cinder block storage service volumes fail to attach, as + cinder expects the mountpoint to be a valid string. See `story 2004864 + `_ for additional + information. diff -Nru ironic-12.0.0/releasenotes/notes/remove-deprecated-drac_host-865be09c6e8fcb90.yaml ironic-12.1.0/releasenotes/notes/remove-deprecated-drac_host-865be09c6e8fcb90.yaml --- ironic-12.0.0/releasenotes/notes/remove-deprecated-drac_host-865be09c6e8fcb90.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/remove-deprecated-drac_host-865be09c6e8fcb90.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +upgrade: + - Removes the deprecated ``driver_info["drac_host"]`` property for the + ``idrac`` hardware type that was marked for removal in Pike. + Please use ``driver_info["drac_address"]`` instead. diff -Nru ironic-12.0.0/releasenotes/notes/set-boot-mode-4c42b3fd0b5f5b37.yaml ironic-12.1.0/releasenotes/notes/set-boot-mode-4c42b3fd0b5f5b37.yaml --- ironic-12.0.0/releasenotes/notes/set-boot-mode-4c42b3fd0b5f5b37.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/set-boot-mode-4c42b3fd0b5f5b37.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +features: + - | + Sets ``boot_mode`` in node properties during out-of-band introspection + for the ``idrac`` hardware type. diff -Nru ironic-12.0.0/releasenotes/notes/socat-respawn-de9e8805c820a7ac.yaml ironic-12.1.0/releasenotes/notes/socat-respawn-de9e8805c820a7ac.yaml --- ironic-12.0.0/releasenotes/notes/socat-respawn-de9e8805c820a7ac.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/socat-respawn-de9e8805c820a7ac.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,10 @@ +--- +fixes: + - Fixes an issue where the socat process would exit on client disconnect, + which would (a) leave a zombie socat process in the process table and (b) + disable any subsequent serial console connections. This issue was + addressed by updating ironic to call socat with the + ``fork,max-children=1`` options, which makes socat persist and accept + multiple connections (but only one at a time). + Please see story `2005024 `_ + for additional information.
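Several of the notes above add or retire ``driver_info`` properties (for example ``ipmi_hex_kg_key``, and ``drac_host`` giving way to ``drac_address``). All of these are ordinary JSON PATCH updates against the node resource. A minimal sketch for the IPMI Kg key, where the endpoint, token, node UUID and key value are all illustrative assumptions:

    import requests

    IRONIC = "http://127.0.0.1:6385"               # assumed API endpoint
    NODE = "6d85703a-565d-469a-96ce-30b6de53079d"  # assumed node UUID
    HEADERS = {"X-Auth-Token": "secret-token"}     # assumed valid token

    # Set the Kg key for IPMIv2 authentication in hexadecimal form;
    # per the note above, ironic hands this value to ipmitool as its
    # -y argument.
    resp = requests.patch(
        IRONIC + "/v1/nodes/" + NODE,
        headers=HEADERS,
        json=[{"op": "add",
               "path": "/driver_info/ipmi_hex_kg_key",
               "value": "a1b2c3d4e5f60718"}],      # assumed hex key
    )
    resp.raise_for_status()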
diff -Nru ironic-12.0.0/releasenotes/notes/story-2004444-f540d9bbc3532ad0.yaml ironic-12.1.0/releasenotes/notes/story-2004444-f540d9bbc3532ad0.yaml --- ironic-12.0.0/releasenotes/notes/story-2004444-f540d9bbc3532ad0.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/story-2004444-f540d9bbc3532ad0.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue with the ``ipmi`` hardware type where + ``node['driver_info']['ipmi_force_boot_device']`` could be interpreted + as ``True`` when set to values such as "False". diff -Nru ironic-12.0.0/releasenotes/notes/type-error-str-6826c53d7e5e1243.yaml ironic-12.1.0/releasenotes/notes/type-error-str-6826c53d7e5e1243.yaml --- ironic-12.0.0/releasenotes/notes/type-error-str-6826c53d7e5e1243.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/type-error-str-6826c53d7e5e1243.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Returns the correct error message on providing an invalid reference to + ``image_source``. Previously an internal error was raised. diff -Nru ironic-12.0.0/releasenotes/notes/whole-disk-root-gb-9132e5a354e6cb9d.yaml ironic-12.1.0/releasenotes/notes/whole-disk-root-gb-9132e5a354e6cb9d.yaml --- ironic-12.0.0/releasenotes/notes/whole-disk-root-gb-9132e5a354e6cb9d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ironic-12.1.0/releasenotes/notes/whole-disk-root-gb-9132e5a354e6cb9d.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + The ``instance_info[root_gb]`` property is no longer required for + whole-disk images. It has always been ignored for them, but the validation + code still expected it to be present. diff -Nru ironic-12.0.0/requirements.txt ironic-12.1.0/requirements.txt --- ironic-12.0.0/requirements.txt 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/requirements.txt 2019-03-21 20:07:40.000000000 +0000 @@ -15,7 +15,7 @@ python-swiftclient>=3.2.0 # Apache-2.0 pytz>=2013.6 # MIT stevedore>=1.20.0 # Apache-2.0 -pysendfile>=2.0.0 # MIT +pysendfile>=2.0.0;sys_platform!='win32' # MIT oslo.concurrency>=3.26.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 @@ -37,7 +37,7 @@ rfc3986>=0.3.1 # Apache-2.0 six>=1.10.0 # MIT jsonpatch!=1.20,>=1.16 # BSD -WSME>=0.8.0 # MIT +WSME>=0.9.3 # MIT Jinja2>=2.10 # BSD License (3 clause) keystonemiddleware>=4.17.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 @@ -47,3 +47,4 @@ psutil>=3.2.2 # BSD futurist>=1.2.0 # Apache-2.0 tooz>=1.58.0 # Apache-2.0 +openstacksdk>=0.25.0 # Apache-2.0 diff -Nru ironic-12.0.0/setup.cfg ironic-12.1.0/setup.cfg --- ironic-12.0.0/setup.cfg 2018-12-19 10:03:57.000000000 +0000 +++ ironic-12.1.0/setup.cfg 2019-03-21 20:09:08.000000000 +0000 @@ -83,6 +83,7 @@ ironic.hardware.interfaces.management = cimc = ironic.drivers.modules.cimc.management:CIMCManagement fake = ironic.drivers.modules.fake:FakeManagement + ibmc = ironic.drivers.modules.ibmc.management:IBMCManagement idrac = ironic.drivers.modules.drac.management:DracManagement ilo = ironic.drivers.modules.ilo.management:IloManagement ipmitool = ironic.drivers.modules.ipmitool:IPMIManagement @@ -98,6 +99,7 @@ ironic.hardware.interfaces.power = cimc = ironic.drivers.modules.cimc.power:Power fake = ironic.drivers.modules.fake:FakePower + ibmc = ironic.drivers.modules.ibmc.power:IBMCPower idrac = ironic.drivers.modules.drac.power:DracPower ilo = ironic.drivers.modules.ilo.power:IloPower ipmitool = ironic.drivers.modules.ipmitool:IPMIPower @@ -110,6 
+112,7 @@ agent = ironic.drivers.modules.agent:AgentRAID fake = ironic.drivers.modules.fake:FakeRAID idrac = ironic.drivers.modules.drac.raid:DracRAID + ilo5 = ironic.drivers.modules.ilo.raid:Ilo5RAID irmc = ironic.drivers.modules.irmc.raid:IRMCRAID no-raid = ironic.drivers.modules.noop:NoRAID ironic.hardware.interfaces.rescue = @@ -123,6 +126,7 @@ external = ironic.drivers.modules.storage.external:ExternalStorage ironic.hardware.interfaces.vendor = fake = ironic.drivers.modules.fake:FakeVendorB + ibmc = ironic.drivers.modules.ibmc.vendor:IBMCVendor idrac = ironic.drivers.modules.drac.vendor_passthru:DracVendorPassthru ilo = ironic.drivers.modules.ilo.vendor:VendorPassthru ipmitool = ironic.drivers.modules.ipmitool:VendorPassthru @@ -131,8 +135,10 @@ cisco-ucs-managed = ironic.drivers.cisco_ucs:CiscoUCSManaged cisco-ucs-standalone = ironic.drivers.cisco_ucs:CiscoUCSStandalone fake-hardware = ironic.drivers.fake_hardware:FakeHardware + ibmc = ironic.drivers.ibmc:IBMCHardware idrac = ironic.drivers.drac:IDRACHardware ilo = ironic.drivers.ilo:IloHardware + ilo5 = ironic.drivers.ilo:Ilo5Hardware ipmi = ironic.drivers.ipmi:IPMIHardware irmc = ironic.drivers.irmc:IRMCHardware manual-management = ironic.drivers.generic:ManualManagementHardware diff -Nru ironic-12.0.0/test-requirements.txt ironic-12.1.0/test-requirements.txt --- ironic-12.0.0/test-requirements.txt 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/test-requirements.txt 2019-03-21 20:07:40.000000000 +0000 @@ -11,7 +11,7 @@ iso8601>=0.1.11 # MIT oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 -psycopg2>=2.6.2 # LGPL/ZPL +psycopg2>=2.7.3 # LGPL/ZPL testtools>=2.2.0 # MIT testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD diff -Nru ironic-12.0.0/tox.ini ironic-12.1.0/tox.ini --- ironic-12.0.0/tox.ini 2018-12-19 10:02:33.000000000 +0000 +++ ironic-12.1.0/tox.ini 2019-03-21 20:07:40.000000000 +0000 @@ -1,5 +1,5 @@ [tox] -minversion = 1.8 +minversion = 2.0 skipsdist = True envlist = py3,py27,pep8 @@ -25,6 +25,11 @@ deps = {[testenv]deps} -r{toxinidir}/driver-requirements.txt +[testenv:unit-with-driver-libs-python3] +basepython = python3 +deps = {[testenv]deps} + -r{toxinidir}/driver-requirements.txt + [testenv:genstates] basepython = python3 deps = {[testenv]deps} diff -Nru ironic-12.0.0/zuul.d/ironic-jobs.yaml ironic-12.1.0/zuul.d/ironic-jobs.yaml --- ironic-12.0.0/zuul.d/ironic-jobs.yaml 2018-12-19 10:02:37.000000000 +0000 +++ ironic-12.1.0/zuul.d/ironic-jobs.yaml 2019-03-21 20:07:40.000000000 +0000 @@ -2,6 +2,7 @@ name: ironic-base description: Base job for devstack/tempest based ironic jobs. 
diff -Nru ironic-12.0.0/setup.cfg ironic-12.1.0/setup.cfg
--- ironic-12.0.0/setup.cfg	2018-12-19 10:03:57.000000000 +0000
+++ ironic-12.1.0/setup.cfg	2019-03-21 20:09:08.000000000 +0000
@@ -83,6 +83,7 @@
 ironic.hardware.interfaces.management =
     cimc = ironic.drivers.modules.cimc.management:CIMCManagement
     fake = ironic.drivers.modules.fake:FakeManagement
+    ibmc = ironic.drivers.modules.ibmc.management:IBMCManagement
     idrac = ironic.drivers.modules.drac.management:DracManagement
     ilo = ironic.drivers.modules.ilo.management:IloManagement
     ipmitool = ironic.drivers.modules.ipmitool:IPMIManagement
@@ -98,6 +99,7 @@
 ironic.hardware.interfaces.power =
     cimc = ironic.drivers.modules.cimc.power:Power
     fake = ironic.drivers.modules.fake:FakePower
+    ibmc = ironic.drivers.modules.ibmc.power:IBMCPower
     idrac = ironic.drivers.modules.drac.power:DracPower
     ilo = ironic.drivers.modules.ilo.power:IloPower
     ipmitool = ironic.drivers.modules.ipmitool:IPMIPower
@@ -110,6 +112,7 @@
     agent = ironic.drivers.modules.agent:AgentRAID
     fake = ironic.drivers.modules.fake:FakeRAID
     idrac = ironic.drivers.modules.drac.raid:DracRAID
+    ilo5 = ironic.drivers.modules.ilo.raid:Ilo5RAID
     irmc = ironic.drivers.modules.irmc.raid:IRMCRAID
     no-raid = ironic.drivers.modules.noop:NoRAID
 ironic.hardware.interfaces.rescue =
@@ -123,6 +126,7 @@
     external = ironic.drivers.modules.storage.external:ExternalStorage
 ironic.hardware.interfaces.vendor =
     fake = ironic.drivers.modules.fake:FakeVendorB
+    ibmc = ironic.drivers.modules.ibmc.vendor:IBMCVendor
     idrac = ironic.drivers.modules.drac.vendor_passthru:DracVendorPassthru
     ilo = ironic.drivers.modules.ilo.vendor:VendorPassthru
     ipmitool = ironic.drivers.modules.ipmitool:VendorPassthru
@@ -131,8 +135,10 @@
     cisco-ucs-managed = ironic.drivers.cisco_ucs:CiscoUCSManaged
     cisco-ucs-standalone = ironic.drivers.cisco_ucs:CiscoUCSStandalone
     fake-hardware = ironic.drivers.fake_hardware:FakeHardware
+    ibmc = ironic.drivers.ibmc:IBMCHardware
     idrac = ironic.drivers.drac:IDRACHardware
     ilo = ironic.drivers.ilo:IloHardware
+    ilo5 = ironic.drivers.ilo:Ilo5Hardware
     ipmi = ironic.drivers.ipmi:IPMIHardware
     irmc = ironic.drivers.irmc:IRMCHardware
     manual-management = ironic.drivers.generic:ManualManagementHardware
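The ``ibmc`` and ``ilo5`` lines added above are setuptools entry points; ironic discovers hardware types and interfaces through stevedore, which is pinned in requirements.txt. A rough sketch of how one of these registrations resolves to a class — the namespace and name come from the diff, while the DriverManager usage is illustrative rather than ironic's exact loading code:

    # Sketch: resolving a setup.cfg entry point with stevedore. Running it
    # requires ironic 12.1.0 and the ibmc driver's dependencies installed.
    from stevedore import driver

    mgr = driver.DriverManager(
        namespace='ironic.hardware.interfaces.power',
        name='ibmc',
        invoke_on_load=True,
    )
    # Resolves to ironic.drivers.modules.ibmc.power:IBMCPower per setup.cfg.
    print(type(mgr.driver))
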
diff -Nru ironic-12.0.0/test-requirements.txt ironic-12.1.0/test-requirements.txt
--- ironic-12.0.0/test-requirements.txt	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/test-requirements.txt	2019-03-21 20:07:40.000000000 +0000
@@ -11,7 +11,7 @@
 iso8601>=0.1.11 # MIT
 oslotest>=3.2.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
-psycopg2>=2.6.2 # LGPL/ZPL
+psycopg2>=2.7.3 # LGPL/ZPL
 testtools>=2.2.0 # MIT
 testresources>=2.0.0 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
diff -Nru ironic-12.0.0/tox.ini ironic-12.1.0/tox.ini
--- ironic-12.0.0/tox.ini	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/tox.ini	2019-03-21 20:07:40.000000000 +0000
@@ -1,5 +1,5 @@
 [tox]
-minversion = 1.8
+minversion = 2.0
 skipsdist = True
 envlist = py3,py27,pep8
 
@@ -25,6 +25,11 @@
 deps = {[testenv]deps}
     -r{toxinidir}/driver-requirements.txt
 
+[testenv:unit-with-driver-libs-python3]
+basepython = python3
+deps = {[testenv]deps}
+    -r{toxinidir}/driver-requirements.txt
+
 [testenv:genstates]
 basepython = python3
 deps = {[testenv]deps}
diff -Nru ironic-12.0.0/zuul.d/ironic-jobs.yaml ironic-12.1.0/zuul.d/ironic-jobs.yaml
--- ironic-12.0.0/zuul.d/ironic-jobs.yaml	2018-12-19 10:02:37.000000000 +0000
+++ ironic-12.1.0/zuul.d/ironic-jobs.yaml	2019-03-21 20:07:40.000000000 +0000
@@ -2,6 +2,7 @@
     name: ironic-base
     description: Base job for devstack/tempest based ironic jobs.
     parent: devstack-tempest
+    nodeset: openstack-single-node-bionic
     timeout: 10800
     required-projects:
       - openstack/ironic
@@ -50,9 +51,6 @@
         Q_AGENT: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vxlan
-
-        SWIFT_ENABLE_TEMPURLS: True
-        SWIFT_TEMPURL_KEY: secretkey
       devstack_plugins:
         ironic: git://git.openstack.org/openstack/ironic
       zuul_copy_output:
@@ -69,36 +67,8 @@
         s-object: False
         s-proxy: False
-        dstat: True
-        g-api: True
-        g-reg: True
-        key: True
-        mysql: True
-        n-api: True
-        n-api-meta: True
-        n-cauth: True
-        n-cond: True
-        n-cpu: True
-        n-novnc: True
-        n-obj: True
-        n-sch: True
-        placement-api: True
-        q-agt: False
-        q-dhcp: False
-        q-l3: False
-        q-meta: False
-        q-metering: False
-        q-svc: False
-        neutron-api: True
-        neutron-agent: True
-        neutron-dhcp: True
-        neutron-l3: True
-        neutron-metadata-agent: True
-        neutron-metering: True
-        rabbit: True
 
 - job:
-    name: ironic-dsvm-standalone
+    name: ironic-standalone
     description: Test ironic standalone
     parent: ironic-base
     irrelevant-files:
@@ -122,7 +92,10 @@
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
         IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
         IRONIC_ENABLED_DEPLOY_INTERFACES: "iscsi,direct,ansible"
+        IRONIC_RPC_TRANSPORT: json-rpc
         IRONIC_VM_COUNT: 6
+        SWIFT_ENABLE_TEMPURLS: True
+        SWIFT_TEMPURL_KEY: secretkey
       devstack_services:
         n-api: False
         n-api-meta: False
@@ -140,8 +113,8 @@
         s-proxy: True
 
 - job:
-    name: ironic-tempest-dsvm-ipa-partition-redfish-tinyipa
-    description: ironic-tempest-dsvm-ipa-partition-redfish-tinyipa
+    name: ironic-tempest-ipa-partition-redfish-tinyipa
+    description: ironic-tempest-ipa-partition-redfish-tinyipa
     parent: ironic-base
     timeout: 5400
     vars:
@@ -150,8 +123,8 @@
         IRONIC_ENABLED_HARDWARE_TYPES: redfish
 
 - job:
-    name: ironic-tempest-dsvm-pxe_ipmitool-postgres
-    description: ironic-tempest-dsvm-pxe_ipmitool-postgres
+    name: ironic-tempest-pxe_ipmitool-postgres
+    description: ironic-tempest-pxe_ipmitool-postgres
     parent: ironic-base
     vars:
       devstack_localrc:
@@ -161,8 +134,8 @@
         postgresql: True
 
 - job:
-    name: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
-    description: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+    name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+    description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
     parent: ironic-base
     timeout: 5400
     vars:
@@ -170,6 +143,8 @@
         IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
         IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
         IRONIC_VM_EPHEMERAL_DISK: 0
+        SWIFT_ENABLE_TEMPURLS: True
+        SWIFT_TEMPURL_KEY: secretkey
       devstack_services:
         s-account: True
         s-container: True
@@ -177,8 +152,8 @@
         s-proxy: True
 
 - job:
-    name: ironic-tempest-dsvm-ipa-wholedisk-bios-pxe_snmp-tinyipa
-    description: ironic-tempest-dsvm-ipa-wholedisk-bios-pxe_snmp-tinyipa
+    name: ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa
+    description: ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa
     parent: ironic-base
     timeout: 5400
     vars:
@@ -190,9 +165,10 @@
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
 
 - job:
-    name: ironic-tempest-dsvm-ipa-partition-uefi-pxe_ipmitool-tinyipa
-    description: ironic-tempest-dsvm-ipa-partition-uefi-pxe_ipmitool-tinyipa
+    name: ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa
+    description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa
     parent: ironic-base
+    nodeset: openstack-single-node-xenial
     timeout: 5400
     vars:
       devstack_localrc:
@@ -201,8 +177,8 @@
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
 
 - job:
-    name: ironic-tempest-dsvm-ipa-partition-pxe_ipmitool-tinyipa-python3
-    description: ironic-tempest-dsvm-ipa-partition-pxe_ipmitool-tinyipa-python3
+    name: ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa-python3
+    description: ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa-python3
     parent: ironic-base
     timeout: 5400
     vars:
@@ -210,8 +186,8 @@
         USE_PYTHON3: True
 
 - job:
-    name: ironic-tempest-dsvm-bfv
-    description: ironic-tempest-dsvm-bfv
+    name: ironic-tempest-bfv
+    description: ironic-tempest-bfv
     parent: ironic-base
     timeout: 9600
     vars:
@@ -226,6 +202,8 @@
         IRONIC_VM_EPHEMERAL_DISK: 0
         IRONIC_VM_COUNT: 3
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
+        SWIFT_ENABLE_TEMPURLS: True
+        SWIFT_TEMPURL_KEY: secretkey
       devstack_services:
         c-api: True
         c-bak: True
@@ -234,8 +212,8 @@
         cinder: True
 
 - job:
-    name: ironic-tempest-dsvm-ironic-inspector
-    description: ironic-tempest-dsvm-ironic-inspector
+    name: ironic-inspector-tempest
+    description: ironic-inspector-tempest
     parent: ironic-base
     required-projects:
       - openstack/ironic-inspector
@@ -247,6 +225,8 @@
         IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
         IRONIC_VM_EPHEMERAL_DISK: 0
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
+        SWIFT_ENABLE_TEMPURLS: True
+        SWIFT_TEMPURL_KEY: secretkey
      devstack_plugins:
         ironic-inspector: git://git.openstack.org/openstack/ironic-inspector
       devstack_services:
@@ -256,9 +236,9 @@
         s-proxy: True
 
 - job:
-    name: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
-    description: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
-    parent: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+    name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
+    description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
+    parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
     timeout: 5400
     vars:
       devstack_localrc:
@@ -266,9 +246,9 @@
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
 
 - job:
-    name: ironic-tempest-dsvm-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
-    description: ironic-tempest-dsvm-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
-    parent: ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+    name: ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
+    description: ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
+    parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
     timeout: 5400
     vars:
       devstack_localrc:
@@ -277,8 +257,8 @@
         IRONIC_AUTOMATED_CLEAN_ENABLED: False
 
 - job:
-    name: ironic-tempest-dsvm-functional-python2
-    description: ironic-tempest-dsvm-functional-python2
+    name: ironic-tempest-functional-python2
+    description: ironic-tempest-functional-python2
     parent: ironic-base
     timeout: 5400
     pre-run: playbooks/ci-workarounds/etc-neutron.yaml
@@ -290,7 +270,9 @@
         IRONIC_DEFAULT_NETWORK_INTERFACE: noop
         IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
         IRONIC_VM_EPHEMERAL_DISK: 0
+        IRONIC_RPC_TRANSPORT: json-rpc
       devstack_services:
+        rabbit: False
         g-api: False
         g-reg: False
         n-api: False
@@ -303,17 +285,17 @@
         n-sch: False
         nova: False
         placement-api: False
-        neutron-api: False
-        neutron-agent: False
-        neutron-dhcp: False
-        neutron-l3: False
-        neutron-metadata-agent: False
-        neutron-metering: False
+        q-agt: False
+        q-dhcp: False
+        q-l3: False
+        q-meta: False
+        q-metering: False
+        q-svc: False
 
 - job:
-    name: ironic-tempest-dsvm-functional-python3
-    description: ironic-tempest-dsvm-functional-python3
-    parent: ironic-tempest-dsvm-functional-python2
+    name: ironic-tempest-functional-python3
+    description: ironic-tempest-functional-python3
+    parent: ironic-tempest-functional-python2
     vars:
       devstack_localrc:
         IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False
@@ -321,8 +303,8 @@
         USE_PYTHON3: True
 
 - job:
-    name: ironic-tempest-dsvm-ipa-wholedisk-direct-tinyipa-multinode
-    description: ironic-tempest-dsvm-ipa-wholedisk-direct-tinyipa-multinode
+    name: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
+    description: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
     parent: tempest-multinode-full
     pre-run: playbooks/ci-workarounds/set-stack-key.yaml
     timeout: 10800
@@ -383,12 +365,13 @@
         IRONIC_VM_SPECS_RAM: 384
         OS_TEST_TIMEOUT: 2400
         OVERRIDE_PUBLIC_BRIDGE_MTU: 1400
+        OVS_BRIDGE_MAPPINGS: 'mynetwork:brbm,public:br-infra'
         OVS_PHYSICAL_BRIDGE: brbm
         PHYSICAL_NETWORK: mynetwork
+        PUBLIC_BRIDGE: br-infra
         Q_AGENT: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vlan
         Q_PLUGIN: ml2
-        Q_USE_PROVIDERNET_FOR_PUBLIC: False
         SWIFT_ENABLE_TEMPURLS: True
         SWIFT_TEMPURL_KEY: secretkey
         TEMPEST_PLUGINS: "'{{ ansible_user_dir }}/src/git.openstack.org/openstack/ironic-tempest-plugin'"
@@ -453,6 +436,7 @@
         IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
         IRONIC_VM_NETWORK_BRIDGE: sub1brbm
         IRONIC_VM_SPECS_RAM: 384
+        OVS_BRIDGE_MAPPINGS: 'mynetwork:sub1brbm,public:br-infra'
         OVS_PHYSICAL_BRIDGE: sub1brbm
         PHYSICAL_NETWORK: mynetwork
         Q_ML2_TENANT_NETWORK_TYPE: vlan
@@ -466,3 +450,30 @@
         q-agt: True
         n-cpu: True
+
+- job:
+    name: ironic-tox-unit-with-driver-libs
+    parent: tox
+    description: |
+      Run unit tests with driver dependencies installed.
+    vars:
+      tox_envlist: unit-with-driver-libs
+
+- job:
+    name: ironic-tox-unit-with-driver-libs-python3
+    parent: tox
+    description: |
+      Run python 3 unit tests with driver dependencies installed.
+    vars:
+      tox_envlist: unit-with-driver-libs-python3
+
+- job:
+    name: ironic-inspector-tempest-discovery-fast-track
+    description: ironic-inspector-tempest-discovery-fast-track
+    parent: ironic-inspector-tempest-discovery
+    vars:
+      tempest_test_regex: BareMetalFastTrackTest
+      devstack_localrc:
+        IRONIC_INSPECTOR_POWER_OFF: False
+        IRONIC_DEPLOY_FAST_TRACK: True
+        IRONIC_DEPLOY_FAST_TRACK_CLEANING: True
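Two of the jobs above set ``IRONIC_RPC_TRANSPORT: json-rpc``, exercising the JSON-RPC conductor transport this release adds as an alternative to oslo.messaging (note the functional job also turns ``rabbit`` off). For orientation, the wire format is ordinary JSON-RPC 2.0; the method and parameters below are purely illustrative, not ironic's actual RPC API:

    # Sketch: the shape of a JSON-RPC 2.0 exchange like the one carried
    # between ironic-api and ironic-conductor when json-rpc is enabled.
    import json

    request = {
        "jsonrpc": "2.0",
        "method": "change_node_power_state",  # hypothetical method name
        "params": {"node_id": "<node-uuid>", "new_state": "power on"},
        "id": 1,
    }
    response = {"jsonrpc": "2.0", "result": None, "id": 1}
    print(json.dumps(request))
    print(json.dumps(response))
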
diff -Nru ironic-12.0.0/zuul.d/project.yaml ironic-12.1.0/zuul.d/project.yaml
--- ironic-12.0.0/zuul.d/project.yaml	2018-12-19 10:02:33.000000000 +0000
+++ ironic-12.1.0/zuul.d/project.yaml	2019-03-21 20:07:44.000000000 +0000
@@ -4,48 +4,53 @@
       - openstack-cover-jobs
       - openstack-lower-constraints-jobs
       - openstack-python-jobs
-      - openstack-python35-jobs
       - openstack-python36-jobs
       - periodic-stable-jobs
       - publish-openstack-docs-pti
       - release-notes-jobs-python3
     check:
       jobs:
-        - ironic-dsvm-standalone
-        - ironic-tempest-dsvm-functional-python2
-        - ironic-tempest-dsvm-functional-python3
+        - ironic-tox-unit-with-driver-libs
+        - ironic-tox-unit-with-driver-libs-python3
+        - ironic-standalone
+        - ironic-tempest-functional-python2
+        - ironic-tempest-functional-python3
         - ironic-grenade-dsvm
         - ironic-grenade-dsvm-multinode-multitenant
-        - ironic-tempest-dsvm-ipa-partition-pxe_ipmitool-tinyipa-python3
-        - ironic-tempest-dsvm-ipa-partition-redfish-tinyipa
-        - ironic-tempest-dsvm-ipa-partition-uefi-pxe_ipmitool-tinyipa
-        - ironic-tempest-dsvm-ipa-wholedisk-direct-tinyipa-multinode
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
-        - ironic-tempest-dsvm-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
-        - ironic-tempest-dsvm-bfv
+        - ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa-python3
+        - ironic-tempest-ipa-partition-redfish-tinyipa
+        - ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa
+        - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
+        - ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
+        - ironic-tempest-bfv
         # Non-voting jobs
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-pxe_snmp-tinyipa:
+        - ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa:
            voting: false
-        - ironic-tempest-dsvm-ironic-inspector:
+        - ironic-inspector-tempest:
            voting: false
         - bifrost-integration-tinyipa-ubuntu-xenial:
            voting: false
-        - ironic-tempest-dsvm-pxe_ipmitool-postgres:
+        - metalsmith-integration-glance-localboot-centos7:
+           voting: false
+        - ironic-tempest-pxe_ipmitool-postgres:
            voting: false
     gate:
       queue: ironic
      jobs:
-        - ironic-dsvm-standalone
-        - ironic-tempest-dsvm-functional-python2
-        - ironic-tempest-dsvm-functional-python3
+        - ironic-tox-unit-with-driver-libs
+        - ironic-tox-unit-with-driver-libs-python3
+        - ironic-standalone
+        - ironic-tempest-functional-python2
+        - ironic-tempest-functional-python3
         - ironic-grenade-dsvm
         - ironic-grenade-dsvm-multinode-multitenant
-        - ironic-tempest-dsvm-ipa-partition-pxe_ipmitool-tinyipa-python3
-        - ironic-tempest-dsvm-ipa-partition-redfish-tinyipa
-        - ironic-tempest-dsvm-ipa-partition-uefi-pxe_ipmitool-tinyipa
-        - ironic-tempest-dsvm-ipa-wholedisk-direct-tinyipa-multinode
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
-        - ironic-tempest-dsvm-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
-        - ironic-tempest-dsvm-bfv
+        - ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa-python3
+        - ironic-tempest-ipa-partition-redfish-tinyipa
+        - ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa
+        - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect
+        - ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect
+        - ironic-tempest-bfv