diff -Nru octavia-4.1.1/api-ref/source/parameters.yaml octavia-4.1.4/api-ref/source/parameters.yaml
--- octavia-4.1.1/api-ref/source/parameters.yaml 2019-12-16 10:45:31.000000000 +0000
+++ octavia-4.1.4/api-ref/source/parameters.yaml 2020-11-04 15:39:51.000000000 +0000
@@ -369,13 +369,15 @@
     type: string
 default_pool_id:
   description: |
-    The ID of the pool used by the listener if no L7 policies match.
+    The ID of the pool used by the listener if no L7 policies match. The pool
+    has some restrictions. See :ref:`valid_protocol`.
   in: body
   required: true
   type: uuid
 default_pool_id-optional:
   description: |
-    The ID of the pool used by the listener if no L7 policies match.
+    The ID of the pool used by the listener if no L7 policies match. The pool
+    has some restrictions. See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
@@ -757,14 +759,16 @@
 l7policy-redirect-pool_id:
   description: |
     Requests matching this policy will be redirected to the pool with this ID.
-    Only valid if ``action`` is ``REDIRECT_TO_POOL``.
+    Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some
+    restrictions. See :ref:`valid_protocol`.
   in: body
   required: true
   type: uuid
 l7policy-redirect-pool_id-optional:
   description: |
     Requests matching this policy will be redirected to the pool with this ID.
-    Only valid if ``action`` is ``REDIRECT_TO_POOL``.
+    Only valid if ``action`` is ``REDIRECT_TO_POOL``. The pool has some
+    restrictions. See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
@@ -926,7 +930,8 @@
 listener-id-pool-optional:
   description: |
     The ID of the listener for the pool. Either ``listener_id`` or
-    ``loadbalancer_id`` must be specified.
+    ``loadbalancer_id`` must be specified. The listener has some restrictions.
+    See :ref:`valid_protocol`.
   in: body
   required: false
   type: uuid
diff -Nru octavia-4.1.1/api-ref/source/v2/general.inc octavia-4.1.4/api-ref/source/v2/general.inc
--- octavia-4.1.1/api-ref/source/v2/general.inc 2019-12-16 10:45:30.000000000 +0000
+++ octavia-4.1.4/api-ref/source/v2/general.inc 2020-11-04 15:39:51.000000000 +0000
@@ -569,3 +569,55 @@
 An entity in ``ERROR`` has failed provisioning. The entity
 may be deleted and recreated.
+
+
+.. _valid_protocol:
+
+Protocol Combinations
+=====================
+
+The listener and pool can be associated through the listener's
+``default_pool_id`` or an l7policy's ``redirect_pool_id``. Both the listener
+and the pool must set the protocol parameter, but the association between
+them is not arbitrary: only certain protocol combinations are valid.
+
+Valid protocol combinations
+---------------------------
+
+.. |1| unicode:: U+2002 .. nut ( )
+.. |2| unicode:: U+2003 .. mutton ( )
+.. |listener| replace:: |2| |2| Listener
+.. |1Y| replace:: |1| Y
+.. |1N| replace:: |1| N
+.. |2Y| replace:: |2| Y
+.. |2N| replace:: |2| N
+.. |8Y| replace:: |2| |2| |2| |2| Y
+.. 
|8N| replace:: |2| |2| |2| |2| N + ++-------------+-------+--------+------+-------------------+------+ +|| |listener| || HTTP || HTTPS || TCP || TERMINATED_HTTPS || UDP | +|| Pool || || || || || | ++=============+=======+========+======+===================+======+ +| HTTP | |2Y| | |2N| | |1Y| | |8Y| | |1N| | ++-------------+-------+--------+------+-------------------+------+ +| HTTPS | |2N| | |2Y| | |1Y| | |8N| | |1N| | ++-------------+-------+--------+------+-------------------+------+ +| PROXY | |2Y| | |2Y| | |1Y| | |8Y| | |1N| | ++-------------+-------+--------+------+-------------------+------+ +| TCP | |2N| | |2Y| | |1Y| | |8N| | |1N| | ++-------------+-------+--------+------+-------------------+------+ +| UDP | |2N| | |2N| | |1N| | |8N| | |1Y| | ++-------------+-------+--------+------+-------------------+------+ + +"Y" means the combination is valid and "N" means invalid. + +The HTTPS protocol is HTTPS pass-through. For most providers, this is treated +as a TCP protocol. Some advanced providers may support HTTPS session +persistence features by using the session ID. The Amphora provider treats +HTTPS as a TCP flow, but currently does not support HTTPS session persistence +using the session ID. + +The pool protocol of PROXY will use the listener protocol as the pool protocol +but will wrap that protocol in the proxy protocol. In the case of listener +protocol TERMINATED_HTTPS, a pool protocol of PROXY will be HTTP wrapped in the +proxy protocol. diff -Nru octavia-4.1.1/AUTHORS octavia-4.1.4/AUTHORS --- octavia-4.1.1/AUTHORS 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/AUTHORS 2020-11-04 15:41:11.000000000 +0000 @@ -27,6 +27,7 @@ Bo Chi Bo Wang Brandon Logan +Brian Haley CALIN Cristian Andrei Cao Xuan Hoang Carlos D. Garza @@ -42,6 +43,7 @@ Corey Bryant Daniel Mellado Dao Cong Tien +David Moreau Simard Deepak Dong Jun Doug Fish @@ -84,6 +86,7 @@ Jamie Lennox Jason Niesz Jeffrey Longstaff +Jens Harbott Jeremy Liu Jeremy Stanley Jiahao Liang @@ -129,6 +132,7 @@ Omer Anson OpenStack Release Bot Paul Glass +Paul Peereboom Pavel Abalikhin Peng Zhi Xiong Pradeep Kumar Singh @@ -144,6 +148,7 @@ Sherif Abdelwahab Shuquan Huang Sindhu Devale +Stefan Nica Stephen Balukoff Stephen Balukoff Susanne Balle @@ -173,6 +178,7 @@ ZhiQiang Fan ajmiller akhiljain23 +archiephan bharath caoyuan caoyue diff -Nru octavia-4.1.1/ChangeLog octavia-4.1.4/ChangeLog --- octavia-4.1.1/ChangeLog 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/ChangeLog 2020-11-04 15:41:11.000000000 +0000 @@ -1,6 +1,65 @@ CHANGES ======= +4.1.4 +----- + +* Remove unnecessary joinedload +* Fix pool delete race on load balancer cascade delete +* Temporarily set octavia-v2-dsvm-py2-scenario non-voting +* Fix python2 AttributeError with strptime +* Fix backend certificate file paths +* Fix AttributeError on TLS-enabled pool provisioning +* Ignore DELETED amphorae when performing certificate rotation +* Fix the amphora no-op driver +* Fixes API list handling of unscoped tokens +* Fix operational status for disabled UDP listeners +* Add some details on enable\_anti\_affinity option +* Add missing log line for finishing amp operations +* Update devstack plugin +* Set Grub timeout to 0 for fast boot times +* Fix API sort key for complex columns +* Increase the devstack secuirty group rules quota +* Remove scenario bionic job from check +* Disable log offloading scenario tests +* Fix accepting 'insert\_headers' when unsupported +* Prioritize policy validation + +4.1.3 +----- + +* Fix batch member create for v1 amphora driver +* 
Remove install DIB from Git option +* Set octavia-grenade job non-voting + +4.1.2 +----- + +* Fix multi-listener LB client auth/re-encryption +* Fix allow\_invisible\_resource\_usage typo in relnotes +* Workaround peer name starting with hyphen +* Fix healthmanager not update amphora health when LB disable +* Validate resource access when creating loadbalancer or member +* Fix py3 amphora-agent cert-rotation type bug +* Correct delay between UDP healthchecks +* Fix load balancer update with provider filtered params +* Fix padding logic for UDP health daemon +* Do not install diskimage-builder from Git +* Pick stale amphora randomly +* Do not run Tempest in octavia-grenade job +* Remove xenial based jobs from stein gates +* Remove the barbican "Grant access" from cookbook +* Fix uncaught DB exception when trying to get a spare amphora +* Fix the interface filenames for Red Hat amphora images +* Use stable upper-constraints.txt in Amphora builds +* Fix pep8 failures on stable/stein branch +* Fix multi-listener LB with missing certificate +* Fix house keeping graceful shutdown +* Fix update API when barbican secret is missing +* Add listener and pool protocol validation +* Cap hacking version to <2 +* Accept oslopolicy-policy-generator path arguments + 4.1.1 ----- diff -Nru octavia-4.1.1/debian/changelog octavia-4.1.4/debian/changelog --- octavia-4.1.1/debian/changelog 2020-07-08 05:38:52.000000000 +0000 +++ octavia-4.1.4/debian/changelog 2020-11-18 14:50:21.000000000 +0000 @@ -1,3 +1,11 @@ +octavia (4.1.4-0ubuntu1~cloud0) bionic-stein; urgency=medium + + * d/control: Update VCS paths for move to lp:~ubuntu-openstack-dev. + * New stable point release for OpenStack Stein (LP: #1904563). + * d/p/add-missing-import-octavia-opts.py.patch: Removed as the fix landed upstream. + + -- Chris MacNaughton Wed, 18 Nov 2020 14:50:21 +0000 + octavia (4.1.1-0ubuntu1~cloud1) bionic-stein; urgency=medium * Fix batch creation of new members failure due to timeouts (LP: #1882657) diff -Nru octavia-4.1.1/debian/control octavia-4.1.4/debian/control --- octavia-4.1.1/debian/control 2020-07-08 05:38:52.000000000 +0000 +++ octavia-4.1.4/debian/control 2020-11-17 12:45:12.000000000 +0000 @@ -94,8 +94,8 @@ python3-webob (>= 1:1.7.1), python3-wsme (>= 0.8.0), Standards-Version: 4.1.3 -Vcs-Browser: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/octavia -Vcs-Git: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/octavia +Vcs-Browser: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/octavia +Vcs-Git: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/octavia Homepage: https://github.com/openstack/octavia Package: amphora-agent diff -Nru octavia-4.1.1/debian/patches/fix-batch-member-create-for-v1-amphora-driver.patch octavia-4.1.4/debian/patches/fix-batch-member-create-for-v1-amphora-driver.patch --- octavia-4.1.1/debian/patches/fix-batch-member-create-for-v1-amphora-driver.patch 2020-07-08 05:38:52.000000000 +0000 +++ octavia-4.1.4/debian/patches/fix-batch-member-create-for-v1-amphora-driver.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -From d2bef9a47c6497b479d91471fbaec60d993e86ac Mon Sep 17 00:00:00 2001 -From: Michael Johnson -Date: Thu, 4 Jun 2020 10:45:35 -0700 -Subject: [PATCH] Fix batch member create for v1 amphora driver - -A previous patch[1] missed batch_member_update when adding database -repository "get" method retries for new object creation actions. 
-This patch fixes batch member create to retry the database get call -when new members are being created via batch member update. -This issue only impacts the v1 amphora driver as the v2 driver -does not need to get these objects from the database. - -Story: 2007581 -Task: 39503 - -[1] https://github.com/openstack/octavia/commit/48e85569f7e43e3dd5f09fd1ef4cb165526a92cd - -Change-Id: Ia3476ab7b24dc3fd6e29ff2abe6eb6bacd9908ed ---- - octavia/controller/worker/controller_worker.py | 15 +++++++++++++-- - .../controller/worker/test_controller_worker.py | 4 +++- - 2 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/octavia/controller/worker/controller_worker.py b/octavia/controller/worker/controller_worker.py -index 3aa74c44..495869db 100644 ---- a/octavia/controller/worker/controller_worker.py -+++ b/octavia/controller/worker/controller_worker.py -@@ -457,12 +457,23 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): - log=LOG): - delete_member_tf.run() - -+ @tenacity.retry( -+ retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), -+ wait=tenacity.wait_incrementing( -+ RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), -+ stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) - def batch_update_members(self, old_member_ids, new_member_ids, - updated_members): -- old_members = [self._member_repo.get(db_apis.get_session(), id=mid) -- for mid in old_member_ids] - new_members = [self._member_repo.get(db_apis.get_session(), id=mid) - for mid in new_member_ids] -+ # The API may not have commited all of the new member records yet. -+ # Make sure we retry looking them up. -+ if None in new_members or len(new_members) != len(new_member_ids): -+ LOG.warning('Failed to fetch one of the new members from DB. ' -+ 'Retrying for up to 60 seconds.') -+ raise db_exceptions.NoResultFound -+ old_members = [self._member_repo.get(db_apis.get_session(), id=mid) -+ for mid in old_member_ids] - updated_members = [ - (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m) - for m in updated_members] -diff --git a/octavia/tests/unit/controller/worker/test_controller_worker.py b/octavia/tests/unit/controller/worker/test_controller_worker.py -index 65d2557f..f8c117a6 100644 ---- a/octavia/tests/unit/controller/worker/test_controller_worker.py -+++ b/octavia/tests/unit/controller/worker/test_controller_worker.py -@@ -824,7 +824,8 @@ class TestControllerWorker(base.TestCase): - mock_amp_repo_get): - - _flow_mock.reset_mock() -- -+ mock_member_repo_get.side_effect = [None, _member_mock, -+ _member_mock, _member_mock] - cw = controller_worker.ControllerWorker() - cw.batch_update_members([9], [11], [MEMBER_UPDATE_DICT]) - -@@ -837,6 +838,7 @@ class TestControllerWorker(base.TestCase): - constants.POOL: _pool_mock})) - - _flow_mock.run.assert_called_once_with() -+ self.assertEqual(4, mock_member_repo_get.call_count) - - @mock.patch('octavia.controller.worker.flows.' 
- 'pool_flows.PoolFlows.get_create_pool_flow', --- -2.17.1 - diff -Nru octavia-4.1.1/debian/patches/series octavia-4.1.4/debian/patches/series --- octavia-4.1.1/debian/patches/series 2020-07-08 05:38:52.000000000 +0000 +++ octavia-4.1.4/debian/patches/series 2020-11-17 13:40:10.000000000 +0000 @@ -1,2 +1 @@ 0001-db-add-missing-primary-key-in-spares_pool-table.patch -fix-batch-member-create-for-v1-amphora-driver.patch diff -Nru octavia-4.1.1/devstack/plugin.sh octavia-4.1.4/devstack/plugin.sh --- octavia-4.1.1/devstack/plugin.sh 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/devstack/plugin.sh 2020-11-04 15:39:51.000000000 +0000 @@ -38,16 +38,6 @@ fi } -function install_diskimage_builder { - if use_library_from_git "diskimage-builder"; then - GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL - GITDIR["diskimage-builder"]=$DISKIMAGE_BUILDER_DIR - GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF - git_clone_by_name "diskimage-builder" - setup_dev_lib "diskimage-builder" - fi -} - function set_octavia_worker_image_owner_id { image_id=$(openstack image list --property name=${OCTAVIA_AMP_IMAGE_NAME} -f value -c ID) owner_id=$(openstack image show ${image_id} -c owner -f value) @@ -55,9 +45,6 @@ } function build_octavia_worker_image { - # set up diskimage-builder if we need to - install_diskimage_builder - # Pull in DIB local elements if they are defined in devstack if [ -n "$DIB_LOCAL_ELEMENTS" ]; then export DIB_LOCAL_ELEMENTS=$DIB_LOCAL_ELEMENTS @@ -185,9 +172,10 @@ function create_octavia_accounts { create_service_user $OCTAVIA - # Increase the service account secgroups quota + # Increase the octavia account secgroups quota # This is imporant for concurrent tempest testing - openstack quota set --secgroups 100 $SERVICE_PROJECT_NAME + openstack quota set --secgroups 100 $OCTAVIA_PROJECT_NAME + openstack quota set --secgroup-rules 1000 $OCTAVIA_PROJECT_NAME local octavia_service=$(get_or_create_service "octavia" \ $OCTAVIA_SERVICE_TYPE "Octavia Load Balancing Service") @@ -195,20 +183,14 @@ if [[ "$WSGI_MODE" == "uwsgi" ]] && [[ "$OCTAVIA_NODE" == "main" ]] ; then get_or_create_endpoint $octavia_service \ "$REGION_NAME" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/$OCTAVIA_SERVICE_TYPE" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/$OCTAVIA_SERVICE_TYPE" \ "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/$OCTAVIA_SERVICE_TYPE" elif [[ "$WSGI_MODE" == "uwsgi" ]]; then get_or_create_endpoint $octavia_service \ "$REGION_NAME" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST/$OCTAVIA_SERVICE_TYPE" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST/$OCTAVIA_SERVICE_TYPE" \ "$OCTAVIA_PROTOCOL://$SERVICE_HOST/$OCTAVIA_SERVICE_TYPE" else get_or_create_endpoint $octavia_service \ "$REGION_NAME" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/" \ - "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/" \ "$OCTAVIA_PROTOCOL://$SERVICE_HOST:$OCTAVIA_PORT/" fi } diff -Nru octavia-4.1.1/devstack/README.md octavia-4.1.4/devstack/README.md --- octavia-4.1.1/devstack/README.md 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/devstack/README.md 2020-11-04 15:39:51.000000000 +0000 @@ -23,7 +23,7 @@ For example - ENABLED_SERVICES+=octavia,o-api,o-cw,o-hk,o-hm,o-da + ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hk,o-hm,o-da For more information, see the "Externally Hosted Plugins" section of https://docs.openstack.org/devstack/latest/plugins.html diff -Nru octavia-4.1.1/devstack/settings octavia-4.1.4/devstack/settings --- octavia-4.1.1/devstack/settings 2019-12-16 10:45:31.000000000 +0000 +++ 
octavia-4.1.4/devstack/settings 2020-11-04 15:39:51.000000000 +0000 @@ -2,9 +2,6 @@ OCTAVIA=${OCTAVIA:-"octavia"} OCTAVIA_DIR=${OCTAVIA_DIR:-"${DEST}/octavia"} -DISKIMAGE_BUILDER_REPO_URL=${DISKIMAGE_BUILDER_REPO_URL:-${GIT_BASE}/openstack/diskimage-builder.git} -DISKIMAGE_BUILDER_REPO_REF=${DISKIMAGE_BUILDER_REPO_REF:-master} -DISKIMAGE_BUILDER_DIR=$DEST/diskimage-builder OCTAVIA_BIN_DIR=${OCTAVIA_BIN_DIR:-$(get_python_exec_prefix)} OCTAVIA_CONF_DIR=${OCTAVIA_CONF_DIR:-"/etc/octavia"} OCTAVIA_SSH_DIR=${OCTAVIA_SSH_DIR:-${OCTAVIA_CONF_DIR}/.ssh} @@ -28,7 +25,7 @@ OCTAVIA_USER_DOMAIN_NAME=${OCTAVIA_USER_DOMAIN_NAME:-"Default"} OCTAVIA_PROJECT_DOMAIN_NAME=${OCTAVIA_PROJECT_DOMAIN_NAME:-"Default"} -OCTAVIA_PROTOCOL=${OCTAVIA_PROTOCOL:-"http"} +OCTAVIA_PROTOCOL=${OCTAVIA_PROTOCOL:-$SERVICE_PROTOCOL} OCTAVIA_PORT=${OCTAVIA_PORT:-"9876"} OCTAVIA_HA_PORT=${OCTAVIA_HA_PORT:-"9875"} OCTAVIA_HM_LISTEN_PORT=${OCTAVIA_HM_LISTEN_PORT:-"5555"} diff -Nru octavia-4.1.1/diskimage-create/diskimage-create.sh octavia-4.1.4/diskimage-create/diskimage-create.sh --- octavia-4.1.1/diskimage-create/diskimage-create.sh 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/diskimage-create/diskimage-create.sh 2020-11-04 15:39:51.000000000 +0000 @@ -391,6 +391,9 @@ AMP_element_sequence="$AMP_element_sequence $DIB_LOCAL_ELEMENTS" fi +# Set Grub timeout to 0 (no timeout) for fast boot times +export DIB_GRUB_TIMEOUT=${DIB_GRUB_TIMEOUT:-0} + # Build the image if [ "$AMP_BASEOS" = "ubuntu-minimal" ]; then diff -Nru octavia-4.1.1/doc/source/user/guides/basic-cookbook.rst octavia-4.1.4/doc/source/user/guides/basic-cookbook.rst --- octavia-4.1.1/doc/source/user/guides/basic-cookbook.rst 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/doc/source/user/guides/basic-cookbook.rst 2020-11-04 15:39:51.000000000 +0000 @@ -384,12 +384,11 @@ 1. Combine the individual cert/key/intermediates to a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *tls_secret1*. -3. Grant the *admin* user access to the *tls_secret1* barbican resource. -4. Create load balancer *lb1* on subnet *public-subnet*. -5. Create listener *listener1* as a TERMINATED_HTTPS listener referencing +3. Create load balancer *lb1* on subnet *public-subnet*. +4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container. -6. Create pool *pool1* as *listener1*'s default pool. -7. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. +5. Create pool *pool1* as *listener1*'s default pool. +6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: @@ -437,13 +436,12 @@ 1. Combine the individual cert/key/intermediates to single PKCS12 files. 2. Create barbican *secret* resources for the PKCS12 files. We will call them *tls_secret1* and *tls_secret2*. -3. Grant the *admin* user access to both *tls_secret* barbican resources. -4. Create load balancer *lb1* on subnet *public-subnet*. -5. Create listener *listener1* as a TERMINATED_HTTPS listener referencing +3. Create load balancer *lb1* on subnet *public-subnet*. +4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container, and referencing both *tls_secret1* and *tls_secret2* using SNI. -6. Create pool *pool1* as *listener1*'s default pool. -7. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. +5. Create pool *pool1* as *listener1*'s default pool. +6. 
Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. **CLI commands**: @@ -500,13 +498,12 @@ 1. Combine the individual cert/key/intermediates to a single PKCS12 file. 2. Create a barbican *secret* resource for the PKCS12 file. We will call this *tls_secret1*. -3. Grant the *admin* user access to the *tls_secret1* barbican resource. -4. Create load balancer *lb1* on subnet *public-subnet*. -5. Create listener *listener1* as a TERMINATED_HTTPS listener referencing +3. Create load balancer *lb1* on subnet *public-subnet*. +4. Create listener *listener1* as a TERMINATED_HTTPS listener referencing *tls_secret1* as its default TLS container. -6. Create pool *pool1* as *listener1*'s default pool. -7. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. -8. Create listener *listener2* as an HTTP listener with *pool1* as its +5. Create pool *pool1* as *listener1*'s default pool. +6. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*. +7. Create listener *listener2* as an HTTP listener with *pool1* as its default pool. **CLI commands**: diff -Nru octavia-4.1.1/elements/amphora-agent/source-repository-amphora-agent octavia-4.1.4/elements/amphora-agent/source-repository-amphora-agent --- octavia-4.1.1/elements/amphora-agent/source-repository-amphora-agent 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/elements/amphora-agent/source-repository-amphora-agent 2020-11-04 15:39:51.000000000 +0000 @@ -1,3 +1,3 @@ # This is used for source-based builds -amphora-agent git /opt/amphora-agent https://git.openstack.org/openstack/octavia -upper-constraints file /opt/upper-constraints.txt https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt +amphora-agent git /opt/amphora-agent https://git.openstack.org/openstack/octavia stable/stein +upper-constraints file /opt/upper-constraints.txt https://releases.openstack.org/constraints/upper/stein diff -Nru octavia-4.1.1/etc/octavia.conf octavia-4.1.4/etc/octavia.conf --- octavia-4.1.1/etc/octavia.conf 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/etc/octavia.conf 2020-11-04 15:39:51.000000000 +0000 @@ -159,21 +159,31 @@ [networking] # The maximum attempts to retry an action with the networking service. # max_retries = 15 + # Seconds to wait before retrying an action with the networking service. # retry_interval = 1 + # The maximum time to wait, in seconds, for a port to detach from an amphora # port_detach_timeout = 300 + # Allow/disallow specific network object types when creating VIPs. # allow_vip_network_id = True # allow_vip_subnet_id = True # allow_vip_port_id = True + # List of network_ids that are valid for VIP creation. # If this field empty, no validation is performed. # valid_vip_networks = + # List of reserved IP addresses that cannot be used for member addresses # The default is the nova metadata service address # reserved_ips = ['169.254.169.254'] +# When True, users can use network resources they cannot normally see as VIP +# or member subnets. Making this True may allow users to access resources on +# subnets they do not normally have access to via neutron RBAC policies. 
+# allow_invisible_resource_usage = False + [haproxy_amphora] # base_path = /var/lib/octavia # base_cert_dir = /var/lib/octavia/certs diff -Nru octavia-4.1.1/octavia/amphorae/backends/agent/api_server/certificate_update.py octavia-4.1.4/octavia/amphorae/backends/agent/api_server/certificate_update.py --- octavia-4.1.1/octavia/amphorae/backends/agent/api_server/certificate_update.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/backends/agent/api_server/certificate_update.py 2020-11-04 15:39:50.000000000 +0000 @@ -30,7 +30,7 @@ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC # mode 00600 mode = stat.S_IRUSR | stat.S_IWUSR - with os.fdopen(os.open(file_path, flags, mode), 'w') as crt_file: + with os.fdopen(os.open(file_path, flags, mode), 'wb') as crt_file: b = stream.read(BUFFER) while b: crt_file.write(b) diff -Nru octavia-4.1.1/octavia/amphorae/backends/agent/api_server/osutils.py octavia-4.1.4/octavia/amphorae/backends/agent/api_server/osutils.py --- octavia-4.1.1/octavia/amphorae/backends/agent/api_server/osutils.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/backends/agent/api_server/osutils.py 2020-11-04 15:39:51.000000000 +0000 @@ -357,24 +357,30 @@ name = self._map_package_name(package_name) return "rpm -q --queryformat %{{VERSION}} {name}".format(name=name) - def get_network_interface_file(self, interface): + @staticmethod + def _get_network_interface_file(prefix, interface): if CONF.amphora_agent.agent_server_network_file: return CONF.amphora_agent.agent_server_network_file if CONF.amphora_agent.agent_server_network_dir: - return os.path.join(CONF.amphora_agent.agent_server_network_dir, - 'ifcfg-' + interface) - network_dir = consts.RH_AMP_NET_DIR_TEMPLATE.format( - netns=consts.AMPHORA_NAMESPACE) - return os.path.join(network_dir, 'ifcfg-' + interface) + network_dir = CONF.amphora_agent.agent_server_network_dir + else: + network_dir = consts.RH_AMP_NET_DIR_TEMPLATE.format( + netns=consts.AMPHORA_NAMESPACE) + return os.path.join(network_dir, prefix + interface) + + def get_network_interface_file(self, interface): + return self._get_network_interface_file('ifcfg-', interface) def get_alias_network_interface_file(self, interface): return self.get_network_interface_file(interface + ':0') - def get_static_routes_interface_file(self, interface): - return self.get_network_interface_file('route-' + interface) - - def get_route_rules_interface_file(self, interface): - return self.get_network_interface_file('rule-' + interface) + def get_static_routes_interface_file(self, interface, version): + route = 'route6-' if version == 6 else 'route-' + return self._get_network_interface_file(route, interface) + + def get_route_rules_interface_file(self, interface, version): + rule = 'rule6-' if version == 6 else 'rule-' + return self._get_network_interface_file(rule, interface) def get_network_path(self): return '/etc/sysconfig/network-scripts' @@ -432,7 +438,8 @@ render_host_routes, template_vip_alias) routes_interface_file_path = ( - self.get_static_routes_interface_file(primary_interface)) + self.get_static_routes_interface_file(primary_interface, + ip.version)) template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) self.write_static_routes_interface_file( @@ -443,7 +450,8 @@ if (CONF.controller_worker.loadbalancer_topology == consts.TOPOLOGY_SINGLE): route_rules_interface_file_path = ( - self.get_route_rules_interface_file(primary_interface)) + self.get_route_rules_interface_file(primary_interface, + ip.version)) template_rules = 
j2_env.get_template(self.RULE_ETH_X_CONF) self.write_static_routes_interface_file( @@ -493,16 +501,32 @@ if fixed_ips: host_routes = [] + host_routes_ipv6 = [] for fixed_ip in fixed_ips: - host_routes.extend(self.get_host_routes(fixed_ip)) + ip_addr = fixed_ip['ip_address'] + ip = ipaddress.ip_address(ip_addr if isinstance( + ip_addr, six.text_type) else six.u(ip_addr)) + if ip.version == 6: + host_routes_ipv6.extend(self.get_host_routes(fixed_ip)) + else: + host_routes.extend(self.get_host_routes(fixed_ip)) routes_interface_file_path = ( - self.get_static_routes_interface_file(netns_interface)) + self.get_static_routes_interface_file(netns_interface, 4)) template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) self.write_static_routes_interface_file( routes_interface_file_path, netns_interface, host_routes, template_routes, None, None, None) + + routes_interface_file_path_ipv6 = ( + self.get_static_routes_interface_file(netns_interface, 6)) + template_routes = j2_env.get_template(self.ROUTE_ETH_X_CONF) + + self.write_static_routes_interface_file( + routes_interface_file_path_ipv6, netns_interface, + host_routes_ipv6, template_routes, None, None, None) + self._write_ifup_ifdown_local_scripts_if_possible() @classmethod diff -Nru octavia-4.1.1/octavia/amphorae/backends/utils/keepalivedlvs_query.py octavia-4.1.4/octavia/amphorae/backends/utils/keepalivedlvs_query.py --- octavia-4.1.1/octavia/amphorae/backends/utils/keepalivedlvs_query.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/backends/utils/keepalivedlvs_query.py 2020-11-04 15:39:51.000000000 +0000 @@ -35,8 +35,8 @@ V6_RS_REGEX = re.compile(r"real_server\s([\w*:]+\b)\s(\d{1,5})") CONFIG_COMMENT_REGEX = re.compile( r"#\sConfiguration\sfor\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12})") -DISABLED_MEMBER_COMMENT_REGEX = re.compile( - r"#\sMember\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}) is disabled") +DISABLED_CONFIG_COMMENT_REGEX = re.compile( + r"#\s(\w+)\s(\w{8}-\w{4}-\w{4}-\w{4}-\w{12}) is disabled") CHECKER_REGEX = re.compile(r"MISC_CHECK") @@ -73,7 +73,7 @@ ip_obj = ipaddress.ip_address(six.text_type(listener_ip.strip('[]'))) output = read_kernel_file(ns_name, KERNEL_LVS_PATH).split('\n') if ip_obj.version == 4: - ip_to_hex_format = "0%X" % ip_obj._ip + ip_to_hex_format = "%.8X" % ip_obj._ip else: ip_to_hex_format = '\[' + ip_obj.exploded + '\]' port_hex_format = "%.4X" % int(listener_port) @@ -152,6 +152,14 @@ listener_ip_port = V6_VS_REGEX.findall(cfg) listener_ip_port = listener_ip_port[0] if listener_ip_port else [] + disabled_resource_ids = DISABLED_CONFIG_COMMENT_REGEX.findall(cfg) + + listener_disabled = any(True + for resource in disabled_resource_ids + if resource[0] == 'Listener') + if listener_disabled: + return None, ns_name + if not listener_ip_port: # If not get listener_ip_port from the lvs config file, # that means the udp listener's default pool have no enabled member @@ -181,7 +189,11 @@ elif resource_type == 'Members': resource_ipport_mapping[resource_type].append(value) - disabled_member_ids = DISABLED_MEMBER_COMMENT_REGEX.findall(cfg) + disabled_member_ids = [ + resource[1] + for resource in disabled_resource_ids + if resource[0] == 'Member' + ] resource_type = 'Members' for member_id in disabled_member_ids: @@ -363,6 +375,10 @@ (resource_ipport_mapping, ns_name) = get_udp_listener_resource_ipports_nsname(check_listener_id) + # Listener is disabled, we don't need to send an update + if resource_ipport_mapping is None: + continue + # Since we found the keepalived running, acknowledge the listener 
# in the heartbeat. If this listener has a pool and members, # the stats will be updated later in the code flow. diff -Nru octavia-4.1.1/octavia/amphorae/drivers/driver_base.py octavia-4.1.4/octavia/amphorae/drivers/driver_base.py --- octavia-4.1.1/octavia/amphorae/drivers/driver_base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/drivers/driver_base.py 2020-11-04 15:39:51.000000000 +0000 @@ -200,7 +200,7 @@ """ pass - def update_agent_config(self, amphora, agent_config): + def update_amphora_agent_config(self, amphora, agent_config): """Upload and update the amphora agent configuration. :param amphora: amphora object, needs id and network ip(s) diff -Nru octavia-4.1.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py octavia-4.1.4/octavia/amphorae/drivers/haproxy/rest_api_driver.py --- octavia-4.1.1/octavia/amphorae/drivers/haproxy/rest_api_driver.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/drivers/haproxy/rest_api_driver.py 2020-11-04 15:39:51.000000000 +0000 @@ -36,6 +36,9 @@ from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg from octavia.common.tls_utils import cert_parser from octavia.common import utils +from octavia.db import api as db_apis +from octavia.db import repositories as repo + LOG = logging.getLogger(__name__) API_VERSION = consts.API_VERSION @@ -144,6 +147,8 @@ 'process mode.', amphora.id, loadbalancer.id) has_tcp = False + certs = {} + listeners_to_update = [] for listener in loadbalancer.listeners: LOG.debug("%s updating listener %s on amphora %s", self.__class__.__name__, listener.id, amphora.id) @@ -161,42 +166,67 @@ else: obj_id = loadbalancer.id - self._process_tls_certificates(listener, amphora, obj_id) - - client_ca_filename = self._process_secret( - listener, listener.client_ca_tls_certificate_id, - amphora, obj_id) - crl_filename = self._process_secret( - listener, listener.client_crl_container_id, - amphora, obj_id) - pool_tls_certs = self._process_listener_pool_certs( - listener, amphora, obj_id) - - if split_config: - config = self.jinja_split.build_config( - host_amphora=amphora, listener=listener, - haproxy_versions=haproxy_versions, - client_ca_filename=client_ca_filename, - client_crl=crl_filename, - pool_tls_certs=pool_tls_certs) - self.clients[amphora.api_version].upload_config( - amphora, listener.id, config, - timeout_dict=timeout_dict) - self.clients[amphora.api_version].reload_listener( - amphora, listener.id, timeout_dict=timeout_dict) + try: + certs.update({ + listener.tls_certificate_id: + self._process_tls_certificates( + listener, amphora, obj_id)['tls_cert']}) + certs.update({listener.client_ca_tls_certificate_id: + self._process_secret( + listener, + listener.client_ca_tls_certificate_id, + amphora, obj_id)}) + certs.update({listener.client_crl_container_id: + self._process_secret( + listener, + listener.client_crl_container_id, + amphora, obj_id)}) + + certs.update(self._process_listener_pool_certs( + listener, amphora, obj_id)) + + if split_config: + config = self.jinja_split.build_config( + host_amphora=amphora, listener=listener, + haproxy_versions=haproxy_versions, + client_ca_filename=certs[ + listener.client_ca_tls_certificate_id], + client_crl=certs[listener.client_crl_container_id], + pool_tls_certs=certs) + self.clients[amphora.api_version].upload_config( + amphora, listener.id, config, + timeout_dict=timeout_dict) + self.clients[amphora.api_version].reload_listener( + amphora, listener.id, timeout_dict=timeout_dict) + else: + 
listeners_to_update.append(listener) + except Exception as e: + LOG.exception('Unable to update listener {0} due to ' + '"{1}". Skipping this listener.'.format( + listener.id, e)) + listener_repo = repo.ListenerRepository() + listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=consts.ERROR, + operating_status=consts.ERROR) if has_tcp and not split_config: - # Generate HaProxy configuration from listener object - config = self.jinja_combo.build_config( - host_amphora=amphora, listeners=loadbalancer.listeners, - haproxy_versions=haproxy_versions, - client_ca_filename=client_ca_filename, - client_crl=crl_filename, - pool_tls_certs=pool_tls_certs) - self.clients[amphora.api_version].upload_config( - amphora, loadbalancer.id, config, timeout_dict=timeout_dict) - self.clients[amphora.api_version].reload_listener( - amphora, loadbalancer.id, timeout_dict=timeout_dict) + if listeners_to_update: + # Generate HaProxy configuration from listener object + config = self.jinja_combo.build_config( + host_amphora=amphora, listeners=listeners_to_update, + tls_certs=certs, + haproxy_versions=haproxy_versions) + self.clients[amphora.api_version].upload_config( + amphora, loadbalancer.id, config, + timeout_dict=timeout_dict) + self.clients[amphora.api_version].reload_listener( + amphora, loadbalancer.id, timeout_dict=timeout_dict) + else: + # If we aren't updating any listeners, make sure there are + # no listeners hanging around. For example if this update + # was called from a listener delete. + self.clients[amphora.api_version].delete_listener( + amphora, loadbalancer.id) def _udp_update(self, listener, vip): LOG.debug("Amphora %s keepalivedlvs, updating " @@ -460,8 +490,7 @@ amphora, obj_id, pem=secret, md5=md5, name=name) return name - def _process_listener_pool_certs(self, listener, amphora=None, - obj_id=None): + def _process_listener_pool_certs(self, listener, amphora, obj_id): # {'POOL-ID': { # 'client_cert': client_full_filename, # 'ca_cert': ca_cert_full_filename, @@ -479,33 +508,34 @@ amphora, obj_id)) return pool_certs_dict - def _process_pool_certs(self, listener, pool, amphora=None, obj_id=None): + def _process_pool_certs(self, listener, pool, amphora, obj_id): pool_cert_dict = dict() # Handle the client cert(s) and key if pool.tls_certificate_id: data = cert_parser.load_certificates_data(self.cert_manager, pool) - pem = cert_parser.build_pem(data) + tls_cert = data['tls_cert'] + pem = cert_parser.build_pem(tls_cert) try: pem = pem.encode('utf-8') except AttributeError: pass md5 = hashlib.md5(pem).hexdigest() # nosec - name = '{id}.pem'.format(id=data.id) + name = '{id}.pem'.format(id=tls_cert.id) if amphora and obj_id: self._upload_cert(amphora, obj_id, pem=pem, md5=md5, name=name) pool_cert_dict['client_cert'] = os.path.join( - CONF.haproxy_amphora.base_cert_dir, listener.id, name) + CONF.haproxy_amphora.base_cert_dir, obj_id, name) if pool.ca_tls_certificate_id: name = self._process_secret(listener, pool.ca_tls_certificate_id, amphora, obj_id) pool_cert_dict['ca_cert'] = os.path.join( - CONF.haproxy_amphora.base_cert_dir, listener.id, name) + CONF.haproxy_amphora.base_cert_dir, obj_id, name) if pool.crl_container_id: name = self._process_secret(listener, pool.crl_container_id, amphora, obj_id) pool_cert_dict['crl'] = os.path.join( - CONF.haproxy_amphora.base_cert_dir, listener.id, name) + CONF.haproxy_amphora.base_cert_dir, obj_id, name) return pool_cert_dict diff -Nru octavia-4.1.1/octavia/amphorae/drivers/noop_driver/driver.py 
octavia-4.1.4/octavia/amphorae/drivers/noop_driver/driver.py --- octavia-4.1.1/octavia/amphorae/drivers/noop_driver/driver.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/amphorae/drivers/noop_driver/driver.py 2020-11-04 15:39:51.000000000 +0000 @@ -106,12 +106,12 @@ self.amphoraconfig[amphora.id, pem_file] = (amphora.id, pem_file, 'update_amp_cert_file') - def update_agent_config(self, amphora, agent_config): + def update_amphora_agent_config(self, amphora, agent_config): LOG.debug("Amphora %s no-op, update agent config amphora " "%s, with agent config %s", self.__class__.__name__, amphora.id, agent_config) self.amphoraconfig[amphora.id, agent_config] = ( - amphora.id, agent_config, 'update_agent_config') + amphora.id, agent_config, 'update_amphora_agent_config') class NoopAmphoraLoadBalancerDriver( @@ -163,8 +163,8 @@ self.driver.upload_cert_amp(amphora, pem_file) - def update_agent_config(self, amphora, agent_config): - self.driver.update_agent_config(amphora, agent_config) + def update_amphora_agent_config(self, amphora, agent_config): + self.driver.update_amphora_agent_config(amphora, agent_config) def update_vrrp_conf(self, loadbalancer): pass diff -Nru octavia-4.1.1/octavia/api/common/pagination.py octavia-4.1.4/octavia/api/common/pagination.py --- octavia-4.1.1/octavia/api/common/pagination.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/common/pagination.py 2020-11-04 15:39:51.000000000 +0000 @@ -311,13 +311,26 @@ self.sort_keys.append((key, self.sort_dir)) for current_sort_key, current_sort_dir in self.sort_keys: + # Translate sort_key from API standard to data model's name + current_sort_key = ( + model.__v2_wsme__.translate_key_to_data_model( + current_sort_key)) sort_dir_func = { constants.ASC: sqlalchemy.asc, constants.DESC: sqlalchemy.desc, }[current_sort_dir] try: - sort_key_attr = getattr(model, current_sort_key) + # The translated object may be a nested parameter + # such as vip.ip_address, so handle that case by + # joining with the nested table. + if '.' in current_sort_key: + parent, child = current_sort_key.split('.') + parent_obj = getattr(model, parent) + query = query.join(parent_obj) + sort_key_attr = child + else: + sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exceptions.InvalidSortKey(key=current_sort_key) query = query.order_by(sort_dir_func(sort_key_attr)) diff -Nru octavia-4.1.1/octavia/api/common/types.py octavia-4.1.4/octavia/api/common/types.py --- octavia-4.1.1/octavia/api/common/types.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/common/types.py 2020-11-04 15:39:51.000000000 +0000 @@ -155,6 +155,14 @@ res[k] = v return res + @classmethod + def translate_key_to_data_model(cls, key): + """Translate the keys from wsme class type, to data_model.""" + if not hasattr(cls, '_type_to_model_map') or ( + key not in cls._type_to_model_map): + return key + return cls._type_to_model_map[key] + def to_dict(self, render_unsets=False): """Converts Octavia WSME type to dictionary. diff -Nru octavia-4.1.1/octavia/api/v2/controllers/base.py octavia-4.1.4/octavia/api/v2/controllers/base.py --- octavia-4.1.1/octavia/api/v2/controllers/base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/base.py 2020-11-04 15:39:51.000000000 +0000 @@ -208,6 +208,10 @@ if project_id is None: project_id = context.project_id + # If we still don't know who it is, reject it. 
+ if project_id is None: + raise exceptions.PolicyForbidden() + # Check authorization to list objects under this project self._auth_validate_action(context, project_id, constants.RBAC_GET_ALL) @@ -307,3 +311,15 @@ raise exceptions.ValidationException(detail=_( "The CRL specified is not valid for client certificate " "authority reference supplied.")) + + @staticmethod + def _validate_protocol(listener_protocol, pool_protocol): + proto_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + for valid_pool_proto in proto_map[listener_protocol]: + if pool_protocol == valid_pool_proto: + return + detail = _("The pool protocol '%(pool_protocol)s' is invalid while " + "the listener protocol is '%(listener_protocol)s'.") % { + "pool_protocol": pool_protocol, + "listener_protocol": listener_protocol} + raise exceptions.ValidationException(detail=detail) diff -Nru octavia-4.1.1/octavia/api/v2/controllers/health_monitor.py octavia-4.1.4/octavia/api/v2/controllers/health_monitor.py --- octavia-4.1.1/octavia/api/v2/controllers/health_monitor.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/health_monitor.py 2020-11-04 15:39:51.000000000 +0000 @@ -199,16 +199,19 @@ context = pecan.request.context.get('octavia_context') health_monitor = health_monitor_.healthmonitor - if (not CONF.api_settings.allow_ping_health_monitors and - health_monitor.type == consts.HEALTH_MONITOR_PING): - raise exceptions.DisabledOption( - option='type', value=consts.HEALTH_MONITOR_PING) - pool = self._get_db_pool(context.session, health_monitor.pool_id) health_monitor.project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) + self._auth_validate_action(context, health_monitor.project_id, + consts.RBAC_POST) + + if (not CONF.api_settings.allow_ping_health_monitors and + health_monitor.type == consts.HEALTH_MONITOR_PING): + raise exceptions.DisabledOption( + option='type', value=consts.HEALTH_MONITOR_PING) + if pool.protocol == consts.PROTOCOL_UDP: self._validate_healthmonitor_request_for_udp(health_monitor) else: @@ -218,9 +221,6 @@ "%(protocol)s.") % {'type': health_monitor.type, 'protocol': consts.PROTOCOL_UDP}) - self._auth_validate_action(context, health_monitor.project_id, - consts.RBAC_POST) - # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) diff -Nru octavia-4.1.1/octavia/api/v2/controllers/l7policy.py octavia-4.1.4/octavia/api/v2/controllers/l7policy.py --- octavia-4.1.1/octavia/api/v2/controllers/l7policy.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/l7policy.py 2020-11-04 15:39:51.000000000 +0000 @@ -32,7 +32,6 @@ from octavia.common import validate from octavia.db import api as db_api from octavia.db import prepare as db_prepare -from octavia.i18n import _ CONF = cfg.CONF @@ -111,23 +110,13 @@ # do not give any information as to what constraint failed raise exceptions.InvalidOption(value='', option='') - def _escape_l7policy_udp_pool_request(self, pool): - if pool.protocol == constants.PROTOCOL_UDP: - raise exceptions.ValidationException( - detail=_("%s protocol pool can not be assigned to " - "l7policy.") % constants.PROTOCOL_UDP) - @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, body=l7policy_types.L7PolicyRootPOST, status_code=201) def post(self, l7policy_): """Creates a l7policy on a listener.""" l7policy = l7policy_.l7policy context = pecan.request.context.get('octavia_context') - # Make sure any pool specified by redirect_pool_id exists - if 
l7policy.redirect_pool_id: - db_pool = self._get_db_pool( - context.session, l7policy.redirect_pool_id) - self._escape_l7policy_udp_pool_request(db_pool) + # Verify the parent listener exists listener_id = l7policy.listener_id listener = self._get_db_listener( @@ -139,6 +128,12 @@ self._auth_validate_action(context, l7policy.project_id, constants.RBAC_POST) + # Make sure any pool specified by redirect_pool_id exists + if l7policy.redirect_pool_id: + db_pool = self._get_db_pool( + context.session, l7policy.redirect_pool_id) + self._validate_protocol(listener.protocol, db_pool.protocol) + # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) @@ -206,6 +201,16 @@ def put(self, id, l7policy_): """Updates a l7policy.""" l7policy = l7policy_.l7policy + context = pecan.request.context.get('octavia_context') + db_l7policy = self._get_db_l7policy(context.session, id, + show_deleted=False) + load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( + db_l7policy) + project_id, provider = self._get_lb_project_id_provider( + context.session, load_balancer_id) + + self._auth_validate_action(context, project_id, constants.RBAC_PUT) + l7policy_dict = validate.sanitize_l7policy_api_args( l7policy.to_dict(render_unsets=False)) # Reset renamed attributes @@ -213,20 +218,13 @@ if val in l7policy_dict: l7policy_dict[attr] = l7policy_dict.pop(val) sanitized_l7policy = l7policy_types.L7PolicyPUT(**l7policy_dict) - context = pecan.request.context.get('octavia_context') + listener = self._get_db_listener( + context.session, db_l7policy.listener_id) # Make sure any specified redirect_pool_id exists if l7policy_dict.get('redirect_pool_id'): db_pool = self._get_db_pool( context.session, l7policy_dict['redirect_pool_id']) - self._escape_l7policy_udp_pool_request(db_pool) - db_l7policy = self._get_db_l7policy(context.session, id, - show_deleted=False) - load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( - db_l7policy) - project_id, provider = self._get_lb_project_id_provider( - context.session, load_balancer_id) - - self._auth_validate_action(context, project_id, constants.RBAC_PUT) + self._validate_protocol(listener.protocol, db_pool.protocol) # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) diff -Nru octavia-4.1.1/octavia/api/v2/controllers/l7rule.py octavia-4.1.4/octavia/api/v2/controllers/l7rule.py --- octavia-4.1.1/octavia/api/v2/controllers/l7rule.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/l7rule.py 2020-11-04 15:39:51.000000000 +0000 @@ -123,10 +123,6 @@ def post(self, rule_): """Creates a l7rule on an l7policy.""" l7rule = rule_.rule - try: - validate.l7rule_data(l7rule) - except Exception as e: - raise exceptions.L7RuleValidation(error=e) context = pecan.request.context.get('octavia_context') db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, @@ -135,12 +131,16 @@ db_l7policy) l7rule.project_id, provider = self._get_lb_project_id_provider( context.session, load_balancer_id) - - self._check_l7policy_max_rules(context.session) - self._auth_validate_action(context, l7rule.project_id, constants.RBAC_POST) + try: + validate.l7rule_data(l7rule) + except Exception as e: + raise exceptions.L7RuleValidation(error=e) + + self._check_l7policy_max_rules(context.session) + # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) @@ -192,15 +192,6 @@ context = 
pecan.request.context.get('octavia_context') db_l7rule = self._get_db_l7rule(context.session, id, show_deleted=False) - - # Handle the invert unset - if l7rule.invert is None: - l7rule.invert = False - - new_l7rule = db_l7rule.to_dict() - new_l7rule.update(l7rule.to_dict()) - new_l7rule = data_models.L7Rule.from_dict(new_l7rule) - db_l7policy = self._get_db_l7policy(context.session, self.l7policy_id, show_deleted=False) load_balancer_id, listener_id = self._get_listener_and_loadbalancer_id( @@ -210,6 +201,14 @@ self._auth_validate_action(context, project_id, constants.RBAC_PUT) + # Handle the invert unset + if l7rule.invert is None: + l7rule.invert = False + + new_l7rule = db_l7rule.to_dict() + new_l7rule.update(l7rule.to_dict()) + new_l7rule = data_models.L7Rule.from_dict(new_l7rule) + try: validate.l7rule_data(new_l7rule) except Exception as e: diff -Nru octavia-4.1.1/octavia/api/v2/controllers/listener.py octavia-4.1.4/octavia/api/v2/controllers/listener.py --- octavia-4.1.1/octavia/api/v2/controllers/listener.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/listener.py 2020-11-04 15:39:51.000000000 +0000 @@ -115,11 +115,7 @@ if not db_pool: raise exceptions.NotFound( resource=data_models.Pool._name(), id=pool_id) - if (db_pool.protocol == constants.PROTOCOL_UDP and - db_pool.protocol != listener_protocol): - msg = _("Listeners of type %s can only have pools of " - "type UDP.") % constants.PROTOCOL_UDP - raise exceptions.ValidationException(detail=msg) + self._validate_protocol(listener_protocol, db_pool.protocol) def _has_tls_container_refs(self, listener_dict): return (listener_dict.get('tls_certificate_id') or @@ -131,6 +127,11 @@ listener_dict.get('insert_headers')) def _validate_insert_headers(self, insert_header_list, listener_protocol): + if (listener_protocol not in + constants.LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION): + raise exceptions.InvalidOption( + value='insert-headers', + option=('a %s protocol listener.' 
% listener_protocol)) if list(set(insert_header_list) - ( set(constants.SUPPORTED_HTTP_HEADERS + constants.SUPPORTED_SSL_HEADERS))): @@ -165,9 +166,9 @@ # Check for UDP compatibility if (listener_protocol == constants.PROTOCOL_UDP and self._is_tls_or_insert_header(listener_dict)): - raise exceptions.ValidationException(detail=_( - "%s protocol listener does not support TLS or header " - "insertion.") % constants.PROTOCOL_UDP) + raise exceptions.ValidationException( + detail=_("%s protocol listener does not " + "support TLS.") % constants.PROTOCOL_UDP) # Check for TLS disabled if (not CONF.api_settings.allow_tls_terminated_listeners and @@ -375,9 +376,9 @@ # Make sure we have a client CA cert if they enable client auth if ((listener.client_authentication != wtypes.Unset and - listener.client_authentication != constants.CLIENT_AUTH_NONE) - and not (db_listener.client_ca_tls_certificate_id or - listener.client_ca_tls_container_ref)): + listener.client_authentication != constants.CLIENT_AUTH_NONE) and + not (db_listener.client_ca_tls_certificate_id or + listener.client_ca_tls_container_ref)): raise exceptions.ValidationException(detail=_( "Client authentication setting %s requires a client CA " "container reference.") % @@ -480,15 +481,16 @@ driver_utils.listener_dict_to_provider_dict(listener_dict)) # Also prepare the baseline object data - old_provider_llistener = ( - driver_utils.db_listener_to_provider_listener(db_listener)) + old_provider_listener = ( + driver_utils.db_listener_to_provider_listener(db_listener, + for_delete=True)) # Dispatch to the driver LOG.info("Sending update Listener %s to provider %s", id, driver.name) driver_utils.call_provider( driver.name, driver.listener_update, - old_provider_llistener, + old_provider_listener, driver_dm.Listener.from_dict(provider_listener_dict)) # Update the database to reflect what the driver just accepted diff -Nru octavia-4.1.1/octavia/api/v2/controllers/load_balancer.py octavia-4.1.4/octavia/api/v2/controllers/load_balancer.py --- octavia-4.1.1/octavia/api/v2/controllers/load_balancer.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/load_balancer.py 2020-11-04 15:39:51.000000000 +0000 @@ -117,10 +117,12 @@ state=prov_status, id=id) @staticmethod - def _validate_network_and_fill_or_validate_subnet(load_balancer): + def _validate_network_and_fill_or_validate_subnet(load_balancer, + context=None): network = validate.network_exists_optionally_contains_subnet( network_id=load_balancer.vip_network_id, - subnet_id=load_balancer.vip_subnet_id) + subnet_id=load_balancer.vip_subnet_id, + context=context) if not load_balancer.vip_subnet_id: network_driver = utils.get_network_driver() if load_balancer.vip_address: @@ -168,8 +170,10 @@ break @staticmethod - def _validate_port_and_fill_or_validate_subnet(load_balancer): - port = validate.port_exists(port_id=load_balancer.vip_port_id) + def _validate_port_and_fill_or_validate_subnet(load_balancer, + context=None): + port = validate.port_exists(port_id=load_balancer.vip_port_id, + context=context) validate.check_port_in_use(port) load_balancer.vip_network_id = port.network_id @@ -184,7 +188,8 @@ # Identify the subnet for this port if load_balancer.vip_subnet_id: - validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id) + validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id, + context=context) else: if load_balancer.vip_address: for port_fixed_ip in port.fixed_ips: @@ -202,7 +207,7 @@ "VIP port's subnet could not be determined. 
Please " "specify either a VIP subnet or address.")) - def _validate_vip_request_object(self, load_balancer): + def _validate_vip_request_object(self, load_balancer, context=None): allowed_network_objects = [] if CONF.networking.allow_vip_port_id: allowed_network_objects.append('vip_port_id') @@ -234,10 +239,12 @@ # Validate the port id if load_balancer.vip_port_id: - self._validate_port_and_fill_or_validate_subnet(load_balancer) + self._validate_port_and_fill_or_validate_subnet(load_balancer, + context=context) # If no port id, validate the network id (and subnet if provided) elif load_balancer.vip_network_id: - self._validate_network_and_fill_or_validate_subnet(load_balancer) + self._validate_network_and_fill_or_validate_subnet(load_balancer, + context=context) # Validate just the subnet id elif load_balancer.vip_subnet_id: subnet = validate.subnet_exists( @@ -347,7 +354,7 @@ self._auth_validate_action(context, load_balancer.project_id, constants.RBAC_POST) - self._validate_vip_request_object(load_balancer) + self._validate_vip_request_object(load_balancer, context=context) self._validate_flavor(context.session, load_balancer) @@ -588,7 +595,8 @@ # Also prepare the baseline object data old_provider_lb = ( - driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb)) + driver_utils.db_loadbalancer_to_provider_loadbalancer( + db_lb, for_delete=True)) # Dispatch to the driver LOG.info("Sending update Load Balancer %s to provider " diff -Nru octavia-4.1.1/octavia/api/v2/controllers/member.py octavia-4.1.4/octavia/api/v2/controllers/member.py --- octavia-4.1.1/octavia/api/v2/controllers/member.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/member.py 2020-11-04 15:39:51.000000000 +0000 @@ -145,12 +145,6 @@ member = member_.member context = pecan.request.context.get('octavia_context') - validate.ip_not_reserved(member.address) - - # Validate member subnet - if member.subnet_id and not validate.subnet_exists(member.subnet_id): - raise exceptions.NotFound(resource='Subnet', - id=member.subnet_id) pool = self.repositories.pool.get(context.session, id=self.pool_id) member.project_id, provider = self._get_lb_project_id_provider( context.session, pool.load_balancer_id) @@ -158,6 +152,13 @@ self._auth_validate_action(context, member.project_id, constants.RBAC_POST) + validate.ip_not_reserved(member.address) + + # Validate member subnet + if (member.subnet_id and + not validate.subnet_exists(member.subnet_id, context=context)): + raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) + # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) @@ -231,7 +232,6 @@ context = pecan.request.context.get('octavia_context') db_member = self._get_db_member(context.session, id, show_deleted=False) - pool = self.repositories.pool.get(context.session, id=db_member.pool_id) project_id, provider = self._get_lb_project_id_provider( @@ -341,7 +341,7 @@ # Validate member subnets for member in members: if member.subnet_id and not validate.subnet_exists( - member.subnet_id): + member.subnet_id, context=context): raise exceptions.NotFound(resource='Subnet', id=member.subnet_id) diff -Nru octavia-4.1.1/octavia/api/v2/controllers/pool.py octavia-4.1.4/octavia/api/v2/controllers/pool.py --- octavia-4.1.1/octavia/api/v2/controllers/pool.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/api/v2/controllers/pool.py 2020-11-04 15:39:51.000000000 +0000 @@ -136,7 +136,7 @@ def _is_only_specified_in_request(self, request, 
**kwargs): request_attrs = [] check_attrs = kwargs['check_exist_attrs'] - escaped_attrs = ['from_data_model', + escaped_attrs = ['from_data_model', 'translate_key_to_data_model', 'translate_dict_keys_to_data_model', 'to_dict'] for attr in dir(request): @@ -190,15 +190,7 @@ # pool_dict: pool = pool_.pool context = pecan.request.context.get('octavia_context') - if pool.protocol == constants.PROTOCOL_UDP: - self._validate_pool_request_for_udp(pool) - else: - if (pool.session_persistence and ( - pool.session_persistence.persistence_timeout or - pool.session_persistence.persistence_granularity)): - raise exceptions.ValidationException(detail=_( - "persistence_timeout and persistence_granularity " - "is only for UDP protocol pools.")) + listener = None if pool.loadbalancer_id: pool.project_id, provider = self._get_lb_project_id_provider( context.session, pool.loadbalancer_id) @@ -216,6 +208,19 @@ self._auth_validate_action(context, pool.project_id, constants.RBAC_POST) + if pool.listener_id and listener: + self._validate_protocol(listener.protocol, pool.protocol) + + if pool.protocol == constants.PROTOCOL_UDP: + self._validate_pool_request_for_udp(pool) + else: + if (pool.session_persistence and ( + pool.session_persistence.persistence_timeout or + pool.session_persistence.persistence_granularity)): + raise exceptions.ValidationException(detail=_( + "persistence_timeout and persistence_granularity " + "is only for UDP protocol pools.")) + if pool.session_persistence: sp_dict = pool.session_persistence.to_dict(render_unsets=False) validate.check_session_persistence(sp_dict) @@ -404,7 +409,7 @@ # Also prepare the baseline object data old_provider_pool = driver_utils.db_pool_to_provider_pool( - db_pool) + db_pool, for_delete=True) # Dispatch to the driver LOG.info("Sending update Pool %s to provider %s", id, driver.name) @@ -431,15 +436,16 @@ """Deletes a pool from a load balancer.""" context = pecan.request.context.get('octavia_context') db_pool = self._get_db_pool(context.session, id, show_deleted=False) - if db_pool.l7policies: - raise exceptions.PoolInUseByL7Policy( - id=db_pool.id, l7policy_id=db_pool.l7policies[0].id) project_id, provider = self._get_lb_project_id_provider( context.session, db_pool.load_balancer_id) self._auth_validate_action(context, project_id, constants.RBAC_DELETE) + if db_pool.l7policies: + raise exceptions.PoolInUseByL7Policy( + id=db_pool.id, l7policy_id=db_pool.l7policies[0].id) + # Load the driver early as it also provides validation driver = driver_factory.get_driver(provider) diff -Nru octavia-4.1.1/octavia/cmd/house_keeping.py octavia-4.1.4/octavia/cmd/house_keeping.py --- octavia-4.1.1/octavia/cmd/house_keeping.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/cmd/house_keeping.py 2020-11-04 15:39:50.000000000 +0000 @@ -17,7 +17,6 @@ import signal import sys import threading -import time from oslo_config import cfg from oslo_log import log as logging @@ -104,28 +103,27 @@ timestamp = str(datetime.datetime.utcnow()) LOG.info("Starting house keeping at %s", timestamp) + threads = [] + # Thread to perform spare amphora check spare_amp_thread = threading.Thread(target=spare_amphora_check) spare_amp_thread.daemon = True spare_amp_thread.start() + threads.append(spare_amp_thread) # Thread to perform db cleanup db_cleanup_thread = threading.Thread(target=db_cleanup) db_cleanup_thread.daemon = True db_cleanup_thread.start() + threads.append(db_cleanup_thread) # Thread to perform certificate rotation cert_rotate_thread = 
threading.Thread(target=cert_rotation) cert_rotate_thread.daemon = True cert_rotate_thread.start() + threads.append(cert_rotate_thread) - signal.signal(signal.SIGHUP, _mutate_config) - - # Try-Exception block should be at the end to gracefully exit threads - try: - while True: - time.sleep(1) - except KeyboardInterrupt: + def process_cleanup(*args, **kwargs): LOG.info("Attempting to gracefully terminate House-Keeping") spare_amp_thread_event.set() db_cleanup_thread_event.set() @@ -134,3 +132,12 @@ db_cleanup_thread.join() cert_rotate_thread.join() LOG.info("House-Keeping process terminated") + + signal.signal(signal.SIGTERM, process_cleanup) + signal.signal(signal.SIGHUP, _mutate_config) + + try: + for thread in threads: + thread.join() + except KeyboardInterrupt: + process_cleanup() diff -Nru octavia-4.1.1/octavia/common/base_taskflow.py octavia-4.1.4/octavia/common/base_taskflow.py --- octavia-4.1.1/octavia/common/base_taskflow.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/common/base_taskflow.py 2020-11-04 15:39:51.000000000 +0000 @@ -16,6 +16,9 @@ import concurrent.futures import datetime +# work around for https://bugs.python.org/issue7980 +import _strptime # noqa: F401 pylint: disable=unused-import + from oslo_config import cfg from taskflow import engines as tf_engines diff -Nru octavia-4.1.1/octavia/common/clients.py octavia-4.1.4/octavia/common/clients.py --- octavia-4.1.1/octavia/common/clients.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/clients.py 2020-11-04 15:39:51.000000000 +0000 @@ -11,6 +11,8 @@ # under the License. from glanceclient import client as glance_client +from keystoneauth1.identity.generic import token +from keystoneauth1 import session from neutronclient.neutron import client as neutron_client from novaclient import api_versions from novaclient import client as nova_client @@ -105,6 +107,32 @@ LOG.exception("Error creating Neutron client.") return cls.neutron_client + @classmethod + def get_user_neutron_client(cls, context): + # get a normal session + ksession = keystone.KeystoneSession() + service_auth = ksession.get_auth() + + # make user auth and swap it in session + user_auth = token.Token(auth_url=service_auth.auth_url, + token=context.auth_token, + project_id=context.project_id) + user_session = session.Session(auth=user_auth) + + kwargs = { + 'session': user_session, + 'region_name': CONF.neutron.region_name, + 'endpoint_type': CONF.neutron.endpoint_type, + 'service_name': CONF.neutron.service_name, + 'insecure': CONF.neutron.insecure, + 'ca_cert': CONF.neutron.ca_certificates_file + } + if CONF.neutron.endpoint: + kwargs['endpoint_override'] = CONF.neutron.endpoint + + # create neutron client using user's session + return neutron_client.Client(NEUTRON_VERSION, **kwargs) + class GlanceAuth(object): glance_client = None diff -Nru octavia-4.1.1/octavia/common/config.py octavia-4.1.4/octavia/common/config.py --- octavia-4.1.1/octavia/common/config.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/config.py 2020-11-04 15:39:51.000000000 +0000 @@ -148,6 +148,12 @@ help=_('List of IP addresses reserved from being used for ' 'member addresses. IPv6 addresses should be in ' 'expanded, uppercase form.')), + cfg.BoolOpt('allow_invisible_resource_usage', default=False, + help=_("When True, users can use network resources they " + "cannot normally see as VIP or member subnets. 
Making " + "this True may allow users to access resources on " + "subnets they do not normally have access to via " + "neutron RBAC policies.")), ] healthmanager_opts = [ @@ -520,7 +526,8 @@ help=_('Disable certificate validation on SSL connections')), cfg.BoolOpt('enable_anti_affinity', default=False, help=_('Flag to indicate if nova anti-affinity feature is ' - 'turned on.')), + 'turned on. This option is only used when creating ' + 'amphorae in ACTIVE_STANDBY topology.')), cfg.StrOpt('anti_affinity_policy', default=constants.ANTI_AFFINITY, choices=[constants.ANTI_AFFINITY, constants.SOFT_ANTI_AFFINITY], help=_('Sets the anti-affinity policy for nova')), diff -Nru octavia-4.1.1/octavia/common/constants.py octavia-4.1.4/octavia/common/constants.py --- octavia-4.1.1/octavia/common/constants.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/constants.py 2020-11-04 15:39:51.000000000 +0000 @@ -205,6 +205,14 @@ UPDATE_STATS = 'UPDATE_STATS' UPDATE_HEALTH = 'UPDATE_HEALTH' +VALID_LISTENER_POOL_PROTOCOL_MAP = { + PROTOCOL_TCP: [PROTOCOL_HTTP, PROTOCOL_HTTPS, + PROTOCOL_PROXY, PROTOCOL_TCP], + PROTOCOL_HTTP: [PROTOCOL_HTTP, PROTOCOL_PROXY], + PROTOCOL_HTTPS: [PROTOCOL_HTTPS, PROTOCOL_PROXY, PROTOCOL_TCP], + PROTOCOL_TERMINATED_HTTPS: [PROTOCOL_HTTP, PROTOCOL_PROXY], + PROTOCOL_UDP: [PROTOCOL_UDP]} + # API Integer Ranges MIN_PORT_NUMBER = 1 MAX_PORT_NUMBER = 65535 @@ -310,6 +318,7 @@ AMPS_DATA = 'amps_data' NICS = 'nics' VIP = 'vip' +VIP_ADDRESS = 'vip_address' POOL = 'pool' POOL_CHILD_COUNT = 'pool_child_count' POOL_ID = 'pool_id' @@ -668,3 +677,8 @@ CLIENT_AUTH_MANDATORY = 'MANDATORY' SUPPORTED_CLIENT_AUTH_MODES = [CLIENT_AUTH_NONE, CLIENT_AUTH_OPTIONAL, CLIENT_AUTH_MANDATORY] + +# Sadly in the LBaaS v2 API, header insertions are on the listener objects +# but they should be on the pool. Dealing with it until v3. 
+LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION = [PROTOCOL_HTTP, + PROTOCOL_TERMINATED_HTTPS] diff -Nru octavia-4.1.1/octavia/common/data_models.py octavia-4.1.4/octavia/common/data_models.py --- octavia-4.1.1/octavia/common/data_models.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/data_models.py 2020-11-04 15:39:51.000000000 +0000 @@ -16,11 +16,14 @@ import re +from oslo_log import log as logging import six from sqlalchemy.orm import collections from octavia.common import constants +LOG = logging.getLogger(__name__) + class BaseDataModel(object): def to_dict(self, calling_classes=None, recurse=False, **kwargs): @@ -318,7 +321,11 @@ break for pool in self.load_balancer.pools: if pool.id == self.id: - self.load_balancer.pools.remove(pool) + try: + self.load_balancer.pools.remove(pool) + except ValueError: + LOG.debug("Pool %s has already been removed from load " + "balancer pools list.", pool.id) break for l7policy in self.l7policies: if l7policy.redirect_pool_id == self.id: diff -Nru octavia-4.1.1/octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py octavia-4.1.4/octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py --- octavia-4.1.1/octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py 2020-11-04 15:39:51.000000000 +0000 @@ -81,9 +81,8 @@ self.log_server = log_server self.connection_logging = connection_logging - def build_config(self, host_amphora, listeners, haproxy_versions, - socket_path=None, client_ca_filename=None, - client_crl=None, pool_tls_certs=None): + def build_config(self, host_amphora, listeners, tls_certs, + haproxy_versions, socket_path=None): """Convert a logical configuration to the HAProxy version :param host_amphora: The Amphora this configuration is hosted on @@ -102,10 +101,9 @@ feature_compatibility[constants.HTTP_REUSE] = True return self.render_loadbalancer_obj( - host_amphora, listeners, socket_path=socket_path, - feature_compatibility=feature_compatibility, - client_ca_filename=client_ca_filename, client_crl=client_crl, - pool_tls_certs=pool_tls_certs) + host_amphora, listeners, tls_certs=tls_certs, + socket_path=socket_path, + feature_compatibility=feature_compatibility) def _get_template(self): """Returns the specified Jinja configuration template.""" @@ -122,14 +120,13 @@ return JINJA_ENV.get_template(os.path.basename(self.haproxy_template)) def render_loadbalancer_obj(self, host_amphora, listeners, - socket_path=None, feature_compatibility=None, - client_ca_filename=None, client_crl=None, - pool_tls_certs=None): + tls_certs=None, socket_path=None, + feature_compatibility=None): """Renders a templated configuration from a load balancer object :param host_amphora: The Amphora this configuration is hosted on :param listener: The listener configuration - :param client_ca_filename: The CA certificate for client authorization + :param tls_certs: Dict of the TLS certificates for the listener :param socket_path: The socket path for Haproxy process :return: Rendered configuration """ @@ -138,10 +135,8 @@ host_amphora, listeners[0].load_balancer, listeners, - feature_compatibility, - client_ca_filename=client_ca_filename, - client_crl=client_crl, - pool_tls_certs=pool_tls_certs) + tls_certs, + feature_compatibility,) if not socket_path: socket_path = '%s/%s.sock' % (self.base_amp_path, listeners[0].load_balancer.id) @@ -154,8 +149,7 @@ constants=constants) def _transform_loadbalancer(self, host_amphora, 
loadbalancer, listeners, - feature_compatibility, client_ca_filename=None, - client_crl=None, pool_tls_certs=None): + tls_certs, feature_compatibility): """Transforms a load balancer into an object that will be processed by the templating system @@ -165,9 +159,7 @@ if listener.protocol == constants.PROTOCOL_UDP: continue listener_transforms.append(self._transform_listener( - listener, feature_compatibility, loadbalancer, - client_ca_filename=client_ca_filename, client_crl=client_crl, - pool_tls_certs=pool_tls_certs)) + listener, tls_certs, feature_compatibility, loadbalancer)) ret_value = { 'id': loadbalancer.id, @@ -217,9 +209,8 @@ 'vrrp_priority': amphora.vrrp_priority } - def _transform_listener(self, listener, feature_compatibility, - loadbalancer, client_ca_filename=None, - client_crl=None, pool_tls_certs=None): + def _transform_listener(self, listener, tls_certs, feature_compatibility, + loadbalancer): """Transforms a listener into an object that will be processed by the templating system @@ -253,23 +244,29 @@ CONF.haproxy_amphora.base_cert_dir, loadbalancer.id, '{}.pem'.format(listener.id)) - if listener.client_ca_tls_certificate_id: - ret_value['client_ca_tls_path'] = '%s' % ( - os.path.join(self.base_crt_dir, loadbalancer.id, - client_ca_filename)) - ret_value['client_auth'] = CLIENT_AUTH_MAP.get( - listener.client_authentication) - if listener.client_crl_container_id: - ret_value['client_crl_path'] = '%s' % ( - os.path.join(self.base_crt_dir, loadbalancer.id, client_crl)) + if tls_certs is not None: + if listener.client_ca_tls_certificate_id: + ret_value['client_ca_tls_path'] = '%s' % ( + os.path.join( + self.base_crt_dir, loadbalancer.id, + tls_certs[listener.client_ca_tls_certificate_id])) + ret_value['client_auth'] = CLIENT_AUTH_MAP.get( + listener.client_authentication) + + if listener.client_crl_container_id: + ret_value['client_crl_path'] = '%s' % ( + os.path.join(self.base_crt_dir, loadbalancer.id, + tls_certs[listener.client_crl_container_id])) pools = [] - for x in listener.pools: + pool_gen = (pool for pool in listener.pools if + pool.provisioning_status != constants.PENDING_DELETE) + for pool in pool_gen: kwargs = {} - if pool_tls_certs and pool_tls_certs.get(x.id): - kwargs = {'pool_tls_certs': pool_tls_certs.get(x.id)} + if tls_certs is not None and tls_certs.get(pool.id): + kwargs = {'pool_tls_certs': tls_certs.get(pool.id)} pools.append(self._transform_pool( - x, feature_compatibility, **kwargs)) + pool, feature_compatibility, **kwargs)) ret_value['pools'] = pools if listener.default_pool: for pool in pools: @@ -278,7 +275,7 @@ break l7policies = [self._transform_l7policy( - x, feature_compatibility, pool_tls_certs) + x, feature_compatibility, tls_certs) for x in listener.l7policies] ret_value['l7policies'] = l7policies return ret_value @@ -383,7 +380,7 @@ } def _transform_l7policy(self, l7policy, feature_compatibility, - pool_tls_certs=None): + tls_certs=None): """Transforms an L7 policy into an object that will be processed by the templating system @@ -397,10 +394,10 @@ } if l7policy.redirect_pool: kwargs = {} - if pool_tls_certs and pool_tls_certs.get( + if tls_certs is not None and tls_certs.get( l7policy.redirect_pool.id): kwargs = {'pool_tls_certs': - pool_tls_certs.get(l7policy.redirect_pool.id)} + tls_certs.get(l7policy.redirect_pool.id)} ret_value['redirect_pool'] = self._transform_pool( l7policy.redirect_pool, feature_compatibility, **kwargs) else: diff -Nru octavia-4.1.1/octavia/common/jinja/lvs/templates/base.j2 
octavia-4.1.4/octavia/common/jinja/lvs/templates/base.j2 --- octavia-4.1.1/octavia/common/jinja/lvs/templates/base.j2 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/common/jinja/lvs/templates/base.j2 2020-11-04 15:39:50.000000000 +0000 @@ -14,7 +14,11 @@ # #} # Configuration for Loadbalancer {{ loadbalancer.id }} +{% if loadbalancer.listener.enabled %} # Configuration for Listener {{ udp_listener_id }} +{% else %} +# Listener {{ udp_listener_id }} is disabled +{% endif %} {% block global_definitions %}{% endblock global_definitions %} diff -Nru octavia-4.1.1/octavia/common/jinja/lvs/templates/macros.j2 octavia-4.1.4/octavia/common/jinja/lvs/templates/macros.j2 --- octavia-4.1.1/octavia/common/jinja/lvs/templates/macros.j2 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/common/jinja/lvs/templates/macros.j2 2020-11-04 15:39:51.000000000 +0000 @@ -25,18 +25,12 @@ {%- macro misc_check_macro(pool, member, health_monitor) -%} MISC_CHECK { {{ misc_path_macro(member, health_monitor) }} - misc_timeout {{ pool.health_monitor.delay }} + misc_timeout {{ pool.health_monitor.timeout }} } {%- endmacro -%} {% macro health_monitor_rs_macro(constants, pool, member) %} {% if pool.health_monitor and pool.health_monitor.enabled %} - {% if pool.health_monitor.timeout %} - delay_before_retry {{ pool.health_monitor.timeout }} - {% endif %} - {% if pool.health_monitor.fall_threshold %} - retry {{ pool.health_monitor.fall_threshold }} - {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_UDP_CONNECT %} {{ misc_check_macro(pool, member, pool.health_monitor) -}} {% endif %} @@ -62,9 +56,7 @@ {% if default_pool and default_pool.health_monitor and default_pool.health_monitor.enabled %} {% if default_pool.health_monitor.delay %} delay_loop {{ default_pool.health_monitor.delay }} - {% endif %} - {% if default_pool.health_monitor.timeout %} - delay_before_retry {{ default_pool.health_monitor.timeout }} + delay_before_retry {{ default_pool.health_monitor.delay }} {% endif %} {% if default_pool.health_monitor.fall_threshold %} retry {{ default_pool.health_monitor.fall_threshold }} diff -Nru octavia-4.1.1/octavia/common/policy.py octavia-4.1.4/octavia/common/policy.py --- octavia-4.1.1/octavia/common/policy.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/policy.py 2020-11-04 15:39:51.000000000 +0000 @@ -12,6 +12,8 @@ """Policy Engine For Octavia.""" +import sys + from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy as oslo_policy @@ -151,5 +153,14 @@ # This is used for the oslopolicy-policy-generator tool def get_no_context_enforcer(): - config.init([]) + + # oslo.config needs access to the --config-dir and --config-file + # command line args + filtered_args = ['--config-dir', '--config-file'] + # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] + conf_args = [arg for idx, arg in enumerate(sys.argv[1:]) + if (arg in filtered_args or + sys.argv[idx] in filtered_args)] + + config.init(conf_args) return Policy() diff -Nru octavia-4.1.1/octavia/common/utils.py octavia-4.1.4/octavia/common/utils.py --- octavia-4.1.1/octavia/common/utils.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/common/utils.py 2020-11-04 15:39:51.000000000 +0000 @@ -20,6 +20,7 @@ import base64 import hashlib +import re import socket import netaddr @@ -45,7 +46,9 @@ # break backwards compatibility with existing loadbalancers. 
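The utils.py hunk continuing below rewrites a leading "-" in the base64-encoded SHA1 name to "x", since HAProxy can reject identifiers that begin with a dash (see the haproxy issue linked in the code). A stdlib-only illustration of the substitution, with hypothetical input values:

    import re

    # Only a dash in the first position is rewritten; interior dashes stay.
    assert re.sub(r"^-", "x", "-AbC") == "xAbC"
    assert re.sub(r"^-", "x", "Ab-C") == "Ab-C"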
hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii')) - return b64_str.decode('UTF-8') + b64_sha1 = b64_str.decode('UTF-8') + # https://github.com/haproxy/haproxy/issues/644 + return re.sub(r"^-", "x", b64_sha1) def get_network_driver(): diff -Nru octavia-4.1.1/octavia/common/validate.py octavia-4.1.4/octavia/common/validate.py --- octavia-4.1.1/octavia/common/validate.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/common/validate.py 2020-11-04 15:39:51.000000000 +0000 @@ -313,11 +313,11 @@ return l7policy -def port_exists(port_id): +def port_exists(port_id, context=None): """Raises an exception when a port does not exist.""" network_driver = utils.get_network_driver() try: - port = network_driver.get_port(port_id) + port = network_driver.get_port(port_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Port', id=port_id) return port @@ -332,11 +332,11 @@ return False -def subnet_exists(subnet_id): +def subnet_exists(subnet_id, context=None): """Raises an exception when a subnet does not exist.""" network_driver = utils.get_network_driver() try: - subnet = network_driver.get_subnet(subnet_id) + subnet = network_driver.get_subnet(subnet_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Subnet', id=subnet_id) return subnet @@ -359,14 +359,15 @@ "VIP QoS policy is not allowed in this deployment.")) -def network_exists_optionally_contains_subnet(network_id, subnet_id=None): +def network_exists_optionally_contains_subnet(network_id, subnet_id=None, + context=None): """Raises an exception when a network does not exist. If a subnet is provided, also validate the network contains that subnet. 
""" network_driver = utils.get_network_driver() try: - network = network_driver.get_network(network_id) + network = network_driver.get_network(network_id, context=context) except Exception: raise exceptions.InvalidSubresource(resource='Network', id=network_id) if subnet_id: diff -Nru octavia-4.1.1/octavia/controller/healthmanager/health_drivers/update_db.py octavia-4.1.4/octavia/controller/healthmanager/health_drivers/update_db.py --- octavia-4.1.1/octavia/controller/healthmanager/health_drivers/update_db.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/controller/healthmanager/health_drivers/update_db.py 2020-11-04 15:39:51.000000000 +0000 @@ -183,7 +183,8 @@ if db_lb: expected_listener_count = 0 - if 'PENDING' in db_lb['provisioning_status']: + if ('PENDING' in db_lb['provisioning_status'] or + not db_lb['enabled']): ignore_listener_count = True else: for key, listener in db_lb.get('listeners', {}).items(): @@ -286,13 +287,19 @@ health_msg_version = health.get('ver', 0) - for listener_id in db_lb.get('listeners', {}): - db_op_status = db_lb['listeners'][listener_id]['operating_status'] + for listener_id in db_lb.get(constants.LISTENERS, {}): + db_listener = db_lb[constants.LISTENERS][listener_id] + db_op_status = db_listener[constants.OPERATING_STATUS] listener_status = None listener = None if listener_id not in listeners: - listener_status = constants.OFFLINE + if (db_listener['enabled'] and + db_lb[constants.PROVISIONING_STATUS] == + constants.ACTIVE): + listener_status = constants.ERROR + else: + listener_status = constants.OFFLINE else: listener = listeners[listener_id] diff -Nru octavia-4.1.1/octavia/controller/worker/controller_worker.py octavia-4.1.4/octavia/controller/worker/controller_worker.py --- octavia-4.1.1/octavia/controller/worker/controller_worker.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/controller/worker/controller_worker.py 2020-11-04 15:39:51.000000000 +0000 @@ -457,12 +457,23 @@ log=LOG): delete_member_tf.run() + @tenacity.retry( + retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), + wait=tenacity.wait_incrementing( + RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), + stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) def batch_update_members(self, old_member_ids, new_member_ids, updated_members): - old_members = [self._member_repo.get(db_apis.get_session(), id=mid) - for mid in old_member_ids] new_members = [self._member_repo.get(db_apis.get_session(), id=mid) for mid in new_member_ids] + # The API may not have commited all of the new member records yet. + # Make sure we retry looking them up. + if None in new_members or len(new_members) != len(new_member_ids): + LOG.warning('Failed to fetch one of the new members from DB. 
' + 'Retrying for up to 60 seconds.') + raise db_exceptions.NoResultFound + old_members = [self._member_repo.get(db_apis.get_session(), id=mid) + for mid in old_member_ids] updated_members = [ (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m) for m in updated_members] @@ -948,7 +959,8 @@ amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) - LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id) + LOG.info("Start amphora cert rotation, amphora's id is: %s", + amphora_id) certrotation_amphora_tf = self._taskflow_load( self._amphora_flows.cert_rotate_amphora_flow(), @@ -958,6 +970,8 @@ with tf_logging.DynamicLoggingListener(certrotation_amphora_tf, log=LOG): certrotation_amphora_tf.run() + LOG.info("Finished amphora cert rotation, amphora's id was: %s", + amphora_id) def update_amphora_agent_config(self, amphora_id): """Update the amphora agent configuration. @@ -987,3 +1001,5 @@ with tf_logging.DynamicLoggingListener(update_amphora_tf, log=LOG): update_amphora_tf.run() + LOG.info("Finished amphora agent configuration update, amphora's id " + "was: %s", amphora_id) diff -Nru octavia-4.1.1/octavia/controller/worker/tasks/database_tasks.py octavia-4.1.4/octavia/controller/worker/tasks/database_tasks.py --- octavia-4.1.1/octavia/controller/worker/tasks/database_tasks.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/controller/worker/tasks/database_tasks.py 2020-11-04 15:39:51.000000000 +0000 @@ -522,9 +522,16 @@ "allocation.") return None - amp = self.amphora_repo.allocate_and_associate( - db_apis.get_session(), - loadbalancer_id) + try: + amp = self.amphora_repo.allocate_and_associate( + db_apis.get_session(), + loadbalancer_id) + except Exception as e: + LOG.error("Failed to get a spare amphora for " + "loadbalancer {} due to: {}".format( + loadbalancer_id, e)) + return None + if amp is None: LOG.debug("No Amphora available for load balancer with id %s", loadbalancer_id) @@ -1176,14 +1183,14 @@ """ LOG.debug("Mark ACTIVE in DB for load balancer id: %s " - "and listener ids: %s", loadbalancer.id, + "and updating status for listener ids: %s", loadbalancer.id, ', '.join([l.id for l in listeners])) self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id, provisioning_status=constants.ACTIVE) for listener in listeners: - self.listener_repo.update(db_apis.get_session(), listener.id, - provisioning_status=constants.ACTIVE) + self.listener_repo.prov_status_active_if_not_error( + db_apis.get_session(), listener.id) def revert(self, loadbalancer, listeners, *args, **kwargs): """Mark the load balancer and listeners as broken. @@ -1202,35 +1209,6 @@ self.task_utils.mark_listener_prov_status_error(listener.id) -class MarkListenerActiveInDB(BaseDatabaseTask): - """Mark the listener active in the DB. 
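The tenacity decorator added to batch_update_members above retries the whole lookup while the API's database commits may still be in flight. A self-contained toy of the same pattern (hypothetical names, zero wait so it runs instantly; the real code waits with RETRY_INITIAL_DELAY, RETRY_BACKOFF and RETRY_MAX):

    import tenacity

    class NoResultFound(Exception):
        pass

    attempts = {'count': 0}

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(NoResultFound),
        wait=tenacity.wait_incrementing(start=0, increment=0, max=0),
        stop=tenacity.stop_after_attempt(3))
    def fetch_member():
        # Simulate the row appearing only on the third lookup.
        attempts['count'] += 1
        if attempts['count'] < 3:
            raise NoResultFound()
        return 'member-row'

    assert fetch_member() == 'member-row'
    assert attempts['count'] == 3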
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, listener): - """Mark the listener as active in DB - - :param listener: The listener to be marked active - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for listener id: %s ", listener.id) - self.listener_repo.update(db_apis.get_session(), listener.id, - provisioning_status=constants.ACTIVE) - - def revert(self, listener, *args, **kwargs): - """Mark the listener ERROR since the delete couldn't happen - - :param listener: The listener that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting mark listener active in DB " - "for listener id %s", listener.id) - self.task_utils.mark_listener_prov_status_error(listener.id) - - class MarkListenerDeletedInDB(BaseDatabaseTask): """Mark the listener deleted in the DB. diff -Nru octavia-4.1.1/octavia/controller/worker/tasks/network_tasks.py octavia-4.1.4/octavia/controller/worker/tasks/network_tasks.py --- octavia-4.1.1/octavia/controller/worker/tasks/network_tasks.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/controller/worker/tasks/network_tasks.py 2020-11-04 15:39:51.000000000 +0000 @@ -595,7 +595,7 @@ """Apply qos policy on the vrrp ports which are related with vip.""" qos_policy_id = loadbalancer.vip.qos_policy_id if not qos_policy_id and ( - update_dict and ( + not update_dict or ( 'vip' not in update_dict or 'qos_policy_id' not in update_dict['vip'])): return diff -Nru octavia-4.1.1/octavia/db/repositories.py octavia-4.1.4/octavia/db/repositories.py --- octavia-4.1.1/octavia/db/repositories.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/db/repositories.py 2020-11-04 15:39:51.000000000 +0000 @@ -27,9 +27,10 @@ from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import uuidutils -from sqlalchemy.orm import joinedload from sqlalchemy.orm import noload from sqlalchemy.orm import subqueryload +from sqlalchemy.sql.expression import false +from sqlalchemy.sql import func from octavia.common import constants as consts from octavia.common import data_models @@ -1070,6 +1071,16 @@ session.add(model) return model.to_data_model() + def prov_status_active_if_not_error(self, session, listener_id): + """Update provisioning_status to ACTIVE if not already in ERROR.""" + with session.begin(subtransactions=True): + (session.query(self.model_class).filter_by(id=listener_id). + # Don't mark ERROR or already ACTIVE as ACTIVE + filter(~self.model_class.provisioning_status.in_( + [consts.ERROR, consts.ACTIVE])). + update({self.model_class.provisioning_status: consts.ACTIVE}, + synchronize_session='fetch')) + class ListenerStatisticsRepository(BaseRepository): model_class = models.ListenerStatistics @@ -1182,10 +1193,7 @@ .filter(models.Amphora.status != consts.DELETED) # And the LB is also not DELETED .filter(models.LoadBalancer.provisioning_status != - consts.DELETED) - # And what does this do? Some SQLAlchemy magic? 
- .options(joinedload('*')) - ).first() + consts.DELETED)).first() if db_lb: return db_lb.to_data_model() return None @@ -1214,9 +1222,12 @@ seconds=expired_seconds) with session.begin(subtransactions=True): - amp = session.query(self.model_class).with_for_update().filter_by( - cert_busy=False).filter( - self.model_class.cert_expiration < expired_date).first() + amp = session.query(self.model_class).with_for_update().filter( + self.model_class.status.notin_( + [consts.DELETED, consts.PENDING_DELETE]), + self.model_class.cert_busy == false(), + self.model_class.cert_expiration < expired_date + ).first() if amp is None: return None @@ -1456,7 +1467,8 @@ amp = session.query(self.model_class).with_for_update().filter_by( busy=False).filter( - self.model_class.last_update < expired_time).first() + self.model_class.last_update < expired_time).order_by( + func.random()).first() if amp is None: return None diff -Nru octavia-4.1.1/octavia/network/base.py octavia-4.1.4/octavia/network/base.py --- octavia-4.1.1/octavia/network/base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/network/base.py 2020-11-04 15:39:51.000000000 +0000 @@ -192,30 +192,33 @@ pass @abc.abstractmethod - def get_network(self, network_id): + def get_network(self, network_id, context=None): """Retrieves network from network id. :param network_id: id of an network to retrieve + :param context: A request context :return: octavia.network.data_models.Network :raises: NetworkException, NetworkNotFound """ pass @abc.abstractmethod - def get_subnet(self, subnet_id): + def get_subnet(self, subnet_id, context=None): """Retrieves subnet from subnet id. :param subnet_id: id of a subnet to retrieve + :param context: A request context :return: octavia.network.data_models.Subnet :raises: NetworkException, SubnetNotFound """ pass @abc.abstractmethod - def get_port(self, port_id): + def get_port(self, port_id, context=None): """Retrieves port from port id. 
:param port_id: id of a port to retrieve + :param context: A request context :return: octavia.network.data_models.Port :raises: NetworkException, PortNotFound """ diff -Nru octavia-4.1.1/octavia/network/drivers/neutron/base.py octavia-4.1.4/octavia/network/drivers/neutron/base.py --- octavia-4.1.1/octavia/network/drivers/neutron/base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/network/drivers/neutron/base.py 2020-11-04 15:39:51.000000000 +0000 @@ -170,9 +170,14 @@ return [self._port_to_octavia_interface( compute_id, port) for port in ports['ports']] - def _get_resource(self, resource_type, resource_id): + def _get_resource(self, resource_type, resource_id, context=None): + neutron_client = self.neutron_client + if context and not CONF.networking.allow_invisible_resource_usage: + neutron_client = clients.NeutronAuth.get_user_neutron_client( + context) + try: - resource = getattr(self.neutron_client, 'show_%s' % + resource = getattr(neutron_client, 'show_%s' % resource_type)(resource_id) return getattr(utils, 'convert_%s_dict_to_model' % resource_type)(resource) @@ -222,14 +227,14 @@ LOG.exception(message) raise base.NetworkException(message) - def get_network(self, network_id): - return self._get_resource('network', network_id) + def get_network(self, network_id, context=None): + return self._get_resource('network', network_id, context=context) - def get_subnet(self, subnet_id): - return self._get_resource('subnet', subnet_id) + def get_subnet(self, subnet_id, context=None): + return self._get_resource('subnet', subnet_id, context=context) - def get_port(self, port_id): - return self._get_resource('port', port_id) + def get_port(self, port_id, context=None): + return self._get_resource('port', port_id, context=context) def get_network_by_name(self, network_name): return self._get_resources_by_filters( diff -Nru octavia-4.1.1/octavia/network/drivers/noop_driver/driver.py octavia-4.1.4/octavia/network/drivers/noop_driver/driver.py --- octavia-4.1.1/octavia/network/drivers/noop_driver/driver.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/network/drivers/noop_driver/driver.py 2020-11-04 15:39:51.000000000 +0000 @@ -310,13 +310,13 @@ def update_vip(self, loadbalancer, for_delete=False): self.driver.update_vip(loadbalancer, for_delete) - def get_network(self, network_id): + def get_network(self, network_id, context=None): return self.driver.get_network(network_id) - def get_subnet(self, subnet_id): + def get_subnet(self, subnet_id, context=None): return self.driver.get_subnet(subnet_id) - def get_port(self, port_id): + def get_port(self, port_id, context=None): return self.driver.get_port(port_id) def get_qos_policy(self, qos_policy_id): diff -Nru octavia-4.1.1/octavia/tests/common/constants.py octavia-4.1.4/octavia/tests/common/constants.py --- octavia-4.1.1/octavia/tests/common/constants.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/common/constants.py 2020-11-04 15:39:51.000000000 +0000 @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
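The _get_resource change above is what makes the new allow_invisible_resource_usage option effective: unless that option is True, validation calls that carry a request context use a Neutron client built from the caller's own token instead of Octavia's service credentials. A minimal sketch of that construction, with placeholder endpoint and token values (not working credentials):

    from keystoneauth1.identity.generic import token
    from keystoneauth1 import session
    from neutronclient.neutron import client as neutron_client

    # Re-authenticate as the requesting user, not the service account.
    user_auth = token.Token(auth_url='http://keystone.example/v3',
                            token='<context.auth_token>',
                            project_id='<context.project_id>')
    user_session = session.Session(auth=user_auth)
    neutron = neutron_client.Client('2.0', session=user_session)
    # neutron.show_subnet(...) would now see only what the caller can see.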
+from octavia.common import constants + class MockNovaInterface(object): net_id = None @@ -213,3 +215,22 @@ 'total_ips': MOCK_NETWORK_TOTAL_IPS, 'used_ips': MOCK_NETWORK_USED_IPS, 'subnet_ip_availability': MOCK_SUBNET_IP_AVAILABILITY})} + +INVALID_LISTENER_POOL_PROTOCOL_MAP = { + constants.PROTOCOL_HTTP: [constants.PROTOCOL_HTTPS, + constants.PROTOCOL_TCP, + constants.PROTOCOL_TERMINATED_HTTPS, + constants.PROTOCOL_UDP], + constants.PROTOCOL_HTTPS: [constants.PROTOCOL_HTTP, + constants.PROTOCOL_TERMINATED_HTTPS, + constants.PROTOCOL_UDP], + constants.PROTOCOL_TCP: [constants.PROTOCOL_TERMINATED_HTTPS, + constants.PROTOCOL_UDP], + constants.PROTOCOL_TERMINATED_HTTPS: [constants.PROTOCOL_HTTPS, + constants.PROTOCOL_TCP, + constants.PROTOCOL_UDP], + constants.PROTOCOL_UDP: [constants.PROTOCOL_TCP, + constants.PROTOCOL_HTTP, + constants.PROTOCOL_HTTPS, + constants.PROTOCOL_TERMINATED_HTTPS, + constants.PROTOCOL_PROXY]} diff -Nru octavia-4.1.1/octavia/tests/contrib/gate_hook.sh octavia-4.1.4/octavia/tests/contrib/gate_hook.sh --- octavia-4.1.1/octavia/tests/contrib/gate_hook.sh 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/contrib/gate_hook.sh 2020-11-04 15:39:51.000000000 +0000 @@ -16,9 +16,6 @@ export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin barbican https://git.openstack.org/openstack/barbican"$'\n' -# Allow testing against diskimage-builder changes with depends-on -export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIBS_FROM_GIT+=,diskimage-builder"$'\n' - # Sort out our gate args . $(dirname "$0")/decode_args.sh diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/base.py octavia-4.1.4/octavia/tests/functional/api/v2/base.py --- octavia-4.1.1/octavia/tests/functional/api/v2/base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/base.py 2020-11-04 15:39:51.000000000 +0000 @@ -348,6 +348,9 @@ response = self.put(path, body, status=202) return response.json + # NOTE: This method should be used cautiously. On load balancers with a + # significant number of child resources, it will update the status for + # each and every resource and thus take a lot of DB time. def _set_lb_and_children_statuses(self, lb_id, prov_status, op_status, autodetect=True): self.set_object_status(self.lb_repo, lb_id, @@ -417,6 +420,9 @@ provisioning_status=hm_prov, operating_status=op_status) + # NOTE: This method should be used cautiously. On load balancers with a + # significant number of child resources, it will update the status for + # each and every resource and thus take a lot of DB time.
def set_lb_status(self, lb_id, status=None): explicit_status = True if status is not None else False if not explicit_status: diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_flavors.py octavia-4.1.4/octavia/tests/functional/api/v2/test_flavors.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_flavors.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_flavors.py 2020-11-04 15:39:51.000000000 +0000 @@ -290,7 +290,6 @@ flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'), True) self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id'))) - response = self.get(self.FLAVORS_PATH) self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) auth_strategy = self.conf.conf.api_settings.get('auth_strategy') @@ -314,6 +313,7 @@ with mock.patch( "oslo_context.context.RequestContext.to_policy_values", return_value=override_credentials): + response = self.get(self.FLAVORS_PATH) api_list = response.json.get(self.root_tag_list) self.conf.config(group='api_settings', auth_strategy=auth_strategy) self.assertEqual(2, len(api_list)) diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_health_monitor.py octavia-4.1.4/octavia/tests/functional/api/v2/test_health_monitor.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_health_monitor.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_health_monitor.py 2020-11-04 15:39:51.000000000 +0000 @@ -285,6 +285,58 @@ hm_id_protocols = [(hm.get('id'), hm.get('type')) for hm in hms] self.assertIn((hm3.get('id'), hm3.get('type')), hm_id_protocols) + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + pool2 = self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_health_monitor( + self.pool_id, constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 1).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.HMS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_non_admin_global_observer(self): project_id = 
uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', @@ -455,6 +507,64 @@ hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) + def test_get_all_sorted_by_max_retries(self): + pool1 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool1').get('pool') + self.set_lb_status(self.lb_id) + pool2 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool2').get('pool') + self.set_lb_status(self.lb_id) + pool3 = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + name='pool3').get('pool') + self.set_lb_status(self.lb_id) + hm1 = self.create_health_monitor( + pool1.get('id'), constants.HEALTH_MONITOR_HTTP, + 1, 1, 1, 2, name='hm1').get(self.root_tag) + self.set_lb_status(self.lb_id) + hm2 = self.create_health_monitor( + pool2.get('id'), constants.HEALTH_MONITOR_PING, + 1, 1, 1, 1, name='hm2').get(self.root_tag) + self.set_lb_status(self.lb_id) + hm3 = self.create_health_monitor( + pool3.get('id'), constants.HEALTH_MONITOR_TCP, + 1, 1, 1, 3, name='hm3').get(self.root_tag) + self.set_lb_status(self.lb_id) + + response = self.get(self.HMS_PATH, params={'sort': 'max_retries:desc'}) + hms_desc = response.json.get(self.root_tag_list) + response = self.get(self.HMS_PATH, params={'sort': 'max_retries:asc'}) + hms_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(hms_desc)) + self.assertEqual(3, len(hms_asc)) + + hm_id_names_desc = [(hm.get('id'), hm.get('name')) for hm in hms_desc] + hm_id_names_asc = [(hm.get('id'), hm.get('name')) for hm in hms_asc] + self.assertEqual(hm_id_names_asc, list(reversed(hm_id_names_desc))) + + self.assertEqual(hm2[constants.MAX_RETRIES], + hms_asc[0][constants.MAX_RETRIES]) + self.assertEqual(hm1[constants.MAX_RETRIES], + hms_asc[1][constants.MAX_RETRIES]) + self.assertEqual(hm3[constants.MAX_RETRIES], + hms_asc[2][constants.MAX_RETRIES]) + + self.assertEqual(hm3[constants.MAX_RETRIES], + hms_desc[0][constants.MAX_RETRIES]) + self.assertEqual(hm1[constants.MAX_RETRIES], + hms_desc[1][constants.MAX_RETRIES]) + self.assertEqual(hm2[constants.MAX_RETRIES], + hms_desc[2][constants.MAX_RETRIES]) + def test_get_all_limited(self): pool1 = self.create_pool( self.lb_id, diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_l7policy.py octavia-4.1.4/octavia/tests/functional/api/v2/test_l7policy.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_l7policy.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_l7policy.py 2020-11-04 15:39:51.000000000 +0000 @@ -22,6 +22,7 @@ import octavia.common.context from octavia.common import data_models from octavia.common import exceptions +from octavia.tests.common import constants as c_const from octavia.tests.functional.api.v2 import base @@ -223,6 +224,58 @@ self.assertIn((api_l7p_c.get('id'), api_l7p_c.get('action')), policy_id_actions) + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + listener1 = self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + listener1_id = listener1.get('listener').get('id') + self.set_lb_status(lb1_id) + pool1 = self.create_pool(lb1_id, 
constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + pool1_id = pool1.get('pool').get('id') + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_l7policy( + listener1_id, constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + position=2, redirect_pool_id=pool1_id).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_l7policy( + self.listener_id, constants.L7POLICY_ACTION_REDIRECT_TO_URL, + redirect_url='http://localhost/').get(self.root_tag) + self.set_lb_status(lb1_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.L7POLICIES_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', @@ -770,21 +823,6 @@ 'redirect_pool_id': uuidutils.generate_uuid()} self.post(self.L7POLICIES_PATH, self._build_body(l7policy), status=404) - def test_bad_create_redirect_to_udp_pool(self): - udp_pool_id = self.create_pool( - self.lb_id, - constants.PROTOCOL_UDP, - constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') - l7policy = { - 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, - 'listener_id': self.listener_id, - 'redirect_pool_id': udp_pool_id} - res = self.post(self.L7POLICIES_PATH, self._build_body(l7policy), - status=400, expect_errors=True) - expect_error_msg = ("Validation failure: %s protocol pool can not be " - "assigned to l7policy.") % constants.PROTOCOL_UDP - self.assertEqual(expect_error_msg, res.json['faultstring']) - def test_bad_create_redirect_to_url(self): l7policy = {'listener_id': self.listener_id, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, @@ -935,27 +973,6 @@ l7policy_id=api_l7policy.get('id')), self._build_body(new_l7policy), status=400) - def test_bad_update_redirect_to_udp_pool(self): - api_l7policy = self.create_l7policy(self.listener_id, - constants.L7POLICY_ACTION_REJECT, - ).get(self.root_tag) - self.set_lb_status(self.lb_id) - udp_pool_id = self.create_pool( - self.lb_id, - constants.PROTOCOL_UDP, - constants.LB_ALGORITHM_ROUND_ROBIN).get('pool').get('id') - self.set_lb_status(self.lb_id) - new_l7policy = { - 'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL, - 'redirect_pool_id': udp_pool_id} - res = self.put(self.L7POLICY_PATH.format( - l7policy_id=api_l7policy.get('id')), - self._build_body(new_l7policy), - status=400, expect_errors=True) - expect_error_msg = ("Validation failure: %s protocol pool can not be " - "assigned to l7policy.") % constants.PROTOCOL_UDP - self.assertEqual(expect_error_msg, res.json['faultstring']) - def test_bad_update_redirect_to_url(self): api_l7policy = 
self.create_l7policy(self.listener_id, constants.L7POLICY_ACTION_REJECT, @@ -1298,3 +1315,118 @@ self.delete(self.L7POLICY_PATH.format( l7policy_id=l7policy.get('id')), status=404) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_post(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL} + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + + l7policy['listener_id'] = listener.get('id') + l7policy['redirect_pool_id'] = pool.get('id') + self.set_object_status(self.lb_repo, self.lb_id) + self.post(self.L7POLICIES_PATH, + self._build_body(l7policy), status=201) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 100 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + + l7policy['listener_id'] = listener.get('id') + l7policy['redirect_pool_id'] = pool.get('id') + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + res = self.post(self.L7POLICIES_PATH, + self._build_body(l7policy), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_put(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + new_l7policy = {'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL} + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + l7policy = self.create_l7policy( + listener.get('id'), + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_object_status(self.lb_repo, self.lb_id) + new_l7policy['redirect_pool_id'] = pool.get('id') + + self.put( + 
self.L7POLICY_PATH.format(l7policy_id=l7policy.get('id')), + self._build_body(new_l7policy), status=200) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 100 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + l7policy = self.create_l7policy( + listener.get('id'), + constants.L7POLICY_ACTION_REJECT).get(self.root_tag) + self.set_object_status(self.lb_repo, self.lb_id) + new_l7policy['redirect_pool_id'] = pool.get('id') + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + res = self.put(self.L7POLICY_PATH.format( + l7policy_id=l7policy.get('id')), + self._build_body(new_l7policy), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_l7rule.py octavia-4.1.4/octavia/tests/functional/api/v2/test_l7rule.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_l7rule.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_l7rule.py 2020-11-04 15:39:51.000000000 +0000 @@ -193,6 +193,42 @@ self.assertIn((api_l7r_b.get('id'), api_l7r_b.get('type')), rule_id_types) + def test_get_all_unscoped_token(self): + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_PATH, + constants.L7RULE_COMPARE_TYPE_STARTS_WITH, + '/api').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.create_l7rule( + self.l7policy_id, constants.L7RULE_TYPE_COOKIE, + constants.L7RULE_COMPARE_TYPE_CONTAINS, 'some-value', + key='some-cookie').get(self.root_tag) + self.set_lb_status(self.lb_id) + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.l7rules_path, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_not_authorized(self): self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_listener.py octavia-4.1.4/octavia/tests/functional/api/v2/test_listener.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_listener.py 2019-12-16 10:45:31.000000000 +0000 +++ 
octavia-4.1.4/octavia/tests/functional/api/v2/test_listener.py 2020-11-04 15:39:51.000000000 +0000 @@ -26,6 +26,7 @@ from octavia.common import data_models from octavia.common import exceptions from octavia.db import api as db_api +from octavia.tests.common import constants as c_const from octavia.tests.functional.api.v2 import base from octavia.tests.unit.common.sample_configs import sample_certs @@ -129,6 +130,47 @@ self.assertIn((listener3.get('id'), listener3.get('protocol_port')), listener_id_ports) + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 80, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 81, + lb1_id) + self.set_lb_status(lb1_id) + self.create_listener(constants.PROTOCOL_HTTP, 82, + self.lb_id).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.LISTENERS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), @@ -670,13 +712,10 @@ 'protocol_port': 6666, 'connection_limit': 10, 'default_tls_container_ref': uuidutils.generate_uuid(), 'sni_container_refs': [sni1, sni2], - 'insert_headers': { - "X-Forwarded-Port": "true", - "X-Forwarded-For": "true"}, + 'insert_headers': {}, 'loadbalancer_id': self.lb_id} - expect_error_msg = ( - "Validation failure: %s protocol listener does not support TLS or " - "header insertion.") % constants.PROTOCOL_UDP + expect_error_msg = ("Validation failure: %s protocol listener does " + "not support TLS.") % constants.PROTOCOL_UDP res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) @@ -695,9 +734,10 @@ 'protocol': constants.PROTOCOL_HTTP, 'protocol_port': 80, 'loadbalancer_id': self.lb_id} - expect_error_msg = ("Validation failure: Listeners of type %s can " - "only have pools of " - "type UDP.") % constants.PROTOCOL_UDP + expect_error_msg = ("Validation failure: The pool protocol '%s' is " + "invalid while the listener protocol is '%s'.") % ( + constants.PROTOCOL_UDP, + lb_listener['protocol']) res = self.post(self.LISTENERS_PATH, self._build_body(lb_listener), status=400, expect_errors=True) self.assertEqual(expect_error_msg, res.json['faultstring']) @@ -1137,6 +1177,33 @@ "It must be a valid x509 PEM format certificate.", response['faultstring']) + def _test_negative_create_with_headers(self, protocol): + 
req_dict = {'name': 'listener1', 'default_pool_id': None, + 'description': 'desc1', + 'admin_state_up': False, + 'protocol': protocol, + 'protocol_port': 6666, 'connection_limit': 10, + 'insert_headers': { + "X-Forwarded-Port": "true", + "X-Forwarded-For": "true"}, + 'loadbalancer_id': self.lb_id} + res = self.post(self.LISTENERS_PATH, self._build_body(req_dict), + status=400) + self.assertIn(protocol, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_negative_create_HTTPS_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_HTTPS) + + def test_negative_create_PROXY_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_PROXY) + + def test_negative_create_TCP_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_TCP) + + def test_negative_create_UDP_with_headers(self): + self._test_negative_create_with_headers(constants.PROTOCOL_UDP) + @mock.patch('octavia.api.drivers.utils.call_provider') def test_update_with_bad_provider(self, mock_provider): api_listener = self.create_listener( @@ -1215,6 +1282,26 @@ api_listener['id']) return ori_listener, api_listener + def test_update_with_bad_tls_ref(self): + listener = self.create_listener(constants.PROTOCOL_TCP, + 443, self.lb_id) + tls_uuid = uuidutils.generate_uuid() + self.set_lb_status(self.lb_id) + self.listener_repo.update(db_api.get_session(), + listener['listener']['id'], + tls_certificate_id=tls_uuid, + protocol=constants.PROTOCOL_TERMINATED_HTTPS) + + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + update_data = {'name': 'listener2'} + body = self._build_body(update_data) + api_listener = self.put(listener_path, body).json.get(self.root_tag) + response = self.get(self.listener_path.format( + listener_id=listener['listener']['id'])) + api_listener = response.json.get(self.root_tag) + self.assertEqual('listener2', api_listener['name']) + def test_negative_update_udp_case(self): api_listener = self.create_listener(constants.PROTOCOL_UDP, 6666, self.lb_id).get(self.root_tag) @@ -2135,7 +2222,9 @@ lb_listener = {'protocol': 'HTTP', 'protocol_port': 80, 'loadbalancer_id': self.lb_id, - 'insert_headers': {'X-Forwarded-For': 'true'}} + 'insert_headers': {'X-Forwarded-For': 'true', + 'X-Forwarded-Port': 'true', + 'X-Forwarded-Proto': 'true'}} body = self._build_body(lb_listener) self.post(self.LISTENERS_PATH, body, status=201) @@ -2180,7 +2269,9 @@ constants.PROTOCOL_HTTP, 80, self.lb_id) self.set_lb_status(self.lb_id) new_listener = self._build_body( - {'insert_headers': {'X-Forwarded-For': 'true'}}) + {'insert_headers': {'X-Forwarded-For': 'true', + 'X-Forwarded-Port': 'true', + 'X-Forwarded-Proto': 'true'}}) listener_path = self.LISTENER_PATH.format( listener_id=listener['listener'].get('id')) update_listener = self.put( @@ -2231,6 +2322,35 @@ # the status. 
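These negative tests exercise the guard that only listener protocols in LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION (HTTP and TERMINATED_HTTPS) accept insert_headers. A hypothetical standalone check mirroring that rule, illustrative only:

    from octavia.common import constants

    def headers_allowed(listener_protocol):
        # insert_headers only makes sense where the amphora speaks HTTP.
        return (listener_protocol in
                constants.LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION)

    assert headers_allowed(constants.PROTOCOL_TERMINATED_HTTPS)
    assert not headers_allowed(constants.PROTOCOL_TCP)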
self.put(listener_path, new_listener, status=400).json + def _test_update_protocol_insert_headers_mismatch(self, protocol): + listener = self.create_listener( + protocol, 80, self.lb_id) + self.set_lb_status(self.lb_id) + new_listener = self._build_body( + {'insert_headers': {'X-Forwarded-Port': 'true'}}) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener'].get('id')) + update_listener = self.put( + listener_path, new_listener, status=400).json + self.assertIn(protocol, update_listener['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + def test_update_protocol_HTTPS_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_HTTPS) + + def test_update_protocol_PROXY_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_PROXY) + + def test_update_protocol_TCP_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_TCP) + + def test_update_protocol_UDP_insert_headers(self): + self._test_update_protocol_insert_headers_mismatch( + constants.PROTOCOL_UDP) + def _getStats(self, listener_id): res = self.get(self.LISTENER_PATH.format( listener_id=listener_id + "/stats")) @@ -2352,3 +2472,101 @@ self.set_lb_status(lb['id'], status=constants.DELETED) self.get(self.LISTENER_PATH.format( listener_id=li.get('id') + "/stats"), status=404) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_post(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + listener = {'protocol': listener_proto, + 'protocol_port': port, + 'loadbalancer_id': self.lb_id, + 'default_pool_id': pool.get('id')} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + listener.update( + {'sni_container_refs': [uuidutils.generate_uuid()]}) + body = self._build_body(listener) + self.post(self.LISTENERS_PATH, body, status=201) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in invalid_map: + for pool_proto in invalid_map[listener_proto]: + port = port + 1 + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + listener = {'protocol': listener_proto, + 'protocol_port': port, + 'loadbalancer_id': self.lb_id, + 'default_pool_id': pool.get('id')} + body = self._build_body(listener) + res = self.post(self.LISTENERS_PATH, body, + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_listener_pool_protocol_map_put(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + 
port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + new_listener = {'default_pool_id': pool.get('id')} + res = self.put( + self.LISTENER_PATH.format(listener_id=listener.get('id')), + self._build_body(new_listener), status=200) + self.set_object_status(self.lb_repo, self.lb_id) + + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 100 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + pool = self.create_pool( + self.lb_id, pool_proto, + constants.LB_ALGORITHM_ROUND_ROBIN).get('pool') + self.set_object_status(self.lb_repo, self.lb_id) + new_listener = {'default_pool_id': pool.get('id')} + res = self.put( + self.LISTENER_PATH.format(listener_id=listener.get('id')), + self._build_body(new_listener), status=400) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_load_balancer.py octavia-4.1.4/octavia/tests/functional/api/v2/test_load_balancer.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_load_balancer.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_load_balancer.py 2020-11-04 15:39:51.000000000 +0000 @@ -1133,6 +1133,41 @@ lb_id_names = [(lb.get('id'), lb.get('name')) for lb in lbs] self.assertIn((lb3.get('id'), lb3.get('name')), lb_id_names) + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', project_id=project_id) + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', project_id=project_id) + lb3 = self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', project_id=self.project_id) + lb3 = lb3.get(self.root_tag) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.LBS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) 
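The test_get_all_unscoped_token variants added across these files all reduce to the same recipe, condensed below into a reusable helper. The patch target string and the credential keys are taken verbatim from the tests above; the helper itself is a sketch, not part of the patch:

from unittest import mock

def simulate_unscoped_token(extra=None):
    # Make every policy check see keystone credentials with no project
    # scope; list endpoints must then return 403 instead of data.
    creds = {'project_id': None, 'user_id': None, 'is_admin': False,
             'roles': ['load-balancer_member']}
    creds.update(extra or {})
    return mock.patch(
        'oslo_context.context.RequestContext.to_policy_values',
        return_value=creds)

Inside any of these test classes this would be used as: with simulate_unscoped_token(): result = self.get(self.LBS_PATH, status=403).json, followed by the NOT_AUTHORIZED_BODY comparison seen above.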
+ self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_non_admin_global_observer(self): project_id = uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), @@ -1250,6 +1285,41 @@ lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc] self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc))) + def test_get_all_sorted_by_vip_ip_address(self): + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb1', + project_id=self.project_id, + vip_address='198.51.100.2') + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb2', + project_id=self.project_id, + vip_address='198.51.100.1') + self.create_load_balancer(uuidutils.generate_uuid(), + name='lb3', + project_id=self.project_id, + vip_address='198.51.100.3') + response = self.get(self.LBS_PATH, + params={'sort': 'vip_address:desc'}) + lbs_desc = response.json.get(self.root_tag_list) + response = self.get(self.LBS_PATH, + params={'sort': 'vip_address:asc'}) + lbs_asc = response.json.get(self.root_tag_list) + + self.assertEqual(3, len(lbs_desc)) + self.assertEqual(3, len(lbs_asc)) + + lb_id_names_desc = [(lb.get('id'), lb.get('name')) for lb in lbs_desc] + lb_id_names_asc = [(lb.get('id'), lb.get('name')) for lb in lbs_asc] + self.assertEqual(lb_id_names_asc, list(reversed(lb_id_names_desc))) + + self.assertEqual('198.51.100.1', lbs_asc[0][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.2', lbs_asc[1][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.3', lbs_asc[2][constants.VIP_ADDRESS]) + + self.assertEqual('198.51.100.3', lbs_desc[0][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.2', lbs_desc[1][constants.VIP_ADDRESS]) + self.assertEqual('198.51.100.1', lbs_desc[2][constants.VIP_ADDRESS]) + def test_get_all_limited(self): self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', @@ -3055,7 +3125,7 @@ expected_members=[expected_member], create_hm=create_hm, expected_hm=expected_hm, - protocol=constants.PROTOCOL_TCP) + protocol=constants.PROTOCOL_HTTP) create_sni_containers, expected_sni_containers = ( self._get_sni_container_bodies()) create_l7rules, expected_l7rules = self._get_l7rules_bodies() diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_member.py octavia-4.1.4/octavia/tests/functional/api/v2/test_member.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_member.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_member.py 2020-11-04 15:39:51.000000000 +0000 @@ -211,6 +211,45 @@ for m in [api_m_1, api_m_2]: self.assertIn(m, response) + def test_get_all_unscoped_token(self): + api_m_1 = self.create_member( + self.pool_id, '192.0.2.1', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + api_m_2 = self.create_member( + self.pool_id, '192.0.2.2', 80).get(self.root_tag) + self.set_lb_status(self.lb_id) + # Original objects didn't have the updated operating/provisioning + # status that exists in the DB. 
+ for m in [api_m_1, api_m_2]: + m['operating_status'] = constants.ONLINE + m['provisioning_status'] = constants.ACTIVE + m.pop('updated_at') + + self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', auth_strategy=constants.TESTING) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.members_path, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_not_authorized(self): api_m_1 = self.create_member( self.pool_id, '192.0.2.1', 80).get(self.root_tag) diff -Nru octavia-4.1.1/octavia/tests/functional/api/v2/test_pool.py octavia-4.1.4/octavia/tests/functional/api/v2/test_pool.py --- octavia-4.1.1/octavia/tests/functional/api/v2/test_pool.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/api/v2/test_pool.py 2020-11-04 15:39:51.000000000 +0000 @@ -23,6 +23,7 @@ from octavia.common import data_models from octavia.common import exceptions from octavia.db import api as db_api +from octavia.tests.common import constants as c_const from octavia.tests.functional.api.v2 import base from octavia.tests.unit.common.sample_configs import sample_certs @@ -260,6 +261,50 @@ self.assertIn((pool3.get('id'), pool3.get('protocol')), pool_id_protocols) + def test_get_all_unscoped_token(self): + project_id = uuidutils.generate_uuid() + lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', + project_id=project_id) + lb1_id = lb1.get('loadbalancer').get('id') + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + lb1_id, constants.PROTOCOL_HTTPS, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(lb1_id) + self.create_pool( + self.lb_id, constants.PROTOCOL_TCP, + constants.LB_ALGORITHM_ROUND_ROBIN).get(self.root_tag) + self.set_lb_status(self.lb_id) + + auth_strategy = self.conf.conf.api_settings.get('auth_strategy') + self.conf.config(group='api_settings', + auth_strategy=constants.KEYSTONE) + with mock.patch.object(octavia.common.context.Context, 'project_id', + None): + override_credentials = { + 'service_user_id': None, + 'user_domain_id': None, + 'is_admin_project': True, + 'service_project_domain_id': None, + 'service_project_id': None, + 'roles': ['load-balancer_member'], + 'user_id': None, + 'is_admin': False, + 'service_user_domain_id': None, + 'project_domain_id': None, + 'service_roles': [], + 'project_id': None} + with mock.patch( + "oslo_context.context.RequestContext.to_policy_values", + return_value=override_credentials): + result = self.get(self.POOLS_PATH, status=403).json + self.conf.config(group='api_settings', auth_strategy=auth_strategy) + self.assertEqual(self.NOT_AUTHORIZED_BODY, result) + def test_get_all_non_admin_global_observer(self): project_id = 
uuidutils.generate_uuid() lb1 = self.create_load_balancer(uuidutils.generate_uuid(), name='lb1', @@ -1443,6 +1488,34 @@ lb_id=self.lb_id, listener_id=self.listener_id, pool_id=response.get('id')) + def test_update_with_bad_tls_ref(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id).get(self.root_tag) + self.set_lb_status(lb_id=self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + api_pool['provisioning_status'] = constants.ACTIVE + api_pool['operating_status'] = constants.ONLINE + api_pool.pop('updated_at') + + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + response.pop('updated_at') + self.assertEqual(api_pool, response) + + tls_uuid = uuidutils.generate_uuid() + self.pool_repo.update(db_api.get_session(), + api_pool.get('id'), + tls_certificate_id=tls_uuid) + update_data = {'name': 'pool2'} + self.put(self.POOL_PATH.format(pool_id=api_pool.get('id')), + self._build_body(update_data)) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json.get(self.root_tag) + self.assertEqual('pool2', response.get('name')) + def test_bad_update(self): api_pool = self.create_pool( self.lb_id, @@ -2299,3 +2372,58 @@ self.set_lb_status(self.lb_id, status=constants.DELETED) self.delete(self.POOL_PATH.format(pool_id=api_pool.get('id')), status=404) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_valid_listener_pool_protocol(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + lb_pool = { + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + mock_cert_data.return_value = {'sni_certs': [cert]} + valid_map = constants.VALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in valid_map: + for pool_proto in valid_map[listener_proto]: + port = port + 1 + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + if listener['default_pool_id'] is None: + lb_pool['protocol'] = pool_proto + lb_pool['listener_id'] = listener.get('id') + self.post(self.POOLS_PATH, self._build_body(lb_pool), + status=201) + self.set_object_status(self.lb_repo, self.lb_id) + + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_invalid_listener_pool_protocol_map(self, mock_cert_data): + cert = data_models.TLSContainer(certificate='cert') + lb_pool = { + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + mock_cert_data.return_value = {'sni_certs': [cert]} + invalid_map = c_const.INVALID_LISTENER_POOL_PROTOCOL_MAP + port = 1 + for listener_proto in invalid_map: + opts = {} + if listener_proto == constants.PROTOCOL_TERMINATED_HTTPS: + opts['sni_container_refs'] = [uuidutils.generate_uuid()] + listener = self.create_listener( + listener_proto, port, self.lb_id, **opts).get('listener') + self.set_object_status(self.lb_repo, self.lb_id) + port = port + 1 + for pool_proto in invalid_map[listener_proto]: + expect_error_msg = ("Validation failure: The pool protocol " + "'%s' is invalid while the listener " + "protocol is '%s'.") % (pool_proto, + listener_proto) + if listener['default_pool_id'] is None: + lb_pool['protocol'] = pool_proto + 
lb_pool['listener_id'] = listener.get('id') + res = self.post(self.POOLS_PATH, self._build_body(lb_pool), + status=400, expect_errors=True) + self.assertEqual(expect_error_msg, res.json['faultstring']) + self.assert_correct_status(lb_id=self.lb_id) diff -Nru octavia-4.1.1/octavia/tests/functional/db/test_repositories.py octavia-4.1.4/octavia/tests/functional/db/test_repositories.py --- octavia-4.1.1/octavia/tests/functional/db/test_repositories.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/functional/db/test_repositories.py 2020-11-04 15:39:51.000000000 +0000 @@ -2233,15 +2233,16 @@ operating_status=constants.ONLINE, enabled=True, server_group_id=self.FAKE_UUID_1) - def create_listener(self, listener_id, port, default_pool_id=None): + def create_listener(self, listener_id, port, default_pool_id=None, + provisioning_status=constants.ACTIVE): listener = self.listener_repo.create( self.session, id=listener_id, project_id=self.FAKE_UUID_2, name="listener_name", description="listener_description", protocol=constants.PROTOCOL_HTTP, protocol_port=port, connection_limit=1, load_balancer_id=self.load_balancer.id, default_pool_id=default_pool_id, operating_status=constants.ONLINE, - provisioning_status=constants.ACTIVE, enabled=True, peer_port=1025, - tags=['test_tag']) + provisioning_status=provisioning_status, enabled=True, + peer_port=1025, tags=['test_tag']) return listener def create_amphora(self, amphora_id, loadbalancer_id): @@ -2468,6 +2469,40 @@ new_listener = self.listener_repo.get(self.session, id=listener.id) self.assertIsNone(new_listener.default_pool) + def test_prov_status_active_if_not_error_active(self): + listener = self.create_listener(self.FAKE_UUID_1, 80, + provisioning_status=constants.ACTIVE) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_error(self): + listener = self.create_listener(self.FAKE_UUID_1, 80, + provisioning_status=constants.ERROR) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ERROR, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_pending_update(self): + listener = self.create_listener( + self.FAKE_UUID_1, 80, provisioning_status=constants.PENDING_UPDATE) + self.listener_repo.prov_status_active_if_not_error(self.session, + listener.id) + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.ACTIVE, new_listener.provisioning_status) + + def test_prov_status_active_if_not_error_bogus_listener(self): + listener = self.create_listener( + self.FAKE_UUID_1, 80, provisioning_status=constants.PENDING_UPDATE) + # Should not raise an exception nor change any status + self.listener_repo.prov_status_active_if_not_error(self.session, + 'bogus_id') + new_listener = self.listener_repo.get(self.session, id=listener.id) + self.assertEqual(constants.PENDING_UPDATE, + new_listener.provisioning_status) + class ListenerStatisticsRepositoryTest(BaseRepositoryTest): @@ -3250,6 +3285,18 @@ self.assertEqual(cert_expired_amphora.cert_expiration, expiration) self.assertEqual(cert_expired_amphora.id, amphora2.id) + def test_get_cert_expired_amphora_deleted(self): + amphora = self.create_amphora(self.FAKE_UUID_3) + expiration = 
datetime.datetime.utcnow() + datetime.timedelta(seconds=1) + self.amphora_repo.update(self.session, amphora.id, + status=constants.DELETED, + cert_expiration=expiration) + + cert_expired_amphora = self.amphora_repo.get_cert_expiring_amphora( + self.session) + + self.assertIsNone(cert_expired_amphora) + def test_get_lb_for_health_update(self): amphora1 = self.create_amphora(self.FAKE_UUID_1) amphora2 = self.create_amphora(self.FAKE_UUID_3) diff -Nru octavia-4.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py octavia-4.1.4/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py --- octavia-4.1.1/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py 2020-11-04 15:39:51.000000000 +0000 @@ -160,6 +160,114 @@ get_network_interface_file(consts.NETNS_PRIMARY_INTERFACE)) self.assertEqual(ubuntu_real_nic_path, ubuntu_interface_file) + def _test_RH_get_static_routes_interface_file(self, version): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + + fake_agent_server_network_dir = "/path/to/interface" + fake_agent_server_network_file = "/path/to/interfaces_file" + + route = 'route6' if version == 6 else 'route' + rh_route_name = '{route}-{nic}'.format( + route=route, nic=consts.NETNS_PRIMARY_INTERFACE) + rh_fake_route_path = os.path.join(fake_agent_server_network_dir, + rh_route_name) + rh_real_route_path = os.path.join( + consts.RH_AMP_NET_DIR_TEMPLATE.format( + netns=consts.AMPHORA_NAMESPACE), + rh_route_name) + + # Check that agent_server_network_file is returned, when provided + conf.config(group="amphora_agent", + agent_server_network_file=fake_agent_server_network_file) + + rh_route_file = ( + self.rh_os_util. + get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(fake_agent_server_network_file, rh_route_file) + + # Check that agent_server_network_dir is used, when provided + conf.config(group="amphora_agent", agent_server_network_file=None) + conf.config(group="amphora_agent", + agent_server_network_dir=fake_agent_server_network_dir) + + rh_route_file = ( + self.rh_os_util. + get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(rh_fake_route_path, rh_route_file) + + # Check When neither agent_server_network_dir or + # agent_server_network_file where provided. + conf.config(group="amphora_agent", agent_server_network_file=None) + conf.config(group="amphora_agent", agent_server_network_dir=None) + + rh_route_file = ( + self.rh_os_util. 
+ get_static_routes_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(rh_real_route_path, rh_route_file) + + def test_RH_get_static_routes_interface_file(self): + self._test_RH_get_static_routes_interface_file(4) + + def test_RH_get_static_routes_interface_file_ipv6(self): + self._test_RH_get_static_routes_interface_file(6) + + def _test_RH_get_route_rules_interface_file(self, version): + conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF)) + + fake_agent_server_network_dir = "/path/to/interface" + fake_agent_server_network_file = "/path/to/interfaces_file" + + rule = 'rule6' if version == 6 else 'rule' + rh_route_rules_name = '{rule}-{nic}'.format( + rule=rule, nic=consts.NETNS_PRIMARY_INTERFACE) + rh_fake_route_rules_path = os.path.join(fake_agent_server_network_dir, + rh_route_rules_name) + rh_real_route_rules_path = os.path.join( + consts.RH_AMP_NET_DIR_TEMPLATE.format( + netns=consts.AMPHORA_NAMESPACE), + rh_route_rules_name) + + # Check that agent_server_network_file is returned, when provided + conf.config(group="amphora_agent", + agent_server_network_file=fake_agent_server_network_file) + + rh_route_rules_file = ( + self.rh_os_util. + get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(fake_agent_server_network_file, rh_route_rules_file) + + # Check that agent_server_network_dir is used, when provided + conf.config(group="amphora_agent", agent_server_network_file=None) + conf.config(group="amphora_agent", + agent_server_network_dir=fake_agent_server_network_dir) + + rh_route_rules_file = ( + self.rh_os_util. + get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(rh_fake_route_rules_path, rh_route_rules_file) + + # Check When neither agent_server_network_dir or + # agent_server_network_file where provided. + conf.config(group="amphora_agent", agent_server_network_file=None) + conf.config(group="amphora_agent", agent_server_network_dir=None) + + rh_route_rules_file = ( + self.rh_os_util. 
+ get_route_rules_interface_file(consts.NETNS_PRIMARY_INTERFACE, + version)) + self.assertEqual(rh_real_route_rules_path, rh_route_rules_file) + + def test_RH_get_route_rules_interface_file(self): + self._test_RH_get_route_rules_interface_file(4) + + def test_RH_get_route_rules_interface_file_ipv6(self): + self._test_RH_get_route_rules_interface_file(6) + def test_cmd_get_version_of_installed_package(self): package_name = 'foo' ubuntu_cmd = "dpkg-query -W -f=${{Version}} {name}".format( @@ -294,13 +402,33 @@ ) def test_write_port_interface_file(self): + FIXED_IP = u'192.0.2.2' + NEXTHOP = u'192.0.2.1' + DEST = u'198.51.100.0/24' + host_routes = [ + {'nexthop': NEXTHOP, + 'destination': str(ipaddress.ip_network(DEST))} + ] + FIXED_IP_IPV6 = u'2001:db8::2' + NEXTHOP_IPV6 = u'2001:db8::1' + DEST_IPV6 = u'2001:db8:51:100::/64' + host_routes_ipv6 = [ + {'nexthop': NEXTHOP_IPV6, + 'destination': str(ipaddress.ip_network(DEST_IPV6))} + ] + ip_addr = {'ip_address': FIXED_IP, 'host_routes': host_routes} + ipv6_addr = {'ip_address': FIXED_IP_IPV6, + 'host_routes': host_routes_ipv6} + netns_interface = 'eth1234' MTU = 1450 - fixed_ips = [] + fixed_ips = [ip_addr, ipv6_addr] path = 'mypath' mock_template = mock.MagicMock() mock_open = self.useFixture(test_utils.OpenFixture(path)).mock_open mock_gen_text = mock.MagicMock() + mock_local_scripts = mock.MagicMock() + mock_wr_fi = mock.MagicMock() with mock.patch('os.open'), mock.patch.object( os, 'fdopen', mock_open), mock.patch.object( @@ -315,6 +443,51 @@ mock_gen_text.assert_called_once_with( netns_interface, fixed_ips, MTU, mock_template) + mock_gen_text.reset_mock() + + with mock.patch('os.open'), mock.patch.object( + os, 'fdopen', mock_open), mock.patch.object( + osutils.BaseOS, '_generate_network_file_text', + mock_gen_text), mock.patch.object( + osutils.RH, '_write_ifup_ifdown_local_scripts_if_possible', + mock_local_scripts), mock.patch.object( + osutils.RH, 'write_static_routes_interface_file', mock_wr_fi): + self.rh_os_util.write_port_interface_file( + netns_interface=netns_interface, + fixed_ips=fixed_ips, + mtu=MTU, + interface_file_path=path, + template_port=mock_template) + + rh_route_name = 'route-{nic}'.format(nic=netns_interface) + rh_real_route_path = os.path.join( + consts.RH_AMP_NET_DIR_TEMPLATE.format( + netns=consts.AMPHORA_NAMESPACE), + rh_route_name) + rh_route_name_ipv6 = 'route6-{nic}'.format(nic=netns_interface) + rh_real_route_path_ipv6 = os.path.join( + consts.RH_AMP_NET_DIR_TEMPLATE.format( + netns=consts.AMPHORA_NAMESPACE), + rh_route_name_ipv6) + + exp_routes = [ + {'network': ipaddress.ip_network(DEST), 'gw': NEXTHOP} + ] + exp_routes_ipv6 = [ + {'network': ipaddress.ip_network(DEST_IPV6), 'gw': NEXTHOP_IPV6} + ] + expected_calls = [ + mock.call(rh_real_route_path, netns_interface, + exp_routes, mock.ANY, None, None, None), + mock.call(rh_real_route_path_ipv6, netns_interface, + exp_routes_ipv6, mock.ANY, None, None, None)] + + mock_gen_text.assert_called_once_with( + netns_interface, fixed_ips, MTU, mock_template) + self.assertEqual(2, mock_wr_fi.call_count) + mock_wr_fi.assert_has_calls(expected_calls) + mock_local_scripts.assert_called_once() + @mock.patch('shutil.copy2') @mock.patch('os.makedirs') @mock.patch('shutil.copytree') diff -Nru octavia-4.1.1/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py octavia-4.1.4/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py --- octavia-4.1.1/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py 2019-12-16 
10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py 2020-11-04 15:39:51.000000000 +0000 @@ -126,6 +126,11 @@ " # Member %(member_id4)s is disabled\n\n" "}") +CFG_FILE_TEMPLATE_DISABLED_LISTENER = ( + "# Listener %(listener_id)s is disabled \n\n" + "net_namespace %(ns_name)s\n\n" +) + IPVSADM_OUTPUT_TEMPLATE = ( "IP Virtual Server version 1.2.1 (size=4096)\n" "Prot LocalAddress:Port Scheduler Flags\n" @@ -162,6 +167,7 @@ self.member_id2_v6 = uuidutils.generate_uuid() self.member_id3_v6 = uuidutils.generate_uuid() self.member_id4_v6 = uuidutils.generate_uuid() + self.disabled_listener_id = uuidutils.generate_uuid() cfg_content_v4 = CFG_FILE_TEMPLATE_v4 % { 'listener_id': self.listener_id_v4, 'ns_name': constants.AMPHORA_NAMESPACE, @@ -180,10 +186,19 @@ 'member_id3': self.member_id3_v6, 'member_id4': self.member_id4_v6 } + cfg_content_disabled_listener = ( + CFG_FILE_TEMPLATE_DISABLED_LISTENER % { + 'listener_id': self.listener_id_v6, + 'ns_name': constants.AMPHORA_NAMESPACE, + } + ) self.useFixture(test_utils.OpenFixture( util.keepalived_lvs_cfg_path(self.listener_id_v4), cfg_content_v4)) self.useFixture(test_utils.OpenFixture( util.keepalived_lvs_cfg_path(self.listener_id_v6), cfg_content_v6)) + self.useFixture(test_utils.OpenFixture( + util.keepalived_lvs_cfg_path(self.disabled_listener_id), + cfg_content_disabled_listener)) @mock.patch('subprocess.check_output') def test_get_listener_realserver_mapping(self, mock_check_output): @@ -278,6 +293,11 @@ 'ipport': None}]} self.assertEqual((expected, constants.AMPHORA_NAMESPACE), res) + # disabled + res = lvs_query.get_udp_listener_resource_ipports_nsname( + self.disabled_listener_id) + self.assertEqual((None, constants.AMPHORA_NAMESPACE), res) + @mock.patch('subprocess.check_output') def test_get_udp_listener_pool_status(self, mock_check_output): # test with ipv4 and ipv6 @@ -456,3 +476,14 @@ mock_is_running.return_value = False res = lvs_query.get_udp_listeners_stats() self.assertEqual({}, res) + + @mock.patch('subprocess.check_output') + @mock.patch("octavia.amphorae.backends.agent.api_server.util." + "is_udp_listener_running", return_value=True) + @mock.patch("octavia.amphorae.backends.agent.api_server.util." 
+ "get_udp_listeners") + def test_get_udp_listeners_stats_disabled_listener( + self, mock_get_listener, mock_is_running, mock_check_output): + mock_get_listener.return_value = [self.disabled_listener_id] + res = lvs_query.get_udp_listeners_stats() + self.assertEqual({}, res) diff -Nru octavia-4.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py octavia-4.1.4/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py --- octavia-4.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_0_5.py 2020-11-04 15:39:51.000000000 +0000 @@ -27,6 +27,7 @@ from octavia.amphorae.drivers.haproxy import exceptions as exc from octavia.amphorae.drivers.haproxy import rest_api_driver as driver from octavia.common import constants +from octavia.common import data_models from octavia.db import models from octavia.network import data_models as network_models from octavia.tests.unit import base @@ -123,16 +124,6 @@ mock_amphora = mock.MagicMock() mock_amphora.id = 'mock_amphora_id' mock_amphora.api_version = API_VERSION - # mock_listener = mock.MagicMock() - # mock_listener.id = 'mock_listener_id' - # mock_listener.protocol = constants.PROTOCOL_HTTP - # mock_listener.connection_limit = constants.DEFAULT_CONNECTION_LIMIT - # mock_listener.tls_certificate_id = None - # mock_loadbalancer = mock.MagicMock() - # mock_loadbalancer.id = 'mock_lb_id' - # mock_loadbalancer.project_id = 'mock_lb_project' - # mock_loadbalancer.listeners = [mock_listener] - # mock_listener.load_balancer = mock_loadbalancer mock_secret.return_value = 'filename.pem' mock_load_cert.return_value = { 'tls_cert': self.sl.default_tls_container, 'sni_certs': [], @@ -168,6 +159,26 @@ self.driver.clients[API_VERSION].upload_config.assert_not_called() self.driver.clients[API_VERSION].reload_listener.assert_not_called() + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.ListenerRepository.update') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_amphora_listeners_bad_cert( + self, mock_load_cert, mock_list_update, mock_get_session): + mock_amphora = mock.MagicMock() + mock_amphora.id = 'mock_amphora_id' + mock_amphora.api_version = API_VERSION + + mock_get_session.return_value = 'fake_session' + mock_load_cert.side_effect = [Exception] + self.driver.update_amphora_listeners(self.lb, + mock_amphora, self.timeout_dict) + mock_list_update.assert_called_once_with( + 'fake_session', self.lb.listeners[0].id, + provisioning_status=constants.ERROR, + operating_status=constants.ERROR) + self.driver.jinja_split.build_config.assert_not_called() + self.driver.clients[API_VERSION].delete_listener.assert_not_called() + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
'HaproxyAmphoraLoadBalancerDriver._process_secret') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') @@ -277,14 +288,14 @@ self.driver.clients[API_VERSION].get_cert_md5sum.side_effect = [ exc.NotFound, 'Fake_MD5', 'aaaaa', 'aaaaaaaa'] self.driver._process_tls_certificates( - sample_listener, self.amp, sample_listener.load_balancer.id) + sample_listener, self.amp, sample_listener.id) gcm_calls = [ - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, self.sl.default_tls_container.id + '.pem', ignore=(404,)), - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, sconts[0].id + '.pem', ignore=(404,)), - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, sconts[1].id + '.pem', ignore=(404,)) ] self.driver.clients[API_VERSION].get_cert_md5sum.assert_has_calls( @@ -299,11 +310,11 @@ sample_certs.X509_CERT_KEY_3, sample_certs.X509_IMDS]) + b'\n' ucp_calls = [ - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, self.sl.default_tls_container.id + '.pem', fp1), - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, sconts[0].id + '.pem', fp2), - mock.call(self.amp, self.lb.id, + mock.call(self.amp, sample_listener.id, sconts[1].id + '.pem', fp3) ] self.driver.clients[API_VERSION].upload_cert_pem.assert_has_calls( @@ -364,13 +375,13 @@ 'sample_pool_id_2': ref_pool_cert_2} result = self.driver._process_listener_pool_certs( - sample_listener, self.amp, sample_listener.load_balancer.id) + sample_listener, self.amp, sample_listener.id) pool_certs_calls = [ mock.call(sample_listener, sample_listener.default_pool, - self.amp, sample_listener.load_balancer.id), + self.amp, sample_listener.id), mock.call(sample_listener, sample_listener.pools[1], - self.amp, sample_listener.load_balancer.id) + self.amp, sample_listener.id) ] mock_pool_cert.assert_has_calls(pool_certs_calls, any_order=True) @@ -390,13 +401,14 @@ conf.config(group="haproxy_amphora", base_cert_dir=fake_cert_dir) sample_listener = sample_configs_split.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True) - cert_data_mock = mock.MagicMock() - cert_data_mock.id = uuidutils.generate_uuid() - mock_load_certs.return_value = cert_data_mock + pool_cert = data_models.TLSContainer( + id=uuidutils.generate_uuid(), certificate='pool cert') + pool_data = {'tls_cert': pool_cert, 'sni_certs': []} + mock_load_certs.return_value = pool_data fake_pem = b'fake pem' mock_build_pem.return_value = fake_pem ref_md5 = hashlib.md5(fake_pem).hexdigest() # nosec - ref_name = '{id}.pem'.format(id=cert_data_mock.id) + ref_name = '{id}.pem'.format(id=pool_cert.id) ref_path = '{cert_dir}/{list_id}/{name}'.format( cert_dir=fake_cert_dir, list_id=sample_listener.id, name=ref_name) ref_ca_name = 'fake_ca.pem' @@ -413,19 +425,19 @@ result = self.driver._process_pool_certs( sample_listener, sample_listener.default_pool, self.amp, - sample_listener.load_balancer.id) + sample_listener.id) secret_calls = [ mock.call(sample_listener, sample_listener.default_pool.ca_tls_certificate_id, - self.amp, sample_listener.load_balancer.id), + self.amp, sample_listener.id), mock.call(sample_listener, sample_listener.default_pool.crl_container_id, - self.amp, sample_listener.load_balancer.id)] + self.amp, sample_listener.id)] - mock_build_pem.assert_called_once_with(cert_data_mock) + mock_build_pem.assert_called_once_with(pool_cert) mock_upload_cert.assert_called_once_with( - self.amp, sample_listener.load_balancer.id, pem=fake_pem, + 
self.amp, sample_listener.id, pem=fake_pem, md5=ref_md5, name=ref_name) mock_secret.assert_has_calls(secret_calls) self.assertEqual(ref_result, result) diff -Nru octavia-4.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py octavia-4.1.4/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py --- octavia-4.1.1/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py 2020-11-04 15:39:51.000000000 +0000 @@ -27,6 +27,7 @@ from octavia.amphorae.drivers.haproxy import exceptions as exc from octavia.amphorae.drivers.haproxy import rest_api_driver as driver from octavia.common import constants +from octavia.common import data_models from octavia.db import models from octavia.network import data_models as network_models from octavia.tests.unit import base @@ -123,16 +124,6 @@ mock_amphora = mock.MagicMock() mock_amphora.id = 'mock_amphora_id' mock_amphora.api_version = API_VERSION - # mock_listener = mock.MagicMock() - # mock_listener.id = 'mock_listener_id' - # mock_listener.protocol = constants.PROTOCOL_HTTP - # mock_listener.connection_limit = constants.DEFAULT_CONNECTION_LIMIT - # mock_listener.tls_certificate_id = None - # mock_loadbalancer = mock.MagicMock() - # mock_loadbalancer.id = 'mock_lb_id' - # mock_loadbalancer.project_id = 'mock_lb_project' - # mock_loadbalancer.listeners = [mock_listener] - # mock_listener.load_balancer = mock_loadbalancer mock_secret.return_value = 'filename.pem' mock_load_cert.return_value = { 'tls_cert': self.sl.default_tls_container, 'sni_certs': [], @@ -168,6 +159,27 @@ self.driver.clients[API_VERSION].upload_config.assert_not_called() self.driver.clients[API_VERSION].reload_listener.assert_not_called() + @mock.patch('octavia.db.api.get_session') + @mock.patch('octavia.db.repositories.ListenerRepository.update') + @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') + def test_update_amphora_listeners_bad_cert( + self, mock_load_cert, mock_list_update, mock_get_session): + mock_amphora = mock.MagicMock() + mock_amphora.id = 'mock_amphora_id' + mock_amphora.api_version = API_VERSION + + mock_get_session.return_value = 'fake_session' + mock_load_cert.side_effect = [Exception] + self.driver.update_amphora_listeners(self.lb, + mock_amphora, self.timeout_dict) + mock_list_update.assert_called_once_with( + 'fake_session', self.lb.listeners[0].id, + provisioning_status=constants.ERROR, + operating_status=constants.ERROR) + self.driver.jinja_combo.build_config.assert_not_called() + (self.driver.clients[API_VERSION].delete_listener. + assert_called_once_with)(mock_amphora, self.lb.id) + @mock.patch('octavia.amphorae.drivers.haproxy.rest_api_driver.' 
'HaproxyAmphoraLoadBalancerDriver._process_secret') @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data') @@ -390,22 +402,24 @@ conf.config(group="haproxy_amphora", base_cert_dir=fake_cert_dir) sample_listener = sample_configs_combined.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True) - cert_data_mock = mock.MagicMock() - cert_data_mock.id = uuidutils.generate_uuid() - mock_load_certs.return_value = cert_data_mock + pool_cert = data_models.TLSContainer( + id=uuidutils.generate_uuid(), certificate='pool cert') + pool_data = {'tls_cert': pool_cert, 'sni_certs': []} + mock_load_certs.return_value = pool_data fake_pem = b'fake pem' mock_build_pem.return_value = fake_pem ref_md5 = hashlib.md5(fake_pem).hexdigest() # nosec - ref_name = '{id}.pem'.format(id=cert_data_mock.id) - ref_path = '{cert_dir}/{list_id}/{name}'.format( - cert_dir=fake_cert_dir, list_id=sample_listener.id, name=ref_name) + ref_name = '{id}.pem'.format(id=pool_cert.id) + ref_path = '{cert_dir}/{lb_id}/{name}'.format( + cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, + name=ref_name) ref_ca_name = 'fake_ca.pem' - ref_ca_path = '{cert_dir}/{list_id}/{name}'.format( - cert_dir=fake_cert_dir, list_id=sample_listener.id, + ref_ca_path = '{cert_dir}/{lb_id}/{name}'.format( + cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, name=ref_ca_name) ref_crl_name = 'fake_crl.pem' - ref_crl_path = '{cert_dir}/{list_id}/{name}'.format( - cert_dir=fake_cert_dir, list_id=sample_listener.id, + ref_crl_path = '{cert_dir}/{lb_id}/{name}'.format( + cert_dir=fake_cert_dir, lb_id=sample_listener.load_balancer.id, name=ref_crl_name) ref_result = {'client_cert': ref_path, 'ca_cert': ref_ca_path, 'crl': ref_crl_path} @@ -423,7 +437,7 @@ sample_listener.default_pool.crl_container_id, self.amp, sample_listener.load_balancer.id)] - mock_build_pem.assert_called_once_with(cert_data_mock) + mock_build_pem.assert_called_once_with(pool_cert) mock_upload_cert.assert_called_once_with( self.amp, sample_listener.load_balancer.id, pem=fake_pem, md5=ref_md5, name=ref_name) diff -Nru octavia-4.1.1/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py octavia-4.1.4/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py --- octavia-4.1.1/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/amphorae/drivers/test_noop_amphoraloadbalancer_driver.py 2020-11-04 15:39:51.000000000 +0000 @@ -145,8 +145,10 @@ self.amphora.id, self.pem_file)]) def test_update_agent_config(self): - self.driver.update_agent_config(self.amphora, self.agent_config) + self.driver.update_amphora_agent_config(self.amphora, + self.agent_config) self.assertEqual( - (self.amphora.id, self.agent_config, 'update_agent_config'), + (self.amphora.id, self.agent_config, + 'update_amphora_agent_config'), self.driver.driver.amphoraconfig[( self.amphora.id, self.agent_config)]) diff -Nru octavia-4.1.1/octavia/tests/unit/certificates/generator/test_local.py octavia-4.1.4/octavia/tests/unit/certificates/generator/test_local.py --- octavia-4.1.1/octavia/tests/unit/certificates/generator/test_local.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/certificates/generator/test_local.py 2020-11-04 15:39:51.000000000 +0000 @@ -81,7 +81,7 @@ should_expire = (datetime.datetime.utcnow() + datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60)) diff = should_expire - cert.not_valid_after - 
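A small note on the two assertTrue-to-assertLess swaps that follow in test_local.py: the assertions are equivalent when they pass, but on failure assertLess reports both operands instead of a bare "False is not true", which is the whole point of the change. A self-contained illustration:

import unittest

class AssertLessDemo(unittest.TestCase):
    def test_comparison(self):
        a, b = 3, 5
        # Were this to fail, assertTrue(a < b) would report only
        # 'False is not true', while assertLess(a, b) reports the
        # operands, e.g. 'AssertionError: 5 not less than 3'.
        self.assertLess(a, b)

if __name__ == '__main__':
    unittest.main()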
self.assertTrue(diff < datetime.timedelta(seconds=10)) + self.assertLess(diff, datetime.timedelta(seconds=10)) # Make sure this is a version 3 X509. self.assertEqual('v3', cert.version.name) @@ -126,7 +126,7 @@ should_expire = (datetime.datetime.utcnow() + datetime.timedelta(seconds=2 * 365 * 24 * 60 * 60)) diff = should_expire - cert.not_valid_after - self.assertTrue(diff < datetime.timedelta(seconds=10)) + self.assertLess(diff, datetime.timedelta(seconds=10)) # Make sure this is a version 3 X509. self.assertEqual('v3', cert.version.name) diff -Nru octavia-4.1.1/octavia/tests/unit/cmd/test_house_keeping.py octavia-4.1.4/octavia/tests/unit/cmd/test_house_keeping.py --- octavia-4.1.1/octavia/tests/unit/cmd/test_house_keeping.py 2019-12-16 10:45:30.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/cmd/test_house_keeping.py 2020-11-04 15:39:51.000000000 +0000 @@ -109,7 +109,6 @@ mock_CertRotation.assert_called_once_with() self.assertEqual(1, cert_rotate_mock.rotate.call_count) - @mock.patch('time.sleep') @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event') @mock.patch('octavia.cmd.house_keeping.spare_amp_thread_event') @@ -118,7 +117,7 @@ def test_main(self, mock_service, mock_thread, spare_amp_thread_event_mock, db_cleanup_thread_event_mock, - cert_rotate_thread_event_mock, sleep_time): + cert_rotate_thread_event_mock): spare_amp_thread_mock = mock.MagicMock() db_cleanup_thread_mock = mock.MagicMock() @@ -132,9 +131,7 @@ db_cleanup_thread_mock.daemon.return_value = True cert_rotate_thread_mock.daemon.return_value = True - # mock the time.sleep() in the while loop - sleep_time.side_effect = [True, Exception('break')] - self.assertRaisesRegex(Exception, 'break', house_keeping.main) + house_keeping.main() spare_amp_thread_mock.start.assert_called_once_with() db_cleanup_thread_mock.start.assert_called_once_with() @@ -144,7 +141,6 @@ self.assertTrue(db_cleanup_thread_mock.daemon) self.assertTrue(cert_rotate_thread_mock.daemon) - @mock.patch('time.sleep') @mock.patch('octavia.cmd.house_keeping.cert_rotate_thread_event') @mock.patch('octavia.cmd.house_keeping.db_cleanup_thread_event') @mock.patch('octavia.cmd.house_keeping.spare_amp_thread_event') @@ -153,8 +149,7 @@ def test_main_keyboard_interrupt(self, mock_service, mock_thread, spare_amp_thread_event_mock, db_cleanup_thread_event_mock, - cert_rotate_thread_event_mock, - sleep_time): + cert_rotate_thread_event_mock): spare_amp_thread_mock = mock.MagicMock() db_cleanup_thread_mock = mock.MagicMock() cert_rotate_thread_mock = mock.MagicMock() @@ -167,8 +162,10 @@ db_cleanup_thread_mock.daemon.return_value = True cert_rotate_thread_mock.daemon.return_value = True - # mock the time.sleep() in the while loop - sleep_time.side_effect = [True, KeyboardInterrupt] + mock_join = mock.MagicMock() + mock_join.side_effect = [KeyboardInterrupt, None] + spare_amp_thread_mock.join = mock_join + house_keeping.main() spare_amp_thread_event_mock.set.assert_called_once_with() @@ -184,8 +181,7 @@ self.assertTrue(spare_amp_thread_mock.daemon) self.assertTrue(db_cleanup_thread_mock.daemon) self.assertTrue(cert_rotate_thread_mock.daemon) - - spare_amp_thread_mock.join.assert_called_once_with() + self.assertEqual(2, spare_amp_thread_mock.join.call_count) db_cleanup_thread_mock.join.assert_called_once_with() cert_rotate_thread_mock.join.assert_called_once_with() diff -Nru octavia-4.1.1/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 
octavia-4.1.4/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py --- octavia-4.1.1/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py 2020-11-04 15:39:51.000000000 +0000 @@ -76,12 +76,19 @@ "weight 13 check inter 30s fall 3 rise 2 cookie " "sample_member_id_2\n\n").format( maxconn=constants.HAPROXY_MAX_MAXCONN) + tls_tupe = {'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='imaCert1', private_key='imaPrivateKey1', + primary_cn='FakeCN'), + 'cont_id_ca': 'client_ca.pem', + 'cont_id_crl': 'SHA_ID.pem'} rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( proto='TERMINATED_HTTPS', tls=True, sni=True, client_ca_cert=True, client_crl_cert=True)], - client_ca_filename='client_ca.pem', client_crl='SHA_ID.pem') + tls_tupe) self.assertEqual( sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), @@ -124,7 +131,13 @@ rendered_obj = self.jinja_cfg.render_loadbalancer_obj( sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( - proto='TERMINATED_HTTPS', tls=True)]) + proto='TERMINATED_HTTPS', tls=True)], + tls_certs={'cont_id_1': + sample_configs_combined.sample_tls_container_tuple( + id='tls_container_id', + certificate='ImAalsdkfjCert', + private_key='ImAsdlfksdjPrivateKey', + primary_cn="FakeCN")}) self.assertEqual( sample_configs_combined.sample_base_expected_config( frontend=fe, backend=be), @@ -812,7 +825,7 @@ sample_configs_combined.sample_amphora_tuple(), [sample_configs_combined.sample_listener_tuple( pool_cert=True, tls_enabled=True)], - pool_tls_certs={ + tls_certs={ 'sample_pool_id_1': {'client_cert': cert_file_path, 'ca_cert': None, 'crl': None}}) @@ -852,7 +865,7 @@ [sample_configs_combined.sample_listener_tuple( pool_cert=True, pool_ca_cert=True, pool_crl=True, tls_enabled=True)], - pool_tls_certs={ + tls_certs={ 'sample_pool_id_1': {'client_cert': pool_client_cert, 'ca_cert': pool_ca_cert, @@ -909,13 +922,13 @@ def test_transform_listener(self): in_listener = sample_configs_combined.sample_listener_tuple() - ret = self.jinja_cfg._transform_listener(in_listener, {}, + ret = self.jinja_cfg._transform_listener(in_listener, None, {}, in_listener.load_balancer) self.assertEqual(sample_configs_combined.RET_LISTENER, ret) def test_transform_listener_with_l7(self): in_listener = sample_configs_combined.sample_listener_tuple(l7=True) - ret = self.jinja_cfg._transform_listener(in_listener, {}, + ret = self.jinja_cfg._transform_listener(in_listener, None, {}, in_listener.load_balancer) self.assertEqual(sample_configs_combined.RET_LISTENER_L7, ret) @@ -923,7 +936,7 @@ in_amphora = sample_configs_combined.sample_amphora_tuple() in_listener = sample_configs_combined.sample_listener_tuple() ret = self.jinja_cfg._transform_loadbalancer( - in_amphora, in_listener.load_balancer, [in_listener], {}) + in_amphora, in_listener.load_balancer, [in_listener], None, {}) self.assertEqual(sample_configs_combined.RET_LB, ret) def test_transform_amphora(self): @@ -935,7 +948,7 @@ in_amphora = sample_configs_combined.sample_amphora_tuple() in_listener = sample_configs_combined.sample_listener_tuple(l7=True) ret = self.jinja_cfg._transform_loadbalancer( - in_amphora, in_listener.load_balancer, [in_listener], {}) + 
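Reading aid for the signature churn in this file: the old per-purpose arguments (client_ca_filename, client_crl, pool_tls_certs) collapse into a single tls_certs mapping. Its shape below is reconstructed from the updated test calls in these hunks; whether container-id keys and pool-id keys ever coexist in one dict is an assumption made here for compactness, since each test above populates only the keys it needs:

# Keys and values are the sample ids and file names used by the updated
# tests; sample_tls_container stands in for a TLSContainer tuple.
sample_tls_container = object()
tls_certs = {
    'cont_id_1': sample_tls_container,    # listener default TLS container
    'cont_id_ca': 'client_ca.pem',        # client CA certificate file
    'cont_id_crl': 'SHA_ID.pem',          # client CRL file
    'sample_pool_id_1': {                 # per-pool backend TLS files
        'client_cert': '/fake/path.pem',
        'ca_cert': None,
        'crl': None,
    },
}
# Threaded through as, e.g.:
#   jinja_cfg.render_loadbalancer_obj(amphora, [listener], tls_certs)
#   jinja_cfg.build_config(amphora, [listener], tls_certs=None,
#                          haproxy_versions=("1", "8", "1"))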
in_amphora, in_listener.load_balancer, [in_listener], None, {}) self.assertEqual(sample_configs_combined.RET_LB_L7, ret) def test_transform_l7policy(self): @@ -1053,6 +1066,7 @@ rendered_obj = j_cfg.build_config( sample_amphora, [sample_proxy_listener], + tls_certs=None, haproxy_versions=("1", "8", "1")) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), @@ -1080,6 +1094,7 @@ rendered_obj = j_cfg.build_config( sample_amphora, [sample_proxy_listener], + tls_certs=None, haproxy_versions=("1", "5", "18")) self.assertEqual( sample_configs_combined.sample_base_expected_config(backend=be), @@ -1162,6 +1177,7 @@ rendered_obj = j_cfg.build_config( sample_configs_combined.sample_amphora_tuple(), [sample_listener], + tls_certs=None, haproxy_versions=("1", "5", "18")) self.assertEqual( sample_configs_combined.sample_base_expected_config( diff -Nru octavia-4.1.1/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py octavia-4.1.4/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py --- octavia-4.1.1/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/common/jinja/lvs/test_lvs_jinja_cfg.py 2020-11-04 15:39:51.000000000 +0000 @@ -46,7 +46,7 @@ " persistence_timeout 33\n" " persistence_granularity 255.255.0.0\n" " delay_loop 30\n" - " delay_before_retry 31\n" + " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" @@ -54,24 +54,20 @@ " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.99 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.98 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") @@ -94,7 +90,7 @@ " lb_kind NAT\n" " protocol UDP\n" " delay_loop 30\n" - " delay_before_retry 31\n" + " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" @@ -102,24 +98,20 @@ " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.99 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.98 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") @@ -141,7 +133,7 @@ " lb_kind NAT\n" " protocol UDP\n" " delay_loop 30\n" - " delay_before_retry 31\n" + " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" @@ -149,24 +141,20 @@ " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.99 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" " 
# Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 10.0.0.98 82\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") @@ -188,7 +176,7 @@ " lb_kind NAT\n" " protocol UDP\n" " delay_loop 30\n" - " delay_before_retry 31\n" + " delay_before_retry 30\n" " retry 3\n\n\n" " # Configuration for Pool sample_pool_id_1\n" " # Configuration for HealthMonitor sample_monitor_id_1\n" @@ -196,24 +184,20 @@ " real_server 10.0.0.99 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 192.168.1.1 9000\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" " # Configuration for Member sample_member_id_2\n" " real_server 10.0.0.98 82 {\n" " weight 13\n" " uthreshold 98\n" - " delay_before_retry 31\n" - " retry 3\n" " MISC_CHECK {\n" " misc_path \"/var/lib/octavia/lvs/check/" "udp_check.sh 192.168.1.1 9000\"\n" - " misc_timeout 30\n" + " misc_timeout 31\n" " }\n" " }\n\n" "}\n\n") @@ -313,3 +297,18 @@ ret = self.udp_jinja_cfg._transform_listener(in_listener) sample_configs_combined.RET_UDP_LISTENER.pop('connection_limit') self.assertEqual(sample_configs_combined.RET_UDP_LISTENER, ret) + + def test_render_template_disabled_udp_listener(self): + exp = ("# Configuration for Loadbalancer sample_loadbalancer_id_1\n" + "# Listener sample_listener_id_1 is disabled\n\n" + "net_namespace amphora-haproxy\n\n") + rendered_obj = self.udp_jinja_cfg.render_loadbalancer_obj( + sample_configs_combined.sample_listener_tuple( + enabled=False, + proto=constants.PROTOCOL_UDP, + persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP, + persistence_timeout=33, + persistence_granularity='255.255.0.0', + monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT, + connection_limit=98)) + self.assertEqual(exp, rendered_obj) diff -Nru octavia-4.1.1/octavia/tests/unit/common/sample_configs/sample_configs_combined.py octavia-4.1.4/octavia/tests/unit/common/sample_configs/sample_configs_combined.py --- octavia-4.1.1/octavia/tests/unit/common/sample_configs/sample_configs_combined.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/common/sample_configs/sample_configs_combined.py 2020-11-04 15:39:51.000000000 +0000 @@ -756,14 +756,16 @@ backup_member=False, disabled_member=False, has_http_reuse=True, pool_cert=False, pool_ca_cert=False, pool_crl=False, tls_enabled=False, - hm_host_http_check=False): + hm_host_http_check=False, + provisioning_status=constants.ACTIVE): proto = 'HTTP' if proto is None else proto monitor_proto = proto if monitor_proto is None else monitor_proto in_pool = collections.namedtuple( 'pool', 'id, protocol, lb_algorithm, members, health_monitor, ' 'session_persistence, enabled, operating_status, ' 'tls_certificate_id, ca_tls_certificate_id, ' - 'crl_container_id, tls_enabled, ' + constants.HTTP_REUSE) + 'crl_container_id, tls_enabled, ' + 'provisioning_status, ' + constants.HTTP_REUSE) if (proto == constants.PROTOCOL_UDP and persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP): kwargs = {'persistence_type': persistence_type, @@ -805,7 +807,7 @@ tls_certificate_id='pool_cont_1' if pool_cert else None, ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None, crl_container_id='pool_crl' if pool_crl else None, - tls_enabled=tls_enabled) + tls_enabled=tls_enabled, 
provisioning_status=constants.ACTIVE) def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE', diff -Nru octavia-4.1.1/octavia/tests/unit/common/test_utils.py octavia-4.1.4/octavia/tests/unit/common/test_utils.py --- octavia-4.1.1/octavia/tests/unit/common/test_utils.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/common/test_utils.py 2020-11-04 15:39:51.000000000 +0000 @@ -60,3 +60,18 @@ utils.ip_netmask_to_cidr('10.0.0.1', '255.255.240.0')) self.assertEqual('10.0.0.0/30', utils.ip_netmask_to_cidr( '10.0.0.1', '255.255.255.252')) + + def test_base64_sha1_string(self): + str_to_sha1 = [ + # no special cases str (no altchars) + ('77e7d60d-e137-4246-8a84-a25db33571cd', + 'iVZVQ5AKmk2Ae0uGLP0Ue4OseRM='), + # backward compat amphorae with - in str[1:] + ('9c6e5f27-a0da-4ceb-afe5-5a81230be42e', + 'NjrNgt3Egl-H5ScbYM5ChtUH3M8='), + # sha1 would start with -, now replaced with x + ('4db4a3cf-9fef-4057-b1fd-b2afbf7a8a0f', + 'xxqntK8jJ_gE3QEmh-D1-XgCW_E=') + ] + for str, sha1 in str_to_sha1: + self.assertEqual(sha1, utils.base64_sha1_string(str)) diff -Nru octavia-4.1.1/octavia/tests/unit/controller/healthmanager/health_drivers/test_update_db.py octavia-4.1.4/octavia/tests/unit/controller/healthmanager/health_drivers/test_update_db.py --- octavia-4.1.1/octavia/tests/unit/controller/healthmanager/health_drivers/test_update_db.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/controller/healthmanager/health_drivers/test_update_db.py 2020-11-04 15:39:51.000000000 +0000 @@ -118,9 +118,10 @@ def _make_fake_lb_health_dict(self, listener=True, pool=True, health_monitor=True, members=1, lb_prov_status=constants.ACTIVE, - listener_protocol=constants.PROTOCOL_TCP): + listener_protocol=constants.PROTOCOL_TCP, + enabled=True): - lb_ref = {'enabled': True, 'id': self.FAKE_UUID_1, + lb_ref = {'enabled': enabled, 'id': self.FAKE_UUID_1, constants.OPERATING_STATUS: 'bogus', constants.PROVISIONING_STATUS: lb_prov_status} @@ -196,6 +197,23 @@ self.hm.update_health(health, '192.0.2.1') self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) self.assertTrue(self.loadbalancer_repo.update.called) + self.assertTrue(self.amphora_health_repo.replace.called) + + def test_update_health_lb_disabled(self): + + health = { + "id": self.FAKE_UUID_1, + "ver": 1, + "listeners": {}, + "recv_time": time.time() + } + + lb_ref = self._make_fake_lb_health_dict( + listener=True, pool=True, enabled=False) + self.hm.amphora_repo.get_lb_for_health_update.return_value = lb_ref + self.hm.update_health(health, '192.0.2.1') + self.assertTrue(self.amphora_repo.get_lb_for_health_update.called) + self.assertTrue(self.loadbalancer_repo.update.called) self.assertTrue(self.amphora_health_repo.replace.called) def test_update_health_lb_pending_no_listener(self): diff -Nru octavia-4.1.1/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py octavia-4.1.4/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py --- octavia-4.1.1/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py 2020-11-04 15:39:51.000000000 +0000 @@ -882,42 +882,6 @@ 'TEST', id=AMP_ID) - def test_mark_listener_active_in_db(self, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_listener_active = 
database_tasks.MarkListenerActiveInDB() - mark_listener_active.execute(self.listener_mock) - - repo.ListenerRepository.update.assert_called_once_with( - 'TEST', - LISTENER_ID, - provisioning_status=constants.ACTIVE) - - # Test the revert - mock_listener_repo_update.reset_mock() - mark_listener_active.revert(self.listener_mock) - - repo.ListenerRepository.update.assert_called_once_with( - 'TEST', - id=LISTENER_ID, - provisioning_status=constants.ERROR) - - # Test the revert - mock_listener_repo_update.reset_mock() - mock_listener_repo_update.side_effect = Exception('fail') - mark_listener_active.revert(self.listener_mock) - - repo.ListenerRepository.update.assert_called_once_with( - 'TEST', - id=LISTENER_ID, - provisioning_status=constants.ERROR) - def test_mark_listener_deleted_in_db(self, mock_generate_uuid, mock_LOG, @@ -991,7 +955,10 @@ id=LISTENER_ID, provisioning_status=constants.ERROR) + @mock.patch('octavia.db.repositories.ListenerRepository.' + 'prov_status_active_if_not_error') def test_mark_lb_and_listeners_active_in_db(self, + mock_list_not_error, mock_generate_uuid, mock_LOG, mock_get_session, @@ -999,16 +966,12 @@ mock_listener_repo_update, mock_amphora_repo_update, mock_amphora_repo_delete): - mark_lb_and_listeners_active = (database_tasks. MarkLBAndListenersActiveInDB()) mark_lb_and_listeners_active.execute(self.loadbalancer_mock, [self.listener_mock]) - repo.ListenerRepository.update.assert_called_once_with( - 'TEST', - LISTENER_ID, - provisioning_status=constants.ACTIVE) + mock_list_not_error.assert_called_once_with('TEST', LISTENER_ID) repo.LoadBalancerRepository.update.assert_called_once_with( 'TEST', LB_ID, diff -Nru octavia-4.1.1/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py octavia-4.1.4/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py --- octavia-4.1.1/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py 2020-11-04 15:39:51.000000000 +0000 @@ -571,6 +571,22 @@ t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + mock_driver.reset_mock() + update_dict = {'description': 'fool', + 'vip': { + 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}} + tmp_lb.amphorae = AMPS_DATA + tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY + net.execute(tmp_lb, update_dict=update_dict) + mock_driver.apply_qos_on_port.assert_called_with( + t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) + self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) + + mock_driver.reset_mock() + update_dict = {} + net.execute(null_qos_lb, update_dict=update_dict) + self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) + # revert mock_driver.reset_mock() tmp_lb.amphorae = [AMPS_DATA[0]] diff -Nru octavia-4.1.1/octavia/tests/unit/controller/worker/test_controller_worker.py octavia-4.1.4/octavia/tests/unit/controller/worker/test_controller_worker.py --- octavia-4.1.1/octavia/tests/unit/controller/worker/test_controller_worker.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/controller/worker/test_controller_worker.py 2020-11-04 15:39:51.000000000 +0000 @@ -824,7 +824,8 @@ mock_amp_repo_get): _flow_mock.reset_mock() - + mock_member_repo_get.side_effect = [None, _member_mock, + _member_mock, _member_mock] cw = controller_worker.ControllerWorker() cw.batch_update_members([9], [11], [MEMBER_UPDATE_DICT]) @@ -837,6 +838,7 @@ constants.POOL: _pool_mock})) 
_flow_mock.run.assert_called_once_with() + self.assertEqual(4, mock_member_repo_get.call_count) @mock.patch('octavia.controller.worker.flows.' 'pool_flows.PoolFlows.get_create_pool_flow', diff -Nru octavia-4.1.1/octavia/tests/unit/network/drivers/neutron/test_base.py octavia-4.1.4/octavia/tests/unit/network/drivers/neutron/test_base.py --- octavia-4.1.1/octavia/tests/unit/network/drivers/neutron/test_base.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/network/drivers/neutron/test_base.py 2020-11-04 15:39:51.000000000 +0000 @@ -14,6 +14,8 @@ import mock from neutronclient.common import exceptions as neutron_client_exceptions +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture from octavia.common import clients from octavia.common import data_models @@ -198,6 +200,9 @@ port2['fixed_ips'][0]['ip_address']]) def test_get_network(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + show_network = self.driver.neutron_client.show_network show_network.return_value = {'network': { 'id': t_constants.MOCK_NETWORK_ID, @@ -208,7 +213,25 @@ self.assertEqual(1, len(network.subnets)) self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_network(self, neutron_client_mock): + show_network = neutron_client_mock.return_value.show_network + show_network.return_value = {'network': { + 'id': t_constants.MOCK_NETWORK_ID, + 'subnets': [t_constants.MOCK_SUBNET_ID]}} + + network = self.driver.get_network(t_constants.MOCK_NETWORK_ID, + context=mock.ANY) + + self.assertIsInstance(network, network_models.Network) + self.assertEqual(t_constants.MOCK_NETWORK_ID, network.id) + self.assertEqual(1, len(network.subnets)) + self.assertEqual(t_constants.MOCK_SUBNET_ID, network.subnets[0]) + def test_get_subnet(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + show_subnet = self.driver.neutron_client.show_subnet show_subnet.return_value = {'subnet': { 'id': t_constants.MOCK_SUBNET_ID, @@ -220,7 +243,26 @@ self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_subnet(self, neutron_client_mock): + show_subnet = neutron_client_mock.return_value.show_subnet + show_subnet.return_value = {'subnet': { + 'id': t_constants.MOCK_SUBNET_ID, + 'gateway_ip': t_constants.MOCK_IP_ADDRESS, + 'cidr': t_constants.MOCK_CIDR}} + + subnet = self.driver.get_subnet(t_constants.MOCK_SUBNET_ID, + context=mock.ANY) + + self.assertIsInstance(subnet, network_models.Subnet) + self.assertEqual(t_constants.MOCK_SUBNET_ID, subnet.id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, subnet.gateway_ip) + self.assertEqual(t_constants.MOCK_CIDR, subnet.cidr) + def test_get_port(self): + config = self.useFixture(oslo_fixture.Config(cfg.CONF)) + config.config(group="networking", allow_invisible_resource_usage=True) + show_port = self.driver.neutron_client.show_port show_port.return_value = {'port': { 'id': t_constants.MOCK_PORT_ID, @@ -234,6 +276,31 @@ self.assertIsInstance(port, network_models.Port) self.assertEqual(t_constants.MOCK_PORT_ID, port.id) self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) + self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) + 
self.assertEqual(1, len(port.fixed_ips)) + self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) + self.assertEqual(t_constants.MOCK_SUBNET_ID, + port.fixed_ips[0].subnet_id) + self.assertEqual(t_constants.MOCK_IP_ADDRESS, + port.fixed_ips[0].ip_address) + + @mock.patch("octavia.common.clients.NeutronAuth.get_user_neutron_client") + def test_get_user_port(self, neutron_client_mock): + show_port = neutron_client_mock.return_value.show_port + show_port.return_value = {'port': { + 'id': t_constants.MOCK_PORT_ID, + 'mac_address': t_constants.MOCK_MAC_ADDR, + 'network_id': t_constants.MOCK_NETWORK_ID, + 'fixed_ips': [{ + 'subnet_id': t_constants.MOCK_SUBNET_ID, + 'ip_address': t_constants.MOCK_IP_ADDRESS + }]}} + + port = self.driver.get_port(t_constants.MOCK_PORT_ID, context=mock.ANY) + + self.assertIsInstance(port, network_models.Port) + self.assertEqual(t_constants.MOCK_PORT_ID, port.id) + self.assertEqual(t_constants.MOCK_MAC_ADDR, port.mac_address) self.assertEqual(t_constants.MOCK_NETWORK_ID, port.network_id) self.assertEqual(1, len(port.fixed_ips)) self.assertIsInstance(port.fixed_ips[0], network_models.FixedIP) diff -Nru octavia-4.1.1/octavia/tests/unit/test_hacking.py octavia-4.1.4/octavia/tests/unit/test_hacking.py --- octavia-4.1.1/octavia/tests/unit/test_hacking.py 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/octavia/tests/unit/test_hacking.py 2020-11-04 15:39:51.000000000 +0000 @@ -71,7 +71,7 @@ return check_fns def test_factory(self): - self.assertTrue(len(self._get_factory_checks(checks.factory)) > 0) + self.assertGreater(len(self._get_factory_checks(checks.factory)), 0) def test_assert_true_instance(self): self.assertEqual(1, len(list(checks.assert_true_instance( diff -Nru octavia-4.1.1/octavia.egg-info/pbr.json octavia-4.1.4/octavia.egg-info/pbr.json --- octavia-4.1.1/octavia.egg-info/pbr.json 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/octavia.egg-info/pbr.json 2020-11-04 15:41:11.000000000 +0000 @@ -1 +1 @@ -{"git_version": "567a388b", "is_release": true} \ No newline at end of file +{"git_version": "7eb21660", "is_release": true} \ No newline at end of file diff -Nru octavia-4.1.1/octavia.egg-info/PKG-INFO octavia-4.1.4/octavia.egg-info/PKG-INFO --- octavia-4.1.1/octavia.egg-info/PKG-INFO 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/octavia.egg-info/PKG-INFO 2020-11-04 15:41:11.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: octavia -Version: 4.1.1 +Version: 4.1.4 Summary: OpenStack Octavia Scalable Load Balancer as a Service Home-page: https://docs.openstack.org/octavia/latest/ Author: OpenStack diff -Nru octavia-4.1.1/octavia.egg-info/requires.txt octavia-4.1.4/octavia.egg-info/requires.txt --- octavia-4.1.1/octavia.egg-info/requires.txt 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/octavia.egg-info/requires.txt 2020-11-04 15:41:11.000000000 +0000 @@ -1,18 +1,24 @@ +Babel!=2.4.0,>=2.3.4 +Flask!=0.11,>=0.10 +Jinja2>=2.10 +PyMySQL>=0.7.6 +SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 +WSME>=0.8.0 +WebOb>=1.7.1 alembic>=0.8.10 +castellan>=0.16.0 cotyledon>=1.3.0 -pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 -pbr!=2.1.0,>=2.0.0 -SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 -Babel!=2.4.0,>=2.3.4 +cryptography>=2.1 +debtcollector>=1.19.0 +diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 +distro>=1.2.0 futurist>=1.2.0 -requests>=2.14.2 -rfc3986>=0.3.1 +gunicorn>=19.0.0 +jsonschema>=2.6.0 keystoneauth1>=3.4.0 keystonemiddleware>=4.17.0 -python-neutronclient>=6.7.0 -WebOb>=1.7.1 -six>=1.10.0 -stevedore>=1.20.0 
+netifaces>=0.10.4 +octavia-lib>=1.1.1 oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.db>=4.27.0 @@ -25,27 +31,21 @@ oslo.serialization!=2.19.1,>=2.18.0 oslo.upgradecheck>=0.1.0 oslo.utils>=3.33.0 +pbr!=2.1.0,>=2.0.0 +pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 +pyOpenSSL>=17.1.0 pyasn1!=0.2.3,>=0.1.8 pyasn1-modules>=0.0.6 -PyMySQL>=0.7.6 python-barbicanclient>=4.5.2 python-glanceclient>=2.8.0 +python-neutronclient>=6.7.0 python-novaclient>=9.1.0 -pyOpenSSL>=17.1.0 -WSME>=0.8.0 -Jinja2>=2.10 +requests>=2.14.2 +rfc3986>=0.3.1 +six>=1.10.0 +stevedore>=1.20.0 taskflow>=2.16.0 -diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 -castellan>=0.16.0 tenacity>=4.9.0 -distro>=1.2.0 -jsonschema>=2.6.0 -debtcollector>=1.19.0 -octavia-lib>=1.1.1 -Flask!=0.11,>=0.10 -netifaces>=0.10.4 -cryptography>=2.1 -gunicorn>=19.0.0 [:(python_version<'3.3')] ipaddress>=1.0.17 diff -Nru octavia-4.1.1/octavia.egg-info/SOURCES.txt octavia-4.1.4/octavia.egg-info/SOURCES.txt --- octavia-4.1.1/octavia.egg-info/SOURCES.txt 2019-12-16 10:46:30.000000000 +0000 +++ octavia-4.1.4/octavia.egg-info/SOURCES.txt 2020-11-04 15:41:11.000000000 +0000 @@ -975,6 +975,7 @@ releasenotes/notes/Fix-API-update-null-None-1b400962017a3d56.yaml releasenotes/notes/Fix-HM-DB-Rollback-no-connection-2664c4f7823ecaec.yaml releasenotes/notes/Fix-allocate_and_associate-deadlock-3ff1464421c1d464.yaml +releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml releasenotes/notes/Fix-ifup-on-member-create-5b405d98eb036718.yaml releasenotes/notes/Fix-plug-vip-revert-abandoned-vrrp-port-efff14edce62ad75.yaml releasenotes/notes/IPv6-support-953ef81ed8555fce.yaml @@ -994,6 +995,7 @@ releasenotes/notes/add-id-column-to-healthmonitor-a331934ad2cede87.yaml releasenotes/notes/add-monitor-address-and-port-to-member-99fa2ee65e2b04b4.yaml releasenotes/notes/add-policy-json-support-38929bb1fb581a7a.yaml +releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml releasenotes/notes/add-quota-support-fe63a52b6b903789.yaml releasenotes/notes/add-rh-flavors-support-for-amphora-agent-cd3e9f9f519b9ff2.yaml releasenotes/notes/add-shared-pools-and-l7-ef9edf01bb9058e0.yaml @@ -1007,8 +1009,10 @@ releasenotes/notes/add_provider_driver_support-7523f130dd5025af.yaml releasenotes/notes/add_tag_support-4735534f4066b9af.yaml releasenotes/notes/admin-state-up-fix-4aa278eac67646ae.yaml +releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml releasenotes/notes/allow-operators-to-disable-ping-healthchecks-42fd8c3b88edaf35.yaml releasenotes/notes/allow-vip-on-mgmt-net-d6c65d4ccb2a8f2c.yaml +releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml releasenotes/notes/amp-az-1a0b4255c77fd1dc.yaml releasenotes/notes/api-create-project-id-4bb984b24d56de2e.yaml releasenotes/notes/auth-strategy-keystone-80b3780a18420b6c.yaml @@ -1033,21 +1037,33 @@ releasenotes/notes/fix-PING-health-monitor-bc38de57fa759ac0.yaml releasenotes/notes/fix-SNI-single-process-879ffce5eaa6c1c3.yaml releasenotes/notes/fix-active-standby-in-centos-4e47140e0e139de8.yaml +releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml +releasenotes/notes/fix-amphora-agent-branch-checkout-e2eeb19c6aa09535.yaml +releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml releasenotes/notes/fix-certs-ramfs-race-561f355d13fc6d14.yaml +releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml releasenotes/notes/fix-creating-fully-populated-load-balancer-ae57ffae5c017ac3.yaml 
+releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml releasenotes/notes/fix-driver-errors-81d33948288bf8cf.yaml releasenotes/notes/fix-error-messages-ec817a66249e6666.yaml releasenotes/notes/fix-eth1-ip-flush-7fadda4bdca387b5.yaml releasenotes/notes/fix-health-check-db-outage-279b0bc1d0039312.yaml +releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml releasenotes/notes/fix-housekeeping-db-performance-b0d0fcfcce696314.yaml releasenotes/notes/fix-lb-error-failover-2c17afaa20c0c97f.yaml +releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml releasenotes/notes/fix-loadbalancer-db-cleanup-61ee81a4fd597067.yaml releasenotes/notes/fix-multi-amp-down-failover-952618fb8d3d8ae6.yaml releasenotes/notes/fix-oslo-messaging-connection-leakage-aeb79474105ac116.yaml +releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml +releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml releasenotes/notes/fix-provider-capabilities-filtering-8bd12b2cf7b37a84.yaml +releasenotes/notes/fix-python2-attributeerror-strptime-89a7350c55ac8818.yaml +releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml releasenotes/notes/fix-route-table-b2ec0aa7b92d2abc.yaml releasenotes/notes/fix-spare-amphora-check-and-creation-3adf939b45610155.yaml releasenotes/notes/fix-support-for-monitoring-address-and-port-in-udp-members-ff83395544f228cf.yaml +releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml releasenotes/notes/fix-udp-members-status-ef3202849bfda29b.yaml releasenotes/notes/fix-udp-server-status-bug-db4d3e38bcdf0554.yaml releasenotes/notes/fix-unlimited-connection-limit-48079688de033c1a.yaml @@ -1058,6 +1074,7 @@ releasenotes/notes/fix-worker-graceful-shutdown-c44b6797637aa1b3.yaml releasenotes/notes/fix_active_standby_ipv6-0317d5cd9e5d50e5.yaml releasenotes/notes/fixed-spare-amphora-rotation-007ba406411a313d.yaml +releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml releasenotes/notes/glance-tags-for-amphora-images-28bd9df1ed4b9ca3.yaml releasenotes/notes/glance_image_owner-42c92a12f91a62a6.yaml releasenotes/notes/haproxy-single-process-b17a3af3a97accea.yaml @@ -1095,6 +1112,7 @@ releasenotes/notes/support-redirect-prefix-7f8b289aee04fe99.yaml releasenotes/notes/support-remote-debugging-fcb52df4a59c1467.yaml releasenotes/notes/support-wsgi-deployment-56013fef7172e982.yaml +releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml releasenotes/notes/use-dib-distribution-mirror-400d96c1a7df9862.yaml releasenotes/notes/validate-same-ip-protocol-in-udp-lb-2813b545131097ec.yaml releasenotes/notes/vip-port-project-id-bbb26b657b08365e.yaml diff -Nru octavia-4.1.1/PKG-INFO octavia-4.1.4/PKG-INFO --- octavia-4.1.1/PKG-INFO 2019-12-16 10:46:31.000000000 +0000 +++ octavia-4.1.4/PKG-INFO 2020-11-04 15:41:11.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: octavia -Version: 4.1.1 +Version: 4.1.4 Summary: OpenStack Octavia Scalable Load Balancer as a Service Home-page: https://docs.openstack.org/octavia/latest/ Author: OpenStack diff -Nru octavia-4.1.1/playbooks/legacy/grenade-devstack-octavia/run.yaml octavia-4.1.4/playbooks/legacy/grenade-devstack-octavia/run.yaml --- octavia-4.1.1/playbooks/legacy/grenade-devstack-octavia/run.yaml 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/playbooks/legacy/grenade-devstack-octavia/run.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -32,28 +32,19 @@ export PROJECTS="openstack/grenade $PROJECTS" export 
PROJECTS="openstack/octavia $PROJECTS" export PROJECTS="openstack/octavia-lib $PROJECTS" - export PROJECTS="openstack/octavia-tempest-plugin $PROJECTS" export PROJECTS="openstack/python-octaviaclient $PROJECTS" export DEVSTACK_PROJECT_FROM_GIT="python-octaviaclient $DEVSTACK_PROJECT_FROM_GIT" export GRENADE_PLUGINRC="enable_grenade_plugin octavia https://opendev.org/openstack/octavia" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"export TEMPEST_PLUGINS='/opt/stack/new/octavia-tempest-plugin'" export DEVSTACK_LOCAL_CONFIG+=$'\n'"[[post-config|/etc/octavia/octavia.conf]]"$'\n'"[DEFAULT]"$'\n'"debug = True" - export DEVSTACK_GATE_TEMPEST=1 + export DEVSTACK_GATE_TEMPEST=0 export DEVSTACK_GATE_GRENADE=pullup export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi - # Add configuration values for enabling security features in local.conf - function pre_test_hook { - if [ -f /opt/stack/old/octavia-tempest-plugin/tools/pre_test_hook.sh ] ; then - . /opt/stack/old/octavia-tempest-plugin/tools/pre_test_hook.sh - fi - } - export -f pre_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
diff -Nru octavia-4.1.1/playbooks/legacy/octavia-v1-dsvm-py3x-scenario/run.yaml octavia-4.1.4/playbooks/legacy/octavia-v1-dsvm-py3x-scenario/run.yaml --- octavia-4.1.1/playbooks/legacy/octavia-v1-dsvm-py3x-scenario/run.yaml 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/playbooks/legacy/octavia-v1-dsvm-py3x-scenario/run.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -40,7 +40,6 @@ fi export PROJECTS="openstack/barbican $PROJECTS" export PROJECTS="openstack/python-barbicanclient $PROJECTS" - export PROJECTS="openstack/diskimage-builder $PROJECTS" export PROJECTS="openstack/tripleo-image-elements $PROJECTS" export PROJECTS="openstack/neutron-lbaas $PROJECTS" export PROJECTS="openstack/octavia-lib $PROJECTS"
diff -Nru octavia-4.1.1/playbooks/legacy/octavia-v1-dsvm-scenario/run.yaml octavia-4.1.4/playbooks/legacy/octavia-v1-dsvm-scenario/run.yaml --- octavia-4.1.1/playbooks/legacy/octavia-v1-dsvm-scenario/run.yaml 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/playbooks/legacy/octavia-v1-dsvm-scenario/run.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -39,7 +39,6 @@ fi export PROJECTS="openstack/barbican $PROJECTS" export PROJECTS="openstack/python-barbicanclient $PROJECTS" - export PROJECTS="openstack/diskimage-builder $PROJECTS" export PROJECTS="openstack/tripleo-image-elements $PROJECTS" export PROJECTS="openstack/neutron-lbaas $PROJECTS" export PROJECTS="openstack/octavia-lib $PROJECTS"
diff -Nru octavia-4.1.1/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml octavia-4.1.4/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml --- octavia-4.1.1/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/add-protocol-validation-0f9129a045e372ce.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Add listener and pool protocol validation. A pool and a listener can't be + combined arbitrarily; their protocols must form a valid combination.
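For context, the validation the note above describes amounts to a compatibility lookup between a listener's protocol and the protocol of the pool attached through default_pool_id or an L7 policy's redirect_pool_id. A minimal sketch of such a check follows; it is not the code added by this change, the map shows only an illustrative subset of the valid pairs, and both names (VALID_LISTENER_POOL_COMBINATIONS, validate_protocol) are hypothetical.

# Illustrative sketch only -- not Octavia's actual validation code.
# The map below is a subset of the valid listener/pool protocol pairs;
# the api-ref "Protocol Combinations" section is authoritative.
VALID_LISTENER_POOL_COMBINATIONS = {
    'HTTP': {'HTTP', 'PROXY'},
    'UDP': {'UDP'},
}


def validate_protocol(listener_protocol, pool_protocol):
    """Reject listener/pool pairs whose protocols are incompatible."""
    valid_pools = VALID_LISTENER_POOL_COMBINATIONS.get(
        listener_protocol, set())
    if pool_protocol not in valid_pools:
        raise ValueError(
            'Pool protocol %s is not compatible with listener protocol '
            '%s' % (pool_protocol, listener_protocol))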
diff -Nru octavia-4.1.1/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml octavia-4.1.4/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml --- octavia-4.1.1/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/allow-invisible-subnets-e30b0b5fbd216294.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,16 @@ +--- +upgrade: + - | + After this upgrade, users will no longer be able to use network resources + they cannot see or "show" on load balancers. Operators can revert this + behavior by setting the "allow_invisible_resource_usage" configuration file + setting to ``True``. +security: + - | + Previously, if a user knew or could guess the UUID for a network resource, + they could use that UUID to create load balancer resources. + Now the user must have permission to see or "show" the resource before it + can be used with a load balancer. This will be the new default, but + operators can disable this behavior by setting the configuration file + setting "allow_invisible_resource_usage" to ``True``. This issue falls + under the "Class C1" security classification as the user would require a + valid UUID.
diff -Nru octavia-4.1.1/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml octavia-4.1.4/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml --- octavia-4.1.1/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/amp-agent-py3-cert-upload-binary-74e0ab35c5a85c68.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,11 @@ +--- +upgrade: + - | + Any amphorae running a py3 based image must be recycled or else they will + eventually fail on certificate rotation. +fixes: + - | + Resolved broken certificate upload on py3 based amphora images. On a + housekeeping certificate rotation event, the amphora would clear out its + server certificate and return a 500, putting the amphora in ERROR status + and breaking further communication. See upgrade notes.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml octavia-4.1.4/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml --- octavia-4.1.1/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-add-member-tls-enabled-pool-cc77bfa320aaf659.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where members added to TLS-enabled pools would go to ERROR + provisioning status.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-amphora-agent-branch-checkout-e2eeb19c6aa09535.yaml octavia-4.1.4/releasenotes/notes/fix-amphora-agent-branch-checkout-e2eeb19c6aa09535.yaml --- octavia-4.1.1/releasenotes/notes/fix-amphora-agent-branch-checkout-e2eeb19c6aa09535.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-amphora-agent-branch-checkout-e2eeb19c6aa09535.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where the amphora image create tool would check out the + master amphora-agent code and master upper constraints.
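The "allow_invisible_resource_usage" knob referenced in the allow-invisible-subnets note above lives in the [networking] group, as the neutron driver unit tests earlier in this diff show. Below is a sketch of how such a boolean option would be registered with oslo.config; the default and help text are assumptions inferred from the release note, not copied from the source.

from oslo_config import cfg

# Assumed registration sketch. The option name and the [networking]
# group come from the tests and release note above; default False
# matches the note's statement that the restriction is the new default.
networking_opts = [
    cfg.BoolOpt('allow_invisible_resource_usage',
                default=False,
                help='When True, users may use network resources they '
                     'cannot see or "show" on their load balancers.'),
]

cfg.CONF.register_opts(networking_opts, group='networking')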
diff -Nru octavia-4.1.1/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml octavia-4.1.4/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml --- octavia-4.1.1/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-api-sort-key-337f342d5cdce432.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where some columns could not be used for sort keys in + API list calls.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml octavia-4.1.4/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml --- octavia-4.1.1/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-client-auth-single-process-749af7791454ff03.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where load balancers with more than one TLS-enabled + listener, using client authentication and/or backend re-encryption, + may load incorrect certificates for the listener.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml octavia-4.1.4/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml --- octavia-4.1.1/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-disable-udp-listener-status-3d34a5596e62da1c.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix operational status for disabled UDP listeners. The operating status of + disabled UDP listeners is now OFFLINE instead of ONLINE; the behavior now + matches that of HTTP/HTTPS/TCP/... listeners.
diff -Nru octavia-4.1.1/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml octavia-4.1.4/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml --- octavia-4.1.1/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/Fix-healthmanager-not-update-amphora-health-when-LB-disabled-46a4fb295c6d0850.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where the Octavia Health Manager kept failing over the + amphorae of a disabled load balancer. +
diff -Nru octavia-4.1.1/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml octavia-4.1.4/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml --- octavia-4.1.1/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-house-keeping-shutdown-17b04417a2c4849f.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a bug that could interrupt resource creation when performing a graceful + shutdown of the housekeeping service and leave resources such as amphorae + in a BOOTING status.
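The health-manager fix above, exercised by test_update_health_lb_disabled earlier in this diff, hinges on one detail: the amphora health record must be refreshed on every heartbeat even when the load balancer is administratively disabled, otherwise the failover logic concludes the amphora is dead. A rough sketch of that control flow follows; all names here (process_listener_health, the repository call shapes) are illustrative stand-ins, not Octavia's exact code.

def update_health(self, health, srcaddr):
    # Sketch only. Look up the load balancer this heartbeat belongs to.
    lb_ref = self.amphora_repo.get_lb_for_health_update(
        self.session, health['id'])

    # Walk the per-listener statistics only for enabled load balancers;
    # a disabled load balancer legitimately reports no listeners.
    if lb_ref['enabled']:
        self.process_listener_health(lb_ref, health)  # illustrative helper

    # The crucial part of the fix: always record the heartbeat, even
    # for a disabled load balancer, so no failover is triggered.
    self.amphora_health_repo.replace(
        self.session, health['id'], last_update=health['recv_time'])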
diff -Nru octavia-4.1.1/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml octavia-4.1.4/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml --- octavia-4.1.1/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-lb-update-with-no-data-abefe7860b8fb4c7.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where load balancers would go into ERROR when + setting data not visible to providers (e.g. tags).
diff -Nru octavia-4.1.1/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml octavia-4.1.4/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml --- octavia-4.1.1/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-peer-name-prefix-hypen-e74a87e9a01b4f4c.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,10 @@ +--- +upgrade: + - | + An amphora image update is recommended to pick up a workaround to an + HAProxy issue where it would fail to reload on configuration change should + the local peer name start with "-x". +fixes: + - | + Work around an HAProxy issue where it would fail to reload on configuration + change should the local peer name start with "-x".
diff -Nru octavia-4.1.1/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml octavia-4.1.4/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml --- octavia-4.1.1/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-protocol-header-insertion-mismatch-e3aeb5f5fee0348b.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where the listener "insert_headers" parameter was accepted + for protocols that do not support header insertion.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-python2-attributeerror-strptime-89a7350c55ac8818.yaml octavia-4.1.4/releasenotes/notes/fix-python2-attributeerror-strptime-89a7350c55ac8818.yaml --- octavia-4.1.1/releasenotes/notes/fix-python2-attributeerror-strptime-89a7350c55ac8818.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-python2-attributeerror-strptime-89a7350c55ac8818.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix a potential AttributeError exception at init time in the housekeeping + service when using python2, caused by a thread-safety issue in the first + call to strptime.
diff -Nru octavia-4.1.1/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml octavia-4.1.4/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml --- octavia-4.1.1/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-redhat-amphora-images-interface-files-5ba1be40c65940d9.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,8 @@ +--- +fixes: + - | + Fixed code that configured the CentOS/Red Hat amphora images to use the + correct names for the network 'ifcfg' files for static routes and + routing rules. It was using the wrong name for the routes file, + and did not support IPv6 in either file. For more information, see + https://storyboard.openstack.org/#!/story/2007051
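The python2 strptime note above refers to a well-known CPython 2 pitfall: datetime.datetime.strptime() lazily imports the _strptime module on first use, and that import is not thread-safe, so the first call made from a freshly spawned thread can raise AttributeError. The usual workaround, sketched below, is to call it once at module import time, before any threads start; the particular format string is irrelevant.

import datetime

# Warm up the lazy _strptime import in the main thread so that later,
# concurrent first calls from worker threads cannot race the import.
datetime.datetime.strptime('2020-01-01', '%Y-%m-%d')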
diff -Nru octavia-4.1.1/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml octavia-4.1.4/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml --- octavia-4.1.1/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/fix-tls-enabled-pool-provisioning-e3adb987244a025a.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed an issue where TLS-enabled pools would fail to provision.
diff -Nru octavia-4.1.1/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml octavia-4.1.4/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml --- octavia-4.1.1/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/get-all-unscoped-token-61da95856bc662e0.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,10 @@ +--- +security: + - | + If you are using the admin_or_owner-policy.yaml policy override file + you should upgrade your API processes to include the unscoped token fix. + The default policies are not affected by this issue. +fixes: + - | + Fixes an issue when using the admin_or_owner-policy.yaml policy override + file and unscoped tokens.
diff -Nru octavia-4.1.1/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml octavia-4.1.4/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml --- octavia-4.1.1/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml 1970-01-01 00:00:00.000000000 +0000 +++ octavia-4.1.4/releasenotes/notes/udp-delay-based-on-correct-setting-6a60856de2927ccd.yaml 2020-11-04 15:39:50.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - | + The delay between checks on UDP healthmonitors was using the incorrect + config value ``timeout`` when it should have been ``delay``.
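The udp-delay note above matches the expected keepalived configurations rewritten near the top of this diff: delay_before_retry now renders from the monitor's delay value (30 in the samples) while MISC_CHECK's misc_timeout renders from timeout (31). A sketch of the corrected field mapping follows, using illustrative names rather than the actual LVS Jinja transform.

def lvs_health_check_settings(health_monitor):
    # Illustrative only. Before the fix the two settings were crossed:
    # the retry delay rendered from "timeout" and the MISC_CHECK
    # timeout rendered from "delay".
    return {
        'delay_loop': health_monitor.delay,
        'delay_before_retry': health_monitor.delay,  # previously timeout
        'misc_timeout': health_monitor.timeout,      # previously delay
    }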
diff -Nru octavia-4.1.1/test-requirements.txt octavia-4.1.4/test-requirements.txt --- octavia-4.1.1/test-requirements.txt 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/test-requirements.txt 2020-11-04 15:39:51.000000000 +0000 @@ -1,7 +1,7 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +hacking!=0.13.0,<0.14,>=0.12.0,<2 # Apache-2.0 requests-mock>=1.2.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 flake8-import-order==0.12 # LGPLv3
diff -Nru octavia-4.1.1/zuul.d/jobs.yaml octavia-4.1.4/zuul.d/jobs.yaml --- octavia-4.1.1/zuul.d/jobs.yaml 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/zuul.d/jobs.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -5,7 +5,6 @@ required-projects: - openstack/devstack-gate - openstack/barbican - - openstack/diskimage-builder - openstack/neutron-lbaas - openstack/octavia - openstack/octavia-lib @@ -40,7 +39,8 @@ run: playbooks/image-build/run.yaml post-run: playbooks/image-build/post.yaml required-projects: - - openstack/diskimage-builder + - name: openstack/diskimage-builder + override-checkout: 2.30.0 - openstack/octavia - job: name: publish-openstack-octavia-amphora-image-xenial @@ -90,5 +90,4 @@ - openstack/devstack-gate - openstack/octavia - openstack/octavia-lib - - openstack/octavia-tempest-plugin - openstack/python-octaviaclient
diff -Nru octavia-4.1.1/zuul.d/projects.yaml octavia-4.1.4/zuul.d/projects.yaml --- octavia-4.1.1/zuul.d/projects.yaml 2019-12-16 10:45:31.000000000 +0000 +++ octavia-4.1.4/zuul.d/projects.yaml 2020-11-04 15:39:51.000000000 +0000 @@ -44,18 +44,27 @@ - octavia-v1-dsvm-py3x-scenario - octavia-v2-dsvm-noop-api - octavia-v2-dsvm-noop-py2-api - - octavia-v2-dsvm-scenario - - octavia-v2-dsvm-py2-scenario - - octavia-v2-dsvm-scenario-ubuntu-xenial - - octavia-v2-dsvm-py2-scenario-centos-7: + - octavia-v2-dsvm-scenario: + vars: &scenario-vars + devstack_local_conf: + test-config: + "$TEMPEST_CONFIG": + loadbalancer-feature-enabled: + log_offload_enabled: False + - octavia-v2-dsvm-py2-scenario: + vars: *scenario-vars voting: false - - octavia-v2-dsvm-scenario-ubuntu-bionic: + - octavia-v2-dsvm-py2-scenario-centos-7: + vars: *scenario-vars voting: false - octavia-v2-dsvm-scenario-two-node: + vars: *scenario-vars voting: false - octavia-v2-dsvm-py2-scenario-two-node: + vars: *scenario-vars voting: false - octavia-grenade: + voting: false irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ @@ -104,20 +113,8 @@ - neutron-lbaasv2-dsvm-py3x-api - octavia-v2-dsvm-noop-api - octavia-v2-dsvm-noop-py2-api - - octavia-v2-dsvm-scenario - - octavia-v2-dsvm-py2-scenario - - octavia-v2-dsvm-scenario-ubuntu-xenial - - octavia-grenade: - irrelevant-files: - - ^.*\.rst$ - - ^api-ref/.*$ - - ^doc/.*$ - - ^octavia/tests/unit/.*$ - - ^releasenotes/.*$ - - ^setup.cfg$ - - ^tools/.*$ - - ^(test-|)requirements.txt$ - - ^tox.ini$ + - octavia-v2-dsvm-scenario: + vars: *scenario-vars periodic: jobs: - publish-openstack-octavia-amphora-image-xenial: