diff -Nru senlin-6.0.0/api-ref/source/clusters.inc senlin-7.0.0~b1~git2018111913.0ddbc114/api-ref/source/clusters.inc --- senlin-6.0.0/api-ref/source/clusters.inc 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/api-ref/source/clusters.inc 2018-11-19 18:48:08.000000000 +0000 @@ -437,6 +437,7 @@ - 401 - 403 - 404 + - 409 - 503 Request Parameters @@ -494,6 +495,7 @@ - 401 - 403 - 404 + - 409 - 503 Request Parameters diff -Nru senlin-6.0.0/AUTHORS senlin-7.0.0~b1~git2018111913.0ddbc114/AUTHORS --- senlin-6.0.0/AUTHORS 2018-08-30 14:19:54.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/AUTHORS 2018-11-19 18:48:10.000000000 +0000 @@ -57,8 +57,11 @@ Liuqing Jing Lu lei Luong Anh Tuan +Matt Riedemann Monty Taylor Nam Nguyen Hoai +Nguyen Hai +Nguyen Hai Truong Nguyen Hung Phuong Nguyen Phuong An Nguyen Van Trung @@ -75,6 +78,7 @@ Ronald Bradford Sampath Priyankara Sean Dague +Sean McGinnis Thierry Carrez Thomas Goirand TingtingYu @@ -91,6 +95,7 @@ Zhenguo Niu ZhiQiang Fan Zuul +akhiljain23 blkart bran caishan @@ -105,6 +110,7 @@ chohoor deepakmourya dixiaoli +gaobin gecong1973 gengchc2 ghanshyam @@ -145,6 +151,7 @@ wanghui wangqi wbluo0907 +whoami-rajat wlfightup xiaozhuangqing xu-haiwei diff -Nru senlin-6.0.0/ChangeLog senlin-7.0.0~b1~git2018111913.0ddbc114/ChangeLog --- senlin-6.0.0/ChangeLog 2018-08-30 14:19:54.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/ChangeLog 2018-11-19 18:48:10.000000000 +0000 @@ -1,6 +1,43 @@ CHANGES ======= +* add line to fix Unexpected indentation +* Add python 3.6 unit test job +* Replace usage of get\_legacy\_facade() with get\_engine() +* Minor error +* Add missing www\_authenticate\_uri to devstack +* Cleaned up devstack logging +* Add senlin-status upgrade check command framework +* Remove i18n.enable\_lazy() call from senlin.cmd +* Update min tox version to 2.0 +* Start using glance instead of compute to find images +* Make CLUSTER\_DELETE action ignore conflicts/locks +* Add sphinx extension to document policies/profiles +* Update scaling policy logic to be applied before action acceptance +* Stop using deprecated version of enable\_logging +* Increment versioning with pbr instruction +* Reject actions if target resource is locked +* Fix cooldown check +* Don't quote {posargs} in tox.ini +* Cleanup .zuul.yaml +* Support multiple detection types in health policy +* Removed extra underlines in tests +* Remove old tempest remanents from main repo +* Multiple detection modes spec +* Node poll URL improvements +* Propagate node creation errors +* Fail-fast on locked resource spec +* Bump openstacksdk version to 0.17.2 +* add python 3.6 unit test job +* switch documentation job to new PTI +* import zuul job settings from project-config +* Fix broken schema validation +* Delete receiver doc exist "senlin command line" comment +* Fix net check return error when net\_obj get value is "None" +* Fix test\_run\_workflow unittests +* Imported Translations from Zanata +* Update reno for stable/rocky + 6.0.0 ----- @@ -2778,8 +2815,8 @@ * Make event related DB queries project safe * Bump development version to 0.2.0 -0.1 ---- +0.1.0 +----- * Revise cluster\_add\_nodes action's behavior * Updated from global requirements diff -Nru senlin-6.0.0/debian/changelog senlin-7.0.0~b1~git2018111913.0ddbc114/debian/changelog --- senlin-6.0.0/debian/changelog 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/changelog 2018-11-19 18:48:35.000000000 +0000 @@ -1,3 +1,11 @@ +senlin 
(7.0.0~b1~git2018111913.0ddbc114-0ubuntu1) disco; urgency=medium + + * New upstream snapshot for OpenStack Stein. + * d/control: Align (Build-)Depends with upstream. + * d/control,rules,python*,d/tests/*: Drop Python 2 support. + + -- Corey Bryant Mon, 19 Nov 2018 13:48:35 -0500 + senlin (6.0.0-0ubuntu2) cosmic; urgency=medium * d/rules: Ensure /usr/etc is purged from python(3)-senlin packages diff -Nru senlin-6.0.0/debian/control senlin-7.0.0~b1~git2018111913.0ddbc114/debian/control --- senlin-6.0.0/debian/control 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/control 2018-11-19 18:48:35.000000000 +0000 @@ -8,7 +8,7 @@ Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 74~), + openstack-pkg-tools (>= 85ubuntu3~), po-debconf, python-all, python-pbr (>= 2.0.0), @@ -19,52 +19,6 @@ python3-sphinx (>= 1.6.2), Build-Depends-Indep: git, - python-babel (>= 2.3.4), - python-coverage (>= 4.0), - python-docker (>= 2.4.2), - python-eventlet (>= 0.18.2), - python-hacking (>= 0.12.0), - python-jsonpath-rw (>= 1.2.0), - python-jsonschema (>= 2.6.0), - python-keystoneauth1 (>= 3.4.0), - python-keystonemiddleware (>= 4.17.0), - python-microversion-parse (>= 0.2.1), - python-migrate (>= 0.11.0), - python-mock (>= 2.0.0), - python-openstackdocstheme (>= 1.18.1), - python-openstacksdk (>= 0.11.2), - python-os-testr (>= 1.0.0), - python-oslo.config (>= 1:5.2.0), - python-oslo.context (>= 1:2.19.2), - python-oslo.db (>= 4.27.0), - python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.36.0), - python-oslo.messaging (>= 5.29.0), - python-oslo.middleware (>= 3.31.0), - python-oslo.policy (>= 1.30.0), - python-oslo.serialization (>= 2.18.0), - python-oslo.service (>= 1.24.0), - python-oslo.utils (>= 3.33.0), - python-oslo.versionedobjects (>= 1.31.2), - python-oslotest (>= 1:3.2.0), - python-osprofiler (>= 1.4.0), - python-paramiko, - python-pastedeploy (>= 1.5.0), - python-pep8, - python-pymysql (>= 0.7.6), - python-reno (>= 2.5.0), - python-requests (>= 2.14.2), - python-routes (>= 2.3.1), - python-six (>= 1.10.0), - python-sqlalchemy (>= 1.0.10), - python-stestr, - python-stevedore (>= 1:1.20.0), - python-tempest (>= 1:17.1.0), - python-testscenarios (>= 0.4), - python-testtools (>= 2.2.0), - python-tz (>= 2013.6), - python-webob (>= 1:1.7.1), - python-yaml (>= 3.12), python3-babel (>= 2.3.4), python3-coverage (>= 4.0), python3-docker (>= 2.4.2), @@ -78,7 +32,8 @@ python3-migrate (>= 0.11.0), python3-mock (>= 2.0.0), python3-openstackdocstheme (>= 1.18.1), - python3-openstacksdk (>= 0.11.2), + python3-openstacksdk (>= 0.17.2), + python3-os-api-ref (>= 1.4.0), python3-os-testr (>= 1.0.0), python3-oslo.config (>= 1:5.2.0), python3-oslo.context (>= 1:2.19.2), @@ -90,6 +45,7 @@ python3-oslo.policy (>= 1.30.0), python3-oslo.serialization (>= 2.18.0), python3-oslo.service (>= 1.24.0), + python3-oslo.upgradecheck (>= 0.1.0), python3-oslo.utils (>= 3.33.0), python3-oslo.versionedobjects (>= 1.31.2), python3-oslotest (>= 1:3.2.0), @@ -103,7 +59,7 @@ python3-routes (>= 2.3.1), python3-six (>= 1.10.0), python3-sqlalchemy (>= 1.0.10), - python3-stestr, + python3-stestr (>= 2.0.0), python3-stevedore (>= 1:1.20.0), python3-tempest (>= 1:17.1.0), python3-testscenarios (>= 0.4), @@ -120,61 +76,6 @@ Homepage: https://github.com/stackforge/senlin Testsuite: autopkgtest-pkg-python -Package: python-senlin -Section: python -Architecture: all -Depends: - python-babel (>= 2.3.4), - python-docker (>= 2.4.2), - python-eventlet (>= 0.18.2), - python-jsonpath-rw (>= 1.2.0), - 
python-jsonschema (>= 2.6.0), - python-keystoneauth1 (>= 3.4.0), - python-keystonemiddleware (>= 4.17.0), - python-microversion-parse (>= 0.2.1), - python-migrate (>= 0.11.0), - python-openstacksdk (>= 0.11.2), - python-oslo.config (>= 1:5.2.0), - python-oslo.context (>= 1:2.19.2), - python-oslo.db (>= 4.27.0), - python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.36.0), - python-oslo.messaging (>= 5.29.0), - python-oslo.middleware (>= 3.31.0), - python-oslo.policy (>= 1.30.0), - python-oslo.serialization (>= 2.18.0), - python-oslo.service (>= 1.24.0), - python-oslo.utils (>= 3.33.0), - python-oslo.versionedobjects (>= 1.31.2), - python-osprofiler (>= 1.4.0), - python-pastedeploy (>= 1.5.0), - python-pbr (>= 2.0.0), - python-requests (>= 2.14.2), - python-routes (>= 2.3.1), - python-six (>= 1.10.0), - python-sqlalchemy (>= 1.0.10), - python-stevedore (>= 1:1.20.0), - python-tenacity (>= 4.9.0), - python-tz (>= 2013.6), - python-webob (>= 1:1.7.1), - python-yaml (>= 3.12), - ${misc:Depends}, - ${python:Depends}, -Description: clustering service for OpenStack clouds - Python 2.7 - Senlin is a clustering service for OpenStack clouds. It creates and operates - clusters of homogeneous objects exposed by other OpenStack services. The goal - is to make the orchestration of collections of similar objects easier. - . - Senlin provides RESTful APIs to users so that they can associate various - policies to a cluster. Sample policies include placement policy, load - balancing policy, health policy, scaling policy, update policy and so on. - . - Senlin is designed to be capable of managing different types of objects. An - object's lifecycle is managed using profile type implementations, which are - themselves plugins. - . - This package provides the Python 2.7 library. - Package: python3-senlin Section: python Architecture: all @@ -188,7 +89,7 @@ python3-keystonemiddleware (>= 4.17.0), python3-microversion-parse (>= 0.2.1), python3-migrate (>= 0.11.0), - python3-openstacksdk (>= 0.11.2), + python3-openstacksdk (>= 0.17.2), python3-oslo.config (>= 1:5.2.0), python3-oslo.context (>= 1:2.19.2), python3-oslo.db (>= 4.27.0), @@ -199,6 +100,7 @@ python3-oslo.policy (>= 1.30.0), python3-oslo.serialization (>= 2.18.0), python3-oslo.service (>= 1.24.0), + python3-oslo.upgradecheck (>= 0.1.0), python3-oslo.utils (>= 3.33.0), python3-oslo.versionedobjects (>= 1.31.2), python3-osprofiler (>= 1.4.0), @@ -215,7 +117,7 @@ python3-yaml (>= 3.12), ${misc:Depends}, ${python3:Depends}, -Description: clustering service for OpenStack clouds - Python 3.x +Description: clustering service for OpenStack clouds - Python 3 Senlin is a clustering service for OpenStack clouds. It creates and operates clusters of homogeneous objects exposed by other OpenStack services. The goal is to make the orchestration of collections of similar objects easier. @@ -228,7 +130,7 @@ object's lifecycle is managed using profile type implementations, which are themselves plugins. . - This package provides the Python 3.x library. + This package provides the Python 3 library. 
Package: senlin-api Architecture: all @@ -258,7 +160,7 @@ adduser, dbconfig-common, debconf, - python-senlin (= ${binary:Version}) | python3-senlin (= ${binary:Version}), + python3-senlin (= ${binary:Version}), sqlite3, ${misc:Depends}, Description: clustering service for OpenStack clouds - common files diff -Nru senlin-6.0.0/debian/python3-senlin.postinst senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.postinst --- senlin-6.0.0/debian/python3-senlin.postinst 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.postinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "configure" ] ; then - update-alternatives --install /usr/bin/senlin-api senlin-api /usr/bin/python3-senlin-api 200 - update-alternatives --install /usr/bin/senlin-engine senlin-engine /usr/bin/python3-senlin-engine 200 - update-alternatives --install /usr/bin/senlin-manage senlin-manage /usr/bin/python3-senlin-manage 200 - update-alternatives --install /usr/bin/senlin-wsgi-api senlin-wsgi-api /usr/bin/python3-senlin-wsgi-api 200 -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/python3-senlin.postrm senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.postrm --- senlin-6.0.0/debian/python3-senlin.postrm 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.postrm 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "remove" ] || [ "$1" = "disappear" ] ; then - update-alternatives --remove senlin-api /usr/bin/python3-senlin-api - update-alternatives --remove senlin-engine /usr/bin/python3-senlin-engine - update-alternatives --remove senlin-manage /usr/bin/python3-senlin-manage - update-alternatives --remove senlin-wsgi-api /usr/bin/python3-senlin-wsgi-api -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/python3-senlin.prerm senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.prerm --- senlin-6.0.0/debian/python3-senlin.prerm 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python3-senlin.prerm 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "remove" ] ; then - update-alternatives --remove senlin-api /usr/bin/python3-senlin-api - update-alternatives --remove senlin-engine /usr/bin/python3-senlin-engine - update-alternatives --remove senlin-manage /usr/bin/python3-senlin-manage - update-alternatives --remove senlin-wsgi-api /usr/bin/python3-senlin-wsgi-api -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/python-senlin.postinst senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.postinst --- senlin-6.0.0/debian/python-senlin.postinst 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.postinst 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "configure" ] ; then - update-alternatives --install /usr/bin/senlin-api senlin-api /usr/bin/python2-senlin-api 300 - update-alternatives --install /usr/bin/senlin-engine senlin-engine /usr/bin/python2-senlin-engine 300 - update-alternatives --install /usr/bin/senlin-manage senlin-manage /usr/bin/python2-senlin-manage 300 - update-alternatives --install /usr/bin/senlin-wsgi-api senlin-wsgi-api /usr/bin/python2-senlin-wsgi-api 300 -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/python-senlin.postrm senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.postrm --- 
senlin-6.0.0/debian/python-senlin.postrm 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.postrm 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "remove" ] || [ "$1" = "disappear" ] ; then - update-alternatives --remove senlin-api /usr/bin/python2-senlin-api - update-alternatives --remove senlin-engine /usr/bin/python2-senlin-engine - update-alternatives --remove senlin-manage /usr/bin/python2-senlin-manage - update-alternatives --remove senlin-wsgi-api /usr/bin/python2-senlin-wsgi-api -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/python-senlin.prerm senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.prerm --- senlin-6.0.0/debian/python-senlin.prerm 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/python-senlin.prerm 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "remove" ] ; then - update-alternatives --remove senlin-api /usr/bin/python2-senlin-api - update-alternatives --remove senlin-engine /usr/bin/python2-senlin-engine - update-alternatives --remove senlin-manage /usr/bin/python2-senlin-manage - update-alternatives --remove senlin-wsgi-api /usr/bin/python2-senlin-wsgi-api -fi - -#DEBHELPER# - -exit 0 diff -Nru senlin-6.0.0/debian/rules senlin-7.0.0~b1~git2018111913.0ddbc114/debian/rules --- senlin-6.0.0/debian/rules 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/rules 2018-11-19 18:48:35.000000000 +0000 @@ -7,10 +7,9 @@ SPHINXOPTS := -D html_last_updated_fmt="$(BUILD_DATE)" %: - dh $@ --buildsystem=python_distutils --with python2,python3,sphinxdoc,systemd + dh $@ --buildsystem=python_distutils --with python3,sphinxdoc,systemd override_dh_auto_clean: - python2 setup.py clean python3 setup.py clean override_dh_clean: @@ -44,13 +43,12 @@ echo "Do nothing..." 
override_dh_auto_install: - rm -rf $(CURDIR)/debian/python-senlin rm -rf $(CURDIR)/debian/python3-senlin - pkgos-dh_auto_install + pkgos-dh_auto_install --no-py2 rm -rf $(CURDIR)/debian/python*-senlin/usr/etc/* ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) - pkgos-dh_auto_test 'senlin\.tests\.unit\.(?!(.*test_common_context.TestRequestContext.test_request_context_from_dict.*|.*test_common_context.TestRequestContext.test_request_context_init.*|.*test_common_context.TestRequestContext.test_request_context_update.*))' + pkgos-dh_auto_test --no-py2 'senlin\.tests\.unit\.(?!(.*test_common_context.TestRequestContext.test_request_context_from_dict.*|.*test_common_context.TestRequestContext.test_request_context_init.*|.*test_common_context.TestRequestContext.test_request_context_update.*))' endif cp -auxf senlin/db/sqlalchemy/migrate_repo $(CURDIR)/debian/python3-senlin/usr/lib/python3/dist-packages/senlin/db/sqlalchemy mkdir -p $(CURDIR)/debian/senlin-common/usr/share/senlin-common diff -Nru senlin-6.0.0/debian/tests/control senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/control --- senlin-6.0.0/debian/tests/control 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/control 2018-11-19 18:48:35.000000000 +0000 @@ -1,7 +1,3 @@ -Tests: senlin-daemons, senlin-shebangs-py3 -Depends: python3-senlin, senlin-api, senlin-engine -Restrictions: needs-root - -Tests: senlin-daemons, senlin-shebangs-py2 +Tests: senlin-daemons Depends: senlin-api, senlin-engine Restrictions: needs-root diff -Nru senlin-6.0.0/debian/tests/senlin-shebangs-py2 senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/senlin-shebangs-py2 --- senlin-6.0.0/debian/tests/senlin-shebangs-py2 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/senlin-shebangs-py2 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -#!/bin/bash -#--------------------- -# Testing /usr/bin/senlin-* shebangs -#--------------------- -set -e - -BINARIES=('senlin-api' 'senlin-engine' 'senlin-manage' 'senlin-wsgi-api') - -ret=0 - -for binary in "${BINARIES[@]}"; do - if ! `dirname $0`/test-shebang.py $binary python2.7; then - ret=1 - fi -done - -exit $ret diff -Nru senlin-6.0.0/debian/tests/senlin-shebangs-py3 senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/senlin-shebangs-py3 --- senlin-6.0.0/debian/tests/senlin-shebangs-py3 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/senlin-shebangs-py3 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -#!/bin/bash -#--------------------- -# Testing /usr/bin/senlin-* shebangs -#--------------------- -set -e - -BINARIES=('senlin-api' 'senlin-engine' 'senlin-manage' 'senlin-wsgi-api') - -ret=0 - -for binary in "${BINARIES[@]}"; do - if ! 
`dirname $0`/test-shebang.py $binary python3; then - ret=1 - fi -done - -exit $ret diff -Nru senlin-6.0.0/debian/tests/test-shebang.py senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/test-shebang.py --- senlin-6.0.0/debian/tests/test-shebang.py 2018-09-06 20:17:43.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/debian/tests/test-shebang.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -#!/usr/bin/env python3 -""" -Test Python shebang in /usr/bin/ binary -""" -import sys - -ret = 0 -bin_path = "/usr/bin/{}".format(sys.argv[1]) -shebang = "#!/usr/bin/{}".format(sys.argv[2]) - -with open(bin_path) as f: - first_line = f.readline().rstrip().replace(" ", "") - if first_line != shebang: - print("ERROR: shebang '{}' not found in {}".format(shebang, bin_path)) - ret = 1 - else: - print("OK") - -sys.exit(ret) diff -Nru senlin-6.0.0/devstack/lib/senlin senlin-7.0.0~b1~git2018111913.0ddbc114/devstack/lib/senlin --- senlin-6.0.0/devstack/lib/senlin 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/devstack/lib/senlin 2018-11-19 18:48:08.000000000 +0000 @@ -97,8 +97,11 @@ iniset $SENLIN_CONF DEFAULT auth_encryption_key $(generate_hex_string 16) iniset $SENLIN_CONF DEFAULT default_region_name "$REGION_NAME" - iniset $SENLIN_CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$SENLIN_USE_MOD_WSGI" == "False" ]; then + if [ "$USE_SYSTEMD" != "False" ]; then + setup_systemd_logging $SENLIN_CONF + fi + + if [ "$LOG_COLOR" == "True" ] && [ "$USE_SYSTEMD" == "False" ] && [ "$SENLIN_USE_MOD_WSGI" == "False" ]; then # Add color to logging output setup_colorized_logging $SENLIN_CONF DEFAULT fi @@ -116,6 +119,7 @@ #configure_auth_token_middleware $SENLIN_CONF senlin $SENLIN_AUTH_CACHE_DIR iniset $SENLIN_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE iniset $SENLIN_CONF keystone_authtoken auth_url $KEYSTONE_AUTH_URI + iniset $SENLIN_CONF keystone_authtoken www_authenticate_uri $KEYSTONE_SERVICE_URI_V3 iniset $SENLIN_CONF keystone_authtoken username senlin iniset $SENLIN_CONF keystone_authtoken password $SERVICE_PASSWORD iniset $SENLIN_CONF keystone_authtoken project_name $SERVICE_TENANT_NAME diff -Nru senlin-6.0.0/doc/source/configuration/config.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/configuration/config.rst --- senlin-6.0.0/doc/source/configuration/config.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/configuration/config.rst 2018-11-19 18:48:08.000000000 +0000 @@ -16,7 +16,7 @@ Configuration Options ===================== -senlin uses `oslo.config` to define and manage configuration options to +Senlin uses `oslo.config` to define and manage configuration options to allow the deployer to control many aspects of the service API and the service engine. diff -Nru senlin-6.0.0/doc/source/conf.py senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/conf.py --- senlin-6.0.0/doc/source/conf.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/conf.py 2018-11-19 18:48:08.000000000 +0000 @@ -18,6 +18,13 @@ from senlin.version import version_info as senlin_version sys.path.insert(0, os.path.abspath('../..')) + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) + +sys.path.insert(0, ROOT) +sys.path.insert(0, BASE_DIR) + # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. 
They can be @@ -31,6 +38,7 @@ 'oslo_config.sphinxext', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', + 'ext.resources' ] # openstackdocstheme options diff -Nru senlin-6.0.0/doc/source/contributor/policies/affinity_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/affinity_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/affinity_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/affinity_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -21,12 +21,8 @@ work with vSphere hypervisor when VMware DRS feature is enabled. However, such an extension is only applicable to *admin* owned server clusters. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle only Nova server profile type, e.g. -``os.nova.server-1.0``. +.. schemaspec:: + :package: senlin.policies.affinity_policy.AffinityPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/deletion_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/deletion_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/deletion_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/deletion_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -19,11 +19,8 @@ The deletion policy is designed to be enforced when a cluster's size is to be shrunk. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle any (``ANY``) profile types. +.. schemaspec:: + :package: senlin.policies.deletion_policy.DeletionPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/health_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/health_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/health_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/health_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -19,12 +19,8 @@ The health policy is designed to automate the failure detection and recovery process for a cluster. - -Applicable Profile Types -~~~~~~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle both ``os.nova.server`` and ``os.heat.stack`` -profile types. +.. schemaspec:: + :package: senlin.policies.health_policy.HealthPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/load_balance_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/load_balance_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/load_balance_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/load_balance_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -20,12 +20,8 @@ LBaaS V2 features so that workloads can be distributed across nodes in a reasonable manner. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle only Nova server clusters, i.e. clusters -whose profile is a type of "``os.nova.server-1.0``". +.. 
schemaspec:: + :package: senlin.policies.lb_policy.LoadBalancingPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/region_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/region_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/region_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/region_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -19,11 +19,8 @@ This policy is designed to make sure the nodes in a cluster are distributed across multiple regions according to a specified scheme. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle any profile types. +.. schemaspec:: + :package: senlin.policies.region_placement.RegionPlacementPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/scaling_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/scaling_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/scaling_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/scaling_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -28,11 +28,8 @@ Note that when calculating the target capacity of the cluster, Senlin only considers the **ACTIVE** nodes. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle any (``ANY``) profile types. +.. schemaspec:: + :package: senlin.policies.scaling_policy.ScalingPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/policies/zone_v1.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/zone_v1.rst --- senlin-6.0.0/doc/source/contributor/policies/zone_v1.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/policies/zone_v1.rst 2018-11-19 18:48:08.000000000 +0000 @@ -19,12 +19,8 @@ This policy is designed to make sure the nodes in a cluster are distributed across multiple availability zones according to a specified scheme. - -Applicable Profiles -~~~~~~~~~~~~~~~~~~~ - -The policy is designed to handle Nova server clusters only, i.e. clusters with -a profile of type ``os.nova.server-1.0`` for example. +.. schemaspec:: + :package: senlin.policies.zone_placement.ZonePlacementPolicy Actions Handled diff -Nru senlin-6.0.0/doc/source/contributor/receiver.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/receiver.rst --- senlin-6.0.0/doc/source/contributor/receiver.rst 2018-08-30 14:17:04.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/contributor/receiver.rst 2018-11-19 18:48:08.000000000 +0000 @@ -54,12 +54,11 @@ --------------------------- When a user requests to create a webhook receiver by invoking the -:program:`senlin` command line tool or the equivalent :program:`openstack` -command, the request comes with at least three parameters: the -receiver type which should be ``webhook``, the targeted cluster and the -intended action to invoke when the receiver is triggered. Optionally, the -user can provide some additional parameters to use and/or the credentials of -a different user. +:program:`openstack` command, the request comes with at least three +parameters: the receiver type which should be ``webhook``, the targeted +cluster and the intended action to invoke when the receiver is triggered. +Optionally, the user can provide some additional parameters to use and/or +the credentials of a different user. 
When the Senlin API service receives the request, it does three things: @@ -91,9 +90,8 @@ Creating a message receiver --------------------------- -When a user requests to create a message receiver by invoking :program:`senlin` -command line tool or the equivalent :program:`openstack` command, the receiver -type ``message`` is the only parameter need to be specified. +When a user requests to create a message receiver by invoking :program:`openstack` +command, the receiver type ``message`` is the only parameter need to be specified. When the Senlin API service receives the request, it does the following things: diff -Nru senlin-6.0.0/doc/source/ext/resources.py senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/ext/resources.py --- senlin-6.0.0/doc/source/ext/resources.py 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/ext/resources.py 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,291 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# -*- coding: utf-8 -*- + +from docutils import nodes +from docutils.parsers import rst +from docutils.parsers.rst import directives +from functools import cmp_to_key +from oslo_utils import importutils +from sphinx.util import logging + +from senlin.common import schema + +LOG = logging.getLogger(__name__) + + +class SchemaDirective(rst.Directive): + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = {'package': directives.unchanged} + has_content = False + add_index = True + section_title = 'Spec' + properties_only = False + + def run(self): + """Build doctree nodes consisting for the specified schema class + + :returns: doctree node list + """ + + # gives you access to the options of the directive + options = self.options + + content = [] + + # read in package class + obj = importutils.import_class(options['package']) + + # skip other spec properties if properties_only is True + if not self.properties_only: + section = self._create_section(content, 'spec', + title=self.section_title) + + # create version section + version_section = self._create_section(section, 'version', + title='Latest Version') + field = nodes.line('', obj.VERSION) + version_section.append(field) + + # build versions table + version_tbody = self._build_table( + section, 'Available Versions', + ['Version', 'Status', 'Supported Since']) + sorted_versions = sorted(obj.VERSIONS.items()) + for version, support_status in sorted_versions: + for support in support_status: + cells = [version] + sorted_support = sorted(support.items(), reverse=True) + cells += [x[1] for x in sorted_support] + self._create_table_row(cells, version_tbody) + + # create applicable profile types + profile_type_description = ('This policy is designed to handle ' + 'the following profile types:') + profile_type_section = self._create_section( + section, 'profile_types', title='Applicable Profile Types') + field = nodes.line('', profile_type_description) + profile_type_section.append(field) + for profile_type in obj.PROFILE_TYPE: + 
profile_type_section += self._create_list_item(profile_type) + + # create actions handled + policy_trigger_description = ('This policy is triggered by the ' + 'following actions during the ' + 'respective phases:') + target_tbody = self._build_table( + section, 'Policy Triggers', + ['Action', 'Phase'], + policy_trigger_description + ) + sorted_targets = sorted(obj.TARGET, key=lambda tup: tup[1]) + for phase, action in sorted_targets: + cells = [action, phase] + self._create_table_row(cells, target_tbody) + + # build properties + properties_section = self._create_section(section, 'properties', + title='Properties') + else: + properties_section = content + + sorted_schema = sorted(obj.properties_schema.items(), + key=cmp_to_key(self._sort_by_type)) + for k, v in sorted_schema: + self._build_properties(k, v, properties_section) + + # we return the result + return content + + def _create_section(self, parent, sectionid, title=None, term=None): + """Create a new section + + :returns: If term is specified, returns a definition node contained + within the newly created section. Otherwise return the newly created + section node. + """ + + idb = nodes.make_id(sectionid) + section = nodes.section(ids=[idb]) + parent.append(section) + + if term: + if term != '**': + section.append(nodes.term('', term)) + + definition = nodes.definition() + section.append(definition) + + return definition + + if title: + section.append(nodes.title('', title)) + + return section + + def _create_list_item(self, str): + """Creates a new list item + + :returns: List item node + """ + para = nodes.paragraph() + para += nodes.strong('', str) + + item = nodes.list_item() + item += para + + return item + + def _create_def_list(self, parent): + """Creates a definition list + + :returns: Definition list node + """ + + definition_list = nodes.definition_list() + parent.append(definition_list) + + return definition_list + + def _sort_by_type(self, x, y): + """Sort two keys so that map and list types are ordered last.""" + + x_key, x_value = x + y_key, y_value = y + + # if both values are map or list, sort by their keys + if ((isinstance(x_value, schema.Map) or + isinstance(x_value, schema.List)) and + (isinstance(y_value, schema.Map) or + isinstance(y_value, schema.List))): + return (x_key > y_key) - (x_key < y_key) + + # show simple types before maps or list + if (isinstance(x_value, schema.Map) or + isinstance(x_value, schema.List)): + return 1 + + if (isinstance(y_value, schema.Map) or + isinstance(y_value, schema.List)): + return -1 + + return (x_key > y_key) - (x_key < y_key) + + def _create_table_row(self, cells, parent): + """Creates a table row for cell in cells + + :returns: Row node + """ + + row = nodes.row() + parent.append(row) + + for c in cells: + entry = nodes.entry() + row += entry + entry += nodes.literal(text=c) + + return row + + def _build_table(self, section, title, headers, description=None): + """Creates a table with given title, headers and description + + :returns: Table body node + """ + + table_section = self._create_section(section, title, title=title) + + if description: + field = nodes.line('', description) + table_section.append(field) + + table = nodes.table() + tgroup = nodes.tgroup(len(headers)) + table += tgroup + + table_section.append(table) + + for _ in headers: + tgroup.append(nodes.colspec(colwidth=1)) + + # create header + thead = nodes.thead() + tgroup += thead + self._create_table_row(headers, thead) + + tbody = nodes.tbody() + tgroup += tbody + + # create body consisting of targets + 
tbody = nodes.tbody() + tgroup += tbody + + return tbody + + def _build_properties(self, k, v, definition): + """Build schema property documentation + + :returns: None + """ + + if isinstance(v, schema.Map): + newdef = self._create_section(definition, k, term=k) + + if v.schema is None: + # if it's a map for arbritary values, only include description + field = nodes.line('', v.description) + newdef.append(field) + return + + newdeflist = self._create_def_list(newdef) + + sorted_schema = sorted(v.schema.items(), + key=cmp_to_key(self._sort_by_type)) + for key, value in sorted_schema: + self._build_properties(key, value, newdeflist) + elif isinstance(v, schema.List): + newdef = self._create_section(definition, k, term=k) + + # identify next section as list properties + field = nodes.line() + emph = nodes.emphasis('', 'List properties:') + field.append(emph) + newdef.append(field) + + newdeflist = self._create_def_list(newdef) + + self._build_properties('**', v.schema['*'], newdeflist) + else: + newdef = self._create_section(definition, k, term=k) + if 'description' in v: + field = nodes.line('', v['description']) + newdef.append(field) + else: + field = nodes.line('', '++') + newdef.append(field) + + +class SchemaProperties(SchemaDirective): + properties_only = True + + +class SchemaSpec(SchemaDirective): + section_title = 'Spec' + properties_only = False + + +def setup(app): + app.add_directive('schemaprops', SchemaProperties) + app.add_directive('schemaspec', SchemaSpec) diff -Nru senlin-6.0.0/doc/source/index.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/index.rst --- senlin-6.0.0/doc/source/index.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/index.rst 2018-11-19 18:48:08.000000000 +0000 @@ -90,11 +90,27 @@ user/policy_types/region_placement user/policy_types/zone_placement +3.3 Built-in Profile Types +-------------------------- + +The senlin service is released with some built-in profile types that target +some common use cases. You can develop and deploy your own profile types by +following the instructions in the :ref:`developer-guide` section. + +The following is a list of builtin profile types: + +.. toctree:: + :maxdepth: 1 + + user/profile_types/nova + user/profile_types/stack + user/profile_types/docker + 4 Usage Scenarios ~~~~~~~~~~~~~~~~~ This section provides some guides for typical usage scenarios. More scenarios -are to be added +are to be added. 4.1 Managing Node Affinity -------------------------- diff -Nru senlin-6.0.0/doc/source/reference/man/index.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/reference/man/index.rst --- senlin-6.0.0/doc/source/reference/man/index.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/reference/man/index.rst 2018-11-19 18:48:08.000000000 +0000 @@ -20,3 +20,4 @@ :maxdepth: 1 senlin-manage + senlin-status diff -Nru senlin-6.0.0/doc/source/reference/man/senlin-status.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/reference/man/senlin-status.rst --- senlin-6.0.0/doc/source/reference/man/senlin-status.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/reference/man/senlin-status.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,78 @@ +============= +senlin-status +============= + +Synopsis +======== + +:: + + senlin-status [] + +Description +=========== + +:program:`senlin-status` is a tool that provides routines for checking the +status of a Senlin deployment. 
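(Editor's aside: given the new ``python3-oslo.upgradecheck`` build/runtime dependency added in ``debian/control`` above and the ``senlin-status upgrade check`` man page being added here, the command appears to follow the usual oslo.upgradecheck pattern. The sketch below is only a minimal illustration of that pattern under that assumption — the class name, the placeholder check and the entry-point wiring are not taken from Senlin's source::

    from oslo_config import cfg
    from oslo_upgradecheck import upgradecheck


    class Checks(upgradecheck.UpgradeCommands):
        """Checks run by `senlin-status upgrade check` (illustrative only)."""

        def _check_placeholder(self):
            # Stein starts with a placeholder check that always passes.
            return upgradecheck.Result(upgradecheck.Code.SUCCESS)

        # (display name, check callable) pairs evaluated by the framework.
        _upgrade_checks = (('placeholder', _check_placeholder),)


    def main():
        # Returns 0/1/2/255 as documented in the man page that follows.
        return upgradecheck.main(cfg.CONF, project='senlin',
                                 upgrade_command=Checks())


    if __name__ == '__main__':
        main()

End of aside; the added man page continues below.)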
+ +Options +======= + +The standard pattern for executing a :program:`senlin-status` command is:: + + senlin-status [] + +Run without arguments to see a list of available command categories:: + + senlin-status + +Categories are: + +* ``upgrade`` + +Detailed descriptions are below. + +You can also run with a category argument such as ``upgrade`` to see a list of +all commands in that category:: + + senlin-status upgrade + +These sections describe the available categories and arguments for +:program:`senlin-status`. + +Upgrade +~~~~~~~ + +.. _senlin-status-checks: + +``senlin-status upgrade check`` + Performs a release-specific readiness check before restarting services with + new code. This command expects to have complete configuration and access + to databases and services. + + **Return Codes** + + .. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - All upgrade readiness checks passed successfully and there is nothing + to do. + * - 1 + - At least one check encountered an issue and requires further + investigation. This is considered a warning but the upgrade may be OK. + * - 2 + - There was an upgrade status check failure that needs to be + investigated. This should be considered something that stops an + upgrade. + * - 255 + - An unexpected error occurred. + + **History of Checks** + + **7.0.0 (Stein)** + + * Placeholder to be filled in with checks as they are added in Stein. diff -Nru senlin-6.0.0/doc/source/user/policies.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policies.rst --- senlin-6.0.0/doc/source/user/policies.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policies.rst 2018-11-19 18:48:08.000000000 +0000 @@ -234,5 +234,13 @@ of policy objects. * :doc:`Working with Policy Types ` +* :ref:`Affinity Policy ` +* :ref:`Batch Policy ` +* :ref:`Deletion Policy ` +* :ref:`Health Policy ` +* :ref:`Load-Balancing Policy ` +* :ref:`Region Placement Policy ` +* :ref:`Scaling Policy ` +* :ref:`Zone Placement Policy ` * :doc:`Managing the Bindings between Clusters and Policies ` * :doc:`Browsing Events ` diff -Nru senlin-6.0.0/doc/source/user/policy_types/affinity.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/affinity.rst --- senlin-6.0.0/doc/source/user/policy_types/affinity.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/affinity.rst 2018-11-19 18:48:08.000000000 +0000 @@ -11,6 +11,7 @@ License for the specific language governing permissions and limitations under the License. +.. _ref-affinity-policy: =============== Affinity Policy @@ -28,19 +29,16 @@ Properties ~~~~~~~~~~ -A typical spec for an affinity policy looks like the following example: +.. schemaprops:: + :package: senlin.policies.affinity_policy.AffinityPolicy -.. code-block:: yaml +Sample +~~~~~~ - type: senlin.policy.affinity - version: 1.0 - properties: - servergroup: - name: my_server_group - policies: affinity - availability_zone: nova - enable_drs_extension: false +A typical spec for an affinity policy looks like the following example: +.. 
literalinclude :: /../../examples/policies/affinity_policy.yaml + :language: yaml The affinity policy has the following properties: diff -Nru senlin-6.0.0/doc/source/user/policy_types/batch.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/batch.rst --- senlin-6.0.0/doc/source/user/policy_types/batch.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/batch.rst 2018-11-19 18:48:08.000000000 +0000 @@ -30,17 +30,16 @@ Properties ~~~~~~~~~~ -Below is a typical spec for a batch policy: +.. schemaprops:: + :package: senlin.policies.batch_policy.BatchPolicy -.. code-block:: yaml +Sample +~~~~~~ - type: senlin.policy.batch - version: 1.0 - properties: - min_in_service: 8 - max_batch_size: 3 - pause_time: 30 +Below is a typical spec for a batch policy: +.. literalinclude :: /../../examples/policies/batch_policy.yaml + :language: yaml The ``min_in_service`` property specifies the minimum number of nodes to be kept in ACTIVE status. This is mainly for cluster update use cases. The diff -Nru senlin-6.0.0/doc/source/user/policy_types/deletion.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/deletion.rst --- senlin-6.0.0/doc/source/user/policy_types/deletion.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/deletion.rst 2018-11-19 18:48:08.000000000 +0000 @@ -27,17 +27,16 @@ Properties ~~~~~~~~~~ -Below is a typical spec for a deletion policy: +.. schemaprops:: + :package: senlin.policies.deletion_policy.DeletionPolicy + +Sample +~~~~~~ -.. code-block:: yaml +Below is a typical spec for a deletion policy: - type: senlin.policy.deletion - version: 1.1 - properties: - criteria: OLDEST_FIRST - destroy_after_deletion: false - grace_period: 30 - reduce_desired_capacity: true +.. literalinclude :: /../../examples/policies/deletion_policy.yaml + :language: yaml The valid values for the "``criteria`` property include: diff -Nru senlin-6.0.0/doc/source/user/policy_types/health.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/health.rst --- senlin-6.0.0/doc/source/user/policy_types/health.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/health.rst 2018-11-19 18:48:08.000000000 +0000 @@ -27,37 +27,19 @@ The policy type is currently applicable to clusters whose profile type is one of ``os.nova.server`` or ``os.heat.stack``. This could be extended in future. - -.. note:: - - The health policy is still under rapid development. More features are being - designed, implemented and verified. Its support status is still - ``EXPERIMENTAL``, which means there could be changes at the discretion of - the development team before it is formally supported. - - Properties ~~~~~~~~~~ -A typical spec for a health policy looks like the following example: +.. schemaprops:: + :package: senlin.policies.health_policy.HealthPolicy -.. code-block:: yaml +Sample +~~~~~~ - type: senlin.policy.health - version: 1.0 - properties: - detection: - type: NODE_STATUS_POLLING - options: - interval: 60 - recovery: - actions: - - name: REBOOT - params: - type: soft - fencing: - - compute +A typical spec for a health policy looks like the following example: +.. 
literalinclude :: /../../examples/policies/health_policy_poll.yaml + :language: yaml There are two groups of properties (``detection`` and ``recovery``), each of which provides information related to the failure detection and the failure diff -Nru senlin-6.0.0/doc/source/user/policy_types/load_balancing.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/load_balancing.rst --- senlin-6.0.0/doc/source/user/policy_types/load_balancing.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/load_balancing.rst 2018-11-19 18:48:08.000000000 +0000 @@ -35,40 +35,17 @@ Properties ~~~~~~~~~~ +.. schemaprops:: + :package: senlin.policies.lb_policy.LoadBalancingPolicy + +Sample +~~~~~~ + The design of the load-balancing policy faithfully follows the interface and properties exposed by the LBaaS v2 service. A sample spec is shown below: -.. code-block:: yaml - - type: senlin.policy.loadbalance - version: 1.1 - properties: - pool: - protocol: HTTP - protocol_port: 80 - subnet: private_subnet - lb_method: ROUND_ROBIN - admin_state_up: true - session_persistence: - type: HTTP_COOKIE - cookie_name: my_cookie - vip: - subnet: public_subnet - address: 12.34.56.78 - connection_limit: 5000 - protocol: HTTP - protocol_port: 80 - admin_state_up: true - health_monitor: - type: HTTP - delay: 20 - timeout: 5 - max_retries: 3 - admin_state_up: true - http_method: GET - url_path: /health - expected_codes: 200 - lb_status_timeout: 300 +.. literalinclude :: /../../examples/policies/lb_policy.yaml + :language: yaml As you can see, there are many properties related to the policy. The good news is that for most of them, there are reasonable default values. All properties diff -Nru senlin-6.0.0/doc/source/user/policy_types/region_placement.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/region_placement.rst --- senlin-6.0.0/doc/source/user/policy_types/region_placement.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/region_placement.rst 2018-11-19 18:48:08.000000000 +0000 @@ -28,20 +28,16 @@ Properties ~~~~~~~~~~ -A typical spec for a region placement policy is shown in the following sample: +.. schemaprops:: + :package: senlin.policies.region_placement.RegionPlacementPolicy + +Sample +~~~~~~ -.. code-block:: yaml +A typical spec for a region placement policy is shown in the following sample: - type: senlin.policy.region_placement - version: 1.0 - properties: - regions: - - name: region_1 - weight: 100 - cap: 50 - - name: region_2 - weight: 200 - cap: 100 +.. literalinclude :: /../../examples/policies/placement_region.yaml + :language: yaml In this sample spec, two regions are provided, namely "``region_1``" and "``region_2``". There are "weight" and "cap" attributes associated with them, diff -Nru senlin-6.0.0/doc/source/user/policy_types/scaling.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/scaling.rst --- senlin-6.0.0/doc/source/user/policy_types/scaling.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/scaling.rst 2018-11-19 18:48:08.000000000 +0000 @@ -25,20 +25,16 @@ Properties ~~~~~~~~~~ -A typical spec for a scaling policy is shown below: +.. schemaprops:: + :package: senlin.policies.scaling_policy.ScalingPolicy + +Sample +~~~~~~ -.. 
code-block:: yaml +A typical spec for a scaling policy is shown below: - type: senlin.policy.scaling - version: 1.0 - properties: - event: CLUSTER_SCALE_IN - adjustment: - type: CHANGE_IN_PERCENTAGE - number: 10 - min_step: 1 - best_effort: true - cooldown: 30 +.. literalinclude :: /../../examples/policies/scaling_policy.yaml + :language: yaml You should pay special attentions to the ``event`` property, whose valid values include "``CLUSTER_SCALE_IN``" and "``CLUSTER_SCALE_OUT``". One @@ -49,14 +45,16 @@ Senlin has carefully designed the builtin policy types so that for scaling policies, you can attach more than one instance of the same policy type but -you may get an error when you are attempting to attach two policies of anther +you may get an error when you are attempting to attach two policies of another type (say ``senlin.policy.deletion``) to the same cluster. The value of the ``event`` property indicates when the policy will be checked. A policy with ``event`` set to "``CLUSTER_SCALE_IN``" will be checked when and only when a corresponding action is triggered on the cluster. A policy with ``event`` set to "``CLUSTER_SCALE_OUT``" will be checked when and only when -a corresponding action is triggered. +a corresponding action is triggered. If the cluster is currently processing a +scaling action it will not accept another scaling action until the current +action has been processed and cooldown has been observed. For both types of actions that can triggered the scaling policy, there are always three types of adjustments to choose from as listed below. The type diff -Nru senlin-6.0.0/doc/source/user/policy_types/zone_placement.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/zone_placement.rst --- senlin-6.0.0/doc/source/user/policy_types/zone_placement.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types/zone_placement.rst 2018-11-19 18:48:08.000000000 +0000 @@ -31,19 +31,17 @@ Properties ~~~~~~~~~~ +.. schemaprops:: + :package: senlin.policies.zone_placement.ZonePlacementPolicy + +Sample +~~~~~~ + A typical spec for a zone placement policy is exemplified in the following sample: -.. code-block:: yaml - - type: senlin.policy.zone_placement - version: 1.0 - properties: - regions: - - name: az_1 - weight: 100 - - name: az_2 - weight: 200 +.. literalinclude :: /../../examples/policies/placement_zone.yaml + :language: yaml In this sample spec, two availability zones are provided, namely "``az_1``" and "``az_2``". Each availability zone can have an optional "``weight``" attribute diff -Nru senlin-6.0.0/doc/source/user/policy_types.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types.rst --- senlin-6.0.0/doc/source/user/policy_types.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/policy_types.rst 2018-11-19 18:48:08.000000000 +0000 @@ -22,7 +22,7 @@ ~~~~~~~ A :term:`Policy Type` is an abstract specification of the rules to be checked -and/or enforced when certain :term:`Action` is performed on a cluster that +and/or enforced when an :term:`Action` is performed on a cluster that contains nodes of certain :term:`Profile Type`. 
A registry of policy types is built in memory when the Senlin engine diff -Nru senlin-6.0.0/doc/source/user/profiles.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profiles.rst --- senlin-6.0.0/doc/source/user/profiles.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profiles.rst 2018-11-19 18:48:08.000000000 +0000 @@ -416,6 +416,9 @@ creation and usage: - :doc:`Working with Profile Types ` +- :ref:`Nova Profile ` +- :ref:`Stack Profile ` +- :ref:`Docker Profile ` - :doc:`Creating and Managing Clusters ` - :doc:`Creating and Managing Nodes ` - :doc:`Managing Cluster Membership ` diff -Nru senlin-6.0.0/doc/source/user/profile_types/docker.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/docker.rst --- senlin-6.0.0/doc/source/user/profile_types/docker.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/docker.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,35 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-docker-profile: + +============== +Docker Profile +============== + +The docker profile instantiates nodes that are associated with docker container +instances. + +Properties +~~~~~~~~~~ + +.. schemaprops:: + :package: senlin.profiles.container.docker.DockerProfile + +Sample +~~~~~~ + +Below is a typical spec for a docker profile: + +.. literalinclude :: /../../examples/profiles/docker_container/docker_basic.yaml + :language: yaml \ No newline at end of file diff -Nru senlin-6.0.0/doc/source/user/profile_types/nova.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/nova.rst --- senlin-6.0.0/doc/source/user/profile_types/nova.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/nova.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,35 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-nova-profile: + +============ +Nova Profile +============ + +The nova profile instantiates nodes that are associated with nova server +instances. + +Properties +~~~~~~~~~~ + +.. schemaprops:: + :package: senlin.profiles.os.nova.server.ServerProfile + +Sample +~~~~~~ + +Below is a typical spec for a nova profile: + +.. 
literalinclude :: /../../examples/profiles/nova_server/cirros_basic.yaml + :language: yaml \ No newline at end of file diff -Nru senlin-6.0.0/doc/source/user/profile_types/stack.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/stack.rst --- senlin-6.0.0/doc/source/user/profile_types/stack.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/source/user/profile_types/stack.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,35 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +.. _ref-stack-profile: + +============= +Stack Profile +============= + +The stack profile instantiates nodes that are associated with heat stack +instances. + +Properties +~~~~~~~~~~ + +.. schemaprops:: + :package: senlin.profiles.os.heat.stack.StackProfile + +Sample +~~~~~~ + +Below is a typical spec for a stack profile: + +.. literalinclude :: /../../examples/profiles/heat_stack/nova_server/heat_stack_nova_server.yaml + :language: yaml diff -Nru senlin-6.0.0/doc/specs/approved/container-cluster.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/approved/container-cluster.rst --- senlin-6.0.0/doc/specs/approved/container-cluster.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/approved/container-cluster.rst 2018-11-19 18:48:08.000000000 +0000 @@ -73,6 +73,7 @@ container profile, which can be used to determine the placement of containers. Since Senlin supports scaling, some rules should be obeyed to cooperate host_node and host_cluster usage. + * Only container type profile can contain 'host_node' and 'host_cluster' properties. * Container type profile must contain both 'host_node' and 'host_cluster' diff -Nru senlin-6.0.0/doc/specs/fail-fast-on-locked_resource.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/fail-fast-on-locked_resource.rst --- senlin-6.0.0/doc/specs/fail-fast-on-locked_resource.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/fail-fast-on-locked_resource.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,257 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +============================= +Fail fast on locked resources +============================= + + +When an operation on a locked resource (e.g. cluster or node) is requested, +Senlin creates a corresponding action and calls on the engine dispatcher to +asynchronously process it. If the targeted resource is locked by another +operation, the action will fail to process it and the engine will ask the +dispatcher to retry the action up to three times. If the resource is still +locked after three retries, the action is considered failed. The user making +the operation request will not know that an action has failed until the +retries have been exhausted and it queries the action state from Senlin. 
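(Editor's aside: the spec text that follows proposes replacing this background retry loop with an up-front lock check at action-creation time, failing the API call with HTTP 409. As a rough, self-contained illustration of that control flow only — the ``locks`` mapping stands in for the cluster_lock/node_lock tables and the exception wiring is hypothetical, not Senlin's actual code::

    class ResourceIsLocked(Exception):
        """Stand-in for the new exception type proposed below (HTTP 409)."""


    def request_action(locks, target_id, action_name):
        # Fail fast: reject the request if the target is already locked,
        # leaving any retry decision to the caller instead of retrying
        # in the background.
        if target_id in locks:
            raise ResourceIsLocked('%s is locked by action %s'
                                   % (target_id, locks[target_id]))
        return {'action': action_name, 'target': target_id, 'status': 'READY'}


    # Example: a scale-in requested while a scale-out still holds the lock
    # is rejected immediately rather than queued and retried.
    locks = {'cluster-1': 'CLUSTER_SCALE_OUT'}
    try:
        request_action(locks, 'cluster-1', 'CLUSTER_SCALE_IN')
    except ResourceIsLocked as exc:
        print(exc)

End of aside; the added spec continues below.)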
+ +This spec proposes to check the lock status of the targeted resource and fail +immediately if it is locked during the synchronous API call by the user. The +failed action is not automatically retried. Instead it is up to the user to +retry the API call as desired. + + +Problem description +=================== + +The current implementation where failed actions are automatically retried can +lead to starvation situations when a large number of actions on the same target +cluster or node are requested. E.g. if a user requests a 100 scale-in operations +on a cluster, the Senlin engine will take a long time to process the retries and +will not be able to respond to other commands in the meantime. + +Another problem with the current implementation is encountered when health +checks are running against a cluster and the user is simultaneously performing +operations on it. When the health check thread determines that a node is +unhealthy (1), the user could request a cluster scale-out (2) before the health +check thread had a chance to call node recovery (4). In that case the first node +recovery will fail because the cluster is already locked and the node recovery +action will be retried in the background. However after the scale-out +completes and the next iteration of the health check runs, it might still see +the node as unhealthy and request another node recovery. In that case the node +will be unnecessarily recovered twice. + +:: + + +---------------+ +---------------+ +-------+ + | HealthManager | | SenlinEngine | | User | + +---------------+ +---------------+ +-------+ + | -----------------\ | | + |-| Health check | | | + | | thread starts. | | | + | |----------------| | | + | | | + | (1) Is Node healthy? No. | | + |------------------------- | | + | | | | + |<------------------------ | | + | | | + | | (2) Scale Out Cluster. | + | |<---------------------------| + | | | + | | (3) Lock cluster. | + | |------------------ | + | | | | + | |<----------------- | + | | | + | (4) Recover node. | | + |-------------------------------------------------->| | + | | | + | (5) Recover node action created. | | + |<--------------------------------------------------| | + | | | + | | (6) Cluster is locked. | + | | Retry node recover. | + | |----------------------- | + | | | | + | |<---------------------- | + | | | + | (7) Get node recover action status. | | + |-------------------------------------------------->| | + | | | + | (8) Node recover action status is failed. | | + |<--------------------------------------------------| | + | ---------------\ | | + |-| Health check | | | + | | thread ends. | | | + | |--------------| | | + | | | + +Finally, there are other operations that can lead to locked clusters that are +never released as indicated in this bug: +https://bugs.launchpad.net/senlin/+bug/1725883 + +Use Cases +--------- + +As a user, I want to know right away if an operation on a cluster or node fails +because the cluster or node is locked by another operation. By being able to +receive immediate feedback when an operation fails due to a locked resource, the +Senlin engine will adhere to the fail-fast software design principle [1] and +thereby reducing the software complexity and potential bugs due to +locked resources. + +Proposed change +=============== + + +1. **All actions** + + Before an action is created, check if the targeted cluster or node is + already locked in the cluster_lock or node_lock tables. + + * If the target cluster or node is locked, throw a ResourceIsLocked + exception. 
+ * If the action table already has an active action operating on the + target cluster or node, throw a ActionConflict exception. An action + is defined as active if its status is one of the following: + READY, WAITING, RUNNING OR WAITING_LIFECYCLE_COMPLETION. + * If the target cluster or node is not locked, proceed to create the + action. + +2. **ResourceIsLocked** + + New exception type that corresponds to a 409 HTTP error code. + +3. **ActionConflict** + + New exception type that corresponds to a 409 HTTP error code. + + +Alternatives +------------ + +None + + +Data model impact +----------------- + +None + +REST API impact +--------------- + +* Alls Action (changed in **bold**) + + :: + + POST /v1/clusters/{cluster_id}/actions + + + - Normal HTTP response code(s): + + =============== =========================================================== + Code Reason + =============== =========================================================== + 202 - Accepted Request was accepted for processing, but the processing has + not been completed. A 'location' header is included in the + response which contains a link to check the progress of the + request. + =============== =========================================================== + + - Expected error HTTP response code(s): + + ========================== =============================================== + Code Reason + ========================== =============================================== + 400 - Bad Request Some content in the request was invalid. + 401 - Unauthorized User must authenticate before making a request. + 403 - Forbidden Policy does not allow current user to do this + operation. + 404 - Not Found The requested resource could not be found. + **409 - Conflict** **The requested resource is locked by** + **another action** + 503 - Service Unavailable Service unavailable. This is mostly + caused by service configuration errors which + prevents the service from successful start up. + ========================== =============================================== + + + +Security impact +--------------- + +None + +Notifications impact +-------------------- + + +Other end user impact +--------------------- + +The python-senlinclient requires modification to return the 409 HTTP error code +to the user. + +Performance Impact +------------------ + +None + +Other deployer impact +--------------------- + +None + +Developer impact +---------------- + +None + + +Implementation +============== + +Assignee(s) +----------- + +dtruong@blizzard.com + +Work Items +---------- + +None + +Dependencies +============ + +None + + +Testing +======= + +Unit tests and tempest tests are needed for the new action request behavior when +a resource is locked. + +Documentation Impact +==================== + +End User Guide needs to updated to describe the new behavior of action +requests when a target resource is locked. The End User Guide should also +describe that the user can retry an action if they receive 409 HTTP error code. + +References +========== + +[1] https://www.martinfowler.com/ieeeSoftware/failFast.pdf + + +History +======= + +None diff -Nru senlin-6.0.0/doc/specs/multiple-detection-modes.rst senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/multiple-detection-modes.rst --- senlin-6.0.0/doc/specs/multiple-detection-modes.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/doc/specs/multiple-detection-modes.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,317 @@ +.. 
+ This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +================================================= +Multiple polling detection modes in Health Policy +================================================= + +The health policy allows a user specify a detection mode to use for checking +node health. In the current implementation only one of the following detection +modes is allowed: + +* NODE_STATUS_POLLING +* NODE_STATUS_POLL_URL +* LIFECYCLE_EVENTS + +This spec proposes to let the user specify multiple polling detection modes in +the same health policy. E.g. the user can specify both NODE_STATUS_POLLING and +NODE_STATUS_POLL_URL detection modes in the same health policy. + + +Problem description +=================== + +The current implementation only allows a health policy to specify a single +detection mode to use for verifying the node health. However, there are +situations in which the user would want to have two detection modes checked and +only rebuild a node if both modes failed. Using multiple detection modes has the +benefit of fault tolerant health checks where one detection mode takes over in +case the other detection mode cannot be completed. + + +Use Cases +--------- + +As a user, I want to specify multiple polling detection modes for a given health +policy. The order of the polling detection modes used when creating the health +policy specifies the order of evaluation for the health checks. As a user, I also +want to be able to specify if a single detection mode failure triggers a node +rebuild or if all detection modes have to fail before a node is considered +unhealthy. + + +Proposed change +=============== + + +1. **Health Policy** + + Increment health policy version to 1.1 and implement the following schema: + +:: + + name: senlin.policy.health-1.1 + schema: + detection: + description: Policy aspect for node failure detection. + required: true + schema: + detection_modes: + description: List of node failure detection modes. + required: false + schema: + '*': + description: Node failure detection mode to try + required: false + schema: + options: + default: {} + required: false + schema: + poll_url: + default: '' + description: URL to poll for node status. See documentation for + valid expansion parameters. Only required when type is 'NODE_STATUS_POLL_URL'. + required: false + type: String + updatable: false + poll_url_conn_error_as_unhealthy: + default: true + description: Whether to treat URL connection errors as an indication + of an unhealthy node. Only required when type is 'NODE_STATUS_POLL_URL'. + required: false + type: Boolean + updatable: false + poll_url_healthy_response: + default: '' + description: String pattern in the poll URL response body that + indicates a healthy node. Required when type is 'NODE_STATUS_POLL_URL'. + required: false + type: String + updatable: false + poll_url_retry_interval: + default: 3 + description: Number of seconds between URL polling retries before + a node is considered down. Required when type is 'NODE_STATUS_POLL_URL'. + required: false + type: Integer + updatable: false + poll_url_retry_limit: + default: 3 + description: Number of times to retry URL polling when its return + body is missing POLL_URL_HEALTHY_RESPONSE string before a node + is considered down. Required when type is 'NODE_STATUS_POLL_URL'. 
+ required: false + type: Integer + updatable: false + poll_url_ssl_verify: + default: true + description: Whether to verify SSL when calling URL to poll for + node status. Only required when type is 'NODE_STATUS_POLL_URL'. + required: false + type: Boolean + updatable: false + type: Map + updatable: false + type: + constraints: + - constraint: + - LIFECYCLE_EVENTS + - NODE_STATUS_POLLING + - NODE_STATUS_POLL_URL + type: AllowedValues + description: Type of node failure detection. + required: true + type: String + updatable: false + type: Map + updatable: false + type: List + updatable: false + interval: + default: 60 + description: Number of seconds between pollings. Only required when type is + 'NODE_STATUS_POLLING' or 'NODE_STATUS_POLL_URL'. + required: false + type: Integer + updatable: false + node_update_timeout: + default: 300 + description: Number of seconds since last node update to wait before checking + node health. + required: false + type: Integer + updatable: false + recovery_conditional: + constraints: + - constraint: + - ALL_FAILED + - ANY_FAILED + type: AllowedValues + default: ANY_FAILED + description: The conditional that determines when recovery should be performed + in case multiple detection modes are specified. 'ALL_FAILED' + means that all detection modes have to return failed health checks before + a node is recovered. 'ANY_FAILED' means that a failed health + check with a single detection mode triggers a node recovery. + required: false + type: String + updatable: false + type: Map + updatable: false + recovery: + description: Policy aspect for node failure recovery. + required: true + schema: + actions: + description: List of actions to try for node recovery. + required: false + schema: + '*': + description: Action to try for node recovery. + required: false + schema: + name: + constraints: + - constraint: + - REBOOT + - REBUILD + - RECREATE + type: AllowedValues + description: Name of action to execute. + required: true + type: String + updatable: false + params: + description: Parameters for the action + required: false + type: Map + updatable: false + type: Map + updatable: false + type: List + updatable: false + fencing: + description: List of services to be fenced. + required: false + schema: + '*': + constraints: + - constraint: + - COMPUTE + type: AllowedValues + description: Service to be fenced. + required: true + type: String + updatable: false + type: List + updatable: false + node_delete_timeout: + default: 20 + description: Number of seconds to wait for node deletion to finish and start + node creation for recreate recovery option. Required when type is 'NODE_STATUS_POLL_URL + and recovery action is RECREATE'. + required: false + type: Integer + updatable: false + node_force_recreate: + default: false + description: Whether to create node even if node deletion failed. Required + when type is 'NODE_STATUS_POLL_URL' and action recovery action is RECREATE. 
+ required: false + type: Boolean + updatable: false + type: Map + updatable: false + + + +Alternatives +------------ + +None + + +Data model impact +----------------- + +None + +REST API impact +--------------- + +None + +Security impact +--------------- + +None + +Notifications impact +-------------------- + +None + +Other end user impact +--------------------- + +None + +Performance Impact +------------------ + +None + +Other deployer impact +--------------------- + +None + +Developer impact +---------------- + +None + + +Implementation +============== + +Assignee(s) +----------- + +dtruong@blizzard.com + +Work Items +---------- + +None + +Dependencies +============ + +None + + +Testing +======= + +Unit tests and tempest tests are needed to test multiple detection modes. + +Documentation Impact +==================== + +End User Guide needs to be updated to describe how multiple detection modes can +be set. + +References +========== + +None + +History +======= + +None diff -Nru senlin-6.0.0/lower-constraints.txt senlin-7.0.0~b1~git2018111913.0ddbc114/lower-constraints.txt --- senlin-6.0.0/lower-constraints.txt 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/lower-constraints.txt 2018-11-19 18:48:08.000000000 +0000 @@ -55,7 +55,7 @@ munch==2.2.0 netaddr==0.7.19 netifaces==0.10.6 -openstacksdk==0.11.2 +openstacksdk==0.17.2 os-client-config==1.29.0 os-service-types==1.2.0 oslo.cache==1.29.0 @@ -70,6 +70,7 @@ oslo.policy==1.30.0 oslo.serialization==2.18.0 oslo.service==1.24.0 +oslo.upgradecheck==0.1.0 oslo.utils==3.33.0 oslo.versionedobjects==1.31.2 oslotest==3.2.0 diff -Nru senlin-6.0.0/PKG-INFO senlin-7.0.0~b1~git2018111913.0ddbc114/PKG-INFO --- senlin-6.0.0/PKG-INFO 2018-08-30 14:19:55.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/PKG-INFO 2018-11-19 18:48:11.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: senlin -Version: 6.0.0 +Version: 6.1.0.dev64 Summary: OpenStack Clustering Home-page: https://docs.openstack.org/senlin/latest/ Author: OpenStack diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-api/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-api/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-api/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-api/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -62,13 +62,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-functional/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-functional/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-functional/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-functional/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -62,13 +62,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd 
/opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-integration/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-integration/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py27-integration/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py27-integration/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -81,13 +81,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-api/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-api/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-api/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-api/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -64,13 +64,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-functional/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-functional/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-functional/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-functional/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -63,13 +63,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-integration/run.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-integration/run.yaml --- senlin-6.0.0/playbooks/legacy/senlin-dsvm-tempest-py35-integration/run.yaml 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/playbooks/legacy/senlin-dsvm-tempest-py35-integration/run.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -81,13 +81,13 @@ fi function pre_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./pre_test_hook.sh } export -f pre_test_hook function post_test_hook { - cd /opt/stack/new/senlin/senlin/tests/tempest/ + cd /opt/stack/new/senlin-tempest-plugin/senlin_tempest_plugin source ./post_test_hook.sh } export -f post_test_hook diff -Nru 
senlin-6.0.0/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml --- senlin-6.0.0/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,21 @@ +--- +prelude: > + This release alters the cluster_scale_in and cluster_scale_out actions to + no longer place the action into the actions table when a conflict is + detected. This behavior is an improvement on the old way actions are + processed, as the requester will now receive immediate feedback from the + API when an action cannot be processed. This release also honors the + scaling action cooldown in the same manner by returning an error via the API when a + scaling action cannot be processed due to cooldown. +features: + - | + Scaling actions (IN or OUT) now validate that there is no conflicting + action already being processed and will return an error via the API + informing the end user if a conflict is detected. A conflicting action is + detected when a new action of either `CLUSTER_SCALE_IN` or + `CLUSTER_SCALE_OUT` is attempted while there is already a cluster scaling + action in the action table in a pending status (READY, RUNNING, WAITING, + ACTION_WAITING_LIFECYCLE_COMPLETION). + Additionally, the cooldown will be checked and enforced when a scaling + action is requested. If the cooldown is being observed, the requester will + be informed of this when submitting the action via an error. diff -Nru senlin-6.0.0/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml --- senlin-6.0.0/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,13 @@ +--- +prelude: > + Added new tool ``senlin-status upgrade check``. +features: + - | + A new framework for the ``senlin-status upgrade check`` command is added. + This framework allows adding various checks which can be run before a + Senlin upgrade to ensure the upgrade can be performed safely. +upgrade: + - | + Operators can now use the new CLI tool ``senlin-status upgrade check`` + to check whether a Senlin deployment can be safely upgraded from + N-1 to N release. diff -Nru senlin-6.0.0/releasenotes/source/index.rst senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/index.rst --- senlin-6.0.0/releasenotes/source/index.rst 2018-08-30 14:17:04.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/index.rst 2018-11-19 18:48:08.000000000 +0000 @@ -19,6 +19,7 @@ :maxdepth: 1 unreleased + rocky queens pike ocata diff -Nru senlin-6.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po --- senlin-6.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,113 @@ +# Andi Chandler , 2018. 
#zanata +msgid "" +msgstr "" +"Project-Id-Version: senlin\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2018-08-03 04:35+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2018-01-19 08:19+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en_GB\n" +"X-Generator: Zanata 4.3.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "1.0.0" +msgstr "1.0.0" + +msgid "2.0.0" +msgstr "2.0.0" + +msgid "2.0.0.0b1" +msgstr "2.0.0.0b1" + +msgid "2.0.0.0b2" +msgstr "2.0.0.0b2" + +msgid "2.0.0.0b3" +msgstr "2.0.0.0b3" + +msgid "2.0.0.0rc1" +msgstr "2.0.0.0rc1" + +msgid "3.0.0" +msgstr "3.0.0" + +msgid "3.0.1" +msgstr "3.0.1" + +msgid "4.0.0" +msgstr "4.0.0" + +msgid "" +"A cluster in the middle of an on-going action should not be deletable. The " +"engine service has been improved to detect this situation." +msgstr "" +"A cluster in the middle of an on-going action should not be deletable. The " +"engine service has been improved to detect this situation." + +msgid "" +"A configuration option \"exclude_derived_actions\" is introduced into the " +"\"dispatchers\" group for controlling whether derived actions should lead " +"into event notifications and/or DB records." +msgstr "" +"A configuration option \"exclude_derived_actions\" is introduced into the " +"\"dispatchers\" group for controlling whether derived actions should lead " +"into event notifications and/or DB records." + +msgid "" +"A event_purge subcommand is added to senlin-manage tool for purging events " +"generated in a specific project." +msgstr "" +"A event_purge subcommand is added to senlin-manage tool for purging events " +"generated in a specific project." + +msgid "Current Series Release Notes" +msgstr "Current Series Release Notes" + +msgid "Mitaka Series Release Notes" +msgstr "Mitaka Series Release Notes" + +msgid "Newton Series Release Notes" +msgstr "Newton Series Release Notes" + +msgid "Ocata Series Release Notes" +msgstr "Ocata Series Release Notes" + +msgid "Pike Series Release Notes" +msgstr "Pike Series Release Notes" + +msgid "Senlin Release Notes" +msgstr "Senlin Release Notes" + +msgid "" +"When referenced objects are not found in an API request, 400 is returned now." +msgstr "" +"When referenced objects are not found in an API request, 400 is returned now." + +msgid "" +"With the new 'profile-validate' API, the nova server profile now supports " +"the validation of its 'flavor', 'image' (if provided), 'availability_zone' " +"and block device driver properties." +msgstr "" +"With the new 'profile-validate' API, the Nova server profile now supports " +"the validation of its 'flavour', 'image' (if provided), 'availability_zone' " +"and block device driver properties." + +msgid "" +"With the newly added 'message' type of receivers, the 'cluster' and the " +"'action' property are not always required when creating a receiver. They are " +"still required if the receiver type is 'webhook' (the default)." +msgstr "" +"With the newly added 'message' type of receivers, the 'cluster' and the " +"'action' property are not always required when creating a receiver. They are " +"still required if the receiver type is 'webhook' (the default)." + +msgid "" +"Zaqar resources including \"queue\", \"message\", \"subscription\" and " +"\"claim\" are now supported in Senlin driver." 
+msgstr "" +"Zaqar resources including \"queue\", \"message\", \"subscription\" and " +"\"claim\" are now supported in Senlin driver." diff -Nru senlin-6.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po --- senlin-6.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,63 @@ +# Gérald LONLAS , 2016. #zanata +msgid "" +msgstr "" +"Project-Id-Version: senlin\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2018-08-03 04:35+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2016-10-22 06:38+0000\n" +"Last-Translator: Gérald LONLAS \n" +"Language-Team: French\n" +"Language: fr\n" +"X-Generator: Zanata 4.3.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" + +msgid "1.0.0" +msgstr "1.0.0" + +msgid "2.0.0" +msgstr "2.0.0" + +msgid "2.0.0.0b1" +msgstr "2.0.0.0b1" + +msgid "2.0.0.0b2" +msgstr "2.0.0.0b2" + +msgid "2.0.0.0b3" +msgstr "2.0.0.0b3" + +msgid "2.0.0.0rc1" +msgstr "2.0.0.0rc1" + +msgid "Bug Fixes" +msgstr "Corrections de bugs" + +msgid "Current Series Release Notes" +msgstr "Note de la release actuelle" + +msgid "Deprecation Notes" +msgstr "Notes dépréciées " + +msgid "Mitaka Series Release Notes" +msgstr "Note de release pour Mitaka" + +msgid "New Features" +msgstr "Nouvelles fonctionnalités" + +msgid "Newton Series Release Notes" +msgstr "Note de release pour Newton" + +msgid "Other Notes" +msgstr "Autres notes" + +msgid "Security Issues" +msgstr "Problèmes de sécurités" + +msgid "Senlin Release Notes" +msgstr "Note de release pour Senlin" + +msgid "Upgrade Notes" +msgstr "Notes de mises à jours" diff -Nru senlin-6.0.0/releasenotes/source/rocky.rst senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/rocky.rst --- senlin-6.0.0/releasenotes/source/rocky.rst 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/releasenotes/source/rocky.rst 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,6 @@ +=================================== + Rocky Series Release Notes +=================================== + +.. 
release-notes:: + :branch: stable/rocky diff -Nru senlin-6.0.0/requirements.txt senlin-7.0.0~b1~git2018111913.0ddbc114/requirements.txt --- senlin-6.0.0/requirements.txt 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/requirements.txt 2018-11-19 18:48:08.000000000 +0000 @@ -11,7 +11,7 @@ keystoneauth1>=3.4.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 microversion-parse>=0.2.1 # Apache-2.0 -openstacksdk>=0.11.2 # Apache-2.0 +openstacksdk>=0.17.2 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 @@ -22,6 +22,7 @@ oslo.policy>=1.30.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 +oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.versionedobjects>=1.31.2 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 diff -Nru senlin-6.0.0/senlin/api/middleware/fault.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/middleware/fault.py --- senlin-6.0.0/senlin/api/middleware/fault.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/middleware/fault.py 2018-11-19 18:48:08.000000000 +0000 @@ -42,6 +42,8 @@ """Replace error body with something the client can parse.""" error_map = { + 'ActionConflict': webob.exc.HTTPConflict, + 'ActionCooldown': webob.exc.HTTPConflict, 'ActionInProgress': webob.exc.HTTPConflict, 'BadRequest': webob.exc.HTTPBadRequest, 'FeatureNotSupported': webob.exc.HTTPConflict, @@ -56,6 +58,7 @@ 'ProfileOperationFailed': webob.exc.HTTPInternalServerError, 'RequestLimitExceeded': webob.exc.HTTPBadRequest, 'ResourceInUse': webob.exc.HTTPConflict, + 'ResourceIsLocked': webob.exc.HTTPConflict, 'ResourceNotFound': webob.exc.HTTPNotFound, } diff -Nru senlin-6.0.0/senlin/api/openstack/history.rst senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/openstack/history.rst --- senlin-6.0.0/senlin/api/openstack/history.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/openstack/history.rst 2018-11-19 18:48:08.000000000 +0000 @@ -111,3 +111,9 @@ are now sent directly in the query body rather than in the params field. +1.11 +---- +- Modified the ``cluster_action`` API. The API now responds with + response code 409 when a scaling action conflicts with one already + being processed or a cooldown for a scaling action is encountered. + diff -Nru senlin-6.0.0/senlin/api/openstack/v1/version.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/openstack/v1/version.py --- senlin-6.0.0/senlin/api/openstack/v1/version.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/api/openstack/v1/version.py 2018-11-19 18:48:08.000000000 +0000 @@ -24,7 +24,7 @@ # This includes any semantic changes which may not affect the input or # output formats or even originate in the API code layer. 
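# (Hedged illustration, not part of this patch: with API microversion 1.11 the
# cluster action API can now answer 409 for locked resources, conflicting
# scaling actions, or an active cooldown, so callers should retry rather than
# poll a failed action. `request_scale_in` below is a hypothetical callable
# standing in for whatever client code issues the
# POST /v1/clusters/{cluster_id}/actions request; it is assumed to raise an
# exception exposing a `status_code` attribute on HTTP errors.)

import time


def scale_with_retry(request_scale_in, attempts=5, backoff=2.0):
    """Retry a scaling request that may be rejected with 409 Conflict."""
    for i in range(attempts):
        try:
            return request_scale_in()
        except Exception as ex:
            if getattr(ex, 'status_code', None) != 409:
                raise
            # back off and let the conflicting action or cooldown finish
            time.sleep(backoff * (i + 1))
    raise RuntimeError('cluster still busy after %d attempts' % attempts)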
_MIN_API_VERSION = "1.0" - _MAX_API_VERSION = "1.10" + _MAX_API_VERSION = "1.11" DEFAULT_API_VERSION = _MIN_API_VERSION diff -Nru senlin-6.0.0/senlin/cmd/api.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/api.py --- senlin-6.0.0/senlin/cmd/api.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/api.py 2018-11-19 18:48:08.000000000 +0000 @@ -18,7 +18,6 @@ import sys from oslo_config import cfg -from oslo_i18n import _lazy from oslo_log import log as logging from oslo_service import systemd import six @@ -30,8 +29,6 @@ from senlin import objects from senlin import version -_lazy.enable_lazy() - LOG = logging.getLogger('senlin.api') diff -Nru senlin-6.0.0/senlin/cmd/api_wsgi.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/api_wsgi.py --- senlin-6.0.0/senlin/cmd/api_wsgi.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/api_wsgi.py 2018-11-19 18:48:08.000000000 +0000 @@ -19,7 +19,6 @@ from oslo_config import cfg -import oslo_i18n as i18n from oslo_log import log as logging from senlin.api.common import wsgi @@ -30,8 +29,6 @@ def init_app(): - i18n.enable_lazy() - logging.register_options(cfg.CONF) cfg.CONF(project='senlin', prog='senlin-api', version=version.version_info.version_string()) diff -Nru senlin-6.0.0/senlin/cmd/engine.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/engine.py --- senlin-6.0.0/senlin/cmd/engine.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/engine.py 2018-11-19 18:48:08.000000000 +0000 @@ -16,7 +16,6 @@ Senlin Engine Server. """ from oslo_config import cfg -from oslo_i18n import _lazy from oslo_log import log as logging from oslo_service import service @@ -25,8 +24,6 @@ from senlin.common import profiler from senlin import objects -_lazy.enable_lazy() - def main(): logging.register_options(cfg.CONF) diff -Nru senlin-6.0.0/senlin/cmd/status.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/status.py --- senlin-6.0.0/senlin/cmd/status.py 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/cmd/status.py 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,55 @@ +# Copyright (c) 2018 NEC, Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo_config import cfg +from oslo_upgradecheck import upgradecheck + +from senlin.common.i18n import _ + + +class Checks(upgradecheck.UpgradeCommands): + + """Upgrade checks for the senlin-status upgrade check command + + Upgrade checks should be added as separate methods in this class + and added to _upgrade_checks tuple. + """ + + def _check_placeholder(self): + # This is just a placeholder for upgrade checks, it should be + # removed when the actual checks are added + return upgradecheck.Result(upgradecheck.Code.SUCCESS) + + # The format of the check functions is to return an + # oslo_upgradecheck.upgradecheck.Result + # object with the appropriate + # oslo_upgradecheck.upgradecheck.Code and details set. 
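# (Hedged sketch, separate from this patch: once real checks exist, each one
# is just another method returning an upgradecheck.Result plus an entry in
# the _upgrade_checks tuple, as the placeholder above demonstrates. The check
# below is hypothetical and performs no real inspection.)

from oslo_upgradecheck import upgradecheck


class ExampleChecks(upgradecheck.UpgradeCommands):
    """Illustration of how a concrete check would be registered."""

    def _check_example(self):
        # a real check would inspect the database or configuration here
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    _upgrade_checks = (
        ('Example', _check_example),
    )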
+ # If the check hits warnings or failures then those should be stored + # in the returned Result's "details" attribute. The + # summary will be rolled up at the end of the check() method. + _upgrade_checks = ( + # In the future there should be some real checks added here + (_('Placeholder'), _check_placeholder), + ) + + +def main(): + return upgradecheck.main( + cfg.CONF, project='senlin', upgrade_command=Checks()) + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru senlin-6.0.0/senlin/common/consts.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/consts.py --- senlin-6.0.0/senlin/common/consts.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/consts.py 2018-11-19 18:48:08.000000000 +0000 @@ -64,6 +64,8 @@ 'CLUSTER_OPERATION', ) +CLUSTER_SCALE_ACTIONS = [CLUSTER_SCALE_IN, CLUSTER_SCALE_OUT] + NODE_ACTION_NAMES = ( NODE_CREATE, NODE_DELETE, NODE_UPDATE, NODE_JOIN, NODE_LEAVE, @@ -288,6 +290,12 @@ 'REBOOT', 'REBUILD', 'RECREATE', ) +RECOVERY_CONDITIONAL = ( + ALL_FAILED, ANY_FAILED, +) = ( + 'ALL_FAILED', 'ANY_FAILED', +) + NOTIFICATION_PRIORITIES = ( PRIO_AUDIT, PRIO_CRITICAL, PRIO_ERROR, PRIO_WARN, PRIO_INFO, PRIO_DEBUG, PRIO_SAMPLE, diff -Nru senlin-6.0.0/senlin/common/exception.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/exception.py --- senlin-6.0.0/senlin/common/exception.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/exception.py 2018-11-19 18:48:08.000000000 +0000 @@ -128,6 +128,15 @@ msg_fmt = _("The %(type)s '%(id)s' cannot be deleted: %(reason)s.") +class ResourceIsLocked(SenlinException): + """Generic exception for resource in use. + + The resource type here can be 'cluster', 'node'. + """ + msg_fmt = _("%(action)s for %(type)s '%(id)s' cannot be completed " + "because it is already locked.") + + class ProfileNotSpecified(SenlinException): msg_fmt = _("Profile not specified.") @@ -180,6 +189,17 @@ msg_fmt = _("The %(type)s '%(id)s' is in status %(status)s.") +class ActionConflict(SenlinException): + msg_fmt = _("The %(type)s action for target %(target)s conflicts with " + "the following action(s): %(actions)s") + + +class ActionCooldown(SenlinException): + msg_fmt = _("The %(type)s action for cluster %(cluster)s cannot be " + "processed due to Policy %(policy_id)s cooldown still in " + "progress") + + class NodeNotOrphan(SenlinException): msg_fmt = _("%(message)s") @@ -188,15 +208,16 @@ """A base class for internal exceptions in senlin. The internal exception classes which inherit from :class:`SenlinException` - class should be translated to a user facing exception type if need to be - made user visible. + class should be translated to a user facing exception type if they need to + be made user visible. """ msg_fmt = _("%(message)s") message = _('Internal error happened') def __init__(self, **kwargs): self.code = kwargs.pop('code', 500) - self.message = kwargs.pop('message', self.message) + # If a "message" is not provided, or None or blank, use the default. 
+ self.message = kwargs.pop('message', self.message) or self.message super(InternalError, self).__init__( code=self.code, message=self.message, **kwargs) diff -Nru senlin-6.0.0/senlin/common/schema.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/schema.py --- senlin-6.0.0/senlin/common/schema.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/schema.py 2018-11-19 18:48:08.000000000 +0000 @@ -265,7 +265,7 @@ return self.to_schema_type(value) def validate(self, value, context=None): - if not isinstance(value, six.string_types): + if value is None: msg = _("The value '%s' is not a valid string.") % value raise exc.ESchema(message=msg) @@ -384,9 +384,13 @@ msg = _("'%s' is not a Map") % value raise exc.ESchema(message=msg) + if not self.schema: + return + for key, child in self.schema.items(): item_value = value.get(key) - child.validate(item_value, context) + if item_value: + child.validate(item_value, context) class StringParam(SchemaBase): @@ -472,6 +476,7 @@ try: # Validate through resolve self.resolve_value(k) + # Validate schema for version if self._version: self._schema[k]._validate_version(k, self._version) @@ -490,6 +495,7 @@ schema_item = self._schema[key] if key in self._data: raw_value = self._data[key] + schema_item.validate(raw_value) return schema_item.resolve(raw_value) elif schema_item.has_default(): return schema_item.get_default() diff -Nru senlin-6.0.0/senlin/common/utils.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/utils.py --- senlin-6.0.0/senlin/common/utils.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/common/utils.py 2018-11-19 18:48:08.000000000 +0000 @@ -88,7 +88,7 @@ return levels.get(n, None) -def url_fetch(url, allowed_schemes=('http', 'https'), verify=True): +def url_fetch(url, timeout=1, allowed_schemes=('http', 'https'), verify=True): """Get the data at the specified URL. The URL must use the http: or https: schemes. @@ -96,7 +96,6 @@ the allowed_schemes argument. Raise an IOError if getting the data fails. 
""" - LOG.info('Fetching data from %s', url) components = urllib.parse.urlparse(url) @@ -105,12 +104,12 @@ if components.scheme == 'file': try: - return urllib.request.urlopen(url).read() + return urllib.request.urlopen(url, timeout=timeout).read() except urllib.error.URLError as uex: raise URLFetchError(_('Failed to retrieve data: %s') % uex) try: - resp = requests.get(url, stream=True, verify=verify) + resp = requests.get(url, stream=True, verify=verify, timeout=timeout) resp.raise_for_status() # We cannot use resp.text here because it would download the entire diff -Nru senlin-6.0.0/senlin/db/api.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/db/api.py --- senlin-6.0.0/senlin/db/api.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/db/api.py 2018-11-19 18:48:08.000000000 +0000 @@ -129,6 +129,10 @@ return IMPL.cluster_lock_acquire(cluster_id, action_id, scope) +def cluster_is_locked(cluster_id): + return IMPL.cluster_is_locked(cluster_id) + + def cluster_lock_release(cluster_id, action_id, scope): return IMPL.cluster_lock_release(cluster_id, action_id, scope) @@ -141,6 +145,10 @@ return IMPL.node_lock_acquire(node_id, action_id) +def node_is_locked(node_id): + return IMPL.node_is_locked(node_id) + + def node_lock_release(node_id, action_id): return IMPL.node_lock_release(node_id, action_id) @@ -321,6 +329,11 @@ refresh=refresh) +def action_list_active_scaling(context, cluster_id, project_safe=True): + return IMPL.action_list_active_scaling(context, cluster_id, + project_safe=project_safe) + + def action_get_by_name(context, name, project_safe=True): return IMPL.action_get_by_name(context, name, project_safe=project_safe) @@ -334,6 +347,11 @@ return IMPL.action_get_all_by_owner(context, owner) +def action_get_all_active_by_target(context, target_id, project_safe=True): + return IMPL.action_get_all_active_by_target(context, target_id, + project_safe=project_safe) + + def action_get_all(context, filters=None, limit=None, marker=None, sort=None, project_safe=True): return IMPL.action_get_all(context, filters=filters, sort=sort, diff -Nru senlin-6.0.0/senlin/db/sqlalchemy/api.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/db/sqlalchemy/api.py --- senlin-6.0.0/senlin/db/sqlalchemy/api.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/db/sqlalchemy/api.py 2018-11-19 18:48:08.000000000 +0000 @@ -56,13 +56,13 @@ cfg.CONF.import_group('profiler', 'senlin.common.config') if cfg.CONF.profiler.enabled: if cfg.CONF.profiler.trace_sqlalchemy: - eng = _main_context_manager.get_legacy_facade().get_engine() + eng = _main_context_manager.writer.get_engine() osprofiler.sqlalchemy.add_tracing(sqlalchemy, eng, "db") return _main_context_manager def get_engine(): - return _get_main_context_manager().get_legacy_facade().get_engine() + return _get_main_context_manager().writer.get_engine() def session_for_read(): @@ -463,6 +463,14 @@ @retry_on_deadlock +def cluster_is_locked(cluster_id): + with session_for_read() as session: + query = session.query(models.ClusterLock) + lock = query.get(cluster_id) + return lock is not None + + +@retry_on_deadlock def _release_cluster_lock(session, lock, action_id, scope): success = False if (scope == -1 and lock.semaphore < 0) or lock.semaphore == 1: @@ -530,6 +538,15 @@ @retry_on_deadlock +def node_is_locked(node_id): + with session_for_read() as session: + query = session.query(models.NodeLock) + lock = query.get(node_id) + + return lock is not None + + +@retry_on_deadlock def 
node_lock_release(node_id, action_id): with session_for_write() as session: success = False @@ -1050,6 +1067,25 @@ return action +def action_list_active_scaling(context, cluster_id=None, project_safe=True): + with session_for_read() as session: + query = session.query(models.Action) + if project_safe: + query = query.filter_by(project=context.project_id) + if cluster_id: + query = query.filter_by(target=cluster_id) + query = query.filter( + models.Action.status.in_( + [consts.ACTION_READY, + consts.ACTION_WAITING, + consts.ACTION_RUNNING, + consts.ACTION_WAITING_LIFECYCLE_COMPLETION])) + query = query.filter( + models.Action.action.in_(consts.CLUSTER_SCALE_ACTIONS)) + scaling_actions = query.all() + return scaling_actions + + def action_get_by_name(context, name, project_safe=True): return query_by_name(context, models.Action, name, project_safe=project_safe) @@ -1065,6 +1101,22 @@ return query.all() +def action_get_all_active_by_target(context, target_id, project_safe=True): + with session_for_read() as session: + query = session.query(models.Action) + if project_safe: + query = query.filter_by(project=context.project_id) + query = query.filter_by(target=target_id) + query = query.filter( + models.Action.status.in_( + [consts.ACTION_READY, + consts.ACTION_WAITING, + consts.ACTION_RUNNING, + consts.ACTION_WAITING_LIFECYCLE_COMPLETION])) + actions = query.all() + return actions + + def action_get_all(context, filters=None, limit=None, marker=None, sort=None, project_safe=True): diff -Nru senlin-6.0.0/senlin/drivers/base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/base.py --- senlin-6.0.0/senlin/drivers/base.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/base.py 2018-11-19 18:48:08.000000000 +0000 @@ -45,3 +45,4 @@ self.message = backend.message self.workflow = backend.workflow self.block_storage = backend.block_storage + self.glance = backend.glance diff -Nru senlin-6.0.0/senlin/drivers/os/__init__.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os/__init__.py --- senlin-6.0.0/senlin/drivers/os/__init__.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os/__init__.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,6 +11,7 @@ # under the License. 
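# (Hedged usage sketch, not part of this patch: the hunks around this point
# register a new ``glance`` driver and drop image lookups from the nova_v2
# driver, matching the "Start using glance instead of compute to find images"
# ChangeLog entry. The call below uses the fake GlanceClient shown later in
# this diff; the real driver is assumed to expose the same image_find()
# interface backed by the Image v2 API.)

from senlin.drivers.os_test import glance_v2


def find_test_image(ctx):
    # the fake client ignores ctx and returns a static cirros image record
    client = glance_v2.GlanceClient(ctx)
    return client.image_find('cirros-0.3.5-x86_64-disk')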
from senlin.drivers.os import cinder_v2 +from senlin.drivers.os import glance_v2 from senlin.drivers.os import heat_v1 from senlin.drivers.os import keystone_v3 from senlin.drivers.os import lbaas @@ -23,6 +24,7 @@ block_storage = cinder_v2.CinderClient compute = nova_v2.NovaClient +glance = glance_v2.GlanceClient identity = keystone_v3.KeystoneClient loadbalancing = lbaas.LoadBalancerDriver message = zaqar_v2.ZaqarClient diff -Nru senlin-6.0.0/senlin/drivers/os/nova_v2.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os/nova_v2.py --- senlin-6.0.0/senlin/drivers/os/nova_v2.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os/nova_v2.py 2018-11-19 18:48:08.000000000 +0000 @@ -33,10 +33,6 @@ return self.conn.compute.find_flavor(name_or_id, ignore_missing) @sdk.translate_exception - def image_find(self, name_or_id, ignore_missing=False): - return self.conn.compute.find_image(name_or_id, ignore_missing) - - @sdk.translate_exception def keypair_create(self, **attrs): return self.conn.compute.create_keypair(**attrs) diff -Nru senlin-6.0.0/senlin/drivers/os_test/glance_v2.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/glance_v2.py --- senlin-6.0.0/senlin/drivers/os_test/glance_v2.py 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/glance_v2.py 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from senlin.drivers import base +from senlin.drivers import sdk + + +class GlanceClient(base.DriverBase): + """Fake Glance V2 driver.""" + + def __init__(self, ctx): + self.fake_image = { + "created": "2015-01-01T01:02:03Z", + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [], + "metadata": { + "architecture": "x86_64", + "auto_disk_config": "True", + "kernel_id": "nokernel", + "ramdisk_id": "nokernel" + }, + "minDisk": 0, + "minRam": 0, + "name": "cirros-0.3.5-x86_64-disk", + "progress": 100, + "status": "ACTIVE", + "updated": "2011-01-01T01:02:03Z" + } + + def image_find(self, name_or_id, ignore_missing=False): + return sdk.FakeResourceObject(self.fake_image) diff -Nru senlin-6.0.0/senlin/drivers/os_test/__init__.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/__init__.py --- senlin-6.0.0/senlin/drivers/os_test/__init__.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/__init__.py 2018-11-19 18:48:08.000000000 +0000 @@ -12,6 +12,7 @@ from senlin.drivers.os_test import cinder_v2 +from senlin.drivers.os_test import glance_v2 from senlin.drivers.os_test import heat_v1 from senlin.drivers.os_test import keystone_v3 from senlin.drivers.os_test import lbaas @@ -24,6 +25,7 @@ block_storage = cinder_v2.CinderClient compute = nova_v2.NovaClient +glance = glance_v2.GlanceClient identity = keystone_v3.KeystoneClient loadbalancing = lbaas.LoadBalancerDriver message = zaqar_v2.ZaqarClient diff -Nru senlin-6.0.0/senlin/drivers/os_test/nova_v2.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/nova_v2.py --- senlin-6.0.0/senlin/drivers/os_test/nova_v2.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/os_test/nova_v2.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,6 +11,7 @@ # under the License. 
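# (Hedged sketch, not part of this patch: the fake nova driver hunk below adds
# a metadata-driven simulated wait so tests can emulate slow server builds.
# The helper is hypothetical and assumes the fake client is constructed with a
# request context and that the returned fake resource exposes an ``id``
# attribute, like the other fake drivers in this directory.)

from senlin.drivers.os_test import nova_v2


def create_slow_fake_server(ctx, wait_seconds=5):
    client = nova_v2.NovaClient(ctx)
    # 'simulated_wait_time' makes wait_for_server()/wait_for_server_delete()
    # sleep for the given number of seconds before returning
    server = client.server_create(
        name='test-server',
        metadata={'simulated_wait_time': wait_seconds})
    client.wait_for_server(server.id)
    return server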
import copy +import time from oslo_utils import uuidutils @@ -161,6 +162,8 @@ 'zoneName': 'nova', } + self.simulated_waits = {} + def flavor_find(self, name_or_id, ignore_missing=False): return sdk.FakeResourceObject(self.fake_flavor) @@ -180,17 +183,33 @@ return sdk.FakeResourceObject(self.keypair) def server_create(self, **attrs): - self.fake_server_create['id'] = uuidutils.generate_uuid() - self.fake_server_get['id'] = self.fake_server_create['id'] + server_id = uuidutils.generate_uuid() + self.fake_server_create['id'] = server_id + self.fake_server_get['id'] = server_id + + # save simulated wait time if it was set in metadata + if ('metadata' in attrs and + 'simulated_wait_time' in attrs['metadata']): + simulated_wait = attrs['metadata']['simulated_wait_time'] + if (isinstance(simulated_wait, int) and simulated_wait > 0): + self.simulated_waits[server_id] = simulated_wait + return sdk.FakeResourceObject(self.fake_server_create) def server_get(self, server): return sdk.FakeResourceObject(self.fake_server_get) def wait_for_server(self, server, timeout=None): + # sleep for simulated wait time if it was supplied during server_create + if server in self.simulated_waits: + time.sleep(self.simulated_waits[server]) return def wait_for_server_delete(self, server, timeout=None): + # sleep for simulated wait time if it was supplied during server_create + if server in self.simulated_waits: + time.sleep(self.simulated_waits[server]) + del self.simulated_waits[server] return def server_update(self, server, **attrs): diff -Nru senlin-6.0.0/senlin/drivers/sdk.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/sdk.py --- senlin-6.0.0/senlin/drivers/sdk.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/drivers/sdk.py 2018-11-19 18:48:08.000000000 +0000 @@ -16,9 +16,9 @@ import sys import functools +import openstack from openstack import connection from openstack import exceptions as sdk_exc -from openstack import utils as sdk_utils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils @@ -32,7 +32,7 @@ exc = sdk_exc LOG = logging.getLogger(__name__) -sdk_utils.enable_logging(debug=False, stream=sys.stdout) +openstack.enable_logging(debug=False, stream=sys.stdout) def parse_exception(ex): diff -Nru senlin-6.0.0/senlin/engine/actions/base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/actions/base.py --- senlin-6.0.0/senlin/engine/actions/base.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/actions/base.py 2018-11-19 18:48:08.000000000 +0000 @@ -25,8 +25,10 @@ from senlin.engine import dispatcher from senlin.engine import event as EVENT from senlin.objects import action as ao +from senlin.objects import cluster_lock as cl from senlin.objects import cluster_policy as cpo from senlin.objects import dependency as dobj +from senlin.objects import node_lock as nl from senlin.policies import base as policy_mod wallclock = time.time @@ -247,6 +249,10 @@ :param dict kwargs: Other keyword arguments for the action. :return: ID of the action created. 
""" + + cls._check_action_lock(target, action) + cls._check_conflicting_actions(ctx, target, action) + params = { 'user_id': ctx.user_id, 'project_id': ctx.project_id, @@ -256,9 +262,42 @@ 'trusts': ctx.trusts, } c = req_context.RequestContext.from_dict(params) + + if action in consts.CLUSTER_SCALE_ACTIONS: + Action.validate_scaling_action(c, target, action) + obj = cls(target, action, c, **kwargs) return obj.store(ctx) + @staticmethod + def _check_action_lock(target, action): + if action == consts.CLUSTER_DELETE: + # CLUSTER_DELETE actions do not care about cluster locks + return + elif (action in list(consts.CLUSTER_ACTION_NAMES) and + cl.ClusterLock.is_locked(target)): + raise exception.ResourceIsLocked( + action=action, type='cluster', id=target) + elif (action in list(consts.NODE_ACTION_NAMES) and + nl.NodeLock.is_locked(target)): + raise exception.ResourceIsLocked( + action=action, type='node', id=target) + + @staticmethod + def _check_conflicting_actions(ctx, target, action): + conflict_actions = ao.Action.get_all_active_by_target(ctx, target) + if conflict_actions and action == consts.CLUSTER_DELETE: + delete_ids = [a['id'] for a in conflict_actions + if a['action'] == consts.CLUSTER_DELETE] + if delete_ids: + raise exception.ActionConflict( + type=action, target=target, actions=",".join( + delete_ids)) + elif conflict_actions: + action_ids = [a['id'] for a in conflict_actions] + raise exception.ActionConflict( + type=action, target=target, actions=",".join(action_ids)) + @classmethod def delete(cls, ctx, action_id): """Delete an action from database. @@ -427,14 +466,10 @@ for pb in bindings: policy = policy_mod.Policy.load(self.context, pb.policy_id) - # We record the last operation time for all policies bound to the - # cluster, no matter that policy is only interested in the - # "BEFORE" or "AFTER" or both. - if target == 'AFTER': - ts = timeutils.utcnow(True) - pb.last_op = ts - cpo.ClusterPolicy.update(self.context, pb.cluster_id, - pb.policy_id, {'last_op': ts}) + + # add last_op as input for the policy so that it can be used + # during pre_op + self.inputs['last_op'] = pb.last_op if not policy.need_check(target, self): continue @@ -444,13 +479,6 @@ else: # target == 'AFTER' method = getattr(policy, 'post_op', None) - if getattr(policy, 'cooldown', None): - if pb.cooldown_inprogress(policy.cooldown): - self.data['status'] = policy_mod.CHECK_ERROR - self.data['reason'] = ('Policy %s cooldown is still ' - 'in progress.') % policy.id - return - if method is not None: method(cluster_id, self) @@ -459,6 +487,70 @@ return return + @staticmethod + def validate_scaling_action(ctx, cluster_id, action): + """Validate scaling action against actions table and policy cooldown. + + :param ctx: An instance of the request context. + :param cluster_id: ID of the cluster the scaling action is targeting. + :param action: Scaling action being validated. + :return: None + :raises: An exception of ``ActionCooldown`` when the action being + validated is still in cooldown based off the policy or + ``ActionConflict`` when a scaling action is already in the action + table. + """ + # Check for conflicting actions in the actions table. 
+ conflicting_actions = Action._get_conflicting_scaling_actions( + ctx, cluster_id) + if conflicting_actions: + action_ids = [a.get('id', None) for a in conflicting_actions] + LOG.info("Unable to process %(action)s for cluster %(cluster_id)s " + "the action conflicts with %(conflicts)s", + {'action': action, + 'cluster_id': cluster_id, + 'conflicts': action_ids}) + raise exception.ActionConflict( + type=action, + target=cluster_id, + actions=",".join(action_ids)) + + # Check to see if action cooldown should be observed. + bindings = cpo.ClusterPolicy.get_all(ctx, cluster_id, + sort='priority', + filters={'enabled': True}) + for pb in bindings: + policy = policy_mod.Policy.load(ctx, pb.policy_id) + if getattr(policy, 'cooldown', None) and policy.event == action: + if pb.last_op and not timeutils.is_older_than( + pb.last_op, policy.cooldown): + LOG.info("Unable to process %(action)s for cluster " + "%(cluster_id)s the actions policy %(policy)s " + "cooldown still in progress", + {'action': action, + 'cluster_id': cluster_id, + 'policy': pb.policy_id}) + raise exception.ActionCooldown( + type=action, + cluster=cluster_id, + policy_id=pb.policy_id) + return + + @staticmethod + def _get_conflicting_scaling_actions(ctx, cluster_id): + """Check actions table for conflicting scaling actions. + + :param ctx: An instance of the request context. + :param cluster_id: ID of the cluster the scaling action is targeting. + :return: A list of conflicting actions. + """ + scaling_actions = ao.Action.action_list_active_scaling( + ctx, cluster_id) + if scaling_actions: + return [a.to_dict() for a in scaling_actions] + else: + return None + def to_dict(self): if self.id: dep_on = dobj.Dependency.get_depended(self.context, self.id) diff -Nru senlin-6.0.0/senlin/engine/actions/node_action.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/actions/node_action.py --- senlin-6.0.0/senlin/engine/actions/node_action.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/actions/node_action.py 2018-11-19 18:48:08.000000000 +0000 @@ -67,7 +67,7 @@ {'cluster_id': '', 'status': consts.NS_ERROR}) return self.RES_ERROR, result - res = self.entity.do_create(self.context) + res, reason = self.entity.do_create(self.context) if cluster_id and self.cause == consts.CAUSE_RPC: # Update cluster's desired_capacity and re-evaluate its status no @@ -79,7 +79,7 @@ if res: return self.RES_OK, 'Node created successfully.' else: - return self.RES_ERROR, 'Node creation failed.' + return self.RES_ERROR, reason @profiler.trace('NodeAction.do_delete', hide_args=False) def do_delete(self): diff -Nru senlin-6.0.0/senlin/engine/health_manager.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/health_manager.py --- senlin-6.0.0/senlin/engine/health_manager.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/health_manager.py 2018-11-19 18:48:08.000000000 +0000 @@ -17,6 +17,8 @@ health policies. 
""" +from collections import defaultdict +from collections import namedtuple from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging @@ -24,13 +26,13 @@ from oslo_service import threadgroup from oslo_utils import timeutils import re -import six import time from senlin.common import consts from senlin.common import context from senlin.common import messaging as rpc from senlin.common import utils +from senlin.engine import node as node_mod from senlin import objects from senlin.rpc import client as rpc_client @@ -195,94 +197,91 @@ listener.start() -class HealthManager(service.Service): +class HealthCheckType(object): + @staticmethod + def factory(detection_type, cid, interval, params): + node_update_timeout = params['node_update_timeout'] + detection_params = [ + p for p in params['detection_modes'] + if p['type'] == detection_type + ] + if len(detection_params) != 1: + raise Exception( + 'The same detection mode cannot be used more than once in the ' + 'same policy. Encountered {} instances of ' + 'type {}.'.format(len(detection_params), detection_type) + ) + + if detection_type == consts.NODE_STATUS_POLLING: + return NodePollStatusHealthCheck( + cid, interval, node_update_timeout, detection_params[0]) + elif detection_type == consts.NODE_STATUS_POLL_URL: + return NodePollUrlHealthCheck( + cid, interval, node_update_timeout, detection_params[0]) + else: + raise Exception( + 'Invalid detection type: {}'.format(detection_type)) - def __init__(self, engine_service, topic, version): - super(HealthManager, self).__init__() + def __init__(self, cluster_id, interval, node_update_timeout, params): + """Initialize HealthCheckType - self.TG = threadgroup.ThreadGroup() - self.engine_id = engine_service.engine_id - self.topic = topic - self.version = version - self.ctx = context.get_admin_context() - self.rpc_client = rpc_client.EngineClient() - self.rt = { - 'registries': [], - } + :param ctx: + :param cluster_id: The UUID of the cluster to be checked. + :param params: Parameters specific to poll url or recovery action. + """ + self.cluster_id = cluster_id + self.interval = interval + self.node_update_timeout = node_update_timeout + self.params = params - def _dummy_task(self): - """A Dummy task that is queued on the health manager thread group. + def run_health_check(self, ctx, node): + """Run health check on node - The task is here so that the service always has something to wait() - on, or else the process will exit. + :returns: True if node is healthy. False otherwise. """ - self._load_runtime_registry() - - def _wait_for_action(self, ctx, action_id, timeout): - done = False - req = objects.ActionGetRequest(identity=action_id) - with timeutils.StopWatch(timeout) as timeout_watch: - while timeout > 0: - action = self.rpc_client.call(ctx, 'action_get', req) - if action['status'] in [consts.ACTION_SUCCEEDED, - consts.ACTION_FAILED, - consts.ACTION_CANCELLED]: - if action['status'] == consts.ACTION_SUCCEEDED: - done = True - break - time.sleep(2) - timeout = timeout_watch.leftover(True) + pass - if done: - return True, "" - elif timeout <= 0: - return False, "Timeout while polling cluster status" - else: - return False, "Cluster check action failed or cancelled" - def _poll_cluster(self, cluster_id, timeout, recover_action): - """Routine to be executed for polling cluster status. +class NodePollStatusHealthCheck(HealthCheckType): + def run_health_check(self, ctx, node): + """Routine to be executed for polling node status. 
- :param cluster_id: The UUID of the cluster to be checked. - :param timeout: The maximum number of seconds to wait. - :param recover_action: The health policy action name. - :returns: Nothing. + :returns: True if node is healthy. False otherwise. """ - start_time = timeutils.utcnow(True) - cluster = objects.Cluster.get(self.ctx, cluster_id, project_safe=False) - if not cluster: - LOG.warning("Cluster (%s) is not found.", cluster_id) - return _chase_up(start_time, timeout) - ctx = context.get_service_context(user_id=cluster.user, - project_id=cluster.project) - params = {'delete_check_action': True} try: - req = objects.ClusterCheckRequest(identity=cluster_id, - params=params) - action = self.rpc_client.call(ctx, 'cluster_check', req) + # create engine node from db node + entity = node_mod.Node._from_object(ctx, node) + + if not entity.do_check(ctx, return_check_result=True): + # server was not found as a result of performing check + node_last_updated = node.updated_at or node.init_at + if not timeutils.is_older_than( + node_last_updated, self.node_update_timeout): + LOG.info("Node %s was updated at %s which is less " + "than %d secs ago. Skip node recovery from " + "NodePollStatusHealthCheck.", + node.id, node_last_updated, + self.node_update_timeout) + return True + else: + return False + else: + LOG.debug("NodePollStatusHealthCheck reports node %s is " + "healthy.", node.id) + return True except Exception as ex: - LOG.warning("Failed in triggering 'cluster_check' RPC for " - "'%(c)s': %(r)s", - {'c': cluster_id, 'r': six.text_type(ex)}) - return _chase_up(start_time, timeout) - - # wait for action to complete - res, reason = self._wait_for_action(ctx, action['action'], timeout) - if not res: - LOG.warning("%s", reason) - return _chase_up(start_time, timeout) - - # loop through nodes to trigger recovery - nodes = objects.Node.get_all_by_cluster(ctx, cluster_id) - for node in nodes: - if node.status != consts.NS_ACTIVE: - LOG.info("Requesting node recovery: %s", node.id) - req = objects.NodeRecoverRequest(identity=node.id, - params=recover_action) - self.rpc_client.call(ctx, 'node_recover', req) + LOG.warning( + 'Error when performing health check on node %s: %s', + node.id, ex + ) + return False + - return _chase_up(start_time, timeout) +class NodePollUrlHealthCheck(HealthCheckType): + @staticmethod + def _convert_detection_tuple(dictionary): + return namedtuple('DetectionMode', dictionary.keys())(**dictionary) def _expand_url_template(self, url_template, node): """Expands parameters in an URL template @@ -300,106 +299,103 @@ return url - def _check_url_and_recover_node(self, ctx, node, recover_action, params): + def run_health_check(self, ctx, node): """Routine to check a node status from a url and recovery if necessary - :param ctx: The request context to use for recovery action :param node: The node to be checked. - :param recover_action: The health policy action name. - :param params: Parameters specific to poll url or recovery action - :returns: action if node was triggered for recovery. Otherwise None. + :returns: True if node is considered to be healthy. False otherwise. 
""" - url_template = params['poll_url'] - verify_ssl = params['poll_url_ssl_verify'] - expected_resp_str = params['poll_url_healthy_response'] - max_unhealthy_retry = params['poll_url_retry_limit'] - retry_interval = params['poll_url_retry_interval'] - node_update_timeout = params['node_update_timeout'] + url_template = self.params['poll_url'] + verify_ssl = self.params['poll_url_ssl_verify'] + conn_error_as_unhealthy = self.params[ + 'poll_url_conn_error_as_unhealthy'] + expected_resp_str = self.params['poll_url_healthy_response'] + max_unhealthy_retry = self.params['poll_url_retry_limit'] + retry_interval = self.params['poll_url_retry_interval'] + + def stop_node_recovery(): + node_last_updated = node.updated_at or node.init_at + if not timeutils.is_older_than( + node_last_updated, self.node_update_timeout): + LOG.info("Node %s was updated at %s which is less than " + "%d secs ago. Skip node recovery from " + "NodePollUrlHealthCheck.", + node.id, node_last_updated, self.node_update_timeout) + return True + + LOG.info("Node %s is reported as down (%d retries left)", + node.id, available_attemps) + time.sleep(retry_interval) + + return False url = self._expand_url_template(url_template, node) - LOG.info("Polling node status from URL: %s", url) + LOG.debug("Polling node status from URL: %s", url) available_attemps = max_unhealthy_retry + timeout = max(retry_interval * 0.1, 1) while available_attemps > 0: available_attemps -= 1 try: - result = utils.url_fetch(url, verify=verify_ssl) + result = utils.url_fetch( + url, timeout=timeout, verify=verify_ssl) except utils.URLFetchError as ex: - LOG.error("Error when requesting node health status from" - " %s: %s", url, ex) - return None + if conn_error_as_unhealthy: + if stop_node_recovery(): + return True + continue + else: + LOG.error("Error when requesting node health status from" + " %s: %s", url, ex) + return True LOG.debug("Node status returned from URL(%s): %s", url, result) if re.search(expected_resp_str, result): - LOG.debug('Node %s is healthy', node.id) - return None + LOG.debug('NodePollUrlHealthCheck reports node %s is healthy.', + node.id) + return True if node.status != consts.NS_ACTIVE: LOG.info("Skip node recovery because node %s is not in " - "ACTIVE state", node.id) - return None + "ACTIVE state.", node.id) + return True - node_last_updated = node.updated_at or node.init_at - if not timeutils.is_older_than( - node_last_updated, node_update_timeout): - LOG.info("Node %s was updated at %s which is less than " - "%d secs ago. Skip node recovery.", - node.id, node_last_updated, node_update_timeout) - return None - - LOG.info("Node %s is reported as down (%d retries left)", - node.id, available_attemps) - time.sleep(retry_interval) + if stop_node_recovery(): + return True - # recover node after exhausting retries - LOG.info("Requesting node recovery: %s", node.id) - req = objects.NodeRecoverRequest(identity=node.id, - params=recover_action) - - return self.rpc_client.call(ctx, 'node_recover', req) + return False - def _poll_url(self, cluster_id, timeout, recover_action, params): - """Routine to be executed for polling node status from a url - :param cluster_id: The UUID of the cluster to be checked. - :param timeout: The maximum number of seconds to wait for recovery - action - :param recover_action: The health policy action name. - :param params: Parameters specific to poll url or recovery action - :returns: Nothing. 
- """ - start_time = timeutils.utcnow(True) +class HealthManager(service.Service): - cluster = objects.Cluster.get(self.ctx, cluster_id, project_safe=False) - if not cluster: - LOG.warning("Cluster (%s) is not found.", cluster_id) - return _chase_up(start_time, timeout) + def __init__(self, engine_service, topic, version): + super(HealthManager, self).__init__() - ctx = context.get_service_context(user_id=cluster.user, - project_id=cluster.project) + self.TG = threadgroup.ThreadGroup() + self.engine_id = engine_service.engine_id + self.topic = topic + self.version = version + self.ctx = context.get_admin_context() + self.rpc_client = rpc_client.EngineClient() + self.rt = { + 'registries': [], + } + self.health_check_types = defaultdict(lambda: []) - actions = [] + def _dummy_task(self): + """A Dummy task that is queued on the health manager thread group. - # loop through nodes to poll url for each node - nodes = objects.Node.get_all_by_cluster(ctx, cluster_id) - for node in nodes: - action = self._check_url_and_recover_node(ctx, node, - recover_action, params) - if action: - actions.append(action) - - for a in actions: - # wait for action to complete - res, reason = self._wait_for_action(ctx, a['action'], timeout) - if not res: - LOG.warning("Node recovery action %s did not complete " - "within specified timeout: %s", a['action'], - reason) + The task is here so that the service always has something to wait() + on, or else the process will exit. + """ - return _chase_up(start_time, timeout) + try: + self._load_runtime_registry() + except Exception as ex: + LOG.error("Failed when running '_load_runtime_registry': %s", ex) def _add_listener(self, cluster_id, recover_action): """Routine to be executed for adding cluster listener. @@ -426,12 +422,129 @@ return self.TG.add_thread(ListenerProc, exchange, project, cluster_id, recover_action) + def _recover_node(self, node_id, ctx, recover_action): + """Recover node + + :returns: Recover action + """ + try: + LOG.info("%s is requesting node recovery " + "for %s.", self.__class__.__name__, node_id) + req = objects.NodeRecoverRequest(identity=node_id, + params=recover_action) + + return self.rpc_client.call(ctx, 'node_recover', req) + except Exception as ex: + LOG.error('Error when performing node recovery for %s: %s', + node_id, ex) + return None + + def _wait_for_action(self, ctx, action_id, timeout): + req = objects.ActionGetRequest(identity=action_id) + with timeutils.StopWatch(timeout) as timeout_watch: + while not timeout_watch.expired(): + action = self.rpc_client.call(ctx, 'action_get', req) + if action['status'] in [ + consts.ACTION_SUCCEEDED, consts.ACTION_FAILED, + consts.ACTION_CANCELLED]: + break + time.sleep(2) + + if action['status'] == consts.ACTION_SUCCEEDED: + return True, "" + + if (action['status'] == consts.ACTION_FAILED or + action['status'] == consts.ACTION_CANCELLED): + return False, "Cluster check action failed or cancelled" + + return False, ("Timeout while waiting for node recovery action to " + "finish") + + def _add_health_check(self, cluster_id, health_check): + self.health_check_types[cluster_id].append(health_check) + + def _execute_health_check(self, interval, cluster_id, + recover_action, recovery_cond, + node_update_timeout): + start_time = timeutils.utcnow(True) + + try: + if cluster_id not in self.health_check_types: + LOG.error("Cluster (%s) is not found in health_check_types.", + self.cluster_id) + return _chase_up(start_time, interval) + + if len(self.health_check_types[cluster_id]) == 0: + LOG.error("No 
health check types found for Cluster (%s).", + self.cluster_id) + return _chase_up(start_time, interval) + + cluster = objects.Cluster.get(self.ctx, cluster_id, + project_safe=False) + if not cluster: + LOG.warning("Cluster (%s) is not found.", self.cluster_id) + return _chase_up(start_time, interval) + + ctx = context.get_service_context(user_id=cluster.user, + project_id=cluster.project) + + actions = [] + + # loop through nodes and run all health checks on each node + nodes = objects.Node.get_all_by_cluster(ctx, cluster_id) + + for node in nodes: + node_is_healthy = True + + if recovery_cond == consts.ANY_FAILED: + # recovery happens if any detection mode fails + # i.e. the inverse logic is that node is considered healthy + # if all detection modes pass + node_is_healthy = all( + hc.run_health_check(ctx, node) + for hc in self.health_check_types[cluster_id]) + elif recovery_cond == consts.ALL_FAILED: + # recovery happens if all detection modes fail + # i.e. the inverse logic is that node is considered healthy + # if any detection mode passes + node_is_healthy = any( + hc.run_health_check(ctx, node) + for hc in self.health_check_types[cluster_id]) + else: + raise Exception( + '{} is an invalid recovery conditional'.format( + recovery_cond)) + + if not node_is_healthy: + action = self._recover_node(node.id, ctx, + recover_action) + actions.append(action) + + for a in actions: + # wait for action to complete + res, reason = self._wait_for_action( + ctx, a['action'], node_update_timeout) + if not res: + LOG.warning("Node recovery action %s did not complete " + "within specified timeout: %s", a['action'], + reason) + + if len(actions) > 0: + LOG.info('Health check passed for all nodes in cluster %s.', + cluster_id) + except Exception as ex: + LOG.warning('Error while performing health check: %s', ex) + + return _chase_up(start_time, interval) + def _start_check(self, entry): """Routine for starting the checking for a cluster. :param entry: A dict containing the data associated with the cluster. :returns: An updated registry entry record. 
""" + LOG.info('Enabling health check for cluster %s.', entry['cluster_id']) + cid = entry['cluster_id'] ctype = entry['check_type'] # Get the recover action parameter from the entry params @@ -447,22 +560,24 @@ for operation in rac: recover_action['operation'] = operation.get('name') - if ctype == consts.NODE_STATUS_POLLING: - interval = min(entry['interval'], cfg.CONF.check_interval_max) - timer = self.TG.add_dynamic_timer(self._poll_cluster, - None, # initial_delay - None, # check_interval_max - cid, interval, recover_action) - entry['timer'] = timer - elif ctype == consts.NODE_STATUS_POLL_URL: + polling_types = [consts.NODE_STATUS_POLLING, + consts.NODE_STATUS_POLL_URL] + + detection_types = ctype.split(',') + if all(check in polling_types for check in detection_types): interval = min(entry['interval'], cfg.CONF.check_interval_max) - timer = self.TG.add_dynamic_timer(self._poll_url, - None, # initial_delay - None, # check_interval_max - cid, interval, - recover_action, params) + for check in ctype.split(','): + self._add_health_check(cid, HealthCheckType.factory( + check, cid, interval, params)) + timer = self.TG.add_dynamic_timer(self._execute_health_check, + None, None, interval, cid, + recover_action, + params['recovery_conditional'], + params['node_update_timeout']) + entry['timer'] = timer - elif ctype == consts.LIFECYCLE_EVENTS: + elif (len(detection_types) == 1 and + detection_types[0] == consts.LIFECYCLE_EVENTS): LOG.info("Start listening events for cluster (%s).", cid) listener = self._add_listener(cid, recover_action) if listener: @@ -471,8 +586,8 @@ LOG.warning("Error creating listener for cluster %s", cid) return None else: - LOG.warning("Cluster %(id)s check type %(type)s is invalid.", - {'id': cid, 'type': ctype}) + LOG.error("Cluster %(id)s check type %(type)s is invalid.", + {'id': cid, 'type': ctype}) return None return entry @@ -483,10 +598,17 @@ :param entry: A dict containing the data associated with the cluster. :returns: ``None``. """ + LOG.info('Disabling health check for cluster %s.', entry['cluster_id']) + timer = entry.get('timer', None) if timer: + # stop timer timer.stop() + + # tell threadgroup to remove timer self.TG.timer_done(timer) + if entry['cluster_id'] in self.health_check_types: + self.health_check_types.pop(entry['cluster_id']) return listener = entry.get('listener', None) @@ -546,13 +668,13 @@ """Respond to confirm that the rpc service is still alive.""" return True - def register_cluster(self, ctx, cluster_id, check_type, interval=None, - params=None, enabled=True): + def register_cluster(self, ctx, cluster_id, interval=None, + node_update_timeout=None, params=None, + enabled=True): """Register cluster for health checking. :param ctx: The context of notify request. :param cluster_id: The ID of the cluster to be checked. - :param check_type: A string indicating the type of checks. :param interval: An optional integer indicating the length of checking periods in seconds. :param dict params: Other parameters for the health check. 
@@ -560,6 +682,17 @@ """ params = params or {} + # extract check_type from params + check_type = "" + if 'detection_modes' in params: + check_type = ','.join([ + NodePollUrlHealthCheck._convert_detection_tuple(d).type + for d in params['detection_modes'] + ]) + + # add node_update_timeout to params + params['node_update_timeout'] = node_update_timeout + registry = objects.HealthRegistry.create(ctx, cluster_id, check_type, interval, params, self.engine_id, @@ -591,6 +724,7 @@ self._stop_check(entry) self.rt['registries'].pop(i) objects.HealthRegistry.delete(ctx, cluster_id) + LOG.debug('unregister done') def enable_cluster(self, ctx, cluster_id, params=None): for c in self.rt['registries']: @@ -639,12 +773,12 @@ def register(cluster_id, engine_id=None, **kwargs): params = kwargs.pop('params', {}) interval = kwargs.pop('interval', cfg.CONF.periodic_interval) - check_type = kwargs.pop('check_type', consts.NODE_STATUS_POLLING) + node_update_timeout = kwargs.pop('node_update_timeout', 300) enabled = kwargs.pop('enabled', True) return notify(engine_id, 'register_cluster', cluster_id=cluster_id, interval=interval, - check_type=check_type, + node_update_timeout=node_update_timeout, params=params, enabled=enabled) diff -Nru senlin-6.0.0/senlin/engine/node.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/node.py --- senlin-6.0.0/senlin/engine/node.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/node.py 2018-11-19 18:48:08.000000000 +0000 @@ -211,7 +211,9 @@ def do_create(self, context): if self.status != consts.NS_INIT: LOG.error('Node is in status "%s"', self.status) - return False + self.set_status(context, consts.NS_ERROR, + 'Node must be in INIT status') + return False, 'Node must be in INIT status' self.set_status(context, consts.NS_CREATING, 'Creation in progress') try: @@ -220,11 +222,11 @@ physical_id = ex.resource_id self.set_status(context, consts.NS_ERROR, six.text_type(ex), physical_id=physical_id) - return False + return False, str(ex) self.set_status(context, consts.NS_ACTIVE, 'Creation succeeded', physical_id=physical_id) - return True + return True, None def do_delete(self, context): self.set_status(context, consts.NS_DELETING, 'Deletion in progress') @@ -314,7 +316,7 @@ self.index = -1 return True - def do_check(self, context): + def do_check(self, context, return_check_result=False): if not self.physical_id: return False @@ -328,6 +330,9 @@ self.set_status(context, consts.NS_ERROR, six.text_type(ex)) return False + if return_check_result: + return res + # Physical object is ACTIVE but for some reason the node status in # senlin was WARNING. 
We only update the status_reason if res: diff -Nru senlin-6.0.0/senlin/engine/service.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/service.py --- senlin-6.0.0/senlin/engine/service.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/engine/service.py 2018-11-19 18:48:08.000000000 +0000 @@ -2511,7 +2511,7 @@ receiver = receiver_obj.Receiver.find(ctx, identity) try: - cluster = co.Cluster.find(ctx, receiver.cluster_id) + db_cluster = co.Cluster.find(ctx, receiver.cluster_id) except (exception.ResourceNotFound, exception.MultipleChoices) as ex: msg = ex.enhance_msg('referenced', ex) raise exception.BadRequest(msg=msg) @@ -2527,7 +2527,7 @@ 'inputs': data } - action_id = action_mod.Action.create(ctx, cluster.id, + action_id = action_mod.Action.create(ctx, db_cluster.id, receiver.action, **kwargs) dispatcher.start_action() LOG.info("Webhook %(w)s triggered with action queued: %(a)s.", diff -Nru senlin-6.0.0/senlin/__init__.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/__init__.py --- senlin-6.0.0/senlin/__init__.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/__init__.py 2018-11-19 18:48:08.000000000 +0000 @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_i18n - -oslo_i18n.enable_lazy() diff -Nru senlin-6.0.0/senlin/locale/de/LC_MESSAGES/senlin.po senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/locale/de/LC_MESSAGES/senlin.po --- senlin-6.0.0/senlin/locale/de/LC_MESSAGES/senlin.po 2018-08-30 14:17:04.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/locale/de/LC_MESSAGES/senlin.po 2018-11-19 18:48:08.000000000 +0000 @@ -4,7 +4,7 @@ msgstr "" "Project-Id-Version: senlin VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-03-09 06:43+0000\n" +"POT-Creation-Date: 2018-08-03 04:36+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -184,10 +184,6 @@ msgid "Action to try for node recovery." msgstr "Aktion für die Wiederherstellung des Knotens." -#, python-format -msgid "Actions %(actions)s were successfully built." -msgstr "Aktionen %(actions)s wurden erfolgreich erstellt." - msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." @@ -239,14 +235,6 @@ msgid "Bus of the device." msgstr "Bus des Geräts." -#, python-format -msgid "" -"Can not build trust between user %(user)s and zaqar service user %(zaqar)s " -"for receiver %(receiver)s." -msgstr "" -"Es kann keine Vertrauensstellung zwischen Benutzer %(user)s und Zaqar-" -"Dienstbenutzer %(zaqar)s für Empfänger %(receiver)s hergestellt werden." - msgid "Candidates generated" msgstr "Kandidaten generiert" @@ -359,14 +347,6 @@ msgid "Endpoint plugin %(name)s is not found." msgstr "Endpoint-Plugin %(name)s wurde nicht gefunden." 
-#, python-format -msgid "" -"Engine %(id)s has launched %(num)s node actions consecutively, stop " -"scheduling node action for %(interval)s second..." -msgstr "" -"Engine %(id)s hat %(num)s Node-Aktionen nacheinander gestartet, beendet die " -"Planung der Node-Aktion für %(interval)s Sekunde ..." - msgid "Engine died when executing this action." msgstr "Beim Ausführen dieser Aktion ist die Engine abgestürzt." @@ -414,14 +394,6 @@ msgstr "Fehler beim Hinzufügen von Knoten zu lb pool: %s" #, python-format -msgid "Failed in building action: %s" -msgstr "Fehler beim Erstellen einer Aktion: %s" - -#, python-format -msgid "Failed in claiming message: %s" -msgstr "Fehler beim Anfordern der Nachricht: %s" - -#, python-format msgid "Failed in creating %(type)s: %(message)s." msgstr "Fehler beim Erstellen von %(type)s: %(message)s." @@ -473,10 +445,6 @@ msgid "Failed in deleting listener: DELETE FAILED." msgstr "Fehler beim Löschen des Listeners: DELETE FAILED." -#, python-format -msgid "Failed in deleting message %(id)s: %(reason)s" -msgstr "Fehler beim Löschen der Nachricht %(id)s: %(reason)s" - msgid "Failed in deleting servergroup." msgstr "Fehler beim Löschen der Servergruppe." @@ -962,13 +930,6 @@ msgstr "Anzahl der Sekunden zwischen Sperrversuche." msgid "" -"Number of seconds between pollings. Only required when type is " -"'NODE_STATUS_POLLING'." -msgstr "" -"Anzahl der Sekunden zwischen den Befragungen Wird nur benötigt, wenn der Typ " -"'NODE_STATUS_POLLING' ist." - -msgid "" "Number of seconds to hold the cluster for cool-down before allowing cluster " "to be resized again." msgstr "" @@ -1123,16 +1084,6 @@ "Erstellen Sie den Server mit dem aktuellen Abbild und dem " "Administratorkennwort neu." -#, python-format -msgid "" -"Receiver notification host is not specified in configuration file and Senlin " -"service endpoint can not be found, using local hostname (%(host)s) for " -"subscriber url." -msgstr "" -"Der Empfängerbenachrichtigungshost ist in der Konfigurationsdatei nicht " -"angegeben, und der Senlin-Dienstendpunkt kann nicht gefunden werden, wobei " -"der lokale Hostname (%(host)s) für die Teilnehmer-URL verwendet wird." - msgid "Recovery action REBOOT is only applicable to os.nova.server clusters." msgstr "Recovery-Aktion REBOOT ist nur auf os.nova.server-Cluster anwendbar." @@ -1167,16 +1118,6 @@ msgid "Rescue the server." msgstr "Retten Sie den Server." -#, python-format -msgid "" -"Resizing cluster '%(cluster)s': type=%(adj_type)s, number=%(number)s, " -"min_size=%(min_size)s, max_size=%(max_size)s, min_step=%(min_step)s, strict=" -"%(strict)s." -msgstr "" -"Größenanpassung des Clusters '%(cluster)s': typ=%(adj_type)s, anzahl=" -"%(number)s, min_size=%(min_size)s, max_size=%(max_size)s, min_step=" -"%(min_step)s,strict=%(strict)s." - msgid "Restart a container." msgstr "Starten Sie einen Container neu." @@ -1202,10 +1143,6 @@ msgid "Senlin API revision." msgstr "Senlin API Revision." -#, python-format -msgid "Senlin endpoint can not be found: %s." -msgstr "Der Senlin-Endpunkt wurde nicht gefunden: %s." - msgid "Senlin engine revision." msgstr "Senlin-Engineüberarbeitung." @@ -1867,23 +1804,6 @@ "Datenträger-Quellentyp, muss entweder 'image', 'snapshot', 'volume' oder " "'blank' sein" -#, python-format -msgid "Warning: %(key)s will be deprecated after version %(version)s!" -msgstr "Warnung: %(key)s wird nach Version %(version)s veraltet sein!" - -#, python-format -msgid "Webhook failed authentication: %s." -msgstr "Webhook fehlgeschlagene Authentifizierung: %s." 
- -#, python-format -msgid "" -"Webhook host is not specified in configuration file and Senlin service " -"endpoint can not be found,using local hostname (%(host)s) for webhook url." -msgstr "" -"Der Webhook-Host ist in der Konfigurationsdatei nicht angegeben, und der " -"Senlin-Dienstendpunkt kann nicht gefunden werden, indem der lokale Hostname " -"(%(host)s) für die Webhook-URL verwendet wird." - msgid "Weight of the availability zone (default is 100)." msgstr "Gewicht der Verfügbarkeitszone (Standard ist 100)." diff -Nru senlin-6.0.0/senlin/objects/action.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/action.py --- senlin-6.0.0/senlin/objects/action.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/action.py 2018-11-19 18:48:08.000000000 +0000 @@ -96,6 +96,11 @@ return cls._from_db_object(context, cls(), obj) @classmethod + def action_list_active_scaling(cls, context, cluster_id, **kwargs): + objs = db_api.action_list_active_scaling(context, cluster_id, **kwargs) + return [cls._from_db_object(context, cls(), obj) for obj in objs] + + @classmethod def get_all(cls, context, **kwargs): objs = db_api.action_get_all(context, **kwargs) return [cls._from_db_object(context, cls(), obj) for obj in objs] @@ -106,6 +111,11 @@ return [cls._from_db_object(context, cls(), obj) for obj in objs] @classmethod + def get_all_active_by_target(cls, context, target): + objs = db_api.action_get_all_active_by_target(context, target) + return [cls._from_db_object(context, cls(), obj) for obj in objs] + + @classmethod def check_status(cls, context, action_id, timestamp): return db_api.action_check_status(context, action_id, timestamp) diff -Nru senlin-6.0.0/senlin/objects/cluster_lock.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/cluster_lock.py --- senlin-6.0.0/senlin/objects/cluster_lock.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/cluster_lock.py 2018-11-19 18:48:08.000000000 +0000 @@ -32,6 +32,10 @@ return db_api.cluster_lock_acquire(cluster_id, action_id, scope) @classmethod + def is_locked(cls, cluster_id): + return db_api.cluster_is_locked(cluster_id) + + @classmethod def release(cls, cluster_id, action_id, scope): return db_api.cluster_lock_release(cluster_id, action_id, scope) diff -Nru senlin-6.0.0/senlin/objects/cluster_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/cluster_policy.py --- senlin-6.0.0/senlin/objects/cluster_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/cluster_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -12,8 +12,6 @@ """Cluster-policy binding object.""" -from oslo_utils import timeutils - from senlin.db import api as db_api from senlin.objects import base from senlin.objects import cluster as cluster_obj @@ -86,13 +84,6 @@ def delete(cls, context, cluster_id, policy_id): db_api.cluster_policy_detach(context, cluster_id, policy_id) - def cooldown_inprogress(self, cooldown): - last_op = self.last_op - if last_op and not timeutils.is_older_than(last_op, cooldown): - return True - - return False - def to_dict(self): binding_dict = { 'id': self.id, diff -Nru senlin-6.0.0/senlin/objects/node_lock.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/node_lock.py --- senlin-6.0.0/senlin/objects/node_lock.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/objects/node_lock.py 2018-11-19 18:48:08.000000000 +0000 @@ -31,6 +31,10 @@ return 
db_api.node_lock_acquire(node_id, action_id) @classmethod + def is_locked(cls, cluster_id): + return db_api.node_is_locked(cluster_id) + + @classmethod def release(cls, node_id, action_id): return db_api.node_lock_release(node_id, action_id) diff -Nru senlin-6.0.0/senlin/policies/health_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/policies/health_policy.py --- senlin-6.0.0/senlin/policies/health_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/policies/health_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -10,7 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. +from collections import namedtuple from oslo_config import cfg +from oslo_log import log as logging from senlin.common import constraints from senlin.common import consts @@ -21,16 +23,21 @@ from senlin.engine import health_manager from senlin.policies import base +LOG = logging.getLogger(__name__) + class HealthPolicy(base.Policy): """Policy for health management of a cluster.""" - VERSION = '1.0' + VERSION = '1.1' VERSIONS = { '1.0': [ {'status': consts.EXPERIMENTAL, 'since': '2017.02'}, {'status': consts.SUPPORTED, 'since': '2018.06'}, - ] + ], + '1.1': [ + {'status': consts.SUPPORTED, 'since': '2018.09'} + ], } PRIORITY = 600 @@ -55,19 +62,21 @@ KEYS = (DETECTION, RECOVERY) = ('detection', 'recovery') _DETECTION_KEYS = ( - DETECTION_TYPE, DETECTION_OPTIONS, + DETECTION_MODES, DETECTION_TYPE, DETECTION_OPTIONS, DETECTION_INTERVAL, + NODE_UPDATE_TIMEOUT, RECOVERY_CONDITIONAL ) = ( - 'type', 'options' + 'detection_modes', 'type', 'options', 'interval', + 'node_update_timeout', 'recovery_conditional' ) _DETECTION_OPTIONS = ( - DETECTION_INTERVAL, POLL_URL, POLL_URL_SSL_VERIFY, - POLL_URL_HEALTHY_RESPONSE, POLL_URL_RETRY_LIMIT, - POLL_URL_RETRY_INTERVAL, NODE_UPDATE_TIMEOUT, + POLL_URL, POLL_URL_SSL_VERIFY, + POLL_URL_CONN_ERROR_AS_UNHEALTHY, POLL_URL_HEALTHY_RESPONSE, + POLL_URL_RETRY_LIMIT, POLL_URL_RETRY_INTERVAL, ) = ( - 'interval', 'poll_url', 'poll_url_ssl_verify', - 'poll_url_healthy_response', 'poll_url_retry_limit', - 'poll_url_retry_interval', 'node_update_timeout', + 'poll_url', 'poll_url_ssl_verify', + 'poll_url_conn_error_as_unhealthy', 'poll_url_healthy_response', + 'poll_url_retry_limit', 'poll_url_retry_interval' ) _RECOVERY_KEYS = ( @@ -95,62 +104,100 @@ DETECTION: schema.Map( _('Policy aspect for node failure detection.'), schema={ - DETECTION_TYPE: schema.String( - _('Type of node failure detection.'), + DETECTION_INTERVAL: schema.Integer( + _("Number of seconds between pollings. Only " + "required when type is 'NODE_STATUS_POLLING' or " + "'NODE_STATUS_POLL_URL'."), + default=60, + ), + NODE_UPDATE_TIMEOUT: schema.Integer( + _("Number of seconds since last node update to " + "wait before checking node health."), + default=300, + ), + RECOVERY_CONDITIONAL: schema.String( + _("The conditional that determines when recovery should be" + " performed in case multiple detection modes are " + "specified. 'ALL_FAILED' means that all " + "detection modes have to return failed health checks " + "before a node is recovered. 'ANY_FAILED'" + " means that a failed health check with a single " + "detection mode triggers a node recovery."), constraints=[ - constraints.AllowedValues(consts.DETECTION_TYPES), + constraints.AllowedValues( + consts.RECOVERY_CONDITIONAL), ], - required=True, - ), - DETECTION_OPTIONS: schema.Map( - schema={ - DETECTION_INTERVAL: schema.Integer( - _("Number of seconds between pollings. 
Only " - "required when type is 'NODE_STATUS_POLLING' or " - "'NODE_STATUS_POLL_URL'."), - default=60, - ), - POLL_URL: schema.String( - _("URL to poll for node status. See documentation " - "for valid expansion parameters. Only required " - "when type is 'NODE_STATUS_POLL_URL'."), - default='', - ), - POLL_URL_SSL_VERIFY: schema.Boolean( - _("Whether to verify SSL when calling URL to poll " - "for node status. Only required when type is " - "'NODE_STATUS_POLL_URL'."), - default=True, - ), - POLL_URL_HEALTHY_RESPONSE: schema.String( - _("String pattern in the poll URL response body " - "that indicates a healthy node. " - "Required when type is 'NODE_STATUS_POLL_URL'."), - default='', - ), - POLL_URL_RETRY_LIMIT: schema.Integer( - _("Number of times to retry URL polling when its " - "return body is missing " - "POLL_URL_HEALTHY_RESPONSE string before a node " - "is considered down. Required when type is " - "'NODE_STATUS_POLL_URL'."), - default=3, - ), - POLL_URL_RETRY_INTERVAL: schema.Integer( - _("Number of seconds between URL polling retries " - "before a node is considered down. " - "Required when type is 'NODE_STATUS_POLL_URL'."), - default=3, - ), - NODE_UPDATE_TIMEOUT: schema.Integer( - _("Number of seconds since last node update to " - "wait before checking node health. " - "Required when type is 'NODE_STATUS_POLL_URL'."), - default=300, - ), - }, - default={} + default=consts.ANY_FAILED, + required=False, ), + DETECTION_MODES: schema.List( + _('List of node failure detection modes.'), + schema=schema.Map( + _('Node failure detection mode to try'), + schema={ + DETECTION_TYPE: schema.String( + _('Type of node failure detection.'), + constraints=[ + constraints.AllowedValues( + consts.DETECTION_TYPES), + ], + required=True, + ), + DETECTION_OPTIONS: schema.Map( + schema={ + POLL_URL: schema.String( + _("URL to poll for node status. See " + "documentation for valid expansion " + "parameters. Only required " + "when type is " + "'NODE_STATUS_POLL_URL'."), + default='', + ), + POLL_URL_SSL_VERIFY: schema.Boolean( + _("Whether to verify SSL when calling " + "URL to poll for node status. Only " + "required when type is " + "'NODE_STATUS_POLL_URL'."), + default=True, + ), + POLL_URL_CONN_ERROR_AS_UNHEALTHY: + schema.Boolean( + _("Whether to treat URL connection " + "errors as an indication of an " + "unhealthy node. Only required " + "when type is " + "'NODE_STATUS_POLL_URL'."), + default=True, + ), + POLL_URL_HEALTHY_RESPONSE: schema.String( + _("String pattern in the poll URL " + "response body that indicates a " + "healthy node. Required when type " + "is 'NODE_STATUS_POLL_URL'."), + default='', + ), + POLL_URL_RETRY_LIMIT: schema.Integer( + _("Number of times to retry URL " + "polling when its return body is " + "missing POLL_URL_HEALTHY_RESPONSE " + "string before a node is considered " + "down. Required when type is " + "'NODE_STATUS_POLL_URL'."), + default=3, + ), + POLL_URL_RETRY_INTERVAL: schema.Integer( + _("Number of seconds between URL " + "polling retries before a node is " + "considered down. 
Required when " + "type is 'NODE_STATUS_POLL_URL'."), + default=3, + ), + }, + default={} + ), + } + ) + ) }, required=True, ), @@ -202,25 +249,44 @@ "action is RECREATE."), default=False, ), - } + }, + required=True, ), } def __init__(self, name, spec, **kwargs): super(HealthPolicy, self).__init__(name, spec, **kwargs) - self.check_type = self.properties[self.DETECTION][self.DETECTION_TYPE] + self.interval = self.properties[self.DETECTION].get( + self.DETECTION_INTERVAL, 60) - options = self.properties[self.DETECTION][self.DETECTION_OPTIONS] - self.interval = options.get(self.DETECTION_INTERVAL, 60) - self.poll_url = options.get(self.POLL_URL, '') - self.poll_url_ssl_verify = options.get(self.POLL_URL_SSL_VERIFY, True) - self.poll_url_healthy_response = options.get( - self.POLL_URL_HEALTHY_RESPONSE, '') - self.poll_url_retry_limit = options.get(self.POLL_URL_RETRY_LIMIT, '') - self.poll_url_retry_interval = options.get( - self.POLL_URL_RETRY_INTERVAL, '') - self.node_update_timeout = options.get(self.NODE_UPDATE_TIMEOUT, 300) + self.node_update_timeout = self.properties[self.DETECTION].get( + self.NODE_UPDATE_TIMEOUT, 300) + + self.recovery_conditional = self.properties[self.DETECTION].get( + self.RECOVERY_CONDITIONAL, consts.ANY_FAILED) + + DetectionMode = namedtuple( + 'DetectionMode', + [self.DETECTION_TYPE] + list(self._DETECTION_OPTIONS)) + + self.detection_modes = [] + + raw_modes = self.properties[self.DETECTION][self.DETECTION_MODES] + for mode in raw_modes: + options = mode[self.DETECTION_OPTIONS] + + self.detection_modes.append( + DetectionMode( + mode[self.DETECTION_TYPE], + options.get(self.POLL_URL, ''), + options.get(self.POLL_URL_SSL_VERIFY, True), + options.get(self.POLL_URL_CONN_ERROR_AS_UNHEALTHY, True), + options.get(self.POLL_URL_HEALTHY_RESPONSE, ''), + options.get(self.POLL_URL_RETRY_LIMIT, ''), + options.get(self.POLL_URL_RETRY_INTERVAL, '') + ) + ) recover_settings = self.properties[self.RECOVERY] self.recover_actions = recover_settings[self.RECOVERY_ACTIONS] @@ -248,6 +314,30 @@ cfg.CONF.health_check_interval_min} raise exc.InvalidSpec(message=message) + # check valid detection types + polling_types = [consts.NODE_STATUS_POLLING, + consts.NODE_STATUS_POLL_URL] + + has_valid_polling_types = all( + d.type in polling_types + for d in self.detection_modes + ) + has_valid_lifecycle_type = ( + len(self.detection_modes) == 1 and + self.detection_modes[0].type == consts.LIFECYCLE_EVENTS + ) + + if not has_valid_polling_types and not has_valid_lifecycle_type: + message = ("Invalid detection modes in health policy: %s" % + ', '.join([d.type for d in self.detection_modes])) + raise exc.InvalidSpec(message=message) + + if len(self.detection_modes) != len(set(self.detection_modes)): + message = ("Duplicate detection modes are not allowed in " + "health policy: %s" % + ', '.join([d.type for d in self.detection_modes])) + raise exc.InvalidSpec(message=message) + # TODO(Qiming): Add detection of duplicated action names when # support to list of actions is implemented. 
@@ -274,36 +364,33 @@ return False, err_msg kwargs = { - 'check_type': self.check_type, 'interval': self.interval, + 'node_update_timeout': self.node_update_timeout, 'params': { 'recover_action': self.recover_actions, - 'poll_url': self.poll_url, - 'poll_url_ssl_verify': self.poll_url_ssl_verify, - 'poll_url_healthy_response': self.poll_url_healthy_response, - 'poll_url_retry_limit': self.poll_url_retry_limit, - 'poll_url_retry_interval': self.poll_url_retry_interval, - 'node_update_timeout': self.node_update_timeout, 'node_delete_timeout': self.node_delete_timeout, 'node_force_recreate': self.node_force_recreate, + 'recovery_conditional': self.recovery_conditional, }, 'enabled': enabled } + converted_detection_modes = [ + d._asdict() for d in self.detection_modes + ] + detection_mode = {'detection_modes': converted_detection_modes} + kwargs['params'].update(detection_mode) + health_manager.register(cluster.id, engine_id=None, **kwargs) data = { - 'check_type': self.check_type, 'interval': self.interval, - 'poll_url': self.poll_url, - 'poll_url_ssl_verify': self.poll_url_ssl_verify, - 'poll_url_healthy_response': self.poll_url_healthy_response, - 'poll_url_retry_limit': self.poll_url_retry_limit, - 'poll_url_retry_interval': self.poll_url_retry_interval, 'node_update_timeout': self.node_update_timeout, + 'recovery_conditional': self.recovery_conditional, 'node_delete_timeout': self.node_delete_timeout, 'node_force_recreate': self.node_force_recreate, } + data.update(detection_mode) return True, self._build_policy_data(data) @@ -314,7 +401,10 @@ :param cluster: The target cluster. :returns: A tuple comprising the execution result and reason. """ - health_manager.unregister(cluster.id) + ret = health_manager.unregister(cluster.id) + if not ret: + LOG.warning('Unregistering health manager for cluster %s ' + 'timed out.', cluster.id) return True, '' def pre_op(self, cluster_id, action, **args): diff -Nru senlin-6.0.0/senlin/policies/scaling_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/policies/scaling_policy.py --- senlin-6.0.0/senlin/policies/scaling_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/policies/scaling_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,6 +11,7 @@ # under the License. from oslo_config import cfg +from oslo_utils import timeutils from senlin.common import constraints from senlin.common import consts @@ -19,6 +20,7 @@ from senlin.common import scaleutils as su from senlin.common import schema from senlin.common import utils +from senlin.objects import cluster_policy as cpo from senlin.policies import base @@ -44,6 +46,8 @@ TARGET = [ ('BEFORE', consts.CLUSTER_SCALE_IN), ('BEFORE', consts.CLUSTER_SCALE_OUT), + ('AFTER', consts.CLUSTER_SCALE_IN), + ('AFTER', consts.CLUSTER_SCALE_OUT), ] PROFILE_TYPE = [ @@ -186,6 +190,17 @@ :return: None. 
""" + # check cooldown + last_op = action.inputs.get('last_op', None) + if last_op and not timeutils.is_older_than(last_op, self.cooldown): + action.data.update({ + 'status': base.CHECK_ERROR, + 'reason': _('Policy %s cooldown is still ' + 'in progress.') % self.id + }) + action.store(action.context) + return + # Use action input if count is provided count_value = action.inputs.get('count', None) cluster = action.entity @@ -243,10 +258,26 @@ return - def need_check(self, target, action): - res = super(ScalingPolicy, self).need_check(target, action) - if res: - # Check if the action is expected by the policy - res = (self.event == action.action) + def post_op(self, cluster_id, action): + # update last_op for next cooldown check + ts = timeutils.utcnow(True) + cpo.ClusterPolicy.update(action.context, cluster_id, + self.id, {'last_op': ts}) - return res + def need_check(self, target, action): + # check if target + action matches policy targets + if not super(ScalingPolicy, self).need_check(target, action): + return False + + if target == 'BEFORE': + # Scaling policy BEFORE check should only be triggered if the + # incoming action matches the specific policy event. + # E.g. for scale-out policy the BEFORE check to select nodes for + # termination should only run for scale-out actions. + return self.event == action.action + else: + # Scaling policy AFTER check to reset cooldown timer should be + # triggered for all supported policy events (both scale-in and + # scale-out). E.g. a scale-out policy should reset cooldown timer + # whenever scale-out or scale-in action completes. + return action.action in list(self._SUPPORTED_EVENTS) diff -Nru senlin-6.0.0/senlin/profiles/base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/profiles/base.py --- senlin-6.0.0/senlin/profiles/base.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/profiles/base.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,8 +11,10 @@ # under the License. import copy +import eventlet import inspect +from oslo_config import cfg from oslo_context import context as oslo_context from oslo_log import log as logging from oslo_utils import timeutils @@ -127,6 +129,7 @@ self._orchestrationclient = None self._workflowclient = None self._block_storageclient = None + self._glanceclient = None @classmethod def _from_object(cls, profile): @@ -302,7 +305,7 @@ try: return profile.do_check(obj) except exc.InternalError as ex: - LOG.error(ex) + LOG.debug(ex) return False @classmethod @@ -385,6 +388,19 @@ self._computeclient = driver_base.SenlinDriver().compute(params) return self._computeclient + def glance(self, obj): + """Construct glance client based on object. + + :param obj: Object for which the client is created. It is expected to + be None when retrieving an existing client. When creating + a client, it contains the user and project to be used. + """ + if self._glanceclient is not None: + return self._glanceclient + params = self._build_conn_params(obj.user, obj.project) + self._glanceclient = driver_base.SenlinDriver().glance(params) + return self._glanceclient + def network(self, obj): """Construct network client based on object. @@ -518,6 +534,13 @@ raise exc.EResourceOperation(op='recovering', type='node', id=obj.id, message=six.text_type(ex)) + + # pause to allow deleted resource to get reclaimed by nova + # this is needed to avoid a problem when the compute resources are + # at their quota limit. The deleted resource has to become available + # so that the new node can be created. 
+ eventlet.sleep(cfg.CONF.batch_interval) + res = None try: res = self.do_create(obj) diff -Nru senlin-6.0.0/senlin/profiles/os/nova/server.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/profiles/os/nova/server.py --- senlin-6.0.0/senlin/profiles/os/nova/server.py 2018-08-30 14:17:04.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/profiles/os/nova/server.py 2018-11-19 18:48:08.000000000 +0000 @@ -385,7 +385,7 @@ def _validate_image(self, obj, name_or_id, reason=None): try: - return self.compute(obj).image_find(name_or_id, False) + return self.glance(obj).image_find(name_or_id, False) except exc.InternalError as ex: if reason == 'create': raise exc.EResourceCreation(type='server', @@ -517,6 +517,8 @@ return try: net_obj = nc.network_get(net) + if net_obj is None: + return _("The specified network %s could not be found.") % net result[self.NETWORK] = net_obj.id except exc.InternalError as ex: return six.text_type(ex) @@ -554,6 +556,9 @@ if net: try: net_obj = nc.network_get(net) + if net_obj is None: + return _("The floating network %s could not be found." + ) % net result[self.FLOATING_NETWORK] = net_obj.id except exc.InternalError as ex: return six.text_type(ex) diff -Nru senlin-6.0.0/senlin/tests/tempest/post_test_hook.sh senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/post_test_hook.sh --- senlin-6.0.0/senlin/tests/tempest/post_test_hook.sh 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/post_test_hook.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -#!/bin/bash -xe - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. - -set -ex - -export DEST=${DEST:-/opt/stack/new} -export DEVSTACK_DIR=$DEST/devstack -export SENLIN_DIR=$DEST/senlin - -. $DEVSTACK_DIR/openrc admin admin - -cd $DEST/tempest -echo "Running tempest " $SENLIN_TEST_TYPE "tests" -sudo tox -evenv-tempest -- tempest run --regex $DEVSTACK_GATE_TEMPEST_REGEX diff -Nru senlin-6.0.0/senlin/tests/tempest/pre_test_hook.sh senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/pre_test_hook.sh --- senlin-6.0.0/senlin/tests/tempest/pre_test_hook.sh 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/pre_test_hook.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside pre_test_hook function in devstack gate. 
- -set -x - -export localconf=$BASE/new/devstack/local.conf - -export SENLIN_CONF=/etc/senlin/senlin.conf -export ZAQAR_CONF=/etc/zaqar/zaqar.conf -export SENLIN_BACKEND=${SENLIN_BACKEND:-'openstack_test'} - -_LOG_CFG='default_log_levels =' -_LOG_CFG+='amqp=WARN,amqplib=WARN,sqlalchemy=WARN,oslo_messaging=WARN' -_LOG_CFG+=',iso8601=WARN,requests.packages.urllib3.connectionpool=WARN' -_LOG_CFG+=',urllib3.connectionpool=WARN' -_LOG_CFG+=',requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN' -_LOG_CFG+=',keystonemiddleware=WARN' -_LOG_CFG+=',routes.middleware=WARN' -_LOG_CFG+=',stevedore=WARN' -_LOG_CFG+=',oslo_messaging._drivers.amqp=WARN' -_LOG_CFG+=',oslo_messaging._drivers.amqpdriver=WARN' - -echo -e '[[post-config|$SENLIN_CONF]]\n[DEFAULT]\n' >> $localconf -echo -e 'num_engine_workers=2\n' >> $localconf -echo -e "cloud_backend=$SENLIN_BACKEND\n" >> $localconf -echo -e $_LOG_CFG >> $localconf - -if [[ "$SENLIN_BACKEND" == "openstack" ]]; then - echo -e "[[post-config|$ZAQAR_CONF]]\n[DEFAULT]\n" >> $localconf - echo -e "auth_strategy=keystone\n" >> $localconf - echo -e "[storage]\n" >> $localconf - echo -e "message_pipeline=zaqar.notification.notifier" >> $localconf -fi diff -Nru senlin-6.0.0/senlin/tests/tempest/README.rst senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/README.rst --- senlin-6.0.0/senlin/tests/tempest/README.rst 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/tempest/README.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -===== -MOVED -===== - -The senlin tempest plugin has moved to http://git.openstack.org/cgit/openstack/senlin-tempest-plugin diff -Nru senlin-6.0.0/senlin/tests/unit/api/middleware/test_trust.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/middleware/test_trust.py --- senlin-6.0.0/senlin/tests/unit/api/middleware/test_trust.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/middleware/test_trust.py 2018-11-19 18:48:08.000000000 +0000 @@ -31,7 +31,7 @@ self.middleware = trust.TrustMiddleware(None) @mock.patch("senlin.rpc.client.EngineClient") - def test__get_trust_already_exists(self, mock_rpc): + def test_get_trust_already_exists(self, mock_rpc): x_cred = {'trust': 'FAKE_TRUST_ID'} x_rpc = mock.Mock() x_rpc.call.return_value = x_cred @@ -51,7 +51,7 @@ @mock.patch.object(context, "get_service_credentials") @mock.patch("senlin.drivers.base.SenlinDriver") @mock.patch("senlin.rpc.client.EngineClient") - def test__get_trust_bad(self, mock_rpc, mock_driver, mock_creds): + def test_get_trust_bad(self, mock_rpc, mock_driver, mock_creds): x_cred = {'foo': 'bar'} x_rpc = mock.Mock() x_rpc.call.return_value = x_cred @@ -99,7 +99,7 @@ @mock.patch.object(context, "get_service_credentials") @mock.patch("senlin.drivers.base.SenlinDriver") @mock.patch("senlin.rpc.client.EngineClient") - def test__get_trust_not_found(self, mock_rpc, mock_driver, mock_creds): + def test_get_trust_not_found(self, mock_rpc, mock_driver, mock_creds): x_rpc = mock.Mock() x_rpc.call.return_value = None mock_rpc.return_value = x_rpc @@ -137,7 +137,7 @@ @mock.patch.object(context, "get_service_credentials") @mock.patch("senlin.drivers.base.SenlinDriver") @mock.patch("senlin.rpc.client.EngineClient") - def test__get_trust_do_create(self, mock_rpc, mock_driver, mock_creds): + def test_get_trust_do_create(self, mock_rpc, mock_driver, mock_creds): x_rpc = mock.Mock() x_rpc.call.return_value = None mock_rpc.return_value = x_rpc @@ -179,7 +179,7 @@ 
@mock.patch.object(context, "get_service_credentials") @mock.patch("senlin.drivers.base.SenlinDriver") @mock.patch("senlin.rpc.client.EngineClient") - def test__get_trust_fatal(self, mock_rpc, mock_driver, mock_creds): + def test_get_trust_fatal(self, mock_rpc, mock_driver, mock_creds): x_rpc = mock.Mock() x_rpc.call.return_value = None mock_rpc.return_value = x_rpc diff -Nru senlin-6.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/middleware/test_version_negotiation.py --- senlin-6.0.0/senlin/tests/unit/api/middleware/test_version_negotiation.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/middleware/test_version_negotiation.py 2018-11-19 18:48:08.000000000 +0000 @@ -24,7 +24,7 @@ @mock.patch("senlin.api.openstack.versions.Controller") class VersionNegotiationTest(base.SenlinTestCase): - def test__get_version_controller(self, mock_vc): + def test_get_version_controller(self, mock_vc): gvc = mock_vc.return_value xvc = mock.Mock() gvc.get_controller = mock.Mock(return_value=xvc) @@ -38,7 +38,7 @@ self.assertEqual(0, request.environ['api.minor']) gvc.get_controller.assert_called_once_with('1.0') - def test__get_version_controller_shorter_version(self, mock_vc): + def test_get_version_controller_shorter_version(self, mock_vc): gvc = mock_vc.return_value xvc = mock.Mock() gvc.get_controller = mock.Mock(return_value=xvc) @@ -52,7 +52,7 @@ self.assertEqual(0, request.environ['api.minor']) gvc.get_controller.assert_called_once_with('1.0') - def test__get_controller_not_match_version(self, mock_vc): + def test_get_controller_not_match_version(self, mock_vc): gvc = mock_vc.return_value gvc.get_controller = mock.Mock(return_value=None) vnf = vn.VersionNegotiationFilter(None, None) @@ -208,7 +208,7 @@ response = vnf.process_request(request) self.assertEqual(gvc, response) - def test__check_version_request(self, mock_vc): + def test_check_version_request(self, mock_vc): controller = mock.Mock() minv = vr.APIVersionRequest('1.0') maxv = vr.APIVersionRequest('1.3') @@ -224,7 +224,7 @@ expected = vr.APIVersionRequest('1.0') self.assertEqual(expected, request.version_request) - def test__check_version_request_default(self, mock_vc): + def test_check_version_request_default(self, mock_vc): controller = mock.Mock() controller.DEFAULT_API_VERSION = "1.0" request = webob.Request({'PATH_INFO': 'resource'}) @@ -237,7 +237,7 @@ expected = vr.APIVersionRequest(controller.DEFAULT_API_VERSION) self.assertEqual(expected, request.version_request) - def test__check_version_request_invalid_format(self, mock_vc): + def test_check_version_request_invalid_format(self, mock_vc): controller = mock.Mock() request = webob.Request({'PATH_INFO': 'resource'}) request.headers[wsgi.API_VERSION_KEY] = 'clustering 2.03' @@ -250,7 +250,7 @@ "must be of format 'major.minor'.", six.text_type(ex)) - def test__check_version_request_invalid_version(self, mock_vc): + def test_check_version_request_invalid_version(self, mock_vc): controller = mock.Mock() minv = vr.APIVersionRequest('1.0') maxv = vr.APIVersionRequest('1.100') @@ -269,7 +269,7 @@ {'min_ver': str(minv), 'max_ver': str(maxv)}) self.assertEqual(expected, six.text_type(ex)) - def test__check_version_request_latest(self, mock_vc): + def test_check_version_request_latest(self, mock_vc): controller = mock.Mock() controller.max_api_version = mock.Mock(return_value='12.34') diff -Nru senlin-6.0.0/senlin/tests/unit/api/openstack/v1/test_clusters.py 
senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/openstack/v1/test_clusters.py --- senlin-6.0.0/senlin/tests/unit/api/openstack/v1/test_clusters.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/api/openstack/v1/test_clusters.py 2018-11-19 18:48:08.000000000 +0000 @@ -407,7 +407,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_add_nodes(self, mock_call, mock_parse, mock_enforce): + def test_do_add_nodes(self, mock_call, mock_parse, mock_enforce): req = mock.Mock() cid = 'FAKE_ID' data = dict(nodes=['NODE1']) @@ -428,8 +428,8 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_add_nodes_failed_request(self, mock_call, - mock_parse, _ignore): + def test_do_add_nodes_failed_request(self, mock_call, + mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes=['NODE2']) @@ -449,7 +449,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__add_nodes_failed_engine(self, mock_call, mock_parse, _ignore): + def test_add_nodes_failed_engine(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes=['NODE3']) @@ -472,7 +472,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_del_nodes(self, mock_call, mock_parse, _ignore): + def test_do_del_nodes(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'FAKE_ID' data = dict(nodes=['NODE4'], destroy=False) @@ -492,8 +492,8 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_del_nodes_failed_request(self, mock_call, - mock_parse, _ignore): + def test_do_del_nodes_failed_request(self, mock_call, + mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes=['NODE5'], destroy=False) @@ -512,7 +512,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_del_nodes_failed_engine(self, mock_call, mock_parse, _ignore): + def test_do_del_nodes_failed_engine(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes=['NODE6'], destroy=False) @@ -534,7 +534,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_replace_nodes(self, mock_call, mock_parse, _ignore): + def test_do_replace_nodes(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'FAKE_ID' data = dict(nodes={'OLD': 'NEW'}) @@ -555,7 +555,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_replace_nodes_none(self, mock_call, mock_parse, _ign): + def test_do_replace_nodes_none(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes=None) @@ -569,8 +569,8 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_replace_nodes_failed_request(self, mock_call, - mock_parse, _ign): + def test_do_replace_nodes_failed_request(self, mock_call, + mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes={'OLD': 'NEW'}) @@ -590,8 +590,8 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_replace_nodes_failed_engine(self, mock_call, - mock_parse, _ign): + def 
test_do_replace_nodes_failed_engine(self, mock_call, + mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(nodes={'OLD': 'NEW'}) @@ -641,18 +641,18 @@ 'ClusterResizeRequest', req, params) mock_call.assert_called_once_with(req.context, 'cluster_resize', obj) - def test__do_resize_exact_capacity(self, mock_enforce): + def test_do_resize_exact_capacity(self, mock_enforce): self._test_do_resize_with_type('EXACT_CAPACITY') - def test__do_resize_with_change_capacity(self, mock_enforce): + def test_do_resize_with_change_capacity(self, mock_enforce): self._test_do_resize_with_type('CHANGE_IN_CAPACITY') - def test__do_resize_with_change_percentage(self, mock_enforce): + def test_do_resize_with_change_percentage(self, mock_enforce): self._test_do_resize_with_type('CHANGE_IN_PERCENTAGE') @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_failed_request(self, mock_call, mock_parse, _ign): + def test_do_resize_failed_request(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'adjustment_type': 'EXACT_CAPACITY', 'number': 10} @@ -674,7 +674,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_missing_number(self, mock_call, mock_parse, _ign): + def test_do_resize_missing_number(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'adjustment_type': 'EXACT_CAPACITY'} @@ -690,7 +690,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_missing_type(self, mock_call, mock_parse, _ign): + def test_do_resize_missing_type(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'number': 2} @@ -707,7 +707,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_max_size_too_small(self, mock_call, mock_parse, _ign): + def test_do_resize_max_size_too_small(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'min_size': 2, 'max_size': 1} @@ -724,7 +724,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_empty_params(self, mock_call, mock_parse, _ign): + def test_do_resize_empty_params(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {} @@ -739,7 +739,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_resize_failed_engine(self, mock_call, mock_parse, _ign): + def test_do_resize_failed_engine(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'max_size': 200} @@ -759,7 +759,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_scale_out(self, mock_call, mock_parse, _ignore): + def test_do_scale_out(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=1) @@ -780,7 +780,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_scale_out_failed_request(self, mock_call, mock_parse, _ign): + def test_do_scale_out_failed_request(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=2) @@ -800,7 +800,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def 
test__do_scale_out_failed_engine(self, mock_call, mock_parse, _ign): + def test_do_scale_out_failed_engine(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=3) @@ -823,7 +823,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_scale_in(self, mock_call, mock_parse, _ignore): + def test_do_scale_in(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=4) @@ -844,7 +844,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_scale_in_failed_request(self, mock_call, mock_parse, _ign): + def test_do_scale_in_failed_request(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=5) @@ -864,7 +864,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_scale_in_failed_engine(self, mock_call, mock_parse, _ign): + def test_do_scale_in_failed_engine(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = dict(count=6) @@ -887,7 +887,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_attach(self, mock_call, mock_parse, _ign): + def test_do_policy_attach(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -906,7 +906,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_attach_failed_request(self, mock_call, mock_parse, _i): + def test_do_policy_attach_failed_request(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -924,7 +924,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_attach_failed_engine(self, mock_call, mock_parse, _i): + def test_do_policy_attach_failed_engine(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -945,7 +945,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_detach(self, mock_call, mock_parse, _ign): + def test_do_policy_detach(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -964,7 +964,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_detach_failed_request(self, mock_call, mock_parse, _i): + def test_do_policy_detach_failed_request(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -982,7 +982,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_detach_failed_engine(self, mock_call, mock_parse, _i): + def test_do_policy_detach_failed_engine(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -1003,7 +1003,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_update(self, mock_call, mock_parse, _ign): + def test_do_policy_update(self, mock_call, mock_parse, _ign): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -1022,7 +1022,7 @@ @mock.patch.object(util, 'parse_request') 
@mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_update_failed_request(self, mock_call, mock_parse, _i): + def test_do_policy_update_failed_request(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -1040,7 +1040,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_policy_update_failed_engine(self, mock_call, mock_parse, _i): + def test_do_policy_update_failed_engine(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'policy_id': 'xxxx-yyyy'} @@ -1061,7 +1061,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_check(self, mock_call, mock_parse, _ignore): + def test_do_check(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'op': 'value'} @@ -1080,7 +1080,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_check_failed_request(self, mock_call, mock_parse, _ign): + def test_do_check_failed_request(self, mock_call, mock_parse, _ign): data = {} req = mock.Mock() cid = 'aaaa-bbbb-cccc' @@ -1097,7 +1097,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_check_failed_engine(self, mock_call, mock_parse, _i): + def test_do_check_failed_engine(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {} @@ -1117,7 +1117,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_recover(self, mock_call, mock_parse, _ignore): + def test_do_recover(self, mock_call, mock_parse, _ignore): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {'op': 'value'} @@ -1136,7 +1136,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_recover_failed_request(self, mock_call, mock_parse, _ign): + def test_do_recover_failed_request(self, mock_call, mock_parse, _ign): data = {} req = mock.Mock() cid = 'aaaa-bbbb-cccc' @@ -1153,7 +1153,7 @@ @mock.patch.object(util, 'parse_request') @mock.patch.object(rpc_client.EngineClient, 'call') - def test__do_recover_failed_engine(self, mock_call, mock_parse, _i): + def test_do_recover_failed_engine(self, mock_call, mock_parse, _i): req = mock.Mock() cid = 'aaaa-bbbb-cccc' data = {} diff -Nru senlin-6.0.0/senlin/tests/unit/cmd/test_status.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/cmd/test_status.py --- senlin-6.0.0/senlin/tests/unit/cmd/test_status.py 1970-01-01 00:00:00.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/cmd/test_status.py 2018-11-19 18:48:08.000000000 +0000 @@ -0,0 +1,30 @@ +# Copyright (c) 2018 NEC, Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from oslo_upgradecheck.upgradecheck import Code
+
+from senlin.cmd import status
+from senlin.tests.unit.common import base
+
+
+class TestUpgradeChecks(base.SenlinTestCase):
+
+    def setUp(self):
+        super(TestUpgradeChecks, self).setUp()
+        self.cmd = status.Checks()
+
+    def test__check_placeholder(self):
+        check_result = self.cmd._check_placeholder()
+        self.assertEqual(
+            Code.SUCCESS, check_result.code)
diff -Nru senlin-6.0.0/senlin/tests/unit/db/test_action_api.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/db/test_action_api.py
--- senlin-6.0.0/senlin/tests/unit/db/test_action_api.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/db/test_action_api.py 2018-11-19 18:48:08.000000000 +0000
@@ -244,6 +244,32 @@
         for spec in specs:
             self.assertIn(spec['name'], names)
 
+    def test_action_get_all_active_by_target(self):
+        specs = [
+            {'name': 'A01', 'target': 'cluster_001', 'status': 'READY'},
+            {'name': 'A02', 'target': 'node_001'},
+            {'name': 'A03', 'target': 'cluster_001', 'status': 'INIT'},
+            {'name': 'A04', 'target': 'cluster_001', 'status': 'WAITING'},
+            {'name': 'A05', 'target': 'cluster_001', 'status': 'READY'},
+            {'name': 'A06', 'target': 'cluster_001', 'status': 'RUNNING'},
+            {'name': 'A07', 'target': 'cluster_001', 'status': 'SUCCEEDED'},
+            {'name': 'A08', 'target': 'cluster_001', 'status': 'FAILED'},
+            {'name': 'A09', 'target': 'cluster_001', 'status': 'CANCELLED'},
+            {'name': 'A10', 'target': 'cluster_001',
+             'status': 'WAITING_LIFECYCLE_COMPLETION'},
+            {'name': 'A11', 'target': 'cluster_001', 'status': 'SUSPENDED'},
+        ]
+
+        for spec in specs:
+            _create_action(self.ctx, **spec)
+
+        actions = db_api.action_get_all_active_by_target(self.ctx,
+                                                         'cluster_001')
+        self.assertEqual(5, len(actions))
+        names = [p.name for p in actions]
+        for name in names:
+            self.assertIn(name, ['A01', 'A04', 'A05', 'A06', 'A10'])
+
     def test_action_get_all_project_safe(self):
         parser.simple_parse(shared.sample_action)
         _create_action(self.ctx)
diff -Nru senlin-6.0.0/senlin/tests/unit/db/test_lock_api.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/db/test_lock_api.py
--- senlin-6.0.0/senlin/tests/unit/db/test_lock_api.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/db/test_lock_api.py 2018-11-19 18:48:08.000000000 +0000
@@ -174,6 +174,27 @@
         observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1)
         self.assertTrue(observed)
 
+    def test_cluster_is_locked(self):
+        # newly created cluster should not be locked
+        observed = db_api.cluster_is_locked(self.cluster.id)
+        self.assertFalse(observed)
+
+        # lock cluster
+        observed = db_api.cluster_lock_acquire(self.cluster.id, UUID1, -1)
+        self.assertIn(UUID1, observed)
+
+        # cluster should be locked
+        observed = db_api.cluster_is_locked(self.cluster.id)
+        self.assertTrue(observed)
+
+        # release cluster lock
+        observed = db_api.cluster_lock_release(self.cluster.id, UUID1, -1)
+        self.assertTrue(observed)
+
+        # cluster should not be locked anymore
+        observed = db_api.cluster_is_locked(self.cluster.id)
+        self.assertFalse(observed)
+
     def test_node_lock_acquire_release(self):
         observed = db_api.node_lock_acquire(self.node.id, UUID1)
         self.assertEqual(UUID1, observed)
@@ -221,6 +242,27 @@
         observed = db_api.node_lock_release(self.node.id, UUID2)
         self.assertTrue(observed)
 
+    def test_node_is_locked(self):
+        # newly created node should not be locked
+        observed = db_api.node_is_locked(self.node.id)
+        self.assertFalse(observed)
+
+        # lock node
+        observed
= db_api.node_lock_acquire(self.node.id, UUID1) + self.assertIn(UUID1, observed) + + # node should be locked + observed = db_api.node_is_locked(self.node.id) + self.assertTrue(observed) + + # release node lock + observed = db_api.node_lock_release(self.node.id, UUID1) + self.assertTrue(observed) + + # node should not be locked anymore + observed = db_api.node_is_locked(self.node.id) + self.assertFalse(observed) + class GCByEngineTest(base.SenlinTestCase): diff -Nru senlin-6.0.0/senlin/tests/unit/drivers/test_nova_v2.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/drivers/test_nova_v2.py --- senlin-6.0.0/senlin/tests/unit/drivers/test_nova_v2.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/drivers/test_nova_v2.py 2018-11-19 18:48:08.000000000 +0000 @@ -50,19 +50,6 @@ d.flavor_find('foo', False) self.compute.find_flavor.assert_called_once_with('foo', False) - def test_image_find(self): - d = nova_v2.NovaClient(self.conn_params) - d.image_find('foo') - self.compute.find_image.assert_called_once_with('foo', False) - self.compute.find_image.reset_mock() - - d.image_find('foo', True) - self.compute.find_image.assert_called_once_with('foo', True) - self.compute.find_image.reset_mock() - - d.image_find('foo', False) - self.compute.find_image.assert_called_once_with('foo', False) - def test_keypair_create(self): d = nova_v2.NovaClient(self.conn_params) d.keypair_create(name='foo') diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_action_base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_action_base.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_action_base.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_action_base.py 2018-11-19 18:48:08.000000000 +0000 @@ -29,8 +29,10 @@ from senlin.engine import event as EVENT from senlin.engine import node as node_mod from senlin.objects import action as ao +from senlin.objects import cluster_lock as cl from senlin.objects import cluster_policy as cpo from senlin.objects import dependency as dobj +from senlin.objects import node_lock as nl from senlin.policies import base as policy_mod from senlin.tests.unit.common import base from senlin.tests.unit.common import utils @@ -92,6 +94,11 @@ self.assertIsNone(obj.updated_at) self.assertEqual({}, obj.data) + def _create_cp_binding(self, cluster_id, policy_id): + return cpo.ClusterPolicy(cluster_id=cluster_id, policy_id=policy_id, + enabled=True, id=uuidutils.generate_uuid(), + last_op=None) + @mock.patch.object(cluster_mod.Cluster, 'load') def test_action_new_cluster(self, mock_load): fake_cluster = mock.Mock(timeout=cfg.CONF.default_action_timeout) @@ -245,6 +252,194 @@ self.assertEqual('FAKE_ID', result) mock_store.assert_called_once_with(self.ctx) + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_lock_cluster_false(self, mock_lock, + mock_active, mock_store): + mock_store.return_value = 'FAKE_ID' + mock_active.return_value = None + mock_lock.return_value = False + + result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_CREATE', + name='test') + + self.assertEqual('FAKE_ID', result) + mock_store.assert_called_once_with(self.ctx) + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + 
@mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_lock_cluster_true(self, mock_lock, + mock_active, mock_store): + mock_store.return_value = 'FAKE_ID' + mock_active.return_value = None + mock_lock.return_value = True + + error_message = ( + 'CLUSTER_CREATE for cluster \'{}\' cannot be completed because ' + 'it is already locked.').format(OBJID) + with self.assertRaisesRegexp(exception.ResourceIsLocked, + error_message): + ab.Action.create(self.ctx, OBJID, 'CLUSTER_CREATE', name='test') + + mock_store.assert_not_called() + mock_active.assert_not_called() + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(nl.NodeLock, 'is_locked') + def test_action_create_lock_node_false(self, mock_lock, + mock_active, mock_store): + mock_store.return_value = 'FAKE_ID' + mock_active.return_value = None + mock_lock.return_value = False + + result = ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', + name='test') + + self.assertEqual('FAKE_ID', result) + mock_store.assert_called_once_with(self.ctx) + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_lock_cluster_true_delete(self, mock_lock, + mock_active, mock_store): + mock_store.return_value = 'FAKE_ID' + mock_active.return_value = None + mock_lock.return_value = True + + result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DELETE', + name='test') + + self.assertEqual('FAKE_ID', result) + mock_store.assert_called_once_with(self.ctx) + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(nl.NodeLock, 'is_locked') + def test_action_create_lock_node_true(self, mock_lock, mock_active, + mock_store): + mock_store.return_value = 'FAKE_ID' + mock_active.return_value = None + mock_lock.return_value = True + + error_message = ( + 'NODE_CREATE for node \'{}\' cannot be completed because ' + 'it is already locked.').format(OBJID) + with self.assertRaisesRegexp(exception.ResourceIsLocked, + error_message): + ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', name='test') + + mock_store.assert_not_called() + mock_active.assert_not_called() + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_conflict(self, mock_lock, mock_active, mock_store): + mock_store.return_value = 'FAKE_ID' + uuid1 = 'ce982cd5-26da-4e2c-84e5-be8f720b7478' + uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' + mock_active.return_value = [ao.Action(id=uuid1), ao.Action(id=uuid2)] + mock_lock.return_value = False + + error_message = ( + 'The NODE_CREATE action for target {} conflicts with the following' + ' action\(s\): {},{}').format(OBJID, uuid1, uuid2) + with self.assertRaisesRegexp(exception.ActionConflict, + error_message): + ab.Action.create(self.ctx, OBJID, 'NODE_CREATE', name='test') + + mock_store.assert_not_called() + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_delete_conflict(self, mock_lock, mock_active, + mock_store): + mock_store.return_value = 'FAKE_ID' + uuid1 = 
'ce982cd5-26da-4e2c-84e5-be8f720b7478' + uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' + mock_active.return_value = [ + ao.Action(id=uuid1, action='CLUSTER_DELETE'), + ao.Action(id=uuid2, action='NODE_DELETE') + ] + mock_lock.return_value = True + + error_message = ( + 'The CLUSTER_DELETE action for target {} conflicts with the ' + 'following action\(s\): {}').format(OBJID, uuid1) + with self.assertRaisesRegexp(exception.ActionConflict, + error_message): + ab.Action.create(self.ctx, OBJID, 'CLUSTER_DELETE', name='test') + + mock_store.assert_not_called() + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(ab.Action, 'store') + @mock.patch.object(ao.Action, 'get_all_active_by_target') + @mock.patch.object(cl.ClusterLock, 'is_locked') + def test_action_create_delete_no_conflict(self, mock_lock, mock_active, + mock_store): + mock_store.return_value = 'FAKE_ID' + uuid1 = 'ce982cd5-26da-4e2c-84e5-be8f720b7478' + uuid2 = 'ce982cd5-26da-4e2c-84e5-be8f720b7479' + mock_active.return_value = [ + ao.Action(id=uuid1, action='NODE_DELETE'), + ao.Action(id=uuid2, action='NODE_DELETE') + ] + mock_lock.return_value = True + + result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DELETE', + name='test') + + self.assertEqual('FAKE_ID', result) + mock_store.assert_called_once_with(self.ctx) + mock_active.assert_called_once_with(mock.ANY, OBJID) + + @mock.patch.object(timeutils, 'is_older_than') + @mock.patch.object(cpo.ClusterPolicy, 'get_all') + @mock.patch.object(policy_mod.Policy, 'load') + @mock.patch.object(ab.Action, 'store') + def test_action_create_scaling_cooldown_in_progress(self, mock_store, + mock_load, + mock_load_all, + mock_time_util): + cluster_id = CLUSTER_ID + # Note: policy is mocked + policy_id = uuidutils.generate_uuid() + policy = mock.Mock(id=policy_id, + TARGET=[('AFTER', 'CLUSTER_SCALE_OUT')], + event='CLUSTER_SCALE_OUT', + cooldown=240) + pb = self._create_cp_binding(cluster_id, policy.id) + pb.last_op = timeutils.utcnow(True) + mock_load_all.return_value = [pb] + mock_load.return_value = policy + mock_time_util.return_value = False + self.assertRaises(exception.ActionCooldown, ab.Action.create, self.ctx, + cluster_id, 'CLUSTER_SCALE_OUT') + self.assertEqual(0, mock_store.call_count) + + @mock.patch.object(ao.Action, 'action_list_active_scaling') + @mock.patch.object(ab.Action, 'store') + def test_action_create_scaling_conflict(self, mock_store, + mock_list_active): + cluster_id = CLUSTER_ID + + mock_action = mock.Mock() + mock_action.to_dict.return_value = {'id': 'fake_action_id'} + mock_list_active.return_value = [mock_action] + self.assertRaises(exception.ActionConflict, ab.Action.create, self.ctx, + cluster_id, 'CLUSTER_SCALE_IN') + self.assertEqual(0, mock_store.call_count) + def test_action_delete(self): result = ab.Action.delete(self.ctx, 'non-existent') self.assertIsNone(result) @@ -682,12 +877,12 @@ filters={'enabled': True}) mock_load.assert_called_once_with(action.context, policy.id) # last_op was updated anyway - self.assertIsNotNone(pb.last_op) + self.assertEqual(action.inputs['last_op'], pb.last_op) # neither pre_op nor post_op was called, because target not match self.assertEqual(0, mock_pre_op.call_count) self.assertEqual(0, mock_post_op.call_count) - def test__check_result_true(self): + def test_check_result_true(self): cluster_id = CLUSTER_ID action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) action.data['status'] = policy_mod.CHECK_OK @@ -697,7 +892,7 @@ self.assertTrue(res) - def test__check_result_false(self): + def 
test_check_result_false(self): cluster_id = CLUSTER_ID action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) action.data['status'] = policy_mod.CHECK_ERROR @@ -768,45 +963,11 @@ filters={'enabled': True}) mock_load.assert_called_once_with(action.context, policy.id) # last_op was updated for POST check - self.assertIsNotNone(pb.last_op) + self.assertEqual(action.inputs['last_op'], pb.last_op) # pre_op is called, but post_op was not called self.assertEqual(0, policy.pre_op.call_count) policy.post_op.assert_called_once_with(cluster_id, action) - @mock.patch.object(cpo.ClusterPolicy, 'cooldown_inprogress') - @mock.patch.object(cpo.ClusterPolicy, 'get_all') - @mock.patch.object(policy_mod.Policy, 'load') - def test_policy_check_cooldown_inprogress(self, mock_load, mock_load_all, - mock_inprogress): - cluster_id = CLUSTER_ID - # Note: policy is mocked - policy_id = uuidutils.generate_uuid() - policy = mock.Mock(id=policy_id, TARGET=[('AFTER', 'OBJECT_ACTION')]) - # Note: policy binding is created but not stored - pb = self._create_cp_binding(cluster_id, policy.id) - mock_inprogress.return_value = True - mock_load_all.return_value = [pb] - mock_load.return_value = policy - action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx) - - # Do it - res = action.policy_check(CLUSTER_ID, 'AFTER') - - self.assertIsNone(res) - self.assertEqual(policy_mod.CHECK_ERROR, action.data['status']) - self.assertEqual( - 'Policy %s cooldown is still in progress.' % policy_id, - six.text_type(action.data['reason'])) - mock_load_all.assert_called_once_with( - action.context, cluster_id, sort='priority', - filters={'enabled': True}) - mock_load.assert_called_once_with(action.context, policy.id) - # last_op was updated for POST check - self.assertIsNotNone(pb.last_op) - # neither pre_op nor post_op was not called, due to cooldown - self.assertEqual(0, policy.pre_op.call_count) - self.assertEqual(0, policy.post_op.call_count) - @mock.patch.object(cpo.ClusterPolicy, 'get_all') @mock.patch.object(policy_mod.Policy, 'load') @mock.patch.object(ab.Action, '_check_result') diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_cluster_action.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_cluster_action.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_cluster_action.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_cluster_action.py 2018-11-19 18:48:08.000000000 +0000 @@ -36,7 +36,7 @@ self.ctx = utils.dummy_context() @mock.patch.object(ab.Action, 'policy_check') - def test__execute(self, mock_check, mock_load): + def test_execute(self, mock_check, mock_load): cluster = mock.Mock() cluster.id = 'FAKE_CLUSTER' mock_load.return_value = cluster @@ -122,7 +122,7 @@ @mock.patch.object(senlin_lock, 'cluster_lock_acquire') @mock.patch.object(senlin_lock, 'cluster_lock_release') - def test_execute(self, mock_release, mock_acquire, mock_load): + def test_execute_with_locking(self, mock_release, mock_acquire, mock_load): cluster = mock.Mock() cluster.id = 'FAKE_CLUSTER' mock_load.return_value = cluster diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_create.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_create.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_create.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_create.py 2018-11-19 18:48:08.000000000 +0000 @@ -39,9 +39,9 @@ 
@mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__create_nodes_single(self, mock_wait, mock_start, mock_dep, - mock_node, mock_index, mock_action, - mock_update, mock_load): + def test_create_nodes_single(self, mock_wait, mock_start, mock_dep, + mock_node, mock_index, mock_action, + mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', profile_id='FAKE_PROFILE', user='FAKE_USER', project='FAKE_PROJECT', @@ -109,9 +109,9 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__create_nodes_multiple(self, mock_wait, mock_start, mock_dep, - mock_node, mock_index, mock_action, - mock_update, mock_load): + def test_create_nodes_multiple(self, mock_wait, mock_start, mock_dep, + mock_node, mock_index, mock_action, + mock_update, mock_load): cluster = mock.Mock(id='01234567-123434', config={"node.name.format": "node-$3I"}) node1 = mock.Mock(id='01234567-abcdef', @@ -183,9 +183,9 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__create_nodes_multiple_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_node, mock_get, - mock_update, mock_load): + def test_create_nodes_multiple_failed_wait(self, mock_wait, mock_start, + mock_dep, mock_node, mock_get, + mock_update, mock_load): cluster = mock.Mock(id='01234567-123434', config={}) db_cluster = mock.Mock(next_index=1) mock_get.return_value = db_cluster diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_delete.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_delete.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_delete.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_delete.py 2018-11-19 18:48:08.000000000 +0000 @@ -37,8 +37,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_single(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_delete_nodes_single(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='FAKE_CLUSTER', desired_capacity=100, config={}) @@ -73,9 +73,9 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_single_stop_node(self, mock_wait, mock_start, - mock_dep, mock_action, mock_update, - mock_load): + def test_delete_nodes_single_stop_node(self, mock_wait, mock_start, + mock_dep, mock_action, mock_update, + mock_load): # prepare mocks cluster = mock.Mock(id='FAKE_CLUSTER', desired_capacity=100, config={'cluster.stop_node_before_delete': True}) @@ -124,8 +124,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_multi(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_delete_nodes_multi(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', 
desired_capacity=100, config={}) mock_load.return_value = cluster @@ -157,7 +157,7 @@ cluster.remove_node.assert_has_calls([ mock.call('NODE_1'), mock.call('NODE_2')]) - def test__delete_empty(self, mock_load): + def test_delete_empty(self, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID') mock_load.return_value = cluster @@ -176,8 +176,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_pd(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_delete_nodes_with_pd(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) mock_load.return_value = cluster @@ -209,11 +209,11 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_lifecycle_hook(self, mock_wait, mock_start, - mock_post, mock_dep, - mock_node_get, - mock_action, mock_update, - mock_load): + def test_delete_nodes_with_lifecycle_hook(self, mock_wait, mock_start, + mock_post, mock_dep, + mock_node_get, + mock_action, mock_update, + mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) mock_load.return_value = cluster @@ -264,7 +264,7 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_lifecycle_hook_failed_node( + def test_delete_nodes_with_lifecycle_hook_failed_node( self, mock_wait, mock_start, mock_post, mock_dep, mock_node_get, mock_action, mock_update, mock_load): self.delete_nodes_with_lifecycle_hook_invalid_node( @@ -279,7 +279,7 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_lifecycle_hook_missing_node( + def test_delete_nodes_with_lifecycle_hook_missing_node( self, mock_wait, mock_start, mock_post, mock_dep, mock_node_get, mock_action, mock_update, mock_load): self.delete_nodes_with_lifecycle_hook_invalid_node( @@ -335,14 +335,14 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_lifecycle_hook_timeout(self, mock_wait, - mock_start, - mock_post, mock_dep, - mock_node_get, - mock_action, - mock_update, - mock_check_status, - mock_load): + def test_delete_nodes_with_lifecycle_hook_timeout(self, mock_wait, + mock_start, + mock_post, mock_dep, + mock_node_get, + mock_action, + mock_update, + mock_check_status, + mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) mock_load.return_value = cluster @@ -395,9 +395,9 @@ mock_wait.assert_has_calls(wait_calls) @mock.patch.object(ab.Action, 'create') - def test__delete_nodes_with_lifecycle_hook_invalid_type(self, - mock_action, - mock_load): + def test_delete_nodes_with_lifecycle_hook_invalid_type(self, + mock_action, + mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) mock_load.return_value = cluster @@ -424,10 +424,10 @@ @mock.patch.object(ao.Action, 
'update') @mock.patch.object(ab.Action, 'create') - def test__delete_nodes_with_lifecycle_hook_unsupported_webhook(self, - mock_action, - mock_update, - mock_load): + def test_delete_nodes_with_lifecycle_hook_unsupported_webhook(self, + mock_action, + mock_update, + mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100, config={}) mock_load.return_value = cluster @@ -453,8 +453,8 @@ "'webhook' is not implemented", res_msg) @mock.patch.object(ca.ClusterAction, '_remove_nodes_normally') - def test__delete_nodes_failed_remove_stop_node(self, mock_remove, - mock_load): + def test_delete_nodes_failed_remove_stop_node(self, mock_remove, + mock_load): # prepare mocks cluster = mock.Mock(id='ID', config={'cluster.stop_node_before_delete': True}) @@ -481,7 +481,7 @@ @mock.patch.object(ca.ClusterAction, '_remove_nodes_with_hook') @mock.patch.object(ca.ClusterAction, '_remove_nodes_normally') - def test__delete_nodes_with_lifecycle_hook_failed_remove_stop_node( + def test_delete_nodes_with_lifecycle_hook_failed_remove_stop_node( self, mock_remove_normally, mock_remove_hook, mock_load): # prepare mocks cluster = mock.Mock(id='ID', @@ -773,8 +773,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__remove_nodes_normally(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_remove_nodes_normally(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) mock_load.return_value = cluster @@ -810,9 +810,9 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__remove_nodes_with_hook(self, mock_wait, mock_start, mock_post, - mock_dep, mock_node_get, mock_action, - mock_update, mock_load): + def test_remove_nodes_with_hook(self, mock_wait, mock_start, mock_post, + mock_dep, mock_node_get, mock_action, + mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) mock_load.return_value = cluster @@ -860,9 +860,9 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__remove_nodes_normally_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_action, - mock_update, mock_load): + def test_remove_nodes_normally_failed_wait(self, mock_wait, mock_start, + mock_dep, mock_action, + mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='ID', config={}) mock_load.return_value = cluster @@ -888,9 +888,9 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__remove_nodes_hook_failed_wait(self, mock_wait, mock_start, - mock_dep, mock_action, - mock_update, mock_load): + def test_remove_nodes_hook_failed_wait(self, mock_wait, mock_start, + mock_dep, mock_action, + mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='ID', config={}) mock_load.return_value = cluster @@ -925,10 +925,10 @@ @mock.patch.object(msg.Message, 'post_lifecycle_hook_message') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__delete_nodes_with_error_nodes(self, mock_wait, mock_start, - 
mock_post, mock_dep, - mock_node_get, mock_action, - mock_update, mock_load): + def test_delete_nodes_with_error_nodes(self, mock_wait, mock_start, + mock_post, mock_dep, + mock_node_get, mock_action, + mock_update, mock_load): # prepare mocks cluster = mock.Mock(id='CLUSTER_ID', desired_capacity=100) mock_load.return_value = cluster diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_node_action.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_node_action.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_node_action.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_node_action.py 2018-11-19 18:48:08.000000000 +0000 @@ -37,7 +37,7 @@ def test_do_create_okay(self, mock_load): node = mock.Mock(id='NID') - node.do_create = mock.Mock(return_value=True) + node.do_create = mock.Mock(return_value=[True, '']) mock_load.return_value = node action = node_action.NodeAction(node.id, 'ACTION', self.ctx) @@ -49,7 +49,8 @@ def test_do_create_failed(self, mock_load): node = mock.Mock(id='NID') - node.do_create = mock.Mock(return_value=False) + node.do_create = mock.Mock(return_value=[False, + 'custom error message']) mock_load.return_value = node action = node_action.NodeAction(node.id, 'ACTION', self.ctx) @@ -57,7 +58,7 @@ res_code, res_msg = action.do_create() self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Node creation failed.', res_msg) + self.assertEqual('custom error message', res_msg) node.do_create.assert_called_once_with(action.context) @mock.patch.object(scaleutils, 'check_size_params') @@ -68,7 +69,7 @@ cluster = mock.Mock(id='CID') mock_c_load.return_value = cluster node = mock.Mock(id='NID', cluster_id='CID') - node.do_create = mock.Mock(return_value=True) + node.do_create = mock.Mock(return_value=[True, '']) mock_load.return_value = node mock_count.return_value = 11 mock_check.return_value = None @@ -97,7 +98,7 @@ cluster = mock.Mock(id='CID') mock_c_load.return_value = cluster node = mock.Mock(id='NID', cluster_id='CID') - node.do_create = mock.Mock(return_value=True) + node.do_create = mock.Mock(return_value=[True, '']) mock_load.return_value = node mock_count.return_value = 11 mock_check.return_value = 'overflow' @@ -128,7 +129,8 @@ cluster = mock.Mock(id='CID') mock_c_load.return_value = cluster node = mock.Mock(id='NID', cluster_id='CID') - node.do_create = mock.Mock(return_value=False) + node.do_create = mock.Mock(return_value=[False, + 'custom error message']) mock_load.return_value = node mock_count.return_value = 11 mock_check.return_value = '' @@ -140,7 +142,7 @@ # assertions self.assertEqual(action.RES_ERROR, res_code) - self.assertEqual('Node creation failed.', res_msg) + self.assertEqual('custom error message', res_msg) mock_c_load.assert_called_once_with(action.context, 'CID') mock_count.assert_called_once_with(action.context, 'CID') mock_check.assert_called_once_with(cluster, 11, None, None, True) diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_recover.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_recover.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_recover.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_recover.py 2018-11-19 18:48:08.000000000 +0000 @@ -366,7 +366,7 @@ mock_desired.assert_called_once_with() @mock.patch.object(ca.ClusterAction, '_create_nodes') - def test__check_capacity_create(self, mock_create, 
mock_load): + def test_check_capacity_create(self, mock_create, mock_load): node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE') cluster = mock.Mock(id='FAKE_ID', RECOVERING='RECOVERING', @@ -383,8 +383,8 @@ @mock.patch.object(su, 'nodes_by_random') @mock.patch.object(no.Node, 'get_all_by_cluster') @mock.patch.object(ca.ClusterAction, '_delete_nodes') - def test__check_capacity_delete(self, mock_delete, mock_get, - mock_su, mock_load): + def test_check_capacity_delete(self, mock_delete, mock_get, + mock_su, mock_load): node1 = mock.Mock(id='NODE_1', cluster_id='FAKE_ID', status='ACTIVE') node2 = mock.Mock(id='NODE_2', cluster_id='FAKE_ID', status='ERROR') diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_resize.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_resize.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_resize.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_resize.py 2018-11-19 18:48:08.000000000 +0000 @@ -28,7 +28,7 @@ super(ClusterResizeTest, self).setUp() self.ctx = utils.dummy_context() - def test__update_cluster_size(self, mock_load): + def test_update_cluster_size(self, mock_load): cluster = mock.Mock(id='CID', desired_capacity=10, nodes=[]) mock_load.return_value = cluster action = ca.ClusterAction(cluster.id, 'CLUSTER_RESIZE', self.ctx, @@ -40,7 +40,7 @@ action.context, consts.CS_RESIZING, 'Cluster resize started.', desired_capacity=15, min_size=1, max_size=20) - def test__update_cluster_size_minimum(self, mock_load): + def test_update_cluster_size_minimum(self, mock_load): cluster = mock.Mock(id='CID', desired_capacity=10, nodes=[]) mock_load.return_value = cluster action = ca.ClusterAction(cluster.id, 'CLUSTER_RESIZE', self.ctx, diff -Nru senlin-6.0.0/senlin/tests/unit/engine/actions/test_update.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_update.py --- senlin-6.0.0/senlin/tests/unit/engine/actions/test_update.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/actions/test_update.py 2018-11-19 18:48:08.000000000 +0000 @@ -144,8 +144,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__update_nodes_no_policy(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_update_nodes_no_policy(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): node1 = mock.Mock(id='node_id1') node2 = mock.Mock(id='node_id2') cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], @@ -176,8 +176,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__update_nodes_batch_policy(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def test_update_nodes_batch_policy(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): node1 = mock.Mock(id='node_id1') node2 = mock.Mock(id='node_id2') cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], @@ -215,8 +215,8 @@ @mock.patch.object(dobj.Dependency, 'create') @mock.patch.object(dispatcher, 'start_action') @mock.patch.object(ca.ClusterAction, '_wait_for_dependents') - def test__update_nodes_fail_wait(self, mock_wait, mock_start, mock_dep, - mock_action, mock_update, mock_load): + def 
test_update_nodes_fail_wait(self, mock_wait, mock_start, mock_dep, + mock_action, mock_update, mock_load): node1 = mock.Mock(id='node_id1') node2 = mock.Mock(id='node_id2') cluster = mock.Mock(id='FAKE_ID', nodes=[node1, node2], diff -Nru senlin-6.0.0/senlin/tests/unit/engine/receivers/test_message.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/receivers/test_message.py --- senlin-6.0.0/senlin/tests/unit/engine/receivers/test_message.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/receivers/test_message.py 2018-11-19 18:48:08.000000000 +0000 @@ -87,7 +87,7 @@ mock_param.assert_called_once_with('user1', 'project1') sd.message.assert_called_once_with(params) - def test__generate_subscriber_url_host_provided(self): + def test_generate_subscriber_url_host_provided(self): cfg.CONF.set_override('host', 'web.com', 'receiver') cfg.CONF.set_override('port', '1234', 'receiver') message = mmod.Message('message', None, None, id=UUID) @@ -97,7 +97,7 @@ self.assertEqual(expected, res) @mock.patch.object(mmod.Message, '_get_base_url') - def test__generate_subscriber_url_host_not_provided( + def test_generate_subscriber_url_host_not_provided( self, mock_get_base_url): mock_get_base_url.return_value = 'http://web.com:1234/v1' message = mmod.Message('message', None, None, id=UUID) @@ -108,7 +108,7 @@ @mock.patch.object(socket, 'gethostname') @mock.patch.object(mmod.Message, '_get_base_url') - def test__generate_subscriber_url_no_host_no_base( + def test_generate_subscriber_url_no_host_no_base( self, mock_get_base_url, mock_gethostname): mock_get_base_url.return_value = None mock_gethostname.return_value = 'test-host' @@ -160,7 +160,7 @@ mock_create_subscription.assert_called_once_with('test-queue') @mock.patch.object(mmod.Message, 'zaqar') - def test__create_queue(self, mock_zaqar): + def test_create_queue(self, mock_zaqar): cfg.CONF.set_override('max_message_size', 8192, 'receiver') mock_zc = mock.Mock() mock_zaqar.return_value = mock_zc @@ -178,7 +178,7 @@ mock_zc.queue_create.assert_called_once_with(**kwargs) @mock.patch.object(mmod.Message, 'zaqar') - def test__create_queue_fail(self, mock_zaqar): + def test_create_queue_fail(self, mock_zaqar): cfg.CONF.set_override('max_message_size', 8192, 'receiver') mock_zc = mock.Mock() mock_zaqar.return_value = mock_zc @@ -196,8 +196,8 @@ @mock.patch.object(mmod.Message, '_generate_subscriber_url') @mock.patch.object(mmod.Message, '_build_trust') @mock.patch.object(mmod.Message, 'zaqar') - def test__create_subscription(self, mock_zaqar, mock_build_trust, - mock_generate_subscriber_url): + def test_create_subscription(self, mock_zaqar, mock_build_trust, + mock_generate_subscriber_url): mock_zc = mock.Mock() mock_zaqar.return_value = mock_zc mock_build_trust.return_value = '123abc' @@ -223,8 +223,8 @@ @mock.patch.object(mmod.Message, '_generate_subscriber_url') @mock.patch.object(mmod.Message, '_build_trust') @mock.patch.object(mmod.Message, 'zaqar') - def test__create_subscription_fail(self, mock_zaqar, mock_build_trust, - mock_generate_subscriber_url): + def test_create_subscription_fail(self, mock_zaqar, mock_build_trust, + mock_generate_subscriber_url): mock_zc = mock.Mock() mock_zaqar.return_value = mock_zc mock_build_trust.return_value = '123abc' @@ -296,8 +296,8 @@ @mock.patch.object(ks_loading, 'load_auth_from_conf_options') @mock.patch.object(ks_loading, 'load_session_from_conf_options') @mock.patch.object(mmod.Message, 'keystone') - def test__build_trust_exists(self, 
mock_keystone, mock_load_session, - mock_load_auth): + def test_build_trust_exists(self, mock_keystone, mock_load_session, + mock_load_auth): mock_auth = mock.Mock() mock_session = mock.Mock() mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' @@ -324,7 +324,7 @@ @mock.patch.object(ks_loading, 'load_auth_from_conf_options') @mock.patch.object(ks_loading, 'load_session_from_conf_options') @mock.patch.object(mmod.Message, 'keystone') - def test__build_trust_create_new_multiroles( + def test_build_trust_create_new_multiroles( self, mock_keystone, mock_load_session, mock_load_auth): mock_auth = mock.Mock() mock_session = mock.Mock() @@ -352,7 +352,7 @@ @mock.patch.object(ks_loading, 'load_auth_from_conf_options') @mock.patch.object(ks_loading, 'load_session_from_conf_options') @mock.patch.object(mmod.Message, 'keystone') - def test__build_trust_create_new_single_admin_role( + def test_build_trust_create_new_single_admin_role( self, mock_keystone, mock_load_session, mock_load_auth): mock_auth = mock.Mock() mock_session = mock.Mock() @@ -380,9 +380,9 @@ @mock.patch.object(ks_loading, 'load_auth_from_conf_options') @mock.patch.object(ks_loading, 'load_session_from_conf_options') @mock.patch.object(mmod.Message, 'keystone') - def test__build_trust_create_new_trust_failed(self, mock_keystone, - mock_load_session, - mock_load_auth): + def test_build_trust_create_new_trust_failed(self, mock_keystone, + mock_load_session, + mock_load_auth): mock_auth = mock.Mock() mock_session = mock.Mock() mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' @@ -409,9 +409,9 @@ @mock.patch.object(ks_loading, 'load_auth_from_conf_options') @mock.patch.object(ks_loading, 'load_session_from_conf_options') @mock.patch.object(mmod.Message, 'keystone') - def test__build_trust_get_trust_exception(self, mock_keystone, - mock_load_session, - mock_load_auth): + def test_build_trust_get_trust_exception(self, mock_keystone, + mock_load_session, + mock_load_auth): mock_auth = mock.Mock() mock_session = mock.Mock() mock_session.get_user_id.return_value = 'zaqar-trustee-user-id' diff -Nru senlin-6.0.0/senlin/tests/unit/engine/receivers/test_receiver.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/receivers/test_receiver.py --- senlin-6.0.0/senlin/tests/unit/engine/receivers/test_receiver.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/receivers/test_receiver.py 2018-11-19 18:48:08.000000000 +0000 @@ -289,8 +289,8 @@ @mock.patch.object(context, "get_service_credentials") @mock.patch.object(driver_base, "SenlinDriver") - def test__get_base_url_succeeded(self, mock_senlin_driver, - mock_get_service_creds): + def test_get_base_url_succeeded(self, mock_senlin_driver, + mock_get_service_creds): cfg.CONF.set_override('default_region_name', 'RegionOne') fake_driver = mock.Mock() fake_kc = mock.Mock() @@ -312,7 +312,7 @@ @mock.patch.object(context, "get_service_credentials") @mock.patch.object(driver_base, "SenlinDriver") - def test__get_base_url_failed_get_endpoint_exception( + def test_get_base_url_failed_get_endpoint_exception( self, mock_senlin_driver, mock_get_service_creds): cfg.CONF.set_override('default_region_name', 'RegionOne') fake_driver = mock.Mock() diff -Nru senlin-6.0.0/senlin/tests/unit/engine/service/test_clusters.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/service/test_clusters.py --- senlin-6.0.0/senlin/tests/unit/engine/service/test_clusters.py 2018-08-30 14:16:49.000000000 +0000 +++ 
senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/service/test_clusters.py 2018-11-19 18:48:08.000000000 +0000 @@ -1570,7 +1570,7 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes(self, mock_profile, mock_node): + def test_validate_replace_nodes(self, mock_profile, mock_node): cluster = mock.Mock(id='CID', profile_id='FAKE_ID') mock_profile.return_value = mock.Mock(type='FAKE_TYPE') mock_node.side_effect = [ @@ -1595,8 +1595,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_old_missing(self, mock_profile, - mock_node): + def test_validate_replace_nodes_old_missing(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = exc.ResourceNotFound(type='node', id='OLD') @@ -1610,8 +1610,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_new_missing(self, mock_profile, - mock_node): + def test_validate_replace_nodes_new_missing(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = [ mock.Mock(), @@ -1632,8 +1632,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_old_not_member(self, mock_profile, - mock_node): + def test_validate_replace_nodes_old_not_member(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = [ mock.Mock(cluster_id='OTHER'), @@ -1654,8 +1654,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_new_not_orphan(self, mock_profile, - mock_node): + def test_validate_replace_nodes_new_not_orphan(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = [ mock.Mock(cluster_id='CID'), @@ -1676,8 +1676,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_new_bad_status(self, mock_profile, - mock_node): + def test_validate_replace_nodes_new_bad_status(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = [ mock.Mock(cluster_id='CID'), @@ -1697,8 +1697,8 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_mult_err(self, mock_profile, - mock_node): + def test_validate_replace_nodes_mult_err(self, mock_profile, + mock_node): c = mock.Mock(id='CID', profile_id='FAKE_ID') mock_node.side_effect = [ mock.Mock(id='OLD1', cluster_id='CID'), @@ -1721,7 +1721,7 @@ @mock.patch.object(no.Node, 'find') @mock.patch.object(po.Profile, 'get') - def test__validate_replace_nodes_new_profile_type_mismatch( + def test_validate_replace_nodes_new_profile_type_mismatch( self, mock_profile, mock_node): c = mock.Mock(id='CID', profile_id='FAKE_CLUSTER_PROFILE') mock_profile.side_effect = [ diff -Nru senlin-6.0.0/senlin/tests/unit/engine/service/test_nodes.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/service/test_nodes.py --- senlin-6.0.0/senlin/tests/unit/engine/service/test_nodes.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/service/test_nodes.py 2018-11-19 18:48:08.000000000 +0000 @@ -708,7 +708,7 @@ @mock.patch.object(environment.Environment, 'get_profile') @mock.patch.object(pb.Profile, 'adopt_node') - def test__node_adopt_preview(self, mock_adopt, 
mock_profile): + def test_node_adopt_preview_with_profile(self, mock_adopt, mock_profile): class FakeProfile(object): pass @@ -742,7 +742,7 @@ self.assertEqual(expected, s) @mock.patch.object(pb.Profile, 'adopt_node') - def test__node_adopt_preview_bad_type(self, mock_adopt): + def test_node_adopt_preview_bad_type(self, mock_adopt): req = mock.Mock( identity="FAKE_NODE", type="TestProfile-1.0", @@ -760,7 +760,7 @@ @mock.patch.object(environment.Environment, 'get_profile') @mock.patch.object(pb.Profile, 'adopt_node') - def test__node_adopt_preview_failed_adopt(self, mock_adopt, mock_profile): + def test_node_adopt_preview_failed_adopt(self, mock_adopt, mock_profile): class FakeProfile(object): pass diff -Nru senlin-6.0.0/senlin/tests/unit/engine/test_cluster.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_cluster.py --- senlin-6.0.0/senlin/tests/unit/engine/test_cluster.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_cluster.py 2018-11-19 18:48:08.000000000 +0000 @@ -87,8 +87,8 @@ @mock.patch.object(pcb.Policy, 'load') @mock.patch.object(pfb.Profile, 'load') @mock.patch.object(no.Node, 'get_all_by_cluster') - def test__load_runtime_data(self, mock_nodes, mock_profile, mock_policy, - mock_pb): + def test_load_runtime_data(self, mock_nodes, mock_profile, mock_policy, + mock_pb): x_binding = mock.Mock() x_binding.policy_id = POLICY_ID mock_pb.return_value = [x_binding] @@ -121,7 +121,7 @@ project_safe=False) mock_nodes.assert_called_once_with(self.context, CLUSTER_ID) - def test__load_runtime_data_id_is_none(self): + def test_load_runtime_data_id_is_none(self): cluster = cm.Cluster('test-cluster', 0, PROFILE_ID) cluster._load_runtime_data(self.context) diff -Nru senlin-6.0.0/senlin/tests/unit/engine/test_engine_startstop.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_engine_startstop.py --- senlin-6.0.0/senlin/tests/unit/engine/test_engine_startstop.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_engine_startstop.py 2018-11-19 18:48:08.000000000 +0000 @@ -169,7 +169,7 @@ @mock.patch.object(service_obj.Service, 'gc_by_engine') @mock.patch.object(service_obj.Service, 'get_all') @mock.patch.object(service_obj.Service, 'delete') - def test__service_manage_cleanup(self, mock_delete, mock_get_all, mock_gc): + def test_service_manage_cleanup(self, mock_delete, mock_get_all, mock_gc): delta = datetime.timedelta(seconds=2 * cfg.CONF.periodic_interval) ages_a_go = timeutils.utcnow(True) - delta mock_get_all.return_value = [{'id': 'foo', 'updated_at': ages_a_go}] @@ -181,8 +181,10 @@ @mock.patch('senlin.engine.health_manager.HealthManager') @mock.patch('oslo_messaging.Target') @mock.patch.object(service_obj.Service, 'get_all') - def test_service_manage_cleanup(self, mock_get_all, mock_msg_cls, - mock_hm_cls, mock_disp_cls): + def test_service_manage_cleanup_without_exception(self, mock_get_all, + mock_msg_cls, + mock_hm_cls, + mock_disp_cls): cfg.CONF.set_override('periodic_interval', 1) # start engine and verify that get_all is being called more than once diff -Nru senlin-6.0.0/senlin/tests/unit/engine/test_event.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_event.py --- senlin-6.0.0/senlin/tests/unit/engine/test_event.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_event.py 2018-11-19 18:48:08.000000000 +0000 @@ -53,7 +53,7 @@ 
invoke_on_load=True, propagate_map_exceptions=True) - def test__event_data(self): + def test_event_data(self): entity = mock.Mock(id='ENTITY_ID') entity.name = 'FAKE_ENTITY' action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity) @@ -65,7 +65,7 @@ 'id': 'ACTION_I'}, res) - def test__event_data_with_phase_reason(self): + def test_event_data_with_phase_reason(self): entity = mock.Mock(id='ENTITY_ID') entity.name = 'FAKE_ENTITY' action = mock.Mock(id='ACTION_ID', action='ACTION', entity=entity) @@ -77,7 +77,7 @@ 'obj_id': 'ENTITY_I', 'reason': 'REASON1'}, res) - def test__dump(self): + def test_dump(self): cfg.CONF.set_override('debug', True) saved_dispathers = event.dispatchers event.dispatchers = mock.Mock() @@ -90,7 +90,7 @@ finally: event.dispatchers = saved_dispathers - def test__dump_without_timestamp(self): + def test_dump_without_timestamp(self): cfg.CONF.set_override('debug', True) saved_dispathers = event.dispatchers event.dispatchers = mock.Mock() @@ -104,7 +104,7 @@ finally: event.dispatchers = saved_dispathers - def test__dump_guarded(self): + def test_dump_guarded(self): cfg.CONF.set_override('debug', False) cfg.CONF.set_override('priority', 'warning', group='dispatchers') saved_dispathers = event.dispatchers @@ -117,7 +117,7 @@ finally: event.dispatchers = saved_dispathers - def test__dump_exclude_derived_actions_positive(self): + def test_dump_exclude_derived_actions_positive(self): cfg.CONF.set_override('exclude_derived_actions', True, group='dispatchers') saved_dispathers = event.dispatchers @@ -130,7 +130,7 @@ finally: event.dispatchers = saved_dispathers - def test__dump_exclude_derived_actions_negative(self): + def test_dump_exclude_derived_actions_negative(self): cfg.CONF.set_override('exclude_derived_actions', False, group='dispatchers') saved_dispathers = event.dispatchers @@ -145,7 +145,7 @@ finally: event.dispatchers = saved_dispathers - def test__dump_with_exception(self): + def test_dump_with_exception(self): cfg.CONF.set_override('debug', True) saved_dispathers = event.dispatchers event.dispatchers = mock.Mock() diff -Nru senlin-6.0.0/senlin/tests/unit/engine/test_health_manager.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_health_manager.py --- senlin-6.0.0/senlin/tests/unit/engine/test_health_manager.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_health_manager.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,10 +11,12 @@ # under the License. 
import copy +import re import time import mock from oslo_config import cfg +from oslo_service import threadgroup from oslo_utils import timeutils as tu from senlin.common import consts @@ -22,6 +24,7 @@ from senlin.common import messaging from senlin.common import utils from senlin.engine import health_manager as hm +from senlin.engine import node as node_mod from senlin import objects from senlin.objects import cluster as obj_cluster from senlin.objects import health_registry as hr @@ -502,280 +505,302 @@ x_listener.start.assert_called_once_with() -class TestHealthManager(base.SenlinTestCase): +class TestHealthCheckType(base.SenlinTestCase): + def setUp(self): + super(TestHealthCheckType, self).setUp() + + self.hc = hm.NodePollStatusHealthCheck( + cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1, + params='' + ) + + def test_factory(self): + cid = 'CLUSTER_ID' + interval = 1 + params = { + 'detection_modes': [ + { + 'type': 'NODE_STATUS_POLLING', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + }, + { + 'type': 'NODE_STATUS_POLL_URL', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + } + ], + 'node_update_timeout': 300, + } + + for d in params['detection_modes']: + hc = hm.HealthCheckType.factory(d['type'], cid, interval, params) + + self.assertEqual(cid, hc.cluster_id) + self.assertEqual(interval, hc.interval) + self.assertEqual(d, hc.params) + self.assertEqual( + params['node_update_timeout'], hc.node_update_timeout) + + def test_factory_invalid_type(self): + cid = 'CLUSTER_ID' + interval = 1 + params = { + 'detection_modes': [ + { + 'type': 'blah', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + }, + ], + 'node_update_timeout': 300, + } + + with self.assertRaisesRegex(Exception, 'Invalid detection type: blah'): + hm.HealthCheckType.factory('blah', cid, interval, params) + + def test_factory_same_type_twice(self): + cid = 'CLUSTER_ID' + interval = 1 + params = { + 'detection_modes': [ + { + 'type': 'NODE_STATUS_POLLING', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + }, + { + 'type': 'NODE_STATUS_POLLING', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + } + ], + 'node_update_timeout': 300, + } + with self.assertRaisesRegex( + Exception, + '.*Encountered 2 instances of type NODE_STATUS_POLLING'): + hm.HealthCheckType.factory( + 'NODE_STATUS_POLLING', cid, interval, params) + + +class TestNodePollStatusHealthCheck(base.SenlinTestCase): def setUp(self): - super(TestHealthManager, self).setUp() + super(TestNodePollStatusHealthCheck, self).setUp() - mock_eng = mock.Mock() - mock_eng.engine_id = 'ENGINE_ID' - topic = consts.HEALTH_MANAGER_TOPIC - version = consts.RPC_API_VERSION - self.hm = hm.HealthManager(mock_eng, topic, version) + self.hc = hm.NodePollStatusHealthCheck( + cluster_id='CLUSTER_ID', + interval=1, node_update_timeout=1, params='' + ) - def test_init(self): - 
self.assertEqual('ENGINE_ID', self.hm.engine_id) - self.assertIsNotNone(self.hm.TG) - self.assertIsNotNone(self.hm.rpc_client) - self.assertEqual(consts.HEALTH_MANAGER_TOPIC, self.hm.topic) - self.assertEqual(consts.RPC_API_VERSION, self.hm.version) - self.assertEqual(0, len(self.hm.rt['registries'])) + @mock.patch.object(node_mod.Node, '_from_object') + @mock.patch.object(tu, 'is_older_than') + def test_run_health_check_healthy(self, mock_tu, mock_node_obj): + x_entity = mock.Mock() + x_entity.do_check.return_value = True + mock_node_obj.return_value = x_entity - @mock.patch.object(hm.HealthManager, "_load_runtime_registry") - def test__dummy_task(self, mock_load): - self.hm._dummy_task() - mock_load.assert_called_once_with() + ctx = mock.Mock() + node = mock.Mock(id='FAKE_NODE1', status="ERROR", + updated_at='2018-08-13 18:00:00', + init_at='2018-08-13 17:00:00') - @mock.patch.object(hr.HealthRegistry, 'claim') - @mock.patch.object(objects.HealthRegistry, 'update') - def test__load_runtime_registry(self, mock_update, mock_claim): - mock_claim.return_value = [ - mock.Mock(cluster_id='CID1', - check_type=consts.NODE_STATUS_POLLING, - interval=12, - params={'k1': 'v1'}, - enabled=True), - mock.Mock(cluster_id='CID2', - check_type=consts.NODE_STATUS_POLLING, - interval=34, - params={'k2': 'v2'}, - enabled=False), - mock.Mock(cluster_id='CID3', - check_type='UNKNOWN_CHECK_TYPE', - interval=56, - params={'k3': 'v3'}), - ] + # do it + res = self.hc.run_health_check(ctx, node) + + self.assertTrue(res) + mock_tu.assert_not_called() + + @mock.patch.object(node_mod.Node, '_from_object') + @mock.patch.object(tu, 'is_older_than') + def test_run_health_check_unhealthy(self, mock_tu, mock_node_obj): + x_entity = mock.Mock() + x_entity.do_check.return_value = False + mock_node_obj.return_value = x_entity + + mock_tu.return_value = True + + ctx = mock.Mock() + node = mock.Mock(id='FAKE_NODE1', status="ERROR", + updated_at='2018-08-13 18:00:00', + init_at='2018-08-13 17:00:00') - timer1 = mock.Mock() - timer2 = mock.Mock() - mock_add_timer = self.patchobject(self.hm.TG, 'add_dynamic_timer', - side_effect=[timer1, timer2]) # do it - self.hm._load_runtime_registry() + res = self.hc.run_health_check(ctx, node) + + self.assertFalse(res) + mock_tu.assert_called_once_with(node.updated_at, 1) + + @mock.patch.object(node_mod.Node, '_from_object') + @mock.patch.object(tu, 'is_older_than') + def test_run_health_check_unhealthy_within_timeout( + self, mock_tu, mock_node_obj): + x_entity = mock.Mock() + x_entity.do_check.return_value = False + mock_node_obj.return_value = x_entity + + mock_tu.return_value = False + + ctx = mock.Mock() + node = mock.Mock(id='FAKE_NODE1', status="ERROR", + updated_at='2018-08-13 18:00:00', + init_at='2018-08-13 17:00:00') + + # do it + res = self.hc.run_health_check(ctx, node) + + self.assertTrue(res) + mock_tu.assert_called_once_with(node.updated_at, 1) - # assertions - mock_claim.assert_called_once_with(self.hm.ctx, self.hm.engine_id) - mock_calls = [ - mock.call(self.hm._poll_cluster, None, None, 'CID1', 12, {}) - ] - mock_add_timer.assert_has_calls(mock_calls) - self.assertEqual(2, len(self.hm.registries)) - self.assertEqual( - { - 'cluster_id': 'CID1', - 'check_type': consts.NODE_STATUS_POLLING, - 'interval': 12, - 'params': {'k1': 'v1'}, - 'timer': timer1, - 'enabled': True, - }, - self.hm.registries[0]) - self.assertEqual( - { - 'cluster_id': 'CID2', - 'check_type': consts.NODE_STATUS_POLLING, - 'interval': 34, - 'params': {'k2': 'v2'}, - 'enabled': False, - }, - 
self.hm.registries[1]) - def test__expand_url_template(self): +class TestNodePollUrlHealthCheck(base.SenlinTestCase): + def setUp(self): + super(TestNodePollUrlHealthCheck, self).setUp() + + default_params = { + 'poll_url': 'FAKE_POLL_URL', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', + 'poll_url_retry_limit': 2, + 'poll_url_retry_interval': 1, + 'node_update_timeout': 5 + } + + self.hc = hm.NodePollUrlHealthCheck( + cluster_id='CLUSTER_ID', interval=1, node_update_timeout=1, + params=default_params + ) + + def test_expand_url_template(self): url_template = 'https://abc123/foo/bar' node = mock.Mock() # do it - res = self.hm._expand_url_template(url_template, node) + res = self.hc._expand_url_template(url_template, node) self.assertEqual(res, url_template) - def test__expand_url_template_nodename(self): + def test_expand_url_template_nodename(self): node = mock.Mock() node.name = 'name' url_template = 'https://abc123/{nodename}/bar' expanded_url = 'https://abc123/{}/bar'.format(node.name) # do it - res = self.hm._expand_url_template(url_template, node) + res = self.hc._expand_url_template(url_template, node) self.assertEqual(res, expanded_url) - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(obj_node.Node, 'get_all_by_cluster') - @mock.patch.object(hm.HealthManager, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__poll_cluster(self, mock_rpc, mock_ctx, mock_get, - mock_wait, mock_nodes, mock_chase): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') - mock_get.return_value = x_cluster + @mock.patch.object(tu, "is_older_than") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") + @mock.patch.object(utils, 'url_fetch') + def test_run_health_check_healthy( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_wait.return_value = (True, "") - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - mock_nodes.return_value = [x_node] - x_action_check = {'action': 'CHECK_ID'} - x_action_recover = {'action': 'RECOVER_ID'} - mock_rpc.side_effect = [x_action_check, x_action_recover] - - recover_action = {'operation': 'REBUILD'} - # do it - res = self.hm._poll_cluster('CLUSTER_ID', 456, recover_action) - - self.assertEqual(mock_chase.return_value, res) - mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id=x_cluster.user, - project_id=x_cluster.project) - mock_rpc.assert_has_calls([ - mock.call(ctx, 'cluster_check', mock.ANY), - mock.call(ctx, 'node_recover', mock.ANY) - ]) - mock_wait.assert_called_once_with(ctx, "CHECK_ID", 456) - mock_chase.assert_called_once_with(mock.ANY, 456) - - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__poll_cluster_not_found(self, mock_check, mock_get, mock_chase): - mock_get.return_value = None + node = mock.Mock() + node.status = consts.NS_ACTIVE + mock_time.return_value = True + mock_expand_url.return_value = 'FAKE_EXPANDED_URL' + mock_url_fetch.return_value = ("Healthy because this return value " + "contains FAKE_HEALTHY_PATTERN") - recover_action = {'operation': 'REBUILD'} # do it - res = self.hm._poll_cluster('CLUSTER_ID', 123, recover_action) + res = self.hc.run_health_check(ctx, 
node) - self.assertEqual(mock_chase.return_value, res) - self.assertEqual(0, mock_check.call_count) - mock_chase.assert_called_once_with(mock.ANY, 123) + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, + verify=True) - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(context, 'get_service_context') - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__poll_cluster_failed_check_rpc(self, mock_check, mock_get, - mock_ctx, mock_chase): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') - mock_get.return_value = x_cluster + @mock.patch.object(tu, "is_older_than") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") + @mock.patch.object(utils, 'url_fetch') + def test_run_health_check_healthy_min_timeout( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_check.side_effect = Exception("boom") - - recover_action = {'operation': 'REBUILD'} - # do it - res = self.hm._poll_cluster('CLUSTER_ID', 123, recover_action) - - self.assertEqual(mock_chase.return_value, res) - mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id='USER_ID', - project_id='PROJECT_ID') - mock_check.assert_called_once_with(ctx, 'cluster_check', mock.ANY) - mock_chase.assert_called_once_with(mock.ANY, 123) + node = mock.Mock() + node.status = consts.NS_ACTIVE + mock_time.return_value = True + mock_expand_url.return_value = 'FAKE_EXPANDED_URL' + mock_url_fetch.return_value = ("Healthy because this return value " + "contains FAKE_HEALTHY_PATTERN") - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(hm.HealthManager, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__poll_cluster_failed_wait(self, mock_rpc, mock_ctx, - mock_get, mock_wait, mock_chase): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') - mock_get.return_value = x_cluster - ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_wait.return_value = (False, "bad") - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check + self.hc.params['poll_url_retry_interval'] = 0 - recover_action = {'operation': 'REBUILD'} # do it - res = self.hm._poll_cluster('CLUSTER_ID', 456, recover_action) + res = self.hc.run_health_check(ctx, node) - self.assertEqual(mock_chase.return_value, res) - mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id='USER_ID', - project_id='PROJECT_ID') - mock_rpc.assert_called_once_with(ctx, 'cluster_check', mock.ANY) - mock_wait.assert_called_once_with(ctx, "CHECK_ID", 456) - mock_chase.assert_called_once_with(mock.ANY, 456) + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, + verify=True) @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.HealthManager, "_expand_url_template") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") @mock.patch.object(utils, 'url_fetch') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__check_url_and_recover_node_healthy( - self, mock_rpc, mock_url_fetch, mock_expand_url, mock_time): + def test_run_health_check_healthy_timeout( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() node = mock.Mock() 
node.status = consts.NS_ACTIVE mock_time.return_value = True mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check mock_url_fetch.return_value = ("Healthy because this return value " "contains FAKE_HEALTHY_PATTERN") - params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5, - } - recover_action = {'operation': 'REBUILD'} + self.hc.params['poll_url_retry_interval'] = 100 # do it - res = self.hm._check_url_and_recover_node(ctx, node, recover_action, - params) + res = self.hc.run_health_check(ctx, node) - self.assertIsNone(res) - mock_rpc.assert_not_called() - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=10, verify=True) @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.HealthManager, "_expand_url_template") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") @mock.patch.object(utils, 'url_fetch') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__check_url_and_recover_node_unhealthy_inactive( - self, mock_rpc, mock_url_fetch, mock_expand_url, mock_time): + def test_run_health_check_unhealthy_inactive( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() node = mock.Mock() node.status = consts.NS_RECOVERING mock_time.return_value = True mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check mock_url_fetch.return_value = "" - params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5, - } - - recover_action = {'operation': 'REBUILD'} # do it - res = self.hm._check_url_and_recover_node(ctx, node, recover_action, - params) + res = self.hc.run_health_check(ctx, node) - self.assertIsNone(res) - mock_rpc.assert_not_called() - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, verify=True) @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.HealthManager, "_expand_url_template") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") @mock.patch.object(utils, 'url_fetch') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__check_url_and_recover_node_unhealthy_update_timeout( - self, mock_rpc, mock_url_fetch, mock_expand_url, mock_time): + def test_run_health_check_unhealthy_update_timeout( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() node = mock.Mock() node.id = 'FAKE_NODE_ID' @@ -783,35 +808,20 @@ node.status = consts.NS_ACTIVE mock_time.return_value = False mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check mock_url_fetch.return_value = "" - params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5, - } - - recover_action = {'operation': 'REBUILD'} # do it - res = self.hm._check_url_and_recover_node(ctx, node, recover_action, - params) + res = 
self.hc.run_health_check(ctx, node) - self.assertIsNone(res) - mock_rpc.assert_not_called() - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, verify=True) @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.HealthManager, "_expand_url_template") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") @mock.patch.object(utils, 'url_fetch') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__check_url_and_recover_node_unhealthy_init_timeout( - self, mock_rpc, mock_url_fetch, mock_expand_url, mock_time): + def test_run_health_check_unhealthy_init_timeout( + self, mock_url_fetch, mock_expand_url, mock_time): ctx = mock.Mock() node = mock.Mock() node.id = 'FAKE_NODE_ID' @@ -820,159 +830,163 @@ node.status = consts.NS_ACTIVE mock_time.return_value = False mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check mock_url_fetch.return_value = "" - params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5, - } - - recover_action = {'operation': 'REBUILD'} # do it - res = self.hm._check_url_and_recover_node(ctx, node, recover_action, - params) + res = self.hc.run_health_check(ctx, node) - self.assertIsNone(res) - mock_rpc.assert_not_called() - mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', + self.assertTrue(res) + mock_url_fetch.assert_called_once_with('FAKE_EXPANDED_URL', timeout=1, verify=True) @mock.patch.object(time, "sleep") @mock.patch.object(tu, "is_older_than") - @mock.patch.object(hm.HealthManager, "_expand_url_template") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") @mock.patch.object(utils, 'url_fetch') - @mock.patch.object(rpc_client.EngineClient, 'call') - def test__check_url_and_recover_node_unhealthy(self, - mock_rpc, mock_url_fetch, - mock_expand_url, mock_time, - mock_sleep): + def test_run_health_check_unhealthy(self, + mock_url_fetch, + mock_expand_url, mock_time, + mock_sleep): ctx = mock.Mock() node = mock.Mock() node.status = consts.NS_ACTIVE node.id = 'FAKE_ID' mock_time.return_value = True mock_expand_url.return_value = 'FAKE_EXPANDED_URL' - x_action_check = {'action': 'CHECK_ID'} - mock_rpc.return_value = x_action_check mock_url_fetch.return_value = "" - params = { - 'poll_url': 'FAKE_POLL_URL', - 'poll_url_ssl_verify': False, - 'poll_url_healthy_response': 'FAKE_HEALTHY_PATTERN', - 'poll_url_retry_limit': 2, - 'poll_url_retry_interval': 1, - 'node_update_timeout': 5, - } - recover_action = {'operation': 'REBUILD'} + # do it + res = self.hc.run_health_check(ctx, node) + + self.assertFalse(res) + mock_url_fetch.assert_has_calls( + [ + mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), + mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True) + ] + ) + mock_sleep.assert_has_calls([mock.call(1), mock.call(1)]) + + @mock.patch.object(time, "sleep") + @mock.patch.object(tu, "is_older_than") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") + @mock.patch.object(utils, 'url_fetch') + def test_run_health_check_conn_error(self, + mock_url_fetch, + mock_expand_url, mock_time, + mock_sleep): + ctx = mock.Mock() + node = mock.Mock() + node.status = consts.NS_ACTIVE + node.id = 'FAKE_ID' + mock_time.return_value = True + 
mock_expand_url.return_value = 'FAKE_EXPANDED_URL' + mock_url_fetch.side_effect = utils.URLFetchError("Error") # do it - res = self.hm._check_url_and_recover_node(ctx, node, recover_action, - params) + res = self.hc.run_health_check(ctx, node) - self.assertEqual(mock_rpc.return_value, res) - mock_rpc.assert_called_once_with(ctx, 'node_recover', mock.ANY) + self.assertFalse(res) mock_url_fetch.assert_has_calls( [ - mock.call('FAKE_EXPANDED_URL', verify=False), - mock.call('FAKE_EXPANDED_URL', verify=False) + mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), + mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True) ] ) mock_sleep.assert_has_calls([mock.call(1), mock.call(1)]) - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(hm.HealthManager, "_check_url_and_recover_node") - @mock.patch.object(obj_node.Node, 'get_all_by_cluster') - @mock.patch.object(hm.HealthManager, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test__poll_url(self, mock_ctx, mock_get, mock_wait, mock_nodes, - mock_check_url, mock_chase): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') - mock_get.return_value = x_cluster + @mock.patch.object(time, "sleep") + @mock.patch.object(tu, "is_older_than") + @mock.patch.object(hm.NodePollUrlHealthCheck, "_expand_url_template") + @mock.patch.object(utils, 'url_fetch') + def test_run_health_check_conn_error_noop( + self, mock_url_fetch, mock_expand_url, mock_time, + mock_sleep): ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_wait.return_value = (True, "") - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - mock_nodes.return_value = [x_node] - x_action_recover = {'action': 'RECOVER_ID'} - mock_check_url.return_value = x_action_recover + node = mock.Mock() + node.status = consts.NS_ACTIVE + node.id = 'FAKE_ID' + mock_time.return_value = True + mock_expand_url.return_value = 'FAKE_EXPANDED_URL' + mock_url_fetch.side_effect = utils.URLFetchError("Error") - recover_action = {'operation': 'REBUILD'} - params = {} + self.hc.params['poll_url_conn_error_as_unhealthy'] = False # do it - res = self.hm._poll_url('CLUSTER_ID', 456, recover_action, params) + res = self.hc.run_health_check(ctx, node) - self.assertEqual(mock_chase.return_value, res) - mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id=x_cluster.user, - project_id=x_cluster.project) - mock_check_url.assert_called_once_with(ctx, x_node, - recover_action, params) - mock_wait.assert_called_once_with(ctx, "RECOVER_ID", 456) - mock_chase.assert_called_once_with(mock.ANY, 456) + self.assertTrue(res) + mock_url_fetch.assert_has_calls( + [ + mock.call('FAKE_EXPANDED_URL', timeout=1, verify=True), + ] + ) + mock_sleep.assert_not_called() - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test__poll_url_cluster_not_found(self, mock_ctx, mock_get, - mock_chase): - mock_get.return_value = None - recover_action = {'operation': 'REBUILD'} - params = {} +class TestHealthManager(base.SenlinTestCase): - # do it - res = self.hm._poll_url('CLUSTER_ID', 123, recover_action, params) + def setUp(self): + super(TestHealthManager, self).setUp() - self.assertEqual(mock_chase.return_value, res) - mock_ctx.assert_not_called() - mock_chase.assert_called_once_with(mock.ANY, 123) + mock_eng = mock.Mock() + mock_eng.engine_id = 'ENGINE_ID' + topic = consts.HEALTH_MANAGER_TOPIC + 
version = consts.RPC_API_VERSION + self.hm = hm.HealthManager(mock_eng, topic, version) - @mock.patch.object(hm, "_chase_up") - @mock.patch.object(hm.HealthManager, "_check_url_and_recover_node") - @mock.patch.object(obj_node.Node, 'get_all_by_cluster') - @mock.patch.object(hm.HealthManager, "_wait_for_action") - @mock.patch.object(obj_cluster.Cluster, 'get') - @mock.patch.object(context, 'get_service_context') - def test__poll_url_no_action(self, mock_ctx, mock_get, mock_wait, - mock_nodes, mock_check_url, mock_chase): - x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') - mock_get.return_value = x_cluster - ctx = mock.Mock() - mock_ctx.return_value = ctx - mock_wait.return_value = (True, "") - x_node = mock.Mock(id='FAKE_NODE', status="ERROR") - mock_nodes.return_value = [x_node] - mock_check_url.return_value = None + def test_init(self): + self.assertEqual('ENGINE_ID', self.hm.engine_id) + self.assertIsNotNone(self.hm.TG) + self.assertIsNotNone(self.hm.rpc_client) + self.assertEqual(consts.HEALTH_MANAGER_TOPIC, self.hm.topic) + self.assertEqual(consts.RPC_API_VERSION, self.hm.version) + self.assertEqual(0, len(self.hm.rt['registries'])) - recover_action = {'operation': 'REBUILD'} - params = {} + @mock.patch.object(hm.HealthManager, "_load_runtime_registry") + def test_dummy_task(self, mock_load): + self.hm._dummy_task() + mock_load.assert_called_once_with() + + @mock.patch.object(hm.HealthManager, "_start_check") + @mock.patch.object(hr.HealthRegistry, 'claim') + def test_load_runtime_registry(self, mock_claim, mock_check): + fake_claims = [ + { + 'cluster_id': 'CID1', + 'check_type': consts.NODE_STATUS_POLLING, + 'interval': 12, + 'params': {'k1': 'v1'}, + 'enabled': True, + }, + { + 'cluster_id': 'CID2', + 'check_type': consts.NODE_STATUS_POLLING, + 'interval': 34, + 'params': {'k2': 'v2'}, + 'enabled': False, + }, + ] + mock_claim.return_value = [ + mock.Mock(**fake_claims[0]), + mock.Mock(**fake_claims[1]), + ] + mock_check.return_value = fake_claims # do it - res = self.hm._poll_url('CLUSTER_ID', 456, recover_action, params) + self.hm._load_runtime_registry() - self.assertEqual(mock_chase.return_value, res) - mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', - project_safe=False) - mock_ctx.assert_called_once_with(user_id=x_cluster.user, - project_id=x_cluster.project) - mock_check_url.assert_called_once_with(ctx, x_node, - recover_action, params) - mock_wait.assert_not_called() - mock_chase.assert_called_once_with(mock.ANY, 456) + # assertions + mock_claim.assert_called_once_with(self.hm.ctx, self.hm.engine_id) + mock_check.assert_has_calls( + [ + mock.call(fake_claims[0]) + ] + ) @mock.patch.object(obj_profile.Profile, 'get') @mock.patch.object(obj_cluster.Cluster, 'get') - def test__add_listener_nova(self, mock_cluster, mock_profile): + def test_add_listener_nova(self, mock_cluster, mock_profile): cfg.CONF.set_override('nova_control_exchange', 'FAKE_NOVA_EXCHANGE', group='health_manager') x_listener = mock.Mock() @@ -999,7 +1013,7 @@ @mock.patch.object(obj_profile.Profile, 'get') @mock.patch.object(obj_cluster.Cluster, 'get') - def test__add_listener_heat(self, mock_cluster, mock_profile): + def test_add_listener_heat(self, mock_cluster, mock_profile): cfg.CONF.set_override('heat_control_exchange', 'FAKE_HEAT_EXCHANGE', group='health_manager') x_listener = mock.Mock() @@ -1026,7 +1040,7 @@ @mock.patch.object(obj_profile.Profile, 'get') @mock.patch.object(obj_cluster.Cluster, 'get') - def test__add_listener_other_types(self, mock_cluster, mock_profile): + def 
test_add_listener_other_types(self, mock_cluster, mock_profile): mock_add_thread = self.patchobject(self.hm.TG, 'add_thread') x_cluster = mock.Mock(project='PROJECT_ID', profile_id='PROFILE_ID') mock_cluster.return_value = x_cluster @@ -1046,7 +1060,7 @@ self.assertFalse(mock_add_thread.called) @mock.patch.object(obj_cluster.Cluster, 'get') - def test__add_listener_cluster_not_found(self, mock_get): + def test_add_listener_cluster_not_found(self, mock_get): mock_get.return_value = None mock_add_thread = self.patchobject(self.hm.TG, 'add_thread') @@ -1060,30 +1074,531 @@ project_safe=False) self.assertEqual(0, mock_add_thread.call_count) - def test__start_check_for_polling(self): + @mock.patch.object(rpc_client.EngineClient, 'call') + @mock.patch('senlin.objects.NodeRecoverRequest', autospec=True) + def test_recover_node(self, mock_req, mock_rpc): + ctx = mock.Mock() + node_id = 'FAKE_NODE' + recover_action = {'operation': 'REBUILD'} + + x_req = mock.Mock + mock_req.return_value = x_req + + x_action = {'action': 'RECOVER_ID1'} + mock_rpc.return_value = x_action + + # do it + res = self.hm._recover_node(node_id, ctx, recover_action) + + self.assertEqual(x_action, res) + mock_req.assert_called_once_with( + identity=node_id, params=recover_action) + mock_rpc.assert_called_once_with(ctx, 'node_recover', x_req) + + @mock.patch.object(rpc_client.EngineClient, 'call') + @mock.patch('senlin.objects.NodeRecoverRequest', autospec=True) + def test_recover_node_failed(self, mock_req, mock_rpc): + ctx = mock.Mock() + node_id = 'FAKE_NODE' + recover_action = {'operation': 'REBUILD'} + + x_req = mock.Mock + mock_req.return_value = x_req + + mock_rpc.side_effect = Exception('boom') + + # do it + res = self.hm._recover_node(node_id, ctx, recover_action) + + self.assertIsNone(res) + mock_req.assert_called_once_with( + identity=node_id, params=recover_action) + mock_rpc.assert_called_once_with(ctx, 'node_recover', x_req) + + @mock.patch('senlin.objects.ActionGetRequest') + @mock.patch.object(rpc_client.EngineClient, 'call') + def test_wait_for_action(self, mock_rpc, mock_action_req): + x_req = mock.Mock() + mock_action_req.return_value = x_req + + x_action = {'status': consts.ACTION_SUCCEEDED} + mock_rpc.return_value = x_action + + ctx = mock.Mock() + action_id = 'FAKE_ACTION_ID' + timeout = 5 + + # do it + res, err = self.hm._wait_for_action(ctx, action_id, timeout) + + self.assertTrue(res) + self.assertEqual(err, '') + mock_rpc.assert_called_with(ctx, 'action_get', x_req) + + @mock.patch('senlin.objects.ActionGetRequest') + @mock.patch.object(rpc_client.EngineClient, 'call') + def test_wait_for_action_success_before_timeout( + self, mock_rpc, mock_action_req): + x_req = mock.Mock() + mock_action_req.return_value = x_req + + x_action1 = {'status': consts.ACTION_RUNNING} + x_action2 = {'status': consts.ACTION_SUCCEEDED} + mock_rpc.side_effect = [x_action1, x_action2] + + ctx = mock.Mock() + action_id = 'FAKE_ACTION_ID' + timeout = 5 + + # do it + res, err = self.hm._wait_for_action(ctx, action_id, timeout) + + self.assertTrue(res) + self.assertEqual(err, '') + mock_rpc.assert_has_calls( + [ + mock.call(ctx, 'action_get', x_req), + mock.call(ctx, 'action_get', x_req) + ] + ) + + @mock.patch('senlin.objects.ActionGetRequest') + @mock.patch.object(rpc_client.EngineClient, 'call') + def test_wait_for_action_timeout(self, mock_rpc, mock_action_req): + x_req = mock.Mock() + mock_action_req.return_value = x_req + + x_action = {'status': consts.ACTION_RUNNING} + mock_rpc.return_value = x_action + + ctx = mock.Mock() 
+ action_id = 'FAKE_ACTION_ID' + timeout = 5 + + # do it + res, err = self.hm._wait_for_action(ctx, action_id, timeout) + + self.assertFalse(res) + self.assertTrue(re.search('timeout', err, re.IGNORECASE)) + mock_rpc.assert_has_calls( + [ + mock.call(ctx, 'action_get', x_req) + ] + ) + + @mock.patch('senlin.objects.ActionGetRequest') + @mock.patch.object(rpc_client.EngineClient, 'call') + def test_wait_for_action_failed(self, mock_rpc, mock_action_req): + x_req = mock.Mock() + mock_action_req.return_value = x_req + + x_action = {'status': consts.ACTION_FAILED} + mock_rpc.return_value = x_action + + ctx = mock.Mock() + action_id = 'FAKE_ACTION_ID' + timeout = 5 + + # do it + res, err = self.hm._wait_for_action(ctx, action_id, timeout) + + self.assertFalse(res) + self.assertEqual(err, 'Cluster check action failed or cancelled') + mock_rpc.assert_called_with(ctx, 'action_get', x_req) + + @mock.patch('senlin.objects.ActionGetRequest') + @mock.patch.object(rpc_client.EngineClient, 'call') + def test_wait_for_action_cancelled(self, mock_rpc, mock_action_req): + x_req = mock.Mock() + mock_action_req.return_value = x_req + + x_action = {'status': consts.ACTION_CANCELLED} + mock_rpc.return_value = x_action + + ctx = mock.Mock() + action_id = 'FAKE_ACTION_ID' + timeout = 5 + + # do it + res, err = self.hm._wait_for_action(ctx, action_id, timeout) + + self.assertFalse(res) + self.assertEqual(err, 'Cluster check action failed or cancelled') + mock_rpc.assert_called_with(ctx, 'action_get', x_req) + + @mock.patch.object(obj_node.Node, 'get_all_by_cluster') + @mock.patch.object(hm.HealthManager, "_recover_node") + @mock.patch.object(hm.HealthManager, "_wait_for_action") + @mock.patch.object(obj_cluster.Cluster, 'get') + @mock.patch.object(context, 'get_service_context') + def test_execute_health_check_any_mode_healthy( + self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): + cluster_id = 'CLUSTER_ID' + interval = 1 + recovery_cond = consts.ANY_FAILED + node_update_timeout = 1 + recovery_action = {'operation': 'REBUILD'} + + x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') + mock_get.return_value = x_cluster + + ctx = mock.Mock() + mock_ctx.return_value = ctx + + mock_wait.return_value = (True, "") + + x_node1 = mock.Mock(id='FAKE_NODE1', status="ERROR") + x_node2 = mock.Mock(id='FAKE_NODE2', status="ERROR") + mock_nodes.return_value = [x_node1, x_node2] + + hc_true = {'run_health_check.return_value': True} + + hc_test_values = [ + [ + mock.Mock(**hc_true), + mock.Mock(**hc_true), + mock.Mock(**hc_true), + ], + ] + + self.hm.cluster_id = cluster_id + + for hc_mocks in hc_test_values: + self.hm.health_check_types = { + cluster_id: hc_mocks + } + + mock_get.reset_mock() + mock_ctx.reset_mock() + mock_recover.reset_mock() + mock_wait.reset_mock() + + # do it + self.hm._execute_health_check(interval, cluster_id, + recovery_action, + recovery_cond, node_update_timeout) + + mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', + project_safe=False) + mock_ctx.assert_called_once_with(user_id=x_cluster.user, + project_id=x_cluster.project) + + for mock_hc in hc_mocks: + mock_hc.run_health_check.assert_has_calls( + [ + mock.call(ctx, x_node1), + mock.call(ctx, x_node2) + ] + ) + + mock_recover.assert_not_called() + mock_wait.assert_not_called() + + @mock.patch.object(obj_node.Node, 'get_all_by_cluster') + @mock.patch.object(hm.HealthManager, "_recover_node") + @mock.patch.object(hm.HealthManager, "_wait_for_action") + @mock.patch.object(obj_cluster.Cluster, 'get') + 
@mock.patch.object(context, 'get_service_context') + def test_execute_health_check_any_mode_unhealthy( + self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): + cluster_id = 'CLUSTER_ID' + interval = 1 + recovery_cond = consts.ANY_FAILED + node_update_timeout = 1 + recovery_action = {'operation': 'REBUILD'} + + x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') + mock_get.return_value = x_cluster + + ctx = mock.Mock() + mock_ctx.return_value = ctx + + mock_wait.return_value = (True, "") + + x_node = mock.Mock(id='FAKE_NODE', status="ERROR") + mock_nodes.return_value = [x_node] + + mock_recover.return_value = {'action': 'FAKE_ACTION_ID'} + + hc_true = {'run_health_check.return_value': True} + hc_false = {'run_health_check.return_value': False} + + hc_test_values = [ + [ + mock.Mock(**hc_false), + mock.Mock(**hc_true), + mock.Mock(**hc_true), + ], + [ + mock.Mock(**hc_true), + mock.Mock(**hc_false), + mock.Mock(**hc_true), + ], + [ + mock.Mock(**hc_true), + mock.Mock(**hc_true), + mock.Mock(**hc_false), + ] + ] + + for hc_mocks in hc_test_values: + self.hm.health_check_types = { + cluster_id: hc_mocks + } + + mock_get.reset_mock() + mock_ctx.reset_mock() + mock_recover.reset_mock() + mock_wait.reset_mock() + + # do it + self.hm._execute_health_check(interval, cluster_id, + recovery_action, + recovery_cond, node_update_timeout) + + mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', + project_safe=False) + mock_ctx.assert_called_once_with(user_id=x_cluster.user, + project_id=x_cluster.project) + + # health checks should be called until one of them returns false + previous_hc_returned_false = False + for mock_hc in hc_mocks: + if not previous_hc_returned_false: + mock_hc.run_health_check.assert_called_once_with( + ctx, x_node) + else: + mock_hc.assert_not_called() + if not mock_hc.run_health_check.return_value: + previous_hc_returned_false = True + + mock_recover.assert_called_once_with('FAKE_NODE', ctx, mock.ANY) + mock_wait.assert_called_once_with( + ctx, 'FAKE_ACTION_ID', node_update_timeout) + + @mock.patch.object(obj_node.Node, 'get_all_by_cluster') + @mock.patch.object(hm.HealthManager, "_recover_node") + @mock.patch.object(hm.HealthManager, "_wait_for_action") + @mock.patch.object(obj_cluster.Cluster, 'get') + @mock.patch.object(context, 'get_service_context') + def test_execute_health_check_all_mode_healthy( + self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): + cluster_id = 'CLUSTER_ID' + interval = 1 + recovery_cond = consts.ALL_FAILED + node_update_timeout = 1 + recovery_action = {'operation': 'REBUILD'} + + x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') + mock_get.return_value = x_cluster + + ctx = mock.Mock() + mock_ctx.return_value = ctx + + mock_wait.return_value = (True, "") + + x_node = mock.Mock(id='FAKE_NODE1', status="ERROR") + mock_nodes.return_value = [x_node] + + hc_true = {'run_health_check.return_value': True} + hc_false = {'run_health_check.return_value': False} + + hc_test_values = [ + [ + mock.Mock(**hc_true), + mock.Mock(**hc_true), + mock.Mock(**hc_true), + ], + [ + mock.Mock(**hc_false), + mock.Mock(**hc_true), + mock.Mock(**hc_true), + ], + [ + mock.Mock(**hc_true), + mock.Mock(**hc_false), + mock.Mock(**hc_true), + ], + [ + mock.Mock(**hc_true), + mock.Mock(**hc_true), + mock.Mock(**hc_false), + ], + ] + + self.hm.cluster_id = cluster_id + + for hc_mocks in hc_test_values: + self.hm.health_check_types = { + cluster_id: hc_mocks + } + + mock_get.reset_mock() + mock_ctx.reset_mock() + mock_recover.reset_mock() 
+ mock_wait.reset_mock() + + # do it + self.hm._execute_health_check(interval, cluster_id, + recovery_action, + recovery_cond, node_update_timeout) + + mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', + project_safe=False) + mock_ctx.assert_called_once_with(user_id=x_cluster.user, + project_id=x_cluster.project) + + # health checks should be called until one of them returns true + previous_hc_returned_true = False + for mock_hc in hc_mocks: + if not previous_hc_returned_true: + mock_hc.run_health_check.assert_called_once_with( + ctx, x_node) + else: + mock_hc.assert_not_called() + if mock_hc.run_health_check.return_value: + previous_hc_returned_true = True + + mock_recover.assert_not_called() + mock_wait.assert_not_called() + + @mock.patch.object(obj_node.Node, 'get_all_by_cluster') + @mock.patch.object(hm.HealthManager, "_recover_node") + @mock.patch.object(hm.HealthManager, "_wait_for_action") + @mock.patch.object(obj_cluster.Cluster, 'get') + @mock.patch.object(context, 'get_service_context') + def test_execute_health_check_all_mode_unhealthy( + self, mock_ctx, mock_get, mock_wait, mock_recover, mock_nodes): + cluster_id = 'CLUSTER_ID' + interval = 1 + recovery_cond = consts.ALL_FAILED + node_update_timeout = 1 + recovery_action = {'operation': 'REBUILD'} + + x_cluster = mock.Mock(user='USER_ID', project='PROJECT_ID') + mock_get.return_value = x_cluster + + ctx = mock.Mock() + mock_ctx.return_value = ctx + + mock_wait.return_value = (True, "") + + x_node = mock.Mock(id='FAKE_NODE', status="ERROR") + mock_nodes.return_value = [x_node] + + mock_recover.return_value = {'action': 'FAKE_ACTION_ID'} + + hc_false = {'run_health_check.return_value': False} + + hc_test_values = [ + [ + mock.Mock(**hc_false), + mock.Mock(**hc_false), + mock.Mock(**hc_false), + ] + ] + + self.hm.cluster_id = cluster_id + self.hm.node_update_timeout = 1 + + for hc_mocks in hc_test_values: + self.hm.health_check_types = { + cluster_id: hc_mocks + } + + mock_get.reset_mock() + mock_ctx.reset_mock() + mock_recover.reset_mock() + mock_wait.reset_mock() + + # do it + self.hm._execute_health_check(interval, cluster_id, + recovery_action, + recovery_cond, node_update_timeout) + + mock_get.assert_called_once_with(self.hm.ctx, 'CLUSTER_ID', + project_safe=False) + mock_ctx.assert_called_once_with(user_id=x_cluster.user, + project_id=x_cluster.project) + + # all health checks should be called + for mock_hc in hc_mocks: + mock_hc.run_health_check.assert_called_once_with(ctx, x_node) + + mock_recover.assert_called_once_with('FAKE_NODE', ctx, mock.ANY) + mock_wait.assert_called_once_with( + ctx, 'FAKE_ACTION_ID', self.hm.node_update_timeout) + + @mock.patch.object(obj_cluster.Cluster, 'get') + @mock.patch.object(context, 'get_service_context') + def test_execute_health_check_cluster_not_found(self, mock_ctx, mock_get): + cluster_id = 'CLUSTER_ID' + interval = 1 + recovery_cond = consts.ANY_FAILED + node_update_timeout = 1 + recovery_action = {'operation': 'REBUILD'} + + mock_get.return_value = None + + # do it + self.hm._execute_health_check(interval, cluster_id, + recovery_action, recovery_cond, + node_update_timeout) + + mock_ctx.assert_not_called() + + def test_start_check_invalid_type(self): + entry = { + 'cluster_id': 'CCID', + 'interval': 12, + 'check_type': 'blah', + 'params': { + 'recover_action': [{'name': 'REBUILD'}] + }, + } + + res = self.hm._start_check(entry) + + self.assertIsNone(res) + + @mock.patch.object(threadgroup.ThreadGroup, 'add_dynamic_timer') + @mock.patch.object(hm.HealthManager, 
'_add_health_check') + @mock.patch.object(hm.HealthCheckType, 'factory') + def test_start_check_for_polling(self, mock_hc_factory, mock_add_hc, + mock_add_timer): x_timer = mock.Mock() - mock_add_timer = self.patchobject(self.hm.TG, 'add_dynamic_timer', - return_value=x_timer) + mock_add_timer.return_value = x_timer entry = { 'cluster_id': 'CCID', 'interval': 12, 'check_type': consts.NODE_STATUS_POLLING, - 'params': {'recover_action': [{'name': 'REBUILD'}]}, + 'params': { + 'recover_action': [{'name': 'REBUILD'}], + 'recovery_conditional': 'ANY_FAILED', + 'node_update_timeout': 1, + }, } - recover_action = {'operation': 'REBUILD'} + res = self.hm._start_check(entry) expected = copy.deepcopy(entry) expected['timer'] = x_timer self.assertEqual(expected, res) mock_add_timer.assert_called_once_with( - self.hm._poll_cluster, None, None, 'CCID', 12, recover_action) - - def test__start_check_for_poll_url(self): + self.hm._execute_health_check, None, None, 12, 'CCID', + {'operation': 'REBUILD'}, 'ANY_FAILED', 1) + mock_add_hc.assert_called_once_with('CCID', mock.ANY) + mock_hc_factory.assert_called_once_with( + consts.NODE_STATUS_POLLING, 'CCID', 12, entry['params']) + + @mock.patch.object(threadgroup.ThreadGroup, 'add_dynamic_timer') + @mock.patch.object(hm.HealthManager, '_add_health_check') + @mock.patch.object(hm.HealthCheckType, 'factory') + def test_start_check_for_poll_url(self, mock_hc_factory, mock_add_hc, + mock_add_timer): x_timer = mock.Mock() - mock_add_timer = self.patchobject(self.hm.TG, 'add_dynamic_timer', - return_value=x_timer) + mock_add_timer.return_value = x_timer entry = { 'cluster_id': 'CCID', @@ -1091,25 +1606,71 @@ 'check_type': consts.NODE_STATUS_POLL_URL, 'params': { 'recover_action': [{'name': 'REBUILD'}], - 'node_delete_timeout': 23, - 'node_force_recreate': True + 'recovery_conditional': 'ANY_FAILED', + 'node_update_timeout': 1, }, } - recover_action = { - 'operation': 'REBUILD', - 'delete_timeout': 23, - 'force_recreate': True + + res = self.hm._start_check(entry) + + expected = copy.deepcopy(entry) + expected['timer'] = x_timer + self.assertEqual(expected, res) + mock_add_timer.assert_called_once_with( + self.hm._execute_health_check, None, None, 12, 'CCID', + {'operation': 'REBUILD'}, 'ANY_FAILED', 1) + mock_add_hc.assert_called_once_with('CCID', mock.ANY) + mock_hc_factory.assert_called_once_with( + consts.NODE_STATUS_POLL_URL, + 'CCID', 12, entry['params']) + + @mock.patch.object(threadgroup.ThreadGroup, 'add_dynamic_timer') + @mock.patch.object(hm.HealthManager, '_add_health_check') + @mock.patch.object(hm.HealthCheckType, 'factory') + def test_start_check_poll_url_and_polling(self, mock_hc_factory, + mock_add_hc, mock_add_timer): + x_timer = mock.Mock() + mock_add_timer.return_value = x_timer + + check_type = ','.join( + [consts.NODE_STATUS_POLL_URL, consts.NODE_STATUS_POLLING]) + entry = { + 'cluster_id': 'CCID', + 'interval': 12, + 'check_type': check_type, + 'params': { + 'recover_action': [{'name': 'REBUILD'}], + 'recovery_conditional': 'ALL_FAILED', + 'node_update_timeout': 1, + }, } + res = self.hm._start_check(entry) expected = copy.deepcopy(entry) expected['timer'] = x_timer self.assertEqual(expected, res) mock_add_timer.assert_called_once_with( - self.hm._poll_url, None, None, 'CCID', 12, recover_action, - entry['params']) + self.hm._execute_health_check, None, None, 12, 'CCID', + {'operation': 'REBUILD'}, 'ALL_FAILED', 1) + mock_add_hc.assert_has_calls( + [ + mock.call('CCID', mock.ANY), + mock.call('CCID', mock.ANY) + ] + ) + 
mock_hc_factory.assert_has_calls( + [ + mock.call( + consts.NODE_STATUS_POLL_URL, 'CCID', 12, entry['params'] + ), + mock.call( + consts.NODE_STATUS_POLLING, 'CCID', 12, entry['params'] + ), + ] + ) - def test__start_check_for_listening(self): + def test_start_check_for_listening(self): x_listener = mock.Mock() mock_add_listener = self.patchobject(self.hm, '_add_listener', return_value=x_listener) @@ -1127,7 +1688,7 @@ self.assertEqual(expected, res) mock_add_listener.assert_called_once_with('CCID', recover_action) - def test__start_check_for_listening_failed(self): + def test_start_check_for_listening_failed(self): mock_add_listener = self.patchobject(self.hm, '_add_listener', return_value=None) @@ -1142,7 +1703,7 @@ self.assertIsNone(res) mock_add_listener.assert_called_once_with('CCID', recover_action) - def test__start_check_other_types(self): + def test_start_check_other_types(self): entry = { 'cluster_id': 'CCID', 'check_type': 'BOGUS TYPE', @@ -1152,29 +1713,41 @@ self.assertIsNone(res) - def test__stop_check_with_timer(self): + def test_stop_check_with_timer(self): x_timer = mock.Mock() - entry = {'timer': x_timer} + entry = {'timer': x_timer, 'cluster_id': 'CLUSTER_ID'} mock_timer_done = self.patchobject(self.hm.TG, 'timer_done') + x_hc_types = mock.MagicMock() + x_hc_types.__contains__.return_value = True + x_hc_types.__iter__.return_value = ['CLUSTER_ID'] + self.hm.health_check_types = x_hc_types + # do it res = self.hm._stop_check(entry) self.assertIsNone(res) x_timer.stop.assert_called_once_with() mock_timer_done.assert_called_once_with(x_timer) + x_hc_types.pop.assert_called_once_with('CLUSTER_ID') - def test__stop_check_with_listener(self): + def test_stop_check_with_listener(self): x_thread = mock.Mock() - entry = {'listener': x_thread} + entry = {'listener': x_thread, 'cluster_id': 'CLUSTER_ID'} mock_thread_done = self.patchobject(self.hm.TG, 'thread_done') + x_hc_types = mock.MagicMock() + x_hc_types.__contains__.return_value = False + x_hc_types.__iter__.return_value = ['CLUSTER_ID'] + self.hm.health_check_types = x_hc_types + # do it res = self.hm._stop_check(entry) self.assertIsNone(res) x_thread.stop.assert_called_once_with() mock_thread_done.assert_called_once_with(x_thread) + x_hc_types.pop.assert_not_called() @mock.patch('oslo_messaging.Target') def test_start(self, mock_target): @@ -1201,53 +1774,91 @@ cfg.CONF.periodic_interval, self.hm._dummy_task) @mock.patch.object(hr.HealthRegistry, 'create') - def test_register_cluster(self, mock_reg_create): + @mock.patch.object(hm.HealthManager, '_start_check') + def test_register_cluster(self, mock_check, mock_reg_create): + entry = { + 'cluster_id': 'CLUSTER_ID', + 'check_type': consts.NODE_STATUS_POLLING, + 'interval': 50, + 'params': { + 'blah': '123', + 'detection_modes': [ + { + 'type': consts.NODE_STATUS_POLLING, + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '', + } + ], + }, + 'enabled': True + } + ctx = mock.Mock() - timer = mock.Mock() - mock_add_tm = self.patchobject(self.hm.TG, 'add_dynamic_timer', - return_value=timer) - mock_poll = self.patchobject(self.hm, '_poll_cluster', - return_value=mock.Mock()) - x_reg = mock.Mock(cluster_id='CLUSTER_ID', - check_type=consts.NODE_STATUS_POLLING, - interval=50, params={}) + + x_reg = mock.Mock(cluster_id=entry['cluster_id'], + check_type=entry['check_type'], + interval=entry['interval'], params=entry['params'], + 
enabled=entry['enabled']) mock_reg_create.return_value = x_reg - self.hm.register_cluster(ctx, - cluster_id='CLUSTER_ID', - check_type=consts.NODE_STATUS_POLLING, - interval=50, enabled=True) + self.hm.register_cluster( + ctx, cluster_id=entry['cluster_id'], interval=entry['interval'], + node_update_timeout=1, params=entry['params'], + enabled=entry['enabled']) mock_reg_create.assert_called_once_with( - ctx, 'CLUSTER_ID', consts.NODE_STATUS_POLLING, 50, {}, 'ENGINE_ID', - enabled=True) - mock_add_tm.assert_called_with(mock_poll, None, None, 'CLUSTER_ID', 50, - {}) + ctx, entry['cluster_id'], consts.NODE_STATUS_POLLING, + entry['interval'], entry['params'], 'ENGINE_ID', + enabled=entry['enabled']) + mock_check.assert_called_once_with(entry) self.assertEqual(1, len(self.hm.registries)) @mock.patch.object(hr.HealthRegistry, 'create') - def test_register_cluster_not_enabled(self, mock_reg_create): + @mock.patch.object(hm.HealthManager, '_start_check') + def test_register_cluster_not_enabled(self, mock_check, mock_reg_create): + entry = { + 'cluster_id': 'CLUSTER_ID', + 'check_type': consts.NODE_STATUS_POLLING, + 'interval': 50, + 'params': { + 'blah': '123', + 'detection_modes': [ + { + 'type': consts.NODE_STATUS_POLLING, + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '', + } + ], + }, + 'enabled': False + } + ctx = mock.Mock() - timer = mock.Mock() - mock_add_tm = self.patchobject(self.hm.TG, 'add_dynamic_timer', - return_value=timer) - mock_poll = self.patchobject(self.hm, '_poll_cluster', - return_value=mock.Mock()) - x_reg = mock.Mock(cluster_id='CLUSTER_ID', - check_type=consts.NODE_STATUS_POLLING, - interval=50, params={}, enabled=False) + + x_reg = mock.Mock(cluster_id=entry['cluster_id'], + check_type=entry['check_type'], + interval=entry['interval'], params=entry['params'], + enabled=entry['enabled']) mock_reg_create.return_value = x_reg - self.hm.register_cluster(ctx, - cluster_id='CLUSTER_ID', - check_type=consts.NODE_STATUS_POLLING, - interval=50, enabled=x_reg.enabled) + self.hm.register_cluster( + ctx, cluster_id=entry['cluster_id'], interval=entry['interval'], + node_update_timeout=1, params=entry['params'], + enabled=entry['enabled']) mock_reg_create.assert_called_once_with( - ctx, 'CLUSTER_ID', consts.NODE_STATUS_POLLING, 50, {}, 'ENGINE_ID', - enabled=False) - mock_add_tm.assert_not_called() - mock_poll.assert_not_called() + ctx, entry['cluster_id'], consts.NODE_STATUS_POLLING, + entry['interval'], entry['params'], 'ENGINE_ID', + enabled=entry['enabled']) + mock_check.assert_not_called() self.assertEqual(1, len(self.hm.registries)) @mock.patch.object(hm.HealthManager, '_stop_check') diff -Nru senlin-6.0.0/senlin/tests/unit/engine/test_node.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_node.py --- senlin-6.0.0/senlin/tests/unit/engine/test_node.py 2018-08-30 14:17:04.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/engine/test_node.py 2018-11-19 18:48:08.000000000 +0000 @@ -251,11 +251,15 @@ 'Creation succeeded', physical_id=physical_id) - def test_node_create_not_init(self): + @mock.patch.object(nodem.Node, 'set_status') + def test_node_create_not_init(self, mock_status): node = nodem.Node('node1', PROFILE_ID, CLUSTER_ID, self.context) node.status = 'NOT_INIT' - res = node.do_create(self.context) + res, reason = node.do_create(self.context) self.assertFalse(res) + 
self.assertEqual('Node must be in INIT status', reason) + mock_status.assert_any_call(self.context, consts.NS_ERROR, + 'Node must be in INIT status') @mock.patch.object(nodem.Node, 'set_status') @mock.patch.object(pb.Profile, 'create_object') @@ -265,9 +269,10 @@ mock_create.side_effect = exception.EResourceCreation( type='PROFILE', message='Boom', resource_id='test_id') - res = node.do_create(self.context) + res, reason = node.do_create(self.context) self.assertFalse(res) + self.assertEqual(str(reason), 'Failed in creating PROFILE: Boom.') mock_status.assert_any_call(self.context, consts.NS_CREATING, 'Creation in progress') mock_status.assert_any_call(self.context, consts.NS_ERROR, @@ -894,6 +899,28 @@ ]) x_profile.handle_dance.assert_called_once_with(node, style='tango') + def _verify_execution_create_args(self, expected_name, + expected_inputs_dict, wfc): + wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY) + actual_call_args, call_kwargs = wfc.execution_create.call_args + + # execution_create parameters are name and inputs + actual_call_name, actual_call_inputs = actual_call_args + + # actual_call_inputs is string representation of a dictionary. + # convert actual_call_inputs to json, then dump it back as string + # sorted by key + final_actual_call_inputs = jsonutils.dumps( + jsonutils.loads(actual_call_inputs), sort_keys=True) + + # dump expected_inputs_dict as string sorted by key + final_expected_inputs = jsonutils.dumps( + expected_inputs_dict, sort_keys=True) + + # compare the sorted input strings along with the names + self.assertEqual(actual_call_name, expected_name) + self.assertEqual(final_actual_call_inputs, final_expected_inputs) + def test_run_workflow(self): node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') node.physical_id = 'FAKE_NODE' @@ -931,10 +958,7 @@ 'FAKE_KEY1': 'FAKE_VALUE1', 'FAKE_KEY2': 'FAKE_VALUE2', } - final_inputs = jsonutils.dumps(final_dict) - wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY) - call_args, call_kwargs = wfc.execution_create.call_args - self.assertEqual(call_args, ('foo', final_inputs)) + self._verify_execution_create_args('foo', final_dict, wfc) def test_run_workflow_no_physical_id(self): node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') @@ -976,10 +1000,7 @@ 'FAKE_KEY1': 'FAKE_VALUE1', 'FAKE_KEY2': 'FAKE_VALUE2', } - final_inputs = jsonutils.dumps(final_dict) - wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY) - call_args, call_kwargs = wfc.execution_create.call_args - self.assertEqual(call_args, ('foo', final_inputs)) + self._verify_execution_create_args('foo', final_dict, wfc) def test_run_workflow_failed_creation(self): node = nodem.Node('node1', PROFILE_ID, 'FAKE_CLUSTER') @@ -1053,7 +1074,4 @@ 'FAKE_KEY1': 'FAKE_VALUE1', 'FAKE_KEY2': 'FAKE_VALUE2', } - final_inputs = jsonutils.dumps(final_dict) - wfc.execution_create.assert_called_once_with(mock.ANY, mock.ANY) - call_args, call_kwargs = wfc.execution_create.call_args - self.assertEqual(call_args, ('foo', final_inputs)) + self._verify_execution_create_args('foo', final_dict, wfc) diff -Nru senlin-6.0.0/senlin/tests/unit/events/test_base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/events/test_base.py --- senlin-6.0.0/senlin/tests/unit/events/test_base.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/events/test_base.py 2018-11-19 18:48:08.000000000 +0000 @@ -27,7 +27,7 @@ self.ctx = utils.dummy_context() @mock.patch('oslo_utils.reflection.get_class_name') - def 
test__check_entity_cluster(self, mock_get): + def test_check_entity_cluster(self, mock_get): entity = mock.Mock() mock_get.return_value = 'Cluster' @@ -37,7 +37,7 @@ mock_get.assert_called_once_with(entity, fully_qualified=False) @mock.patch('oslo_utils.reflection.get_class_name') - def test__check_entity_node(self, mock_get): + def test_check_entity_node(self, mock_get): entity = mock.Mock() mock_get.return_value = 'Node' @@ -46,23 +46,23 @@ self.assertEqual('NODE', res) mock_get.assert_called_once_with(entity, fully_qualified=False) - def test__get_action_name_unexpected(self): + def test_get_action_name_unexpected(self): action = mock.Mock(action="UNEXPECTED") res = base.EventBackend._get_action_name(action) self.assertEqual('unexpected', res) - def test__get_action_name_correct_format(self): + def test_get_action_name_correct_format(self): action = mock.Mock(action="FOO_BAR") res = base.EventBackend._get_action_name(action) self.assertEqual('bar', res) - def test__get_action_name_operation_found(self): + def test_get_action_name_operation_found(self): action = mock.Mock(action=consts.NODE_OPERATION, inputs={'operation': 'bar'}) res = base.EventBackend._get_action_name(action) self.assertEqual('bar', res) - def test__get_action_name_operation_not_found(self): + def test_get_action_name_operation_not_found(self): action = mock.Mock(action="FOO_OPERATION", inputs={}) res = base.EventBackend._get_action_name(action) self.assertEqual('operation', res) diff -Nru senlin-6.0.0/senlin/tests/unit/events/test_message.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/events/test_message.py --- senlin-6.0.0/senlin/tests/unit/events/test_message.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/events/test_message.py 2018-11-19 18:48:08.000000000 +0000 @@ -35,7 +35,7 @@ self.ctx = utils.dummy_context() @mock.patch.object(nobj.NotificationBase, '_emit') - def test__notify_cluster_action(self, mock_emit): + def test_notify_cluster_action(self, mock_emit): cluster_id = uuidutils.generate_uuid() profile_id = uuidutils.generate_uuid() cluster_init = timeutils.utcnow(True) @@ -131,7 +131,7 @@ self.assertEqual(expected_payload, payload) @mock.patch.object(nobj.NotificationBase, '_emit') - def test__notify_node_action(self, mock_emit): + def test_notify_node_action(self, mock_emit): node_id = uuidutils.generate_uuid() profile_id = uuidutils.generate_uuid() node_init = timeutils.utcnow(True) diff -Nru senlin-6.0.0/senlin/tests/unit/__init__.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/__init__.py --- senlin-6.0.0/senlin/tests/unit/__init__.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/__init__.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,21 +11,9 @@ # under the License. import eventlet -import oslo_i18n from senlin import objects - -def fake_translate_msgid(msgid, domain, desired_locale=None): - return msgid - -oslo_i18n.enable_lazy() - -# To ensure messages don't really get translated while running tests. -# As there are lots of places where matching is expected when comparing -# exception message(translated) with raw message. 
-oslo_i18n._translate_msgid = fake_translate_msgid - eventlet.monkey_patch(os=False) # The following has to be done after eventlet monkey patching or else the diff -Nru senlin-6.0.0/senlin/tests/unit/objects/test_cluster_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/objects/test_cluster_policy.py --- senlin-6.0.0/senlin/tests/unit/objects/test_cluster_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/objects/test_cluster_policy.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import timedelta -import testtools - -from oslo_utils import timeutils - -from senlin.objects import cluster_policy as cpo - -CLUSTER_ID = "8286fcaa-6474-44e2-873e-28b5cb2c204c" -POLICY_ID = "da958a16-f384-49a1-83a9-abac8b4ec46e" - - -class TestClusterPolicy(testtools.TestCase): - - def test_cooldown_inprogress(self): - last_op = timeutils.utcnow(True) - cp = cpo.ClusterPolicy(cluster_id=CLUSTER_ID, policy_id=POLICY_ID, - last_op=last_op) - - res = cp.cooldown_inprogress(60) - - self.assertTrue(res) - - cp.last_op -= timedelta(hours=1) - - res = cp.cooldown_inprogress(60) - - self.assertFalse(res) diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_batch_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_batch_policy.py --- senlin-6.0.0/senlin/tests/unit/policies/test_batch_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_batch_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -42,7 +42,7 @@ self.assertEqual(2, policy.max_batch_size) self.assertEqual(60, policy.pause_time) - def test__get_batch_size(self): + def test_get_batch_size(self): policy = bp.BatchPolicy('test-batch', self.spec) size, number = policy._get_batch_size(5) @@ -50,7 +50,7 @@ self.assertEqual(2, size) self.assertEqual(3, number) - def test__get_batch_size_less_than_max(self): + def test_get_batch_size_less_than_max(self): spec = copy.deepcopy(self.spec) spec['properties']['max_batch_size'] = 3 policy = bp.BatchPolicy('test-batch', spec) @@ -60,7 +60,7 @@ self.assertEqual(2, size) self.assertEqual(2, number) - def test__get_batch_size_less_than_min(self): + def test_get_batch_size_less_than_min(self): spec = copy.deepcopy(self.spec) spec['properties']['min_in_service'] = 2 policy = bp.BatchPolicy('test-batch', spec) @@ -70,7 +70,7 @@ self.assertEqual(1, size) self.assertEqual(1, number) - def test__get_batch_size_with_default_max(self): + def test_get_batch_size_with_default_max(self): spec = copy.deepcopy(self.spec) spec['properties']['max_batch_size'] = -1 policy = bp.BatchPolicy('test-batch', spec) @@ -79,7 +79,7 @@ self.assertEqual(4, size) self.assertEqual(2, number) - def test__pick_nodes_all_active(self): + def test_pick_nodes_all_active(self): node1 = mock.Mock(id='1', status='ACTIVE') node2 = mock.Mock(id='2', status='ACTIVE') node3 = mock.Mock(id='3', status='ACTIVE') @@ -93,7 +93,7 @@ 
self.assertIn(node2.id, nodes[0]) self.assertIn(node3.id, nodes[1]) - def test__pick_nodes_with_error_nodes(self): + def test_pick_nodes_with_error_nodes(self): node1 = mock.Mock(id='1', status='ACTIVE') node2 = mock.Mock(id='2', status='ACTIVE') node3 = mock.Mock(id='3', status='ERROR') @@ -110,7 +110,7 @@ @mock.patch.object(bp.BatchPolicy, '_pick_nodes') @mock.patch.object(bp.BatchPolicy, '_get_batch_size') - def test__create_plan_for_update(self, mock_cal, mock_pick): + def test_create_plan_for_update(self, mock_cal, mock_pick): action = mock.Mock(context=self.context, action='CLUSTER_UPDATE') cluster = mock.Mock(id='cid') node1, node2, node3 = mock.Mock(), mock.Mock(), mock.Mock() @@ -132,7 +132,7 @@ mock_cal.assert_called_once_with(3) mock_pick.assert_called_once_with([node1, node2, node3], 2, 2) - def test__create_plan_for_update_no_node(self): + def test_create_plan_for_update_no_node(self): action = mock.Mock(context=self.context, action='CLUSTER_UPDATE') cluster = mock.Mock(id='cid') cluster.nodes = [] diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_deletion_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_deletion_policy.py --- senlin-6.0.0/senlin/tests/unit/policies/test_deletion_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_deletion_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -48,7 +48,7 @@ self.assertFalse(policy.reduce_desired_capacity) @mock.patch.object(su, 'nodes_by_random') - def test__victims_by_regions_random(self, mock_select): + def test_victims_by_regions_random(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -72,7 +72,7 @@ mock.call('R1'), mock.call('R2')]) @mock.patch.object(su, 'nodes_by_profile_age') - def test__victims_by_regions_profile_age(self, mock_select): + def test_victims_by_regions_profile_age(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -96,7 +96,7 @@ mock.call('R1'), mock.call('R2')]) @mock.patch.object(su, 'nodes_by_age') - def test__victims_by_regions_age_oldest(self, mock_select): + def test_victims_by_regions_age_oldest(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -120,7 +120,7 @@ mock.call('R1'), mock.call('R2')]) @mock.patch.object(su, 'nodes_by_age') - def test__victims_by_regions_age_youngest(self, mock_select): + def test_victims_by_regions_age_youngest(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -144,7 +144,7 @@ mock.call('R1'), mock.call('R2')]) @mock.patch.object(su, 'nodes_by_random') - def test__victims_by_zones_random(self, mock_select): + def test_victims_by_zones_random(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -169,7 +169,7 @@ ) @mock.patch.object(su, 'nodes_by_profile_age') - def test__victims_by_zones_profile_age(self, mock_select): + def test_victims_by_zones_profile_age(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -196,7 +196,7 @@ ) @mock.patch.object(su, 'nodes_by_age') - def test__victims_by_zones_age_oldest(self, mock_select): + def test_victims_by_zones_age_oldest(self, mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=2) @@ -221,7 +221,7 @@ ) @mock.patch.object(su, 'nodes_by_age') - def test__victims_by_zones_age_youngest(self, mock_select): + def test_victims_by_zones_age_youngest(self, 
mock_select): cluster = mock.Mock() node1 = mock.Mock(id=1) node2 = mock.Mock(id=3) @@ -247,7 +247,7 @@ [mock.call('AZ5'), mock.call('AZ6')], ) - def test__update_action_clean(self): + def test_update_action_clean(self): action = mock.Mock() action.data = {} @@ -269,7 +269,7 @@ self.assertEqual(pd, action.data) action.store.assert_called_with(action.context) - def test__update_action_override(self): + def test_update_action_override(self): action = mock.Mock() action.data = { 'deletion': { diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_health_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_health_policy.py --- senlin-6.0.0/senlin/tests/unit/policies/test_health_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_health_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -10,6 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. +from collections import namedtuple import copy import mock @@ -35,13 +36,15 @@ self.spec = { 'type': 'senlin.policy.health', - 'version': '1.0', + 'version': '1.1', 'properties': { 'detection': { - 'type': 'NODE_STATUS_POLLING', - 'options': { - 'interval': 60 - } + "detection_modes": [ + { + 'type': 'NODE_STATUS_POLLING' + }, + ], + 'interval': 60 }, 'recovery': { 'fencing': ['COMPUTE'], @@ -62,13 +65,94 @@ self.hp = health_policy.HealthPolicy('test-policy', self.spec) def test_policy_init(self): - self.assertIsNone(self.hp.id) - self.assertEqual('test-policy', self.hp.name) - self.assertEqual('senlin.policy.health-1.0', self.hp.type) - self.assertEqual('NODE_STATUS_POLLING', self.hp.check_type) - self.assertEqual(60, self.hp.interval) + DetectionMode = namedtuple( + 'DetectionMode', + [self.hp.DETECTION_TYPE] + list(self.hp._DETECTION_OPTIONS)) + + detection_modes = [ + DetectionMode( + type='NODE_STATUS_POLLING', + poll_url='', + poll_url_ssl_verify=True, + poll_url_conn_error_as_unhealthy=True, + poll_url_healthy_response='', + poll_url_retry_limit='', + poll_url_retry_interval='' + ) + ] + + spec = { + 'type': 'senlin.policy.health', + 'version': '1.1', + 'properties': { + 'detection': { + "detection_modes": [ + { + 'type': 'NODE_STATUS_POLLING' + }, + ], + 'interval': 60 + }, + 'recovery': { + 'fencing': ['COMPUTE'], + 'actions': [ + {'name': 'REBUILD'} + ] + } + } + } + + hp = health_policy.HealthPolicy('test-policy', spec) + + self.assertIsNone(hp.id) + self.assertEqual('test-policy', hp.name) + self.assertEqual('senlin.policy.health-1.1', hp.type) + self.assertEqual(detection_modes, hp.detection_modes) + self.assertEqual(60, hp.interval) self.assertEqual([{'name': 'REBUILD', 'params': None}], - self.hp.recover_actions) + hp.recover_actions) + + def test_policy_init_ops(self): + spec = { + 'type': 'senlin.policy.health', + 'version': '1.1', + 'properties': { + 'detection': { + "detection_modes": [ + { + 'type': 'NODE_STATUS_POLLING' + }, + { + 'type': 'NODE_STATUS_POLL_URL' + }, + ], + 'interval': 60 + }, + 'recovery': { + 'fencing': ['COMPUTE'], + 'actions': [ + {'name': 'REBUILD'} + ] + } + } + } + + operations = [None, 'ALL_FAILED', 'ANY_FAILED'] + for op in operations: + # set operation in spec + if op: + spec['properties']['detection']['recovery_conditional'] = op + + # test __init__ + hp = health_policy.HealthPolicy('test-policy', spec) + + # check result + self.assertIsNone(hp.id) + self.assertEqual('test-policy', hp.name) + self.assertEqual('senlin.policy.health-1.1', hp.type) + 
self.assertEqual(60, hp.interval) + self.assertEqual([{'name': 'REBUILD', 'params': None}], + hp.recover_actions) def test_validate(self): spec = copy.deepcopy(self.spec) @@ -86,7 +170,7 @@ def test_validate_valid_interval(self): spec = copy.deepcopy(self.spec) - spec["properties"]["detection"]["options"]["interval"] = 20 + spec["properties"]["detection"]["interval"] = 20 self.hp = health_policy.HealthPolicy('test-policy', spec) cfg.CONF.set_override('health_check_interval_min', 20) @@ -95,7 +179,7 @@ def test_validate_invalid_interval(self): spec = copy.deepcopy(self.spec) - spec["properties"]["detection"]["options"]["interval"] = 10 + spec["properties"]["detection"]["interval"] = 10 self.hp = health_policy.HealthPolicy('test-policy', spec) cfg.CONF.set_override('health_check_interval_min', 20) @@ -116,18 +200,24 @@ policy_data = { 'HealthPolicy': { 'data': { - 'check_type': self.hp.check_type, 'interval': self.hp.interval, - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': 3, - 'poll_url_retry_interval': 3, + 'detection_modes': [ + { + 'type': 'NODE_STATUS_POLLING', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + } + ], 'node_update_timeout': 300, 'node_delete_timeout': 20, - 'node_force_recreate': False + 'node_force_recreate': False, + 'recovery_conditional': 'ANY_FAILED' }, - 'version': '1.0' + 'version': '1.1' } } @@ -135,18 +225,24 @@ self.assertTrue(res) self.assertEqual(policy_data, data) kwargs = { - 'check_type': self.hp.check_type, 'interval': self.hp.interval, + 'node_update_timeout': 300, 'params': { 'recover_action': self.hp.recover_actions, - 'poll_url': '', - 'poll_url_ssl_verify': True, - 'poll_url_healthy_response': '', - 'poll_url_retry_limit': 3, - 'poll_url_retry_interval': 3, - 'node_update_timeout': 300, 'node_delete_timeout': 20, - 'node_force_recreate': False + 'node_force_recreate': False, + 'recovery_conditional': 'ANY_FAILED', + 'detection_modes': [ + { + 'type': 'NODE_STATUS_POLLING', + 'poll_url': '', + 'poll_url_ssl_verify': True, + 'poll_url_conn_error_as_unhealthy': True, + 'poll_url_healthy_response': '', + 'poll_url_retry_limit': '', + 'poll_url_retry_interval': '' + } + ], }, 'enabled': True } diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_lb_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_lb_policy.py --- senlin-6.0.0/senlin/tests/unit/policies/test_lb_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_lb_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -697,8 +697,8 @@ @mock.patch.object(no.Node, 'get') @mock.patch.object(no.Node, 'update') - def test__add_member(self, m_node_update, m_node_get, - m_extract, m_load): + def test_add_member(self, m_node_update, m_node_get, + m_extract, m_load): node1 = mock.Mock(id='NODE1_ID', data={}) node2 = mock.Mock(id='NODE2_ID', data={}) action = mock.Mock(context='action_context', @@ -754,8 +754,8 @@ @mock.patch.object(no.Node, 'get') @mock.patch.object(no.Node, 'update') - def test__add_member_fail(self, m_node_update, m_node_get, - m_extract, m_load): + def test_add_member_fail(self, m_node_update, m_node_get, + m_extract, m_load): node1 = mock.Mock(id='NODE1_ID', data={}) action = mock.Mock(context='action_context', action=consts.CLUSTER_RESIZE, @@ -921,8 +921,8 @@ @mock.patch.object(no.Node, 'get') 
@mock.patch.object(no.Node, 'update') - def test__remove_member(self, m_node_update, m_node_get, - m_extract, m_load): + def test_remove_member(self, m_node_update, m_node_get, + m_extract, m_load): node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) node2 = mock.Mock(id='NODE2', data={'lb_member': 'MEM_ID2'}) action = mock.Mock( @@ -976,8 +976,8 @@ @mock.patch.object(no.Node, 'get') @mock.patch.object(no.Node, 'update') - def test__remove_member_not_in_pool(self, m_node_update, m_node_get, - m_extract, m_load): + def test_remove_member_not_in_pool(self, m_node_update, m_node_get, + m_extract, m_load): node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) node2 = mock.Mock(id='NODE2', data={}) action = mock.Mock( @@ -1025,8 +1025,8 @@ @mock.patch.object(no.Node, 'get') @mock.patch.object(no.Node, 'update') - def test__remove_member_fail(self, m_node_update, m_node_get, - m_extract, m_load): + def test_remove_member_fail(self, m_node_update, m_node_get, + m_extract, m_load): node1 = mock.Mock(id='NODE1', data={'lb_member': 'MEM_ID1'}) action = mock.Mock( context='action_context', action=consts.CLUSTER_DEL_NODES, @@ -1120,7 +1120,7 @@ mock.ANY, self.lb_driver) @mock.patch.object(no.Node, 'update') - def test__process_recovery_not_lb_member(self, m_update, m1, m2): + def test_process_recovery_not_lb_member(self, m_update, m1, m2): node = mock.Mock(id='NODE', data={}) action = mock.Mock( action=consts.NODE_RECOVER, @@ -1137,7 +1137,7 @@ @mock.patch.object(no.Node, 'update') @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test__process_recovery_reboot(self, m_remove, m_update, m1, m2): + def test_process_recovery_reboot(self, m_remove, m_update, m1, m2): node = mock.Mock(id='NODE', data={'lb_member': 'mem_1'}) action = mock.Mock( action=consts.NODE_RECOVER, @@ -1156,7 +1156,7 @@ @mock.patch.object(no.Node, 'update') @mock.patch.object(lb_policy.LoadBalancingPolicy, '_remove_member') - def test__process_recovery_recreate(self, m_remove, m_update, m1, m2): + def test_process_recovery_recreate(self, m_remove, m_update, m1, m2): node = mock.Mock(id='NODE', data={'lb_member': 'mem_1', 'recovery': 'RECREATE'}) action = mock.Mock( diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_policy.py --- senlin-6.0.0/senlin/tests/unit/policies/test_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -65,6 +65,8 @@ self.ctx = utils.dummy_context() environment.global_env().register_policy('senlin.policy.dummy-1.0', DummyPolicy) + environment.global_env().register_policy('senlin.policy.dummy-1.1', + DummyPolicy) self.spec = parser.simple_parse(sample_policy) def _create_policy(self, policy_name, policy_id=None): @@ -112,6 +114,52 @@ self.assertEqual({'key1': 'value1', 'key2': 2}, spec_data['properties']) self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) + + def test_init_version_as_float(self): + self.spec['version'] = 1.1 + policy = self._create_policy('test-policy') + + self.assertIsNone(policy.id) + self.assertEqual('test-policy', policy.name) + self.assertEqual(self.spec, policy.spec) + self.assertEqual('senlin.policy.dummy-1.1', policy.type) + self.assertEqual(self.ctx.user_id, policy.user) + self.assertEqual(self.ctx.project_id, policy.project) + self.assertEqual(self.ctx.domain_id, policy.domain) + self.assertEqual({}, policy.data) + 
self.assertIsNone(policy.created_at) + self.assertIsNone(policy.updated_at) + self.assertTrue(policy.singleton) + + spec_data = policy.spec_data + self.assertEqual('senlin.policy.dummy', spec_data['type']) + self.assertEqual('1.1', spec_data['version']) + self.assertEqual({'key1': 'value1', 'key2': 2}, + spec_data['properties']) + self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) + + def test_init_version_as_string(self): + self.spec['version'] = '1.1' + policy = self._create_policy('test-policy') + + self.assertIsNone(policy.id) + self.assertEqual('test-policy', policy.name) + self.assertEqual(self.spec, policy.spec) + self.assertEqual('senlin.policy.dummy-1.1', policy.type) + self.assertEqual(self.ctx.user_id, policy.user) + self.assertEqual(self.ctx.project_id, policy.project) + self.assertEqual(self.ctx.domain_id, policy.domain) + self.assertEqual({}, policy.data) + self.assertIsNone(policy.created_at) + self.assertIsNone(policy.updated_at) + self.assertTrue(policy.singleton) + + spec_data = policy.spec_data + self.assertEqual('senlin.policy.dummy', spec_data['type']) + self.assertEqual('1.1', spec_data['version']) + self.assertEqual({'key1': 'value1', 'key2': 2}, + spec_data['properties']) + self.assertEqual({'key1': 'value1', 'key2': 2}, policy.properties) def test_policy_new_type_not_found(self): bad_spec = { diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_region_placement.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_region_placement.py --- senlin-6.0.0/senlin/tests/unit/policies/test_region_placement.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_region_placement.py 2018-11-19 18:48:08.000000000 +0000 @@ -109,7 +109,7 @@ self.assertEqual("The specified regions '['R1', 'R3']' could not " "be found.", six.text_type(ex)) - def test__create_plan(self): + def test_create_plan(self): policy = rp.RegionPlacementPolicy('p1', self.spec) regions = policy.regions @@ -133,7 +133,7 @@ answer = {'R2': 1, 'R3': 1, 'R4': 1} self.assertEqual(answer, plan) - def test__get_count_node_create_no_region(self): + def test_get_count_node_create_no_region(self): x_profile = mock.Mock(CONTEXT='context', properties={'context': {}}) x_node = mock.Mock(rt={'profile': x_profile}) action = mock.Mock(action=consts.NODE_CREATE, entity=x_node) @@ -143,7 +143,7 @@ res = policy._get_count('FOO', action) self.assertEqual(1, res) - def test__get_count_node_create_region_specified(self): + def test_get_count_node_create_region_specified(self): x_profile = mock.Mock(CONTEXT='context', properties={'context': {'region_name': 'foo'}}) x_node = mock.Mock(rt={'profile': x_profile}) @@ -154,7 +154,7 @@ res = policy._get_count('FOO', action) self.assertEqual(0, res) - def test__get_count_resize_deletion(self): + def test_get_count_resize_deletion(self): action = mock.Mock(action=consts.CLUSTER_RESIZE, data={'deletion': {'count': 3}}) @@ -163,7 +163,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_resize_creation(self): + def test_get_count_resize_creation(self): action = mock.Mock(action=consts.CLUSTER_RESIZE, data={'creation': {'count': 3}}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -173,7 +173,7 @@ self.assertEqual(3, res) @mock.patch.object(su, 'parse_resize_params') - def test__get_count_resize_parse_error(self, mock_parse): + def test_get_count_resize_parse_error(self, mock_parse): x_cluster = mock.Mock() x_cluster.nodes = [mock.Mock(), mock.Mock()] 
action = mock.Mock(action=consts.CLUSTER_RESIZE, data={}) @@ -189,7 +189,7 @@ self.assertEqual('Something wrong.', action.data['reason']) @mock.patch.object(su, 'parse_resize_params') - def test__get_count_resize_parse_creation(self, mock_parse): + def test_get_count_resize_parse_creation(self, mock_parse): def fake_parse(action, cluster, current): action.data = {'creation': {'count': 3}} return pb.CHECK_OK, '' @@ -208,7 +208,7 @@ mock_parse.assert_called_once_with(action, x_cluster, 0) @mock.patch.object(su, 'parse_resize_params') - def test__get_count_resize_parse_deletion(self, mock_parse): + def test_get_count_resize_parse_deletion(self, mock_parse): def fake_parse(action, cluster, current): action.data = {'deletion': {'count': 3}} return pb.CHECK_OK, '' @@ -226,7 +226,7 @@ self.assertEqual(-3, res) mock_parse.assert_called_once_with(action, x_cluster, 3) - def test__get_count_scale_in_with_data(self): + def test_get_count_scale_in_with_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={'deletion': {'count': 3}}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -234,7 +234,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_scale_in_with_no_data(self): + def test_get_count_scale_in_with_no_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={'deletion': {'num': 3}}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -242,7 +242,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-1, res) - def test__get_count_scale_in_with_inputs(self): + def test_get_count_scale_in_with_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, inputs={'count': 3}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -250,7 +250,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_scale_in_with_incorrect_inputs(self): + def test_get_count_scale_in_with_incorrect_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, inputs={'num': 3}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -258,7 +258,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-1, res) - def test__get_count_scale_out_with_data(self): + def test_get_count_scale_out_with_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={'creation': {'count': 3}}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -266,7 +266,7 @@ res = policy._get_count('FOO', action) self.assertEqual(3, res) - def test__get_count_scale_out_with_no_data(self): + def test_get_count_scale_out_with_no_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={'creation': {'num': 3}}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -274,7 +274,7 @@ res = policy._get_count('FOO', action) self.assertEqual(1, res) - def test__get_count_scale_out_with_inputs(self): + def test_get_count_scale_out_with_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, inputs={'count': 3}) policy = rp.RegionPlacementPolicy('p1', self.spec) @@ -282,7 +282,7 @@ res = policy._get_count('FOO', action) self.assertEqual(3, res) - def test__get_count_scale_out_with_incorrect_inputs(self): + def test_get_count_scale_out_with_incorrect_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, inputs={'num': 3}) policy = rp.RegionPlacementPolicy('p1', self.spec) diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_scaling_policy.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_scaling_policy.py --- 
senlin-6.0.0/senlin/tests/unit/policies/test_scaling_policy.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_scaling_policy.py 2018-11-19 18:48:08.000000000 +0000 @@ -11,10 +11,13 @@ # under the License. import mock +from oslo_utils import timeutils import six +import time from senlin.common import consts from senlin.common import exception as exc +from senlin.objects import cluster_policy as cpo from senlin.objects import node as no from senlin.policies import base as pb from senlin.policies import scaling_policy as sp @@ -207,14 +210,17 @@ action = mock.Mock() action.context = self.context action.action = consts.CLUSTER_SCALE_IN - action.inputs = {'count': 1} + action.inputs = {'count': 1, 'last_op': timeutils.utcnow(True)} action.entity = self.cluster adjustment = self.spec['properties']['adjustment'] adjustment['type'] = consts.CHANGE_IN_CAPACITY adjustment['number'] = 2 + adjustment['cooldown'] = 1 policy = sp.ScalingPolicy('p1', self.spec) + time.sleep(1) + policy.pre_op(self.cluster['id'], action) pd = { 'deletion': { @@ -238,6 +244,26 @@ } action.data.update.assert_called_with(pd) + def test_pre_op_within_cooldown(self): + action = mock.Mock() + action.context = self.context + action.action = consts.CLUSTER_SCALE_IN + action.inputs = {'last_op': timeutils.utcnow(True)} + action.entity = self.cluster + + adjustment = self.spec['properties']['adjustment'] + adjustment['cooldown'] = 300 + kwargs = {'id': "FAKE_ID"} + policy = sp.ScalingPolicy('p1', self.spec, **kwargs) + + policy.pre_op('FAKE_CLUSTER_ID', action) + pd = { + 'status': pb.CHECK_ERROR, + 'reason': "Policy FAKE_ID cooldown is still in progress.", + } + action.data.update.assert_called_with(pd) + action.store.assert_called_with(self.context) + @mock.patch.object(sp.ScalingPolicy, '_calculate_adjustment_count') def test_pre_op_pass_check_effort(self, mock_adjustmentcount): # Cluster with maxsize and best_effort is False @@ -367,7 +393,23 @@ action.data.update.assert_called_with(pd) action.store.assert_called_with(self.context) - def test_need_check_in_event(self): + @mock.patch.object(cpo.ClusterPolicy, 'update') + @mock.patch.object(timeutils, 'utcnow') + def test_post_op(self, mock_time, mock_cluster_policy): + action = mock.Mock() + action.context = self.context + + mock_time.return_value = 'FAKE_TIME' + + kwargs = {'id': 'FAKE_POLICY_ID'} + policy = sp.ScalingPolicy('test-policy', self.spec, **kwargs) + + policy.post_op('FAKE_CLUSTER_ID', action) + mock_cluster_policy.assert_called_once_with( + action.context, 'FAKE_CLUSTER_ID', 'FAKE_POLICY_ID', + {'last_op': 'FAKE_TIME'}) + + def test_need_check_in_event_before(self): action = mock.Mock() action.context = self.context action.action = consts.CLUSTER_SCALE_IN @@ -377,7 +419,7 @@ res = policy.need_check('BEFORE', action) self.assertTrue(res) - def test_need_check_not_in_event(self): + def test_need_check_not_in_event_before(self): action = mock.Mock() action.context = self.context action.action = consts.CLUSTER_SCALE_OUT @@ -386,3 +428,23 @@ policy = sp.ScalingPolicy('test-policy', self.spec) res = policy.need_check('BEFORE', action) self.assertFalse(res) + + def test_need_check_in_event_after(self): + action = mock.Mock() + action.context = self.context + action.action = consts.CLUSTER_SCALE_OUT + action.data = {} + + policy = sp.ScalingPolicy('test-policy', self.spec) + res = policy.need_check('AFTER', action) + self.assertTrue(res) + + def test_need_check_not_in_event_after(self): + action = mock.Mock() 
+ action.context = self.context + action.action = consts.CLUSTER_ATTACH_POLICY + action.data = {} + + policy = sp.ScalingPolicy('test-policy', self.spec) + res = policy.need_check('AFTER', action) + self.assertFalse(res) diff -Nru senlin-6.0.0/senlin/tests/unit/policies/test_zone_placement.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_zone_placement.py --- senlin-6.0.0/senlin/tests/unit/policies/test_zone_placement.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/policies/test_zone_placement.py 2018-11-19 18:48:08.000000000 +0000 @@ -93,7 +93,7 @@ self.assertEqual("The specified name '['AZ2', 'AZ3']' " "could not be found.", six.text_type(ex)) - def test__create_plan_default(self): + def test_create_plan_default(self): self.spec['properties']['zones'] = [ {'name': 'AZ1'}, {'name': 'AZ2'}, {'name': 'AZ3'}, {'name': 'AZ4'} ] @@ -105,7 +105,7 @@ answer = {'AZ1': 1, 'AZ2': 1, 'AZ3': 1, 'AZ4': 2} self.assertEqual(answer, plan) - def test__create_plan(self): + def test_create_plan(self): policy = zp.ZonePlacementPolicy('test-policy', self.spec) zones = policy.zones @@ -129,7 +129,7 @@ answer = {'AZ4': 4} self.assertEqual(answer, plan) - def test__get_count_node_create_with_zone(self): + def test_get_count_node_create_with_zone(self): x_profile = mock.Mock(AVAILABILITY_ZONE='availability_zone', properties={'availability_zone': 'zone1'}) x_node = mock.Mock(rt={'profile': x_profile}) @@ -140,7 +140,7 @@ res = policy._get_count('FOO', action) self.assertEqual(0, res) - def test__get_count_node_create_without_zone(self): + def test_get_count_node_create_without_zone(self): x_profile = mock.Mock(AVAILABILITY_ZONE='availability_zone', properties={'availability_zone': None}) x_node = mock.Mock(rt={'profile': x_profile}) @@ -151,7 +151,7 @@ res = policy._get_count('FOO', action) self.assertEqual(1, res) - def test__get_count_resize_deletion(self): + def test_get_count_resize_deletion(self): action = mock.Mock(action=consts.CLUSTER_RESIZE, data={'deletion': {'count': 3}}) @@ -160,7 +160,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_resize_creation(self): + def test_get_count_resize_creation(self): action = mock.Mock(action=consts.CLUSTER_RESIZE, data={'creation': {'count': 3}}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -171,8 +171,8 @@ @mock.patch.object(no.Node, 'count_by_cluster') @mock.patch.object(su, 'parse_resize_params') @mock.patch.object(co.Cluster, 'get') - def test__get_count_resize_parse_error(self, mock_cluster, mock_parse, - mock_count): + def test_get_count_resize_parse_error(self, mock_cluster, mock_parse, + mock_count): x_cluster = mock.Mock() mock_cluster.return_value = x_cluster mock_count.return_value = 3 @@ -192,8 +192,8 @@ @mock.patch.object(no.Node, 'count_by_cluster') @mock.patch.object(su, 'parse_resize_params') @mock.patch.object(co.Cluster, 'get') - def test__get_count_resize_parse_creation(self, mock_cluster, mock_parse, - mock_count): + def test_get_count_resize_parse_creation(self, mock_cluster, mock_parse, + mock_count): def fake_parse(action, cluster, current): action.data = {'creation': {'count': 3}} return policy_base.CHECK_OK, '' @@ -215,8 +215,8 @@ @mock.patch.object(no.Node, 'count_by_cluster') @mock.patch.object(su, 'parse_resize_params') @mock.patch.object(co.Cluster, 'get') - def test__get_count_resize_parse_deletion(self, mock_cluster, mock_parse, - mock_count): + def test_get_count_resize_parse_deletion(self, mock_cluster, 
mock_parse, + mock_count): def fake_parse(action, cluster, current): action.data = {'deletion': {'count': 3}} return policy_base.CHECK_OK, '' @@ -235,7 +235,7 @@ mock_count.assert_called_once_with(action.context, 'FOO') mock_parse.assert_called_once_with(action, x_cluster, 3) - def test__get_count_scale_in_with_data(self): + def test_get_count_scale_in_with_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={'deletion': {'count': 3}}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -243,7 +243,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_scale_in_with_no_data(self): + def test_get_count_scale_in_with_no_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={'deletion': {'num': 3}}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -251,7 +251,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-1, res) - def test__get_count_scale_in_with_inputs(self): + def test_get_count_scale_in_with_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, inputs={'count': 3}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -259,7 +259,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-3, res) - def test__get_count_scale_in_with_incorrect_inputs(self): + def test_get_count_scale_in_with_incorrect_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_IN, data={}, inputs={'num': 3}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -267,7 +267,7 @@ res = policy._get_count('FOO', action) self.assertEqual(-1, res) - def test__get_count_scale_out_with_data(self): + def test_get_count_scale_out_with_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={'creation': {'count': 3}}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -275,7 +275,7 @@ res = policy._get_count('FOO', action) self.assertEqual(3, res) - def test__get_count_scale_out_with_no_data(self): + def test_get_count_scale_out_with_no_data(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={'creation': {'num': 3}}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -283,7 +283,7 @@ res = policy._get_count('FOO', action) self.assertEqual(1, res) - def test__get_count_scale_out_with_inputs(self): + def test_get_count_scale_out_with_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, inputs={'count': 3}) policy = zp.ZonePlacementPolicy('p1', self.spec) @@ -291,7 +291,7 @@ res = policy._get_count('FOO', action) self.assertEqual(3, res) - def test__get_count_scale_out_with_incorrect_inputs(self): + def test_get_count_scale_out_with_incorrect_inputs(self): action = mock.Mock(action=consts.CLUSTER_SCALE_OUT, data={}, inputs={'num': 3}) policy = zp.ZonePlacementPolicy('p1', self.spec) diff -Nru senlin-6.0.0/senlin/tests/unit/profiles/test_container_docker.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_container_docker.py --- senlin-6.0.0/senlin/tests/unit/profiles/test_container_docker.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_container_docker.py 2018-11-19 18:48:08.000000000 +0000 @@ -178,7 +178,7 @@ self.assertEqual(msg, ex.message) @mock.patch.object(node.Node, 'load') - def test__get_host_node_found_by_node(self, mock_load): + def test_get_host_node_found_by_node(self, mock_load): node = mock.Mock() mock_load.return_value = node ctx = mock.Mock() @@ -190,7 +190,7 @@ mock_load.assert_called_once_with(ctx, node_id='host_node') @mock.patch.object(dp.DockerProfile, 
'_get_random_node') - def test__get_host_node_found_by_cluster(self, mock_get): + def test_get_host_node_found_by_cluster(self, mock_get): node = mock.Mock() mock_get.return_value = node ctx = mock.Mock() @@ -202,7 +202,7 @@ mock_get.assert_called_once_with(ctx, 'host_cluster') @mock.patch.object(node.Node, 'load') - def test__get_host_node_not_found(self, mock_load): + def test_get_host_node_not_found(self, mock_load): mock_load.side_effect = exc.ResourceNotFound(type='node', id='fake_node') profile = dp.DockerProfile('container', self.spec) @@ -218,7 +218,7 @@ @mock.patch.object(node.Node, 'load') @mock.patch.object(no.Node, 'get_all_by_cluster') @mock.patch.object(cluster.Cluster, 'load') - def test__get_random_node(self, mock_cluster, mock_nodes, mock_load): + def test_get_random_node(self, mock_cluster, mock_nodes, mock_load): cluster = mock.Mock() mock_cluster.return_value = cluster node1 = mock.Mock() @@ -240,7 +240,7 @@ self.assertIn(n, [node1, node2]) @mock.patch.object(cluster.Cluster, 'load') - def test__get_random_node_cluster_not_found(self, mock_load): + def test_get_random_node_cluster_not_found(self, mock_load): mock_load.side_effect = exc.ResourceNotFound(type='cluster', id='host_cluster') ctx = mock.Mock() @@ -255,7 +255,7 @@ @mock.patch.object(no.Node, 'get_all_by_cluster') @mock.patch.object(cluster.Cluster, 'load') - def test__get_random_node_empty_cluster(self, mock_cluster, mock_nodes): + def test_get_random_node_empty_cluster(self, mock_cluster, mock_nodes): cluster = mock.Mock() mock_cluster.return_value = cluster mock_nodes.return_value = [] diff -Nru senlin-6.0.0/senlin/tests/unit/profiles/test_heat_stack.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_heat_stack.py --- senlin-6.0.0/senlin/tests/unit/profiles/test_heat_stack.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_heat_stack.py 2018-11-19 18:48:08.000000000 +0000 @@ -801,7 +801,7 @@ oc.stack_get.assert_called_once_with('FAKE_ID') oc.stack_get_template.assert_called_once_with('FAKE_ID') - def test__refresh_tags_empty_no_add(self): + def test_refresh_tags_empty_no_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock() @@ -809,7 +809,7 @@ self.assertEqual(("", False), res) - def test__refresh_tags_with_contents_no_add(self): + def test_refresh_tags_with_contents_no_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock() @@ -817,7 +817,7 @@ self.assertEqual(('foo', False), res) - def test__refresh_tags_deleted_no_add(self): + def test_refresh_tags_deleted_no_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock() @@ -825,7 +825,7 @@ self.assertEqual(('bar', True), res) - def test__refresh_tags_empty_and_add(self): + def test_refresh_tags_empty_and_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) @@ -836,7 +836,7 @@ 'cluster_node_index=123']) self.assertEqual((expected, True), res) - def test__refresh_tags_with_contents_and_add(self): + def test_refresh_tags_with_contents_and_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) @@ -848,7 +848,7 @@ 'cluster_node_index=123']) self.assertEqual((expected, True), res) - def test__refresh_tags_deleted_and_add(self): + def test_refresh_tags_deleted_and_add(self): profile = stack.StackProfile('t', self.spec) node = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) diff 
-Nru senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server.py --- senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server.py 2018-11-19 18:48:08.000000000 +0000 @@ -66,7 +66,7 @@ self.assertIsNone(profile.server_id) - def test__build_metadata(self): + def test_build_metadata(self): obj = mock.Mock(id='NODE_ID', cluster_id='') profile = server.ServerProfile('t', self.spec) @@ -74,7 +74,7 @@ self.assertEqual({'cluster_node_id': 'NODE_ID'}, res) - def test__build_metadata_with_inputs(self): + def test_build_metadata_with_inputs(self): obj = mock.Mock(id='NODE_ID', cluster_id='') profile = server.ServerProfile('t', self.spec) @@ -82,7 +82,7 @@ self.assertEqual({'cluster_node_id': 'NODE_ID', 'foo': 'bar'}, res) - def test__build_metadata_for_cluster_node(self): + def test_build_metadata_for_cluster_node(self): obj = mock.Mock(id='NODE_ID', cluster_id='CLUSTER_ID', index=123) profile = server.ServerProfile('t', self.spec) @@ -1651,17 +1651,14 @@ node_obj = mock.Mock(physical_id='FAKE_ID') node_obj.name = None - ex = self.assertRaises(exc.EResourceOperation, + ex = self.assertRaises(exc.ESchema, profile.handle_rebuild, node_obj) - self.assertEqual("Failed in rebuilding server 'FAKE_ID': " - "Server name is not a string or unicode.", + self.assertEqual("The value 'None' is not a valid string.", six.text_type(ex)) cc.server_get.assert_called_once_with('FAKE_ID') - cc.server_rebuild.assert_called_once_with('FAKE_ID', '123', - None, - 'adminpass') + cc.server_rebuild.assert_not_called() self.assertEqual(0, cc.wait_for_server.call_count) def test_handle_change_password(self): @@ -1955,7 +1952,9 @@ obj = mock.Mock(physical_id='FAKE_ID') profile = server.ServerProfile('t', self.spec) cc = mock.Mock() + gc = mock.Mock() profile._computeclient = cc + profile._glanceclient = gc # do it res = profile.handle_rescue(obj, admin_pass='new_pass', @@ -1964,6 +1963,7 @@ self.assertTrue(res) cc.server_rescue.assert_called_once_with( 'FAKE_ID', admin_pass='new_pass', image_ref='FAKE_IMAGE') + gc.image_find.assert_called_once_with('FAKE_IMAGE', False) def test_handle_rescue_image_none(self): obj = mock.Mock(physical_id='FAKE_ID') @@ -1986,9 +1986,11 @@ def test_handle_rescue_failed_waiting(self): profile = server.ServerProfile('t', self.spec) cc = mock.Mock() + gc = mock.Mock() ex = exc.InternalError(code=500, message='timeout') cc.wait_for_server.side_effect = ex profile._computeclient = cc + profile._glanceclient = gc node_obj = mock.Mock(physical_id='FAKE_ID') ex = self.assertRaises(exc.EResourceOperation, @@ -2002,6 +2004,7 @@ admin_pass='new_pass', image_ref='FAKE_IMAGE') cc.wait_for_server.assert_called_once_with('FAKE_ID', 'RESCUE') + gc.image_find.assert_called_once_with('FAKE_IMAGE', False) def test_handle_unrescue(self): obj = mock.Mock(physical_id='FAKE_ID') diff -Nru senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server_update.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server_update.py --- senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server_update.py 2018-08-30 14:16:49.000000000 +0000 +++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server_update.py 2018-11-19 18:48:08.000000000 +0000 @@ -159,7 +159,7 @@ } self.patchobject(node_obj.Node, 'update') - def test__update_name(self): + def test_update_name(self): profile = 
server.ServerProfile('t', self.spec) cc = mock.Mock() profile._computeclient = cc @@ -170,7 +170,7 @@ self.assertIsNone(res) cc.server_update.assert_called_once_with('NOVA_ID', name='NEW_NAME') - def test__update_name_nova_failure(self): + def test_update_name_nova_failure(self): profile = server.ServerProfile('t', self.spec) cc = mock.Mock() profile._computeclient = cc @@ -185,7 +185,7 @@ six.text_type(ex)) cc.server_update.assert_called_once_with('NOVA_ID', name='NEW_NAME') - def test__update_password(self): + def test_update_password(self): profile = server.ServerProfile('t', self.spec) cc = mock.Mock() profile._computeclient = cc @@ -197,7 +197,7 @@ cc.server_change_password.assert_called_once_with( 'NOVA_ID', 'NEW_PASSWORD') - def test__update_password_nova_failure(self): + def test_update_password_nova_failure(self): profile = server.ServerProfile('t', self.spec) cc = mock.Mock() profile._computeclient = cc @@ -214,7 +214,7 @@ cc.server_change_password.assert_called_once_with( 'NOVA_ID', 'NEW_PASSWORD') - def test__update_metadata(self): + def test_update_metadata(self): obj = mock.Mock(id='NODE_ID', physical_id='NOVA_ID', cluster_id='CLUSTER_ID', index=456) cc = mock.Mock() @@ -237,7 +237,7 @@ } ) - def test___update_metadata_no_change(self): + def test__update_metadata_no_change(self): obj = mock.Mock(id='NODE_ID') profile = server.ServerProfile('t', self.spec) cc = mock.Mock() @@ -250,7 +250,7 @@ self.assertIsNone(res) self.assertEqual(0, cc.server_metadata_update.call_count) - def test__update_metadata_nova_failure(self): + def test_update_metadata_nova_failure(self): obj = mock.Mock(id='NODE_ID', physical_id='NOVA_ID', cluster_id='') err = exc.InternalError(code=500, message='Nova Error') cc = mock.Mock() @@ -273,7 +273,7 @@ 'NOVA_ID', {'fooa': 'baaar', 'cluster_node_id': 'NODE_ID'} ) - def test__update_flavor(self): + def test_update_flavor(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() profile = server.ServerProfile('t', self.spec) @@ -296,7 +296,7 @@ mock.call('NOVA_ID', 'VERIFY_RESIZE'), mock.call('NOVA_ID', 'ACTIVE')]) - def test__update_flavor_failed_validation(self): + def test_update_flavor_failed_validation(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() profile = server.ServerProfile('t', self.spec) @@ -314,7 +314,7 @@ mock_validate.assert_called_once_with(obj, 'FLAV', 'update') - def test__update_flavor_failed_validation_2(self): + def test_update_flavor_failed_validation_2(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() profile = server.ServerProfile('t', self.spec) @@ -338,7 +338,7 @@ mock.call(obj, 'new_flavor', 'update'), ]) - def test__update_flavor_same(self): + def test_update_flavor_same(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() profile = server.ServerProfile('t', self.spec) @@ -359,7 +359,7 @@ ]) self.assertEqual(0, cc.server_resize.call_count) - def test__update_flavor_resize_failed(self): + def test_update_flavor_resize_failed(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() cc.server_resize.side_effect = [ @@ -387,7 +387,7 @@ self.assertEqual("Failed in updating server 'NOVA_ID': Resize " "failed.", six.text_type(ex)) - def test__update_flavor_first_wait_for_server_failed(self): + def test_update_flavor_first_wait_for_server_failed(self): obj = mock.Mock(physical_id='NOVA_ID') cc = mock.Mock() cc.wait_for_server.side_effect = [ @@ -421,7 +421,7 @@ self.assertEqual("Failed in updating server 'NOVA_ID': " "TIMEOUT.", six.text_type(ex)) - def 
test__update_flavor_resize_failed_revert_failed(self):
+    def test_update_flavor_resize_failed_revert_failed(self):
         obj = mock.Mock(physical_id='NOVA_ID')
         cc = mock.Mock()
         err_resize = exc.InternalError(code=500, message='Resize')
@@ -454,7 +454,7 @@
         self.assertEqual("Failed in updating server 'NOVA_ID': " "Revert.", six.text_type(ex))

-    def test__update_flavor_confirm_failed(self):
+    def test_update_flavor_confirm_failed(self):
         obj = mock.Mock(physical_id='NOVA_ID')
         cc = mock.Mock()
         err_confirm = exc.InternalError(code=500, message='Confirm')
@@ -484,7 +484,7 @@
         self.assertEqual("Failed in updating server 'NOVA_ID': Confirm.", six.text_type(ex))

-    def test__update_flavor_wait_confirm_failed(self):
+    def test_update_flavor_wait_confirm_failed(self):
         obj = mock.Mock(physical_id='NOVA_ID')
         cc = mock.Mock()
         err_wait = exc.InternalError(code=500, message='Wait')
@@ -517,7 +517,7 @@
         self.assertEqual("Failed in updating server 'NOVA_ID': Wait.", six.text_type(ex))

-    def test__update_image(self):
+    def test_update_image(self):
         profile = server.ServerProfile('t', self.spec)
         x_image = {'id': '123'}
         x_server = mock.Mock(image=x_image)
@@ -542,7 +542,7 @@
             'NOVA_ID', '456', 'new_name', 'new_pass')
         cc.wait_for_server.assert_called_once_with('NOVA_ID', 'ACTIVE')

-    def test__update_image_new_image_is_none(self):
+    def test_update_image_new_image_is_none(self):
         profile = server.ServerProfile('t', self.spec)
         obj = mock.Mock(physical_id='NOVA_ID')
         new_spec = copy.deepcopy(self.spec)
@@ -557,7 +557,7 @@
                " with image set to None is not supported by Nova.")
         self.assertEqual(msg, six.text_type(ex))

-    def test__update_image_new_image_invalid(self):
+    def test_update_image_new_image_invalid(self):
         # NOTE: The image invalid could be caused by a non-existent image or
         # a compute driver failure
         profile = server.ServerProfile('t', self.spec)
@@ -578,7 +578,7 @@
         self.assertEqual(msg, six.text_type(ex))
         mock_check.assert_called_once_with(obj, 'new_image', reason='update')

-    def test__update_image_old_image_invalid(self):
+    def test_update_image_old_image_invalid(self):
         # NOTE: The image invalid could be caused by a non-existent image or
         # a compute driver failure
         profile = server.ServerProfile('t', self.spec)
@@ -608,7 +608,7 @@
             mock.call(obj, 'new_image', reason='update'),
         ])

-    def test__update_image_old_image_is_none_but_succeeded(self):
+    def test_update_image_old_image_is_none_but_succeeded(self):
         old_spec = copy.deepcopy(self.spec)
         del old_spec['properties']['image']
         profile = server.ServerProfile('t', old_spec)
@@ -634,7 +634,7 @@
             'NOVA_ID', '456', 'new_name', 'new_pass')
         cc.wait_for_server.assert_called_once_with('NOVA_ID', 'ACTIVE')

-    def test__update_image_old_image_is_none_but_failed(self):
+    def test_update_image_old_image_is_none_but_failed(self):
         old_spec = copy.deepcopy(self.spec)
         del old_spec['properties']['image']
         profile = server.ServerProfile('t', old_spec)
@@ -659,7 +659,7 @@
         mock_check.assert_called_once_with(obj, 'new_image', reason='update')
         cc.server_get.assert_called_once_with('NOVA_ID')

-    def test__update_image_updating_to_same_image(self):
+    def test_update_image_updating_to_same_image(self):
         profile = server.ServerProfile('t', self.spec)
         x_image = {'id': '123'}
         x_server = mock.Mock(image=x_image)
@@ -684,7 +684,7 @@
         self.assertEqual(0, cc.server_rebuild.call_count)
         self.assertEqual(0, cc.wait_for_server.call_count)

-    def test__update_image_failed_rebuilding(self):
+    def test_update_image_failed_rebuilding(self):
         profile = server.ServerProfile('t', self.spec)
         x_image = {'id': '123'}
         x_server = mock.Mock(image=x_image)
@@ -714,7 +714,7 @@
             'NOVA_ID', '456', 'new_name', 'new_pass')
         self.assertEqual(0, cc.wait_for_server.call_count)

-    def test__update_image_failed_waiting(self):
+    def test_update_image_failed_waiting(self):
         profile = server.ServerProfile('t', self.spec)
         x_image = {'id': '123'}
         x_server = mock.Mock(image=x_image)
@@ -744,7 +744,7 @@
             'NOVA_ID', '456', 'new_name', 'new_pass')
         cc.wait_for_server.assert_called_once_with('NOVA_ID', 'ACTIVE')

-    def test__create_interfaces(self):
+    def test_create_interfaces(self):
         cc = mock.Mock()
         server_obj = mock.Mock()
         cc.server_get.return_value = server_obj
@@ -804,7 +804,7 @@
         ]
         cc.server_interface_create.assert_has_calls(create_calls)

-    def test__create_interfaces_failed_getting_server(self):
+    def test_create_interfaces_failed_getting_server(self):
         cc = mock.Mock()
         cc.server_get.side_effect = exc.InternalError(message='Not valid')
         profile = server.ServerProfile('t', self.spec)
@@ -823,7 +823,7 @@
         cc.server_get.assert_called_once_with('NOVA_ID')
         self.assertEqual(0, profile._create_ports_from_properties.call_count)

-    def test__create_interfaces_failed_validation(self):
+    def test_create_interfaces_failed_validation(self):
         cc = mock.Mock()
         server_obj = mock.Mock()
         cc.server_get.return_value = server_obj
@@ -846,7 +846,7 @@
         mock_validate.assert_called_once_with(obj, networks[0], 'update')
         self.assertEqual(0, cc.server_interface_create.call_count)

-    def test__delete_interfaces(self):
+    def test_delete_interfaces(self):
         cc = mock.Mock()
         nc = mock.Mock()
         net1 = mock.Mock(id='net1')
@@ -886,7 +886,7 @@
             mock.call('port3', ignore_missing=True),
         ])

-    def test__delete_interfaces_failed_delete(self):
+    def test_delete_interfaces_failed_delete(self):
         cc = mock.Mock()
         profile = server.ServerProfile('t', self.spec)
         profile._computeclient = cc
@@ -918,7 +918,7 @@
     @mock.patch.object(server.ServerProfile, '_update_network_remove_port')
     @mock.patch.object(server.ServerProfile, '_update_network_add_port')
-    def test__update_network(self, mock_create, mock_delete):
+    def test_update_network(self, mock_create, mock_delete):
         obj = mock.Mock(physical_id='FAKE_ID')
         old_spec = copy.deepcopy(self.spec)
@@ -1136,7 +1136,9 @@
         x_server = mock.Mock(image=x_image)
         cc = mock.Mock()
         cc.server_get.return_value = x_server
+        gc = mock.Mock()
         profile._computeclient = cc
+        profile._glanceclient = gc
         new_spec = copy.deepcopy(self.spec)
         new_spec['properties']['flavor'] = 'FAKE_FLAVOR_NEW'
         new_profile = server.ServerProfile('t', new_spec)
@@ -1144,6 +1146,7 @@
         res = profile.do_update(obj, new_profile)
         self.assertTrue(res)
         mock_update_flavor.assert_called_with(obj, new_profile)
+        gc.image_find.assert_called_with('FAKE_IMAGE', False)

     @mock.patch.object(server.ServerProfile, '_update_flavor')
     def test_do_update_update_flavor_failed(self, mock_update_flavor):
@@ -1156,7 +1159,9 @@
         x_server = mock.Mock(image=x_image)
         cc = mock.Mock()
         cc.server_get.return_value = x_server
+        gc = mock.Mock()
         profile._computeclient = cc
+        profile._glanceclient = gc
         new_spec = copy.deepcopy(self.spec)
         new_spec['properties']['flavor'] = 'FAKE_FLAVOR_NEW'
         new_profile = server.ServerProfile('t', new_spec)
@@ -1169,6 +1174,7 @@
         self.assertEqual("Failed in updating server 'NOVA_ID': " "Flavor Not Found.", six.text_type(ex))
+        gc.image_find.assert_called_with('FAKE_IMAGE', False)

     @mock.patch.object(server.ServerProfile, '_update_flavor')
     @mock.patch.object(server.ServerProfile, '_update_network')
@@ -1179,8 +1185,10 @@
         x_image = {'id': '123'}
         x_server = mock.Mock(image=x_image)
         cc = mock.Mock()
+        gc = mock.Mock()
         cc.server_get.return_value = x_server
         profile._computeclient = cc
+        profile._glanceclient = gc
         obj = mock.Mock(physical_id='NOVA_ID')
@@ -1192,6 +1200,7 @@
         res = profile.do_update(obj, new_profile)
         self.assertTrue(res)
+        gc.image_find.assert_called_with('FAKE_IMAGE', False)
         mock_update_network.assert_called_with(obj, new_profile)

     @mock.patch.object(server.ServerProfile, '_update_password')
diff -Nru senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server_validate.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server_validate.py
--- senlin-6.0.0/senlin/tests/unit/profiles/test_nova_server_validate.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_nova_server_validate.py 2018-11-19 18:48:08.000000000 +0000
@@ -310,11 +310,13 @@
         super(TestImageValidation, self).setUp()
         self.cc = mock.Mock()
+        self.gc = mock.Mock()
         self.profile = server.ServerProfile('t', spec)
         self.profile._computeclient = self.cc
+        self.profile._glanceclient = self.gc

     def test_validation(self):
-        self.cc.image_find.side_effect = self.validate_result
+        self.gc.image_find.side_effect = self.validate_result
         node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
         image = 'IMAGE'
@@ -328,7 +330,7 @@
                                node, image, self.reason)
             self.assertEqual(self.message, six.text_type(ex))
-        self.cc.image_find.assert_called_once_with(image, False)
+        self.gc.image_find.assert_called_once_with(image, False)

 class TestVolumeValidation(base.SenlinTestCase):
diff -Nru senlin-6.0.0/senlin/tests/unit/profiles/test_profile_base.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_profile_base.py
--- senlin-6.0.0/senlin/tests/unit/profiles/test_profile_base.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/profiles/test_profile_base.py 2018-11-19 18:48:08.000000000 +0000
@@ -84,6 +84,7 @@
         self.ctx = utils.dummy_context(project='profile_test_project')
         g_env = environment.global_env()
         g_env.register_profile('os.dummy-1.0', DummyProfile)
+        g_env.register_profile('os.dummy-1.1', DummyProfile)
         self.spec = parser.simple_parse(sample_profile)

     def _create_profile(self, name, pid=None, context=None):
@@ -131,6 +132,72 @@
         self.assertIsNone(profile._block_storageclient)

     @mock.patch.object(senlin_ctx, 'get_service_credentials')
+    def test_init_version_as_float(self, mock_creds):
+        mock_creds.return_value = {'foo': 'bar'}
+        self.spec['version'] = 1.1
+        profile = self._create_profile('test-profile')
+
+        self.assertIsNone(profile.id)
+        self.assertEqual('test-profile', profile.name)
+        self.assertEqual(self.spec, profile.spec)
+        self.assertEqual('os.dummy', profile.type_name)
+        self.assertEqual('1.1', profile.version)
+        self.assertEqual('os.dummy-1.1', profile.type)
+        self.assertEqual(self.ctx.user_id, profile.user)
+        self.assertEqual(self.ctx.project_id, profile.project)
+        self.assertEqual(self.ctx.domain_id, profile.domain)
+        self.assertEqual({}, profile.metadata)
+        self.assertIsNone(profile.created_at)
+        self.assertIsNone(profile.updated_at)
+
+        spec_data = profile.spec_data
+        self.assertEqual('os.dummy', spec_data['type'])
+        self.assertEqual('1.1', spec_data['version'])
+        self.assertEqual('value1', spec_data['properties']['key1'])
+        self.assertEqual(2, spec_data['properties']['key2'])
+        self.assertEqual('value1', profile.properties['key1'])
+        self.assertEqual(2, profile.properties['key2'])
+        self.assertEqual({'foo': 'bar'}, profile.context)
+
+        self.assertIsNone(profile._computeclient)
+        self.assertIsNone(profile._networkclient)
+        self.assertIsNone(profile._orchestrationclient)
+        self.assertIsNone(profile._block_storageclient)
+
+    @mock.patch.object(senlin_ctx, 'get_service_credentials')
+    def test_init_version_as_string(self, mock_creds):
+        mock_creds.return_value = {'foo': 'bar'}
+        self.spec['version'] = '1.1'
+        profile = self._create_profile('test-profile')
+
+        self.assertIsNone(profile.id)
+        self.assertEqual('test-profile', profile.name)
+        self.assertEqual(self.spec, profile.spec)
+        self.assertEqual('os.dummy', profile.type_name)
+        self.assertEqual('1.1', profile.version)
+        self.assertEqual('os.dummy-1.1', profile.type)
+        self.assertEqual(self.ctx.user_id, profile.user)
+        self.assertEqual(self.ctx.project_id, profile.project)
+        self.assertEqual(self.ctx.domain_id, profile.domain)
+        self.assertEqual({}, profile.metadata)
+        self.assertIsNone(profile.created_at)
+        self.assertIsNone(profile.updated_at)
+
+        spec_data = profile.spec_data
+        self.assertEqual('os.dummy', spec_data['type'])
+        self.assertEqual('1.1', spec_data['version'])
+        self.assertEqual('value1', spec_data['properties']['key1'])
+        self.assertEqual(2, spec_data['properties']['key2'])
+        self.assertEqual('value1', profile.properties['key1'])
+        self.assertEqual(2, profile.properties['key2'])
+        self.assertEqual({'foo': 'bar'}, profile.context)
+
+        self.assertIsNone(profile._computeclient)
+        self.assertIsNone(profile._networkclient)
+        self.assertIsNone(profile._orchestrationclient)
+        self.assertIsNone(profile._block_storageclient)
+
+    @mock.patch.object(senlin_ctx, 'get_service_credentials')
     def test_init_with_context(self, mock_creds):
         mock_creds.return_value = {'foo': 'bar'}
         profile = self._create_profile('test-profile',
@@ -570,7 +637,7 @@
         self.assertRaises(exception.ESchema, profile.validate)

     @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test__init_context(self, mock_creds):
+    def test_init_context(self, mock_creds):
         fake_ctx = mock.Mock()
         mock_creds.return_value = fake_ctx
@@ -585,7 +652,7 @@
         mock_creds.assert_called_once_with()

     @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test__init_context_for_real(self, mock_creds):
+    def test_init_context_for_real(self, mock_creds):
         fake_ctx = {
             'project_name': 'this project',
             'project_domain_name': 'this domain',
@@ -607,7 +674,7 @@
         self.assertEqual(expected, profile.context)

     @mock.patch.object(senlin_ctx, 'get_service_credentials')
-    def test__init_context_for_real_with_data(self, mock_creds):
+    def test_init_context_for_real_with_data(self, mock_creds):
         fake_ctx = {
             'project_name': 'this project',
             'project_domain_name': 'this domain',
@@ -633,7 +700,7 @@
     @mock.patch.object(co.Credential, 'get')
     @mock.patch.object(oslo_ctx, 'get_current')
-    def test__build_conn_params(self, mock_current, mock_get):
+    def test_build_conn_params(self, mock_current, mock_get):
         profile = self._create_profile('test-profile')
         profile.context = {'foo': 'bar'}
         fake_cred = mock.Mock(cred={'openstack': {'trust': 'TRUST_ID'}})
@@ -655,7 +722,7 @@
     @mock.patch.object(co.Credential, 'get')
     @mock.patch.object(oslo_ctx, 'get_current')
-    def test__build_conn_params_trust_not_found(self, mock_current, mock_get):
+    def test_build_conn_params_trust_not_found(self, mock_current, mock_get):
         profile = self._create_profile('test-profile')
         mock_get.return_value = None
         fake_ctx = mock.Mock()
@@ -698,6 +765,25 @@
     @mock.patch.object(pb.Profile, '_build_conn_params')
     @mock.patch("senlin.drivers.base.SenlinDriver")
+    def test_glance_client(self, mock_senlindriver, mock_params):
+        obj = mock.Mock()
+        sd = mock.Mock()
+        gc = mock.Mock()
+        sd.glance.return_value = gc
+        mock_senlindriver.return_value = sd
+        fake_params = mock.Mock()
+        mock_params.return_value = fake_params
+        profile = self._create_profile('test-profile')
+
+        res = profile.glance(obj)
+
+        self.assertEqual(gc, res)
+        self.assertEqual(gc, profile._glanceclient)
+        mock_params.assert_called_once_with(obj.user, obj.project)
+        sd.glance.assert_called_once_with(fake_params)
+
+    @mock.patch.object(pb.Profile, '_build_conn_params')
+    @mock.patch("senlin.drivers.base.SenlinDriver")
     def test_neutron_client(self, mock_senlindriver, mock_params):
         obj = mock.Mock()
         sd = mock.Mock()
diff -Nru senlin-6.0.0/senlin/tests/unit/test_common_scaleutils.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/test_common_scaleutils.py
--- senlin-6.0.0/senlin/tests/unit/test_common_scaleutils.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/test_common_scaleutils.py 2018-11-19 18:48:08.000000000 +0000
@@ -279,7 +279,7 @@
         self.assertEqual(['N1', 'N2', 'N15', 'N13', 'N12'], res)

     @mock.patch.object(su, 'filter_error_nodes')
-    def test__victims_by_profile_age_oldest(self, mock_filter):
+    def test_victims_by_profile_age_oldest(self, mock_filter):
         good_nodes = [
             mock.Mock(id='N11', profile_created_at=110),
             mock.Mock(id='N15', profile_created_at=150),
diff -Nru senlin-6.0.0/senlin/tests/unit/test_common_schema.py senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/test_common_schema.py
--- senlin-6.0.0/senlin/tests/unit/test_common_schema.py 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin/tests/unit/test_common_schema.py 2018-11-19 18:48:08.000000000 +0000
@@ -90,12 +90,12 @@
         self.assertEqual('VVV', res)
         mock_resolve.assert_called_once_with('DEFAULT')

-    def test__validate_default(self):
+    def test_validate_default(self):
         sot = FakeSchema()
         self.assertIsNone(sot._validate_default(mock.Mock()))

-    def test__validate_default_with_value(self):
+    def test_validate_default_with_value(self):
         sot = FakeSchema(default='DEFAULT')
         mock_validate = self.patchobject(sot, 'validate', return_value=None)
         fake_context = mock.Mock()
@@ -105,7 +105,7 @@
         self.assertIsNone(res)
         mock_validate.assert_called_once_with('DEFAULT', fake_context)

-    def test__validate_default_with_value_but_failed(self):
+    def test_validate_default_with_value_but_failed(self):
         sot = FakeSchema(default='DEFAULT')
         mock_validate = self.patchobject(sot, 'validate', side_effect=ValueError('boom'))
@@ -143,7 +143,7 @@
         c1.validate.assert_called_once_with('FOO', schema=None, context=ctx)
         self.assertEqual('BOOM', six.text_type(ex))

-    def test__validate_version(self):
+    def test_validate_version(self):
         sot = FakeSchema(min_version='1.0', max_version='2.0')
         res = sot._validate_version('field', '1.0')
@@ -170,7 +170,7 @@
                       'spec version 2.1.', six.text_type(ex))

-    def test__validate_version_no_min_version(self):
+    def test_validate_version_no_min_version(self):
         sot = FakeSchema(max_version='2.0')
         res = sot._validate_version('field', '1.0')
@@ -186,7 +186,7 @@
                       'spec version 2.1.', six.text_type(ex))

-    def test__validate_version_no_max_version(self):
+    def test_validate_version_no_max_version(self):
         sot = FakeSchema(min_version='1.0')
         res = sot._validate_version('field', '1.0')
@@ -202,7 +202,7 @@
                       'spec version 0.5.', six.text_type(ex))

-    def test__validate_version_no_version_restriction(self):
+    def test_validate_version_no_version_restriction(self):
         sot = FakeSchema()
         res = sot._validate_version('field', '1.0')
@@ -464,10 +464,6 @@
         res = sot.validate(u'unicode')
         self.assertIsNone(res)

-        ex = self.assertRaises(exc.ESchema, sot.validate, 1)
-        self.assertEqual("The value '1' is not a valid string.", six.text_type(ex))
-
         mock_constraints = self.patchobject(sot, 'validate_constraints', return_value=None)
@@ -535,7 +531,7 @@
         self.assertEqual('List', sot['type'])
         self.assertEqual('desc', sot['description'])

-    def test__get_children(self):
+    def test_get_children(self):
         sot = schema.List('desc', schema=schema.String())
         res = sot._get_children(['v1', 'v2'], [0, 1])
@@ -574,7 +570,7 @@
         self.assertEqual('Map', sot['type'])
         self.assertEqual('desc', sot['description'])

-    def test__get_children(self):
+    def test_get_children(self):
         sot = schema.Map('desc', schema={'foo': schema.String()})
         res = sot._get_children({'foo': 'bar'})
@@ -897,7 +893,7 @@
         self.assertIn("Required spec item 'key2' not provided", six.text_type(ex.message))

-    def test___getitem__(self):
+    def test__getitem__(self):
         data = {'key2': 2}
         sot = schema.Spec(self.spec_schema, data, version='1.2')
@@ -906,14 +902,14 @@
         res = sot['key2']
         self.assertEqual(2, res)

-    def test___len__(self):
+    def test__len__(self):
         data = {'key2': 2}
         sot = schema.Spec(self.spec_schema, data, version='1.2')
         res = len(sot)
         self.assertEqual(2, res)

-    def test___contains__(self):
+    def test__contains__(self):
         data = {'key2': 2}
         sot = schema.Spec(self.spec_schema, data, version='1.2')
diff -Nru senlin-6.0.0/senlin.egg-info/entry_points.txt senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/entry_points.txt
--- senlin-6.0.0/senlin.egg-info/entry_points.txt 2018-08-30 14:19:54.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/entry_points.txt 2018-11-19 18:48:10.000000000 +0000
@@ -2,6 +2,7 @@
 senlin-api = senlin.cmd.api:main
 senlin-engine = senlin.cmd.engine:main
 senlin-manage = senlin.cmd.manage:main
+senlin-status = senlin.cmd.status:main

 [oslo.config.opts]
 senlin.config = senlin.common.config:list_opts
@@ -30,6 +31,7 @@
 senlin.policy.deletion-1.0 = senlin.policies.deletion_policy:DeletionPolicy
 senlin.policy.deletion-1.1 = senlin.policies.deletion_policy:DeletionPolicy
 senlin.policy.health-1.0 = senlin.policies.health_policy:HealthPolicy
+senlin.policy.health-1.1 = senlin.policies.health_policy:HealthPolicy
 senlin.policy.loadbalance-1.0 = senlin.policies.lb_policy:LoadBalancingPolicy
 senlin.policy.loadbalance-1.1 = senlin.policies.lb_policy:LoadBalancingPolicy
 senlin.policy.region_placement-1.0 = senlin.policies.region_placement:RegionPlacementPolicy
diff -Nru senlin-6.0.0/senlin.egg-info/pbr.json senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/pbr.json
--- senlin-6.0.0/senlin.egg-info/pbr.json 2018-08-30 14:19:54.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/pbr.json 2018-11-19 18:48:10.000000000 +0000
@@ -1 +1 @@
-{"git_version": "1ea4238", "is_release": true}
\ No newline at end of file
+{"git_version": "0ddbc114", "is_release": false}
\ No newline at end of file
diff -Nru senlin-6.0.0/senlin.egg-info/PKG-INFO senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/PKG-INFO
--- senlin-6.0.0/senlin.egg-info/PKG-INFO 2018-08-30 14:19:54.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/PKG-INFO 2018-11-19 18:48:10.000000000 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: senlin
-Version: 6.0.0
+Version: 6.1.0.dev64
 Summary: OpenStack Clustering
 Home-page: https://docs.openstack.org/senlin/latest/
 Author: OpenStack
diff -Nru senlin-6.0.0/senlin.egg-info/requires.txt senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/requires.txt
--- senlin-6.0.0/senlin.egg-info/requires.txt 2018-08-30 14:19:54.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/requires.txt 2018-11-19 18:48:10.000000000 +0000
@@ -1,5 +1,9 @@
-pbr!=2.1.0,>=2.0.0
 Babel!=2.4.0,>=2.3.4
+PasteDeploy>=1.5.0
+PyYAML>=3.12
+Routes>=2.3.1
+SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10
+WebOb>=1.7.1
 docker>=2.4.2
 eventlet!=0.18.3,!=0.20.1,>=0.18.2
 jsonpath-rw<2.0,>=1.2.0
@@ -7,7 +11,7 @@
 keystoneauth1>=3.4.0
 keystonemiddleware>=4.17.0
 microversion-parse>=0.2.1
-openstacksdk>=0.11.2
+openstacksdk>=0.17.2
 oslo.config>=5.2.0
 oslo.context>=2.19.2
 oslo.db>=4.27.0
@@ -18,17 +22,14 @@
 oslo.policy>=1.30.0
 oslo.serialization!=2.19.1,>=2.18.0
 oslo.service!=1.28.1,>=1.24.0
+oslo.upgradecheck>=0.1.0
 oslo.utils>=3.33.0
 oslo.versionedobjects>=1.31.2
 osprofiler>=1.4.0
-PasteDeploy>=1.5.0
+pbr!=2.1.0,>=2.0.0
 pytz>=2013.6
-PyYAML>=3.12
 requests>=2.14.2
-Routes>=2.3.1
 six>=1.10.0
-SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10
 sqlalchemy-migrate>=0.11.0
 stevedore>=1.20.0
 tenacity>=4.9.0
-WebOb>=1.7.1
diff -Nru senlin-6.0.0/senlin.egg-info/SOURCES.txt senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/SOURCES.txt
--- senlin-6.0.0/senlin.egg-info/SOURCES.txt 2018-08-30 14:19:55.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/senlin.egg-info/SOURCES.txt 2018-11-19 18:48:10.000000000 +0000
@@ -173,6 +173,8 @@
 doc/source/contributor/policies/region_v1.rst
 doc/source/contributor/policies/scaling_v1.rst
 doc/source/contributor/policies/zone_v1.rst
+doc/source/ext/__init__.py
+doc/source/ext/resources.py
 doc/source/install/index.rst
 doc/source/install/install-devstack.rst
 doc/source/install/install-rdo.rst
@@ -184,6 +186,7 @@
 doc/source/reference/man/senlin-api.rst
 doc/source/reference/man/senlin-engine.rst
 doc/source/reference/man/senlin-manage.rst
+doc/source/reference/man/senlin-status.rst
 doc/source/scenarios/affinity.rst
 doc/source/scenarios/autoscaling_ceilometer.rst
 doc/source/scenarios/autoscaling_heat.rst
@@ -212,9 +215,14 @@
 doc/source/user/policy_types/region_placement.rst
 doc/source/user/policy_types/scaling.rst
 doc/source/user/policy_types/zone_placement.rst
+doc/source/user/profile_types/docker.rst
+doc/source/user/profile_types/nova.rst
+doc/source/user/profile_types/stack.rst
 doc/specs/README.rst
 doc/specs/cluster-fast-scaling.rst
+doc/specs/fail-fast-on-locked_resource.rst
 doc/specs/lifecycle-hook.rst
+doc/specs/multiple-detection-modes.rst
 doc/specs/template.rst
 doc/specs/workflow-recover.rst
 doc/specs/approved/README.rst
@@ -281,6 +289,7 @@
 releasenotes/notes/cluster-ops-433a5aa608a0eb7f.yaml
 releasenotes/notes/cluster-recover-d87d429873b376db.yaml
 releasenotes/notes/cluster-resize-fix-bee18840a98907d8.yaml
+releasenotes/notes/cluster-scale-action-conflict-0e1e64591e943e25.yaml
 releasenotes/notes/cluster-status-update-dd9133092aef05ab.yaml
 releasenotes/notes/compute-instance-fencing-63b931cdf35b127c.yaml
 releasenotes/notes/config-doc-cb8b37e360422301.yaml
@@ -430,6 +439,7 @@
 releasenotes/notes/scheduler-thread-pool-size-40905866197ef8bd.yaml
 releasenotes/notes/secure-password-e60243ae2befbbf6.yaml
 releasenotes/notes/senlin-osprofiler-fc8cb7161bdb1a6e.yaml
+releasenotes/notes/senlin-status-upgrade-check-framework-b9db3bb9db8d1015.yaml
 releasenotes/notes/server-image-id-27c1619fa818c6a0.yaml
 releasenotes/notes/service-cleanup-afacddfacd7b4dcd.yaml
 releasenotes/notes/service-list-5f4037ae52514f2a.yaml
@@ -462,8 +472,11 @@
 releasenotes/source/ocata.rst
 releasenotes/source/pike.rst
 releasenotes/source/queens.rst
+releasenotes/source/rocky.rst
 releasenotes/source/unreleased.rst
 releasenotes/source/_templates/.placeholder
+releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
 releasenotes/source/locale/zh_CN/LC_MESSAGES/releasenotes.po
 senlin/__init__.py
 senlin/version.py
@@ -512,6 +525,7 @@
 senlin/cmd/api_wsgi.py
 senlin/cmd/engine.py
 senlin/cmd/manage.py
+senlin/cmd/status.py
 senlin/common/__init__.py
 senlin/common/config.py
 senlin/common/constraints.py
@@ -586,6 +600,7 @@
 senlin/drivers/os_test/README.rst
 senlin/drivers/os_test/__init__.py
 senlin/drivers/os_test/cinder_v2.py
+senlin/drivers/os_test/glance_v2.py
 senlin/drivers/os_test/heat_v1.py
 senlin/drivers/os_test/keystone_v3.py
 senlin/drivers/os_test/lbaas.py
@@ -682,9 +697,6 @@
 senlin/rpc/__init__.py
 senlin/rpc/client.py
 senlin/tests/__init__.py
-senlin/tests/tempest/README.rst
-senlin/tests/tempest/post_test_hook.sh
-senlin/tests/tempest/pre_test_hook.sh
 senlin/tests/unit/__init__.py
 senlin/tests/unit/fakes.py
 senlin/tests/unit/test_common_constraints.py
@@ -731,6 +743,8 @@
 senlin/tests/unit/api/openstack/v1/test_services.py
 senlin/tests/unit/api/openstack/v1/test_version.py
 senlin/tests/unit/api/openstack/v1/test_webhooks.py
+senlin/tests/unit/cmd/__init__.py
+senlin/tests/unit/cmd/test_status.py
 senlin/tests/unit/common/__init__.py
 senlin/tests/unit/common/base.py
 senlin/tests/unit/common/utils.py
@@ -825,7 +839,6 @@
 senlin/tests/unit/objects/test_action.py
 senlin/tests/unit/objects/test_base.py
 senlin/tests/unit/objects/test_cluster.py
-senlin/tests/unit/objects/test_cluster_policy.py
 senlin/tests/unit/objects/test_event.py
 senlin/tests/unit/objects/test_fields.py
 senlin/tests/unit/objects/test_health_registry.py
diff -Nru senlin-6.0.0/setup.cfg senlin-7.0.0~b1~git2018111913.0ddbc114/setup.cfg
--- senlin-6.0.0/setup.cfg 2018-08-30 14:19:55.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/setup.cfg 2018-11-19 18:48:11.000000000 +0000
@@ -30,6 +30,7 @@
     senlin-api = senlin.cmd.api:main
     senlin-engine = senlin.cmd.engine:main
    senlin-manage = senlin.cmd.manage:main
+    senlin-status = senlin.cmd.status:main
 wsgi_scripts =
     senlin-wsgi-api = senlin.cmd.api_wsgi:init_app
 oslo.config.opts =
@@ -50,6 +51,7 @@
     senlin.policy.deletion-1.1 = senlin.policies.deletion_policy:DeletionPolicy
     senlin.policy.scaling-1.0 = senlin.policies.scaling_policy:ScalingPolicy
     senlin.policy.health-1.0 = senlin.policies.health_policy:HealthPolicy
+    senlin.policy.health-1.1 = senlin.policies.health_policy:HealthPolicy
     senlin.policy.loadbalance-1.0 = senlin.policies.lb_policy:LoadBalancingPolicy
     senlin.policy.loadbalance-1.1 = senlin.policies.lb_policy:LoadBalancingPolicy
     senlin.policy.region_placement-1.0 = senlin.policies.region_placement:RegionPlacementPolicy
diff -Nru senlin-6.0.0/tox.ini senlin-7.0.0~b1~git2018111913.0ddbc114/tox.ini
--- senlin-6.0.0/tox.ini 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/tox.ini 2018-11-19 18:48:08.000000000 +0000
@@ -1,6 +1,6 @@
 [tox]
-minversion = 1.6
-envlist = py35,py27,pep8,functional
+minversion = 2.0
+envlist = py36,py35,py27,pep8,functional
 skipsdist = True

 [testenv]
@@ -16,7 +16,7 @@
 install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
 commands = find . -type f -name "*.py[c|o]" -delete
-    stestr run '{posargs}'
+    stestr run {posargs}
     stestr slowest
 whitelist_externals = bash
@@ -38,7 +38,7 @@
 [testenv:pep8]
 basepython = python3
 commands =
-    flake8 senlin
+    flake8 senlin doc/source/ext

 [testenv:genconfig]
 basepython = python3
diff -Nru senlin-6.0.0/.zuul.yaml senlin-7.0.0~b1~git2018111913.0ddbc114/.zuul.yaml
--- senlin-6.0.0/.zuul.yaml 2018-08-30 14:16:49.000000000 +0000
+++ senlin-7.0.0~b1~git2018111913.0ddbc114/.zuul.yaml 2018-11-19 18:48:07.000000000 +0000
@@ -1,29 +1,34 @@
 - project:
+    templates:
+      - check-requirements
+      - openstack-lower-constraints-jobs
+      - openstack-python-jobs
+      - openstack-python35-jobs
+      - openstack-python36-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
     check:
       jobs:
         - senlin-dsvm-tempest-py27-api
         - senlin-dsvm-tempest-py35-api:
             voting: false
-            branches: ^(?!stable/newton).*$
         - senlin-dsvm-tempest-py27-functional
         - senlin-dsvm-tempest-py35-functional:
             voting: false
-            branches: ^(?!stable/newton).*$
         - senlin-dsvm-tempest-py27-integration:
             voting: false
         - senlin-dsvm-tempest-py35-integration:
             voting: false
-            branches: ^(?!stable/newton).*$
-        - openstack-tox-lower-constraints
+        - openstack-tox-cover:
+            voting: false
     gate:
+      queue: senlin
       jobs:
         - senlin-dsvm-tempest-py27-api
         - senlin-dsvm-tempest-py27-functional
-        - openstack-tox-lower-constraints
     experimental:
       jobs:
-        - rally-dsvm-senlin-senlin:
-            voting: false
+        - rally-dsvm-senlin-senlin

 - job:
     name: senlin-dsvm-tempest-py27-api