diff -Nru heat-9.0.0/AUTHORS heat-10.0.0~b1/AUTHORS --- heat-9.0.0/AUTHORS 2017-08-30 11:11:34.000000000 +0000 +++ heat-10.0.0~b1/AUTHORS 2017-10-27 07:39:25.000000000 +0000 @@ -162,9 +162,11 @@ Jeff Sloyer Jennifer Mulsow Jens Rosenboom +Jeremy Liu Jeremy Pugh Jeremy Stanley Jesse Andrews +Jesse Pretorius Jesse Proudman Ji-Wei Jia Dong @@ -178,6 +180,7 @@ Johannes Grassler Johnu George JordanP +Jose Luis Franco Arza Joshua Harlow JuPing Juan Antonio Osorio Robles @@ -219,9 +222,11 @@ Martin Oemke Masco Kaliyamoorthy Matt Riedemann +Matthew Edmonds Matthew Flusche Matthew Gilliard Matthew Printz +Matthew Treinish Mehdi Abaakouk (sileht) Mehdi Abaakouk Mehdi Abaakouk @@ -236,6 +241,7 @@ Mike Asthalter Mike Spreitzer Mitsuru Kanabuchi +Mohammed Naser Mohankumar Monty Taylor Morgan Fainberg @@ -283,6 +289,7 @@ Robert Collins Robert Pothier Robert van Leeuwen +Roberto Polli Roman Podoliaka Russell Bryant Ryan Brown @@ -382,6 +389,7 @@ ZhiQiang Fan Zhiqiang Fan Ziad Sawalha +Zuul abdul nizamuddin abhishekkekane aivanitskiy @@ -392,6 +400,7 @@ chenaidong1 chenxiao chenxing +chestack cyli danny deepakmourya @@ -429,9 +438,11 @@ lvdongbing matts2006 mohankumar_n +npraveen35 pallavi pawnesh.kumar rabi +rajat29 rajiv rico.lin ricolin @@ -451,6 +462,7 @@ ubuntu venkatamahesh wangtianfa +wbluo0907 xiaolihope xiexs yangxurong diff -Nru heat-9.0.0/bin/heat-keystone-setup heat-10.0.0~b1/bin/heat-keystone-setup --- heat-9.0.0/bin/heat-keystone-setup 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/bin/heat-keystone-setup 2017-10-27 07:35:34.000000000 +0000 @@ -38,8 +38,8 @@ set +o xtrace local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" echo $msg 1>&2; - if [[ -n ${SCREEN_LOGDIR} ]]; then - echo $msg >> "${SCREEN_LOGDIR}/error.log" + if [[ -n ${LOGDIR} ]]; then + echo $msg >> "${LOGDIR}/error.log" fi $errXTRACE return $exitcode diff -Nru heat-9.0.0/ChangeLog heat-10.0.0~b1/ChangeLog --- heat-9.0.0/ChangeLog 2017-08-30 11:11:33.000000000 +0000 +++ heat-10.0.0~b1/ChangeLog 
2017-10-27 07:39:24.000000000 +0000 @@ -1,21 +1,100 @@ CHANGES ======= -9.0.0 ------ +10.0.0.0b1 +---------- +* Do not validate property network of sahara cluster +* Imported Translations from Zanata +* Unit tests: Remove deprecated oslo\_db test case classes +* Unit tests: Don't leave RPC server running +* Unit tests: ensure all threads complete +* Don't attempt to update tmpl ID when resource in progress +* Retrigger new traversals after resource timeout +* Make scheduler.Timeout exception hashable +* Add integration tests for simultaneous updates +* Hidden OS::Designate::Record and OS::Designate::Domain +* Correct the filter attribute of db model 'ResourcePropertiesData' +* Imported Translations from Zanata +* Raise NotFound() when group member does not exist +* Updated from global requirements +* Correct example for list\_concat-unique function +* Fix regex for updated\_time in PATCH update api test +* Add action\_wait\_secs for update +* Imported Translations from Zanata +* API Tests: Ensure updated\_time changes in stack patch update +* Allow convergence-specific Tempest tests +* Fix get\_watch\_server\_url for ipv6 addresses +* Skip test\_notifications.NotificationTest.\* functional tests +* Add release note for StackDefinition API +* Make private flavor tenant access works +* Fix translate tenants in flavor +* Use a namedtuple for convergence graph nodes +* Don't use Dependencies repr in tests +* Increase interface detach polling period +* Use show\_output in TemplateResource.get\_reference\_id() +* Don't load resource data for list\_outputs +* Speed up show-stack with outputs in legacy path +* Replace SCREEN\_LOGDIR with LOGDIR +* Add attributes schema for OS::Keystone::Project +* Make sure we can get watch server url in uwsgi mode +* Updated from global requirements +* Fix unit tests with oslo\_messaging 5.32.0 +* Add default configuration files to data\_files +* Refactor FloatingIP add\_dependencies() method +* Update incorrect timezone description +* 
Updated from global requirements +* Improve update\_wait\_condition\_handle docstring +* Fix client plugin name when calling ignore\_not\_found +* Remove the existing snapshots from the backend +* Updated from global requirements +* Avoid cloudwatch endpoint retrieve when disabled +* Don't override get\_reference\_id() for scaling policy +* Updated from global requirements +* Don't mock Target unnecessarily +* Support property 'domain' for keystone role +* Add support for tagging Mistral workflows +* Updated from global requirements +* Add missing 'of' in order description +* Check for keystoneauth exceptions in mistral client plugin +* Don't remove uwsgi config files on stop +* Remove vestigate HUDSON\_PUBLISH\_DOCS reference +* Updated from global requirements +* Add sem-ver flag so pbr generates correct version +* Parse live data with correct key 'readonly' +* Updated from global requirements +* Updated from global requirements +* Unskip StackSnapshotRestoreTest +* Set resource.\_properties\_data=None when loading from db +* Adds 5 backport db migration place holder for Pike +* heat config eliminate ec2authtoken +* \* Add new HOT version for Queens release +* \* Add Queens version '10.0.0' in doc +* Updated from global requirements +* fix heat\_keystoneclient config gen * Add release notes for heat +* Run heat api tests after upgrade +* Add gabbi api tests for stack update/patch-update +* Fix error for invalid auth\_encryption\_key +* Don't call update\_port with empty dict +* Revert "Disable nova quota check in gate jobs" +* Make sure port updating works if enable observe +* Reoder barbican container import +* Update reno for stable/pike * Rollback stack with correct tags -* Update UPPER\_CONSTRAINTS\_FILE for stable/pike -* Update .gitreview for stable/pike 9.0.0.0rc1 ---------- * Imported Translations from Zanata * Fix race in ZaqarEventSinkTest.test\_events +* Allow conditions to return null values +* Use zaqar v2 in integration tests +* Switch integration 
jobs to use local mirrors * Extend addresses attribute of Zun container +* Minor doc fix: tmpl\_diff argument for handle\_update * External subnet for floating ip +* Update URL in README.rst * Fix readme guide for heat\_integrationtests * Add converge flag in stack update for observing on reality * Updated from global requirements @@ -29,6 +108,7 @@ * Only check service availability during validation * Do not disassociate floating ip again * Fix formatting in make\_url error messages +* Eager load resource\_properties\_data in resource * Allow CREATE\_COMPLETE status when cluster/bay update check * Add upgrades guideline for operators * Remove install-guide env which is no longer effective @@ -40,6 +120,7 @@ --------- * Updated from global requirements +* Remove duplicated \`show\_deleted\` param extraction * Add unit test for translate with no translate value * Remove unrequired check None in bay resource * Updated from global requirements @@ -51,7 +132,13 @@ * Don't get resource twice in resource\_signal() * Neutron resources observe reality implementation * Updated from global requirements +* Fix no-change updates of failed resources with restricted actions * Respect locks when changing FAILED resource to COMPLETE +* Strip disabled resources from depends\_on in add\_resource() +* Use StackDefinition.all\_rsrc\_names() API +* Use stored properties values in actions +* Rename Resource.\_resolve\_all\_attributes() method +* Avoid creating two Stacks when loading Resource * Implement all\_dep\_attrs() more efficiently for get\_attr * Cache dep\_attrs for all resources in definitions * Cache names of required resources in ResourceDefinition @@ -91,6 +178,7 @@ * Add tag for server nic * Cinder volume attachment attributes should not be cached * Add CACHE\_NONE for FloatingIP attributes +* Show correct version of data in convergence resource list * Don't load new copies of current resources * Updated from global requirements * Change tags type from 'string' to 'array' 
@@ -106,6 +194,7 @@ * Revert "Revert "Enable heat to use uwsgi with devstack"" * [api-ref] Correct config\_id in url instead of in body * Fix \_retrigger\_replaced in convergence worker +* Fix nonsensical docs in dependencies.py * Fix race in new condition test * Keep existing stack tags for patch update * Fix wrong function description @@ -189,6 +278,7 @@ * Load all templates for generating parameter schema * Using fixtures instead of deprecated mockpatch module * Remove deprecated functions +* Clarify make\_url validation errors * Updated from global requirements * Allow function 'yaql' as condition function * Implements custom resource type managed by Mistral workflows diff -Nru heat-9.0.0/config-generator.conf heat-10.0.0~b1/config-generator.conf --- heat-9.0.0/config-generator.conf 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/config-generator.conf 2017-10-27 07:35:34.000000000 +0000 @@ -4,7 +4,7 @@ namespace = heat.common.config namespace = heat.common.context namespace = heat.common.crypt -namespace = heat.engine.clients.keystone.heat_keystoneclient +namespace = heat.engine.clients.os.keystone.heat_keystoneclient namespace = heat.common.wsgi namespace = heat.engine.clients namespace = heat.engine.notification diff -Nru heat-9.0.0/debian/changelog heat-10.0.0~b1/debian/changelog --- heat-9.0.0/debian/changelog 2017-08-30 13:22:39.000000000 +0000 +++ heat-10.0.0~b1/debian/changelog 2017-11-15 10:58:14.000000000 +0000 @@ -1,3 +1,10 @@ +heat (1:10.0.0~b1-0ubuntu1) bionic; urgency=medium + + * New upstream release for OpenStack Queens. + * d/control: Align (Build-)Depends with upstream. + + -- James Page Wed, 15 Nov 2017 10:58:14 +0000 + heat (1:9.0.0-0ubuntu1) artful; urgency=medium * New upstream release for OpenStack Pike. 
diff -Nru heat-9.0.0/debian/control heat-10.0.0~b1/debian/control --- heat-9.0.0/debian/control 2017-08-30 13:22:39.000000000 +0000 +++ heat-10.0.0~b1/debian/control 2017-11-15 10:58:14.000000000 +0000 @@ -34,7 +34,7 @@ python-fixtures (>= 3.0.0), python-gabbi (>= 1.35.0), python-glanceclient (>= 1:2.8.0), - python-greenlet, + python-greenlet (>= 0.4.10), python-hacking (>= 0.10.0), python-heatclient (>= 1.6.1), python-keystoneauth1 (>= 3.1.0), @@ -46,30 +46,30 @@ python-manilaclient (>= 1.12.0), python-migrate (>= 0.11.0), python-mistralclient (>= 1:3.1.0), - python-mock (>= 2.0), + python-mock (>= 2.0.0), python-monascaclient (>= 1.7.0), python-mox3 (>= 0.7.0), - python-netaddr (>= 0.7.13), + python-netaddr (>= 0.7.18), python-neutronclient (>= 1:6.3.0), python-novaclient (>= 2:9.0.0), python-openstackclient (>= 3.3.0), python-openstacksdk (>= 0.9.17), - python-openstackdocstheme (>= 1.16.0), + python-openstackdocstheme (>= 1.17.0), python-os-api-ref (>= 1.0.0), python-oslo.cache (>= 1.5.0), - python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:4.0.0), + python-oslo.concurrency (>= 3.20.0), + python-oslo.config (>= 1:4.6.0), python-oslo.context (>= 2.14.0), - python-oslo.db (>= 4.24.0), - python-oslo.i18n (>= 2.1.0), - python-oslo.log (>= 3.22.0), - python-oslo.messaging (>= 5.25.0), - python-oslo.middleware (>= 3.27.0), + python-oslo.db (>= 4.27.0), + python-oslo.i18n (>= 3.15.3), + python-oslo.log (>= 3.30.0), + python-oslo.messaging (>= 5.29.0), + python-oslo.middleware (>= 3.31.0), python-oslo.policy (>= 1.23.0), python-oslo.reports (>= 0.6.0), - python-oslo.serialization (>= 1.10.0), - python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.20.0), + python-oslo.serialization (>= 2.18.0), + python-oslo.service (>= 1.24.0), + python-oslo.utils (>= 3.28.0), python-oslo.versionedobjects (>= 1.17.0), python-oslotest (>= 1.10.0), python-osprofiler (>= 1.4.0), @@ -89,7 +89,7 @@ python-tempest (>= 1:16.1.0), python-tenacity (>= 3.2.1), 
python-testrepository (>= 0.0.18), - python-testresources (>= 0.2.4), + python-testresources (>= 2.0.0), python-testscenarios (>= 0.4), python-testtools (>= 1.4.0), python-troveclient (>= 1:2.2.0), @@ -123,7 +123,7 @@ python-eventlet (>= 0.18.2), python-fixtures (>= 3.0.0), python-glanceclient (>= 1:2.8.0), - python-greenlet, + python-greenlet (>= 0.4.10), python-heatclient (>= 1.6.1), python-keystoneauth1 (>= 3.1.0), python-keystoneclient (>= 1:3.8.0), @@ -134,25 +134,25 @@ python-migrate (>= 0.11.0), python-mistralclient (>= 1:3.1.0), python-monascaclient (>= 1.7.0), - python-netaddr (>= 0.7.12), + python-netaddr (>= 0.7.18), python-neutronclient (>= 1:6.3.0), python-novaclient (>= 2:9.0.0), python-openstackclient (>= 3.3.0), python-openstacksdk (>= 0.9.17), python-oslo.cache (>= 1.5.0), - python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:4.0.0), + python-oslo.concurrency (>= 3.20.0), + python-oslo.config (>= 1:4.6.0), python-oslo.context (>= 2.14.0), - python-oslo.db (>= 4.24.0), - python-oslo.i18n (>= 2.1.0), - python-oslo.log (>= 3.22.0), - python-oslo.messaging (>= 5.25.0), - python-oslo.middleware (>= 3.27.0), + python-oslo.db (>= 4.27.0), + python-oslo.i18n (>= 3.15.3), + python-oslo.log (>= 3.30.0), + python-oslo.messaging (>= 5.29.0), + python-oslo.middleware (>= 3.31.0), python-oslo.policy (>= 1.23.0), python-oslo.reports (>= 0.6.0), - python-oslo.serialization (>= 1.10.0), - python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.20.0), + python-oslo.serialization (>= 2.18.0), + python-oslo.service (>= 1.24.0), + python-oslo.utils (>= 3.28.0), python-oslo.versionedobjects (>= 1.17.0), python-osprofiler (>= 1.4.0), python-pastedeploy, diff -Nru heat-9.0.0/devstack/lib/heat heat-10.0.0~b1/devstack/lib/heat --- heat-9.0.0/devstack/lib/heat 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/devstack/lib/heat 2017-10-27 07:35:34.000000000 +0000 @@ -352,9 +352,6 @@ if [[ "$HEAT_USE_APACHE" == "True" ]]; then if [[ "$WSGI_MODE" == "uwsgi" 
]]; then - remove_uwsgi_config "$HEAT_API_UWSGI_CONF" "$HEAT_API_UWSGI" - remove_uwsgi_config "$HEAT_CFN_API_UWSGI_CONF" "$HEAT_CFN_API_UWSGI" - remove_uwsgi_config "$HEAT_CW_API_UWSGI_CONF" "$HEAT_CW_API_UWSGI" _stop_processes else disable_apache_site heat-api diff -Nru heat-9.0.0/devstack/upgrade/resources.sh heat-10.0.0~b1/devstack/upgrade/resources.sh --- heat-9.0.0/devstack/upgrade/resources.sh 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/devstack/upgrade/resources.sh 2017-10-27 07:35:34.000000000 +0000 @@ -36,9 +36,11 @@ OS_PROJECT_DOMAIN_ID=$DEFAULT_DOMAIN } -function create { - # run heat_integrationtests instead of tempest smoke before create - pushd $BASE_DEVSTACK_DIR/../tempest +function _run_heat_api_tests { + local devstack_dir=$1 + + pushd $devstack_dir/../tempest + sed -i -e '/group_regex/c\group_regex=heat_integrationtests\\.api\\.test_heat_api(?:\\.|_)([^_]+)' .testr.conf conf_file=etc/tempest.conf iniset_multiline $conf_file service_available heat_plugin True iniset $conf_file heat_plugin username $OS_USERNAME @@ -50,8 +52,14 @@ iniset $conf_file heat_plugin user_domain_name $OS_USER_DOMAIN_NAME iniset $conf_file heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME iniset $conf_file heat_plugin region $OS_REGION_NAME - tempest run --regex '(test_create_update.CreateStackTest|test_create_update.UpdateStackTest)' + iniset $conf_file heat_plugin auth_version $OS_IDENTITY_API_VERSION + tempest run --regex heat_integrationtests.api popd +} + +function create { + # run heat api tests instead of tempest smoke before create + _run_heat_api_tests $BASE_DEVSTACK_DIR # creates a tenant for the server eval $(openstack project create -f shell -c id $HEAT_PROJECT) @@ -84,6 +92,10 @@ function verify { _heat_set_user + local side="$1" + if [[ "$side" = "post-upgrade" ]]; then + _run_heat_api_tests $TARGET_DEVSTACK_DIR + fi stack_name=$(resource_get heat stack_name) heat stack-show $stack_name # TODO(sirushtim): Create more granular checks for Heat. 
@@ -115,7 +127,7 @@ verify_noapi ;; "verify") - verify + verify $2 ;; "destroy") destroy diff -Nru heat-9.0.0/doc/source/conf.py heat-10.0.0~b1/doc/source/conf.py --- heat-9.0.0/doc/source/conf.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/doc/source/conf.py 2017-10-27 07:35:34.000000000 +0000 @@ -90,10 +90,7 @@ todo_include_todos = True # Add any paths that contain templates here, relative to this directory. -if os.getenv('HUDSON_PUBLISH_DOCS'): - templates_path = ['_ga', '_templates'] -else: - templates_path = ['_templates'] +templates_path = [] # The suffix of source filenames. source_suffix = '.rst' diff -Nru heat-9.0.0/doc/source/developing_guides/pluginguide.rst heat-10.0.0~b1/doc/source/developing_guides/pluginguide.rst --- heat-9.0.0/doc/source/developing_guides/pluginguide.rst 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/doc/source/developing_guides/pluginguide.rst 2017-10-27 07:35:34.000000000 +0000 @@ -478,8 +478,9 @@ :param json_snippet: the resource definition from the updated template :type json_snippet: collections.Mapping - :param templ_diff: changed values from the original template definition - :type templ_diff: collections.Mapping + :param tmpl_diff: values in the updated definition that have changed + with respect to the original template definition. + :type tmpl_diff: collections.Mapping :param prop_diff: property values that are different between the original definition and the updated definition; keys are property names and values are the new values. 
Deleted or diff -Nru heat-9.0.0/doc/source/ext/resources.py heat-10.0.0~b1/doc/source/ext/resources.py --- heat-9.0.0/doc/source/ext/resources.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/ext/resources.py 2017-10-27 07:35:34.000000000 +0000 @@ -35,7 +35,8 @@ '6.0.0': 'Mitaka', '7.0.0': 'Newton', '8.0.0': 'Ocata', - '9.0.0': 'Pike'} + '9.0.0': 'Pike', + '10.0.0': 'Queens'} all_resources = {} diff -Nru heat-9.0.0/doc/source/install/install-debian.rst heat-10.0.0~b1/doc/source/install/install-debian.rst --- heat-9.0.0/doc/source/install/install-debian.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/install/install-debian.rst 2017-10-27 07:35:34.000000000 +0000 @@ -23,16 +23,6 @@ :doc:`service endpoint registration `, and :doc:`message broker credentials `. -#. Edit the ``/etc/heat/heat.conf`` file and complete the following - actions: - - * In the ``[ec2authtoken]`` section, configure Identity service access: - - .. code-block:: none - - [ec2authtoken] - ... - auth_uri = http://controller:5000/v2.0 Finalize installation --------------------- diff -Nru heat-9.0.0/doc/source/install/install-obs.rst heat-10.0.0~b1/doc/source/install/install-obs.rst --- heat-9.0.0/doc/source/install/install-obs.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/install/install-obs.rst 2017-10-27 07:35:34.000000000 +0000 @@ -348,8 +348,8 @@ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. - * In the ``[keystone_authtoken]``, ``[trustee]``, - ``[clients_keystone]``, and ``[ec2authtoken]`` sections, + * In the ``[keystone_authtoken]``, ``[trustee]`` + and ``[clients_keystone]`` sections, configure Identity service access: .. code-block:: none @@ -376,10 +376,6 @@ [clients_keystone] ... - auth_uri = http://controller:35357 - - [ec2authtoken] - ... 
auth_uri = http://controller:5000 Replace ``HEAT_PASS`` with the password you chose for the diff -Nru heat-9.0.0/doc/source/install/install-rdo.rst heat-10.0.0~b1/doc/source/install/install-rdo.rst --- heat-9.0.0/doc/source/install/install-rdo.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/install/install-rdo.rst 2017-10-27 07:35:34.000000000 +0000 @@ -349,7 +349,7 @@ ``openstack`` account in ``RabbitMQ``. * In the ``[keystone_authtoken]``, ``[trustee]``, - ``[clients_keystone]``, and ``[ec2authtoken]`` sections, + and ``[clients_keystone]`` sections, configure Identity service access: .. code-block:: none @@ -376,10 +376,6 @@ [clients_keystone] ... - auth_uri = http://controller:35357 - - [ec2authtoken] - ... auth_uri = http://controller:5000 Replace ``HEAT_PASS`` with the password you chose for the diff -Nru heat-9.0.0/doc/source/install/install-ubuntu.rst heat-10.0.0~b1/doc/source/install/install-ubuntu.rst --- heat-9.0.0/doc/source/install/install-ubuntu.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/install/install-ubuntu.rst 2017-10-27 07:35:34.000000000 +0000 @@ -347,8 +347,8 @@ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. - * In the ``[keystone_authtoken]``, ``[trustee]``, - ``[clients_keystone]``, and ``[ec2authtoken]`` sections, + * In the ``[keystone_authtoken]``, ``[trustee]`` and + ``[clients_keystone]`` sections, configure Identity service access: .. code-block:: none @@ -375,10 +375,6 @@ [clients_keystone] ... - auth_uri = http://controller:35357 - - [ec2authtoken] - ... 
auth_uri = http://controller:5000 Replace ``HEAT_PASS`` with the password you chose for the diff -Nru heat-9.0.0/doc/source/template_guide/hot_spec.rst heat-10.0.0~b1/doc/source/template_guide/hot_spec.rst --- heat-9.0.0/doc/source/template_guide/hot_spec.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/doc/source/template_guide/hot_spec.rst 2017-10-27 07:35:34.000000000 +0000 @@ -334,6 +334,44 @@ yaql contains +2018-03-02 | queens +------------------- + The key with value ``2018-03-02`` or ``queens`` indicates that the YAML + document is a HOT template and it may contain features added and/or removed + up until the Queens release. The complete list of supported functions is:: + + digest + filter + get_attr + get_file + get_param + get_resource + list_join + make_url + list_concat + list_concat_unique + contains + map_merge + map_replace + repeat + resource_facade + str_replace + str_replace_strict + str_replace_vstrict + str_split + yaql + if + + The complete list of supported condition functions is:: + + equals + get_param + not + and + or + yaql + contains + .. _hot_spec_parameter_groups: Parameter groups section @@ -2000,7 +2038,7 @@ .. code-block:: yaml - list_concat_unique: [['v1', 'v2'], ['v2', 'v43']] + list_concat_unique: [['v1', 'v2'], ['v2', 'v3']] Will resolve to the list ``['v1', 'v2', 'v3']``. 
diff -Nru heat-9.0.0/heat/api/middleware/fault.py heat-10.0.0~b1/heat/api/middleware/fault.py --- heat-9.0.0/heat/api/middleware/fault.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/api/middleware/fault.py 2017-10-27 07:35:34.000000000 +0000 @@ -77,6 +77,7 @@ 'StopActionFailed': webob.exc.HTTPInternalServerError, 'EventSendFailed': webob.exc.HTTPInternalServerError, 'ServerBuildFailed': webob.exc.HTTPInternalServerError, + 'InvalidEncryptionKey': webob.exc.HTTPInternalServerError, 'NotSupported': webob.exc.HTTPBadRequest, 'MissingCredentialError': webob.exc.HTTPBadRequest, 'UserParameterMissing': webob.exc.HTTPBadRequest, diff -Nru heat-9.0.0/heat/common/crypt.py heat-10.0.0~b1/heat/common/crypt.py --- heat-9.0.0/heat/common/crypt.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/common/crypt.py 2017-10-27 07:35:34.000000000 +0000 @@ -21,6 +21,7 @@ from oslo_utils import encodeutils from oslo_utils import importutils +from heat.common import exception from heat.common.i18n import _ auth_opts = [ @@ -128,7 +129,10 @@ encryption_key = get_valid_encryption_key(encryption_key, fix_length=True) encoded_key = base64.b64encode(encryption_key.encode('utf-8')) sym = fernet.Fernet(encoded_key) - return sym.decrypt(encodeutils.safe_encode(value)) + try: + return sym.decrypt(encodeutils.safe_encode(value)) + except fernet.InvalidToken: + raise exception.InvalidEncryptionKey() def get_valid_encryption_key(encryption_key, fix_length=False): diff -Nru heat-9.0.0/heat/common/exception.py heat-10.0.0~b1/heat/common/exception.py --- heat-9.0.0/heat/common/exception.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/common/exception.py 2017-10-27 07:35:34.000000000 +0000 @@ -161,6 +161,11 @@ msg_fmt = _('Error in %(resource)s output %(attribute)s: %(message)s') +class InvalidEncryptionKey(HeatException): + msg_fmt = _('Can not decrypt data with the auth_encryption_key' + ' in heat config.') + + class 
InvalidExternalResourceDependency(HeatException): msg_fmt = _("Invalid dependency with external %(resource_type)s " "resource: %(external_id)s") diff -Nru heat-9.0.0/heat/common/grouputils.py heat-10.0.0~b1/heat/common/grouputils.py --- heat-9.0.0/heat/common/grouputils.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/common/grouputils.py 2017-10-27 07:35:34.000000000 +0000 @@ -14,6 +14,7 @@ import six from heat.common import exception +from heat.common.i18n import _ def get_size(group, include_failed=False): @@ -69,7 +70,7 @@ return [r.name for r in get_members(group)] -def get_resource(stack, resource_name, use_indices, key): +def get_resource(stack, resource_name, use_indices, key=None): nested_stack = stack.nested() if not nested_stack: return None @@ -79,18 +80,20 @@ else: return nested_stack[resource_name] except (IndexError, KeyError): - raise exception.InvalidTemplateAttribute(resource=stack.name, - key=key) + raise exception.NotFound(_("Member '%(mem)s' not found " + "in group resource '%(grp)s'.") + % {'mem': resource_name, + 'grp': stack.name}) def get_rsrc_attr(stack, key, use_indices, resource_name, *attr_path): - resource = get_resource(stack, resource_name, use_indices, key) + resource = get_resource(stack, resource_name, use_indices) if resource: return resource.FnGetAtt(*attr_path) def get_rsrc_id(stack, key, use_indices, resource_name): - resource = get_resource(stack, resource_name, use_indices, key) + resource = get_resource(stack, resource_name, use_indices) if resource: return resource.FnGetRefId() diff -Nru heat-9.0.0/heat/common/messaging.py heat-10.0.0~b1/heat/common/messaging.py --- heat-9.0.0/heat/common/messaging.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/common/messaging.py 2017-10-27 07:35:34.000000000 +0000 @@ -105,6 +105,11 @@ eventlet.monkey_patch(time=True) if not TRANSPORT or not NOTIFICATIONS_TRANSPORT: setup_transports(url, optional) + # In the fake driver, make the dict of exchanges local to 
each exchange + # manager, instead of using the shared class attribute. Doing otherwise + # breaks the unit tests. + if url and url.startswith("fake://"): + TRANSPORT._driver._exchange_manager._exchanges = {} if not NOTIFIER and NOTIFICATIONS_TRANSPORT: serializer = RequestContextSerializer(JsonPayloadSerializer()) diff -Nru heat-9.0.0/heat/db/sqlalchemy/api.py heat-10.0.0~b1/heat/db/sqlalchemy/api.py --- heat-9.0.0/heat/db/sqlalchemy/api.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/api.py 2017-10-27 07:35:34.000000000 +0000 @@ -174,9 +174,14 @@ return result -def resource_get(context, resource_id, refresh=False, refresh_data=False): - result = context.session.query(models.Resource).get(resource_id) +def resource_get(context, resource_id, refresh=False, refresh_data=False, + eager=True): + query = context.session.query(models.Resource) + if eager: + query = query.options(orm.joinedload("data")).options( + orm.joinedload("rsrc_prop_data")) + result = query.get(resource_id) if not result: raise exception.NotFound(_("resource with id %s not found") % resource_id) @@ -299,7 +304,7 @@ {'rid': resource_id, 'aid': attr_id}) session.query( models.ResourcePropertiesData).filter( - models.ResourcePropertiesData.attr_id == attr_id).delete() + models.ResourcePropertiesData.id == attr_id).delete() return False @@ -443,7 +448,7 @@ models.Resource ).filter_by( stack_id=stack_id - ).options(orm.joinedload("data")) + ).options(orm.joinedload("data")).options(orm.joinedload("rsrc_prop_data")) query = db_filters.exact_filter(query, models.Resource, filters) results = query.all() diff -Nru heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/081_placeholder.py heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/081_placeholder.py --- heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/081_placeholder.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/081_placeholder.py 2017-10-27 07:35:34.000000000 
+0000 @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a placeholder for Pike backports. +# Do not use this number for new Queens work, which starts after +# all the placeholders. + + +def upgrade(migrate_engine): + pass diff -Nru heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/082_placeholder.py heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/082_placeholder.py --- heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/082_placeholder.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/082_placeholder.py 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a placeholder for Pike backports. +# Do not use this number for new Queens work, which starts after +# all the placeholders. 
+ + +def upgrade(migrate_engine): + pass diff -Nru heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/083_placeholder.py heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/083_placeholder.py --- heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/083_placeholder.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/083_placeholder.py 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a placeholder for Pike backports. +# Do not use this number for new Queens work, which starts after +# all the placeholders. + + +def upgrade(migrate_engine): + pass diff -Nru heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/084_placeholder.py heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/084_placeholder.py --- heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/084_placeholder.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/084_placeholder.py 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a placeholder for Pike backports. +# Do not use this number for new Queens work, which starts after +# all the placeholders. + + +def upgrade(migrate_engine): + pass diff -Nru heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/085_placeholder.py heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/085_placeholder.py --- heat-9.0.0/heat/db/sqlalchemy/migrate_repo/versions/085_placeholder.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat/db/sqlalchemy/migrate_repo/versions/085_placeholder.py 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This is a placeholder for Pike backports. +# Do not use this number for new Queens work, which starts after +# all the placeholders. 
+ + +def upgrade(migrate_engine): + pass diff -Nru heat-9.0.0/heat/engine/api.py heat-10.0.0~b1/heat/engine/api.py --- heat-9.0.0/heat/engine/api.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/api.py 2017-10-27 07:35:34.000000000 +0000 @@ -51,10 +51,6 @@ bool_value = param_utils.extract_bool(name, params[name]) kwargs[name] = bool_value - name = rpc_api.PARAM_SHOW_DELETED - if name in params: - params[name] = param_utils.extract_bool(name, params[name]) - adopt_data = params.get(rpc_api.PARAM_ADOPT_STACK_DATA) if adopt_data: try: diff -Nru heat-9.0.0/heat/engine/cfn/template.py heat-10.0.0~b1/heat/engine/cfn/template.py --- heat-9.0.0/heat/engine/cfn/template.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/cfn/template.py 2017-10-27 07:35:34.000000000 +0000 @@ -150,14 +150,23 @@ name = definition.name hot_tmpl = definition.render_hot() + if self.t.get(self.RESOURCES) is None: + self.t[self.RESOURCES] = {} + cfn_tmpl = dict((self.HOT_TO_CFN_RES_ATTRS[k], v) for k, v in hot_tmpl.items()) - if len(cfn_tmpl.get(self.RES_DEPENDS_ON, [])) == 1: - cfn_tmpl[self.RES_DEPENDS_ON] = cfn_tmpl[self.RES_DEPENDS_ON][0] + dep_list = cfn_tmpl.get(self.RES_DEPENDS_ON, []) + if len(dep_list) == 1: + dep_res = cfn_tmpl[self.RES_DEPENDS_ON][0] + if dep_res in self.t[self.RESOURCES]: + cfn_tmpl[self.RES_DEPENDS_ON] = dep_res + else: + del cfn_tmpl[self.RES_DEPENDS_ON] + elif dep_list: + cfn_tmpl[self.RES_DEPENDS_ON] = [d for d in dep_list + if d in self.t[self.RESOURCES]] - if self.t.get(self.RESOURCES) is None: - self.t[self.RESOURCES] = {} self.t[self.RESOURCES][name] = cfn_tmpl def add_output(self, definition): diff -Nru heat-9.0.0/heat/engine/check_resource.py heat-10.0.0~b1/heat/engine/check_resource.py --- heat-9.0.0/heat/engine/check_resource.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/check_resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -151,11 +151,8 @@ self._handle_resource_failure(cnxt, is_update, 
rsrc.id, stack, reason) except scheduler.Timeout: - # reload the stack to verify current traversal - stack = parser.Stack.load(cnxt, stack_id=stack.id) - if stack.current_traversal != current_traversal: - return - self._handle_stack_timeout(cnxt, stack) + self._handle_resource_failure(cnxt, is_update, rsrc.id, + stack, u'Timed out') except CancelOperation: pass @@ -195,7 +192,7 @@ stack): deps = stack.convergence_dependencies graph = deps.graph() - graph_key = (resource_id, is_update) + graph_key = parser.ConvergenceNode(resource_id, is_update) if graph_key not in graph and rsrc.replaces is not None: # If we are a replacement, impersonate the replaced resource for @@ -204,10 +201,10 @@ # graph. Our real resource ID is sent in the input_data, so the # dependencies will get updated to point to this resource in time # for the next traversal. - graph_key = (rsrc.replaces, is_update) + graph_key = parser.ConvergenceNode(rsrc.replaces, is_update) - def _get_input_data(req, fwd, input_forward_data=None): - if fwd: + def _get_input_data(req_node, input_forward_data=None): + if req_node.is_update: if input_forward_data is None: return rsrc.node_data().as_dict() else: @@ -216,7 +213,7 @@ else: # Don't send data if initiating clean-up for self i.e. 
# initiating delete of a replaced resource - if req not in graph_key: + if req_node.rsrc_id != graph_key.rsrc_id: # send replaced resource as needed_by if it exists return (rsrc.replaced_by if rsrc.replaced_by is not None @@ -225,13 +222,14 @@ try: input_forward_data = None - for req, fwd in deps.required_by(graph_key): - input_data = _get_input_data(req, fwd, input_forward_data) - if fwd: + for req_node in deps.required_by(graph_key): + input_data = _get_input_data(req_node, input_forward_data) + if req_node.is_update: input_forward_data = input_data propagate_check_resource( - cnxt, self._rpc_client, req, current_traversal, - set(graph[(req, fwd)]), graph_key, input_data, fwd, + cnxt, self._rpc_client, req_node.rsrc_id, + current_traversal, set(graph[req_node]), + graph_key, input_data, req_node.is_update, stack.adopt_stack_data) if is_update: if input_forward_data is None: @@ -241,7 +239,7 @@ else: rsrc.store_attributes() check_stack_complete(cnxt, stack, current_traversal, - graph_key[0], deps, graph_key[1]) + graph_key.rsrc_id, deps, graph_key.is_update) except exception.EntityNotFound as e: if e.entity == "Sync Point": # Reload the stack to determine the current traversal, and diff -Nru heat-9.0.0/heat/engine/clients/os/barbican.py heat-10.0.0~b1/heat/engine/clients/os/barbican.py --- heat-9.0.0/heat/engine/clients/os/barbican.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/clients/os/barbican.py 2017-10-27 07:35:34.000000000 +0000 @@ -18,9 +18,9 @@ from heat.engine import constraints try: - from barbicanclient import containers -except ImportError: from barbicanclient.v1 import containers +except ImportError: + from barbicanclient import containers CLIENT_NAME = 'barbican' diff -Nru heat-9.0.0/heat/engine/clients/os/heat_plugin.py heat-10.0.0~b1/heat/engine/clients/os/heat_plugin.py --- heat-9.0.0/heat/engine/clients/os/heat_plugin.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/clients/os/heat_plugin.py 2017-10-27 
07:35:34.000000000 +0000 @@ -13,6 +13,7 @@ from oslo_config import cfg import six +from six.moves import urllib from heatclient import client as hc from heatclient import exc @@ -91,12 +92,26 @@ def get_watch_server_url(self): cfn_url = self.get_heat_cfn_url() - url_parts = cfn_url.split(':') - port_and_version = url_parts[-1].split('/') - port_and_version[0] = ( - six.text_type(cfg.CONF.heat_api_cloudwatch.bind_port)) - url_parts[-1] = '/'.join(port_and_version) - return ':'.join(url_parts) + parsed_url = urllib.parse.urlparse(cfn_url) + host = parsed_url.hostname + port = parsed_url.port + # For ipv6 we need to include the host in brackets + if parsed_url.netloc.startswith('['): + host = "[%s]" % host + # The old url model, like http://localhost:port/v1 + if port: + watch_api_port = ( + six.text_type(cfg.CONF.heat_api_cloudwatch.bind_port)) + replaced_netloc = ':'.join([host, str(watch_api_port)]) + parsed_url = parsed_url._replace(netloc=replaced_netloc) + # The uwsgi url mode, like http://ip/heat-api-cfn/v1 + else: + paths = parsed_url.path.split('/') + paths[1] = 'heat-api-cloudwatch' + replaced_paths = '/'.join(paths) + parsed_url = parsed_url._replace(path=replaced_paths) + + return urllib.parse.urlunparse(parsed_url) def get_insecure_option(self): return self._get_client_option(CLIENT_NAME, 'insecure') diff -Nru heat-9.0.0/heat/engine/clients/os/mistral.py heat-10.0.0~b1/heat/engine/clients/os/mistral.py --- heat-9.0.0/heat/engine/clients/os/mistral.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/clients/os/mistral.py 2017-10-27 07:35:34.000000000 +0000 @@ -11,6 +11,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from keystoneauth1.exceptions import http as ka_exceptions from mistralclient.api import base as mistral_base from mistralclient.api import client as mistral_client @@ -39,16 +40,28 @@ return client def is_not_found(self, ex): - return (isinstance(ex, mistral_base.APIException) and - ex.error_code == 404) + # check for keystoneauth exceptions till requirements change + # to python-mistralclient > 3.1.2 + ka_not_found = isinstance(ex, ka_exceptions.NotFound) + mistral_not_found = (isinstance(ex, mistral_base.APIException) and + ex.error_code == 404) + return ka_not_found or mistral_not_found def is_over_limit(self, ex): - return (isinstance(ex, mistral_base.APIException) and - ex.error_code == 413) + # check for keystoneauth exceptions till requirements change + # to python-mistralclient > 3.1.2 + ka_overlimit = isinstance(ex, ka_exceptions.RequestEntityTooLarge) + mistral_overlimit = (isinstance(ex, mistral_base.APIException) and + ex.error_code == 413) + return ka_overlimit or mistral_overlimit def is_conflict(self, ex): - return (isinstance(ex, mistral_base.APIException) and - ex.error_code == 409) + # check for keystoneauth exceptions till requirements change + # to python-mistralclient > 3.1.2 + ka_conflict = isinstance(ex, ka_exceptions.Conflict) + mistral_conflict = (isinstance(ex, mistral_base.APIException) and + ex.error_code == 409) + return ka_conflict or mistral_conflict def get_workflow_by_identifier(self, workflow_identifier): try: diff -Nru heat-9.0.0/heat/engine/clients/os/nova.py heat-10.0.0~b1/heat/engine/clients/os/nova.py --- heat-9.0.0/heat/engine/clients/os/nova.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/clients/os/nova.py 2017-10-27 07:35:34.000000000 +0000 @@ -393,14 +393,15 @@ attachments.append((jsonutils.dumps(metadata), 'cfn-init-data', 'x-cfninitdata')) - heat_client_plugin = self.context.clients.client_plugin('heat') - watch_url = cfg.CONF.heat_watch_server_url - if not watch_url: - watch_url = 
heat_client_plugin.get_watch_server_url() + if is_cfntools: + heat_client_plugin = self.context.clients.client_plugin('heat') + watch_url = cfg.CONF.heat_watch_server_url + if not watch_url: + watch_url = heat_client_plugin.get_watch_server_url() - attachments.append((watch_url, 'cfn-watch-server', 'x-cfninitdata')) + attachments.append((watch_url, + 'cfn-watch-server', 'x-cfninitdata')) - if is_cfntools: cfn_md_url = heat_client_plugin.get_cfn_metadata_server_url() attachments.append((cfn_md_url, 'cfn-metadata-server', 'x-cfninitdata')) @@ -702,7 +703,7 @@ @tenacity.retry( stop=tenacity.stop_after_attempt( cfg.CONF.max_interface_check_attempts), - wait=tenacity.wait_fixed(0.5), + wait=tenacity.wait_exponential(multiplier=0.5, max=2.0), retry=tenacity.retry_if_result(client_plugin.retry_if_result_is_false)) def check_interface_detach(self, server_id, port_id): with self.ignore_not_found: diff -Nru heat-9.0.0/heat/engine/dependencies.py heat-10.0.0~b1/heat/engine/dependencies.py --- heat-9.0.0/heat/engine/dependencies.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/dependencies.py 2017-10-27 07:35:34.000000000 +0000 @@ -56,7 +56,9 @@ return iter(self.satisfy) def requires(self, target=None): - """Add a key that this node requires, and optionally add a new one.""" + """List the keys that this node requires, and optionally add a new one. + + """ if target is not None: self.require.add(target) return iter(self.require) @@ -205,12 +207,12 @@ return self._graph[last].required_by() - def requires(self, target): - """List the keys that require the specified node.""" - if target not in self._graph: + def requires(self, source): + """List the keys that the specified node requires.""" + if source not in self._graph: raise KeyError - return self._graph[target].requires() + return self._graph[source].requires() def __getitem__(self, last): """Return a partial dependency graph starting with the specified node. 
@@ -268,8 +270,7 @@ def __repr__(self): """Return a consistent string representation of the object.""" - edge_reprs = list(repr(e) for e in self._graph.edges()) - edge_reprs.sort() + edge_reprs = sorted(repr(e) for e in self._graph.edges()) text = 'Dependencies([%s])' % ', '.join(edge_reprs) return text diff -Nru heat-9.0.0/heat/engine/event.py heat-10.0.0~b1/heat/engine/event.py --- heat-9.0.0/heat/engine/event.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/event.py 2017-10-27 07:35:34.000000000 +0000 @@ -13,14 +13,14 @@ from heat.common import identifier from heat.objects import event as event_object -from heat.objects import resource_properties_data as rpd_objects class Event(object): """Class representing a Resource state change.""" def __init__(self, context, stack, action, status, reason, - physical_resource_id, resource_properties, resource_name, + physical_resource_id, resource_prop_data_id, + resource_properties, resource_name, resource_type, uuid=None, timestamp=None, id=None): """Initialise from a context, stack, and event information. 
@@ -35,17 +35,10 @@ self.physical_resource_id = physical_resource_id self.resource_name = resource_name self.resource_type = resource_type - self.rsrc_prop_data = None - if isinstance(resource_properties, - rpd_objects.ResourcePropertiesData): - self.rsrc_prop_data = resource_properties - self.resource_properties = self.rsrc_prop_data.data - elif resource_properties is None: + self.rsrc_prop_data_id = resource_prop_data_id + self.resource_properties = resource_properties + if self.resource_properties is None: self.resource_properties = {} - else: - raise AssertionError( - 'resource_properties is unexpected type %s' % - type(resource_properties).__name__) self.uuid = uuid self.timestamp = timestamp self.id = id @@ -68,8 +61,8 @@ if self.timestamp is not None: ev['created_at'] = self.timestamp - if self.rsrc_prop_data: - ev['rsrc_prop_data_id'] = self.rsrc_prop_data.id + if self.rsrc_prop_data_id is not None: + ev['rsrc_prop_data_id'] = self.rsrc_prop_data_id new_ev = event_object.Event.create(self.context, ev) diff -Nru heat-9.0.0/heat/engine/function.py heat-10.0.0~b1/heat/engine/function.py --- heat-9.0.0/heat/engine/function.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/function.py 2017-10-27 07:35:34.000000000 +0000 @@ -246,6 +246,8 @@ """ if isinstance(self.parsed, Function): return self.parsed.__reduce__() + if self.parsed is None: + return lambda x: None, (None,) return type(self.parsed), (self.parsed,) def _repr_result(self): diff -Nru heat-9.0.0/heat/engine/hot/functions.py heat-10.0.0~b1/heat/engine/hot/functions.py --- heat-9.0.0/heat/engine/hot/functions.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/hot/functions.py 2017-10-27 07:35:34.000000000 +0000 @@ -1469,10 +1469,17 @@ try: port = int(port) except ValueError: - raise ValueError(_('Invalid URL port "%s"') % - port) + raise ValueError( + _('Invalid URL port "%(port)s" ' + 'for %(fn_name)s called with ' + '%(args)s') + % {'fn_name': self.fn_name, + 
'port': port, 'args': args}) + if not (0 < port <= 65535): - raise ValueError(_('Invalid URL port %d') % port) + raise ValueError( + _('Invalid URL port %d, ' + 'must be in range 1-65535') % port) else: if not isinstance(args[arg], (function.Function, six.string_types)): diff -Nru heat-9.0.0/heat/engine/hot/template.py heat-10.0.0~b1/heat/engine/hot/template.py --- heat-9.0.0/heat/engine/hot/template.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/hot/template.py 2017-10-27 07:35:34.000000000 +0000 @@ -266,7 +266,14 @@ if self.t.get(self.RESOURCES) is None: self.t[self.RESOURCES] = {} - self.t[self.RESOURCES][name] = definition.render_hot() + rendered = definition.render_hot() + + dep_list = rendered.get(self.RES_DEPENDS_ON) + if dep_list: + rendered[self.RES_DEPENDS_ON] = [d for d in dep_list + if d in self.t[self.RESOURCES]] + + self.t[self.RESOURCES][name] = rendered def add_output(self, definition): if self.t.get(self.OUTPUTS) is None: @@ -568,6 +575,69 @@ functions = { 'get_attr': hot_funcs.GetAttAllAttributes, 'get_file': hot_funcs.GetFile, + 'get_param': hot_funcs.GetParam, + 'get_resource': hot_funcs.GetResource, + 'list_join': hot_funcs.JoinMultiple, + 'repeat': hot_funcs.RepeatWithNestedLoop, + 'resource_facade': hot_funcs.ResourceFacade, + 'str_replace': hot_funcs.ReplaceJson, + + # functions added in 2015-04-30 + 'digest': hot_funcs.Digest, + + # functions added in 2015-10-15 + 'str_split': hot_funcs.StrSplit, + + # functions added in 2016-04-08 + 'map_merge': hot_funcs.MapMerge, + + # functions added in 2016-10-14 + 'yaql': hot_funcs.Yaql, + 'map_replace': hot_funcs.MapReplace, + 'if': hot_funcs.If, + + # functions added in 2017-02-24 + 'filter': hot_funcs.Filter, + 'str_replace_strict': hot_funcs.ReplaceJsonStrict, + + # functions added in 2017-09-01 + 'make_url': hot_funcs.MakeURL, + 'list_concat': hot_funcs.ListConcat, + 'str_replace_vstrict': hot_funcs.ReplaceJsonVeryStrict, + 'list_concat_unique': hot_funcs.ListConcatUnique, 
+ 'contains': hot_funcs.Contains, + + # functions removed from 2015-10-15 + 'Fn::Select': hot_funcs.Removed, + + # functions removed from 2014-10-16 + 'Fn::GetAZs': hot_funcs.Removed, + 'Fn::Join': hot_funcs.Removed, + 'Fn::Split': hot_funcs.Removed, + 'Fn::Replace': hot_funcs.Removed, + 'Fn::Base64': hot_funcs.Removed, + 'Fn::MemberListToMap': hot_funcs.Removed, + 'Fn::ResourceFacade': hot_funcs.Removed, + 'Ref': hot_funcs.Removed, + } + + condition_functions = { + 'get_param': hot_funcs.GetParam, + 'equals': hot_funcs.Equals, + 'not': hot_funcs.Not, + 'and': hot_funcs.And, + 'or': hot_funcs.Or, + + # functions added in 2017-09-01 + 'yaql': hot_funcs.Yaql, + 'contains': hot_funcs.Contains + } + + +class HOTemplate20180302(HOTemplate20170901): + functions = { + 'get_attr': hot_funcs.GetAttAllAttributes, + 'get_file': hot_funcs.GetFile, 'get_param': hot_funcs.GetParam, 'get_resource': hot_funcs.GetResource, 'list_join': hot_funcs.JoinMultiple, diff -Nru heat-9.0.0/heat/engine/resource.py heat-10.0.0~b1/heat/engine/resource.py --- heat-9.0.0/heat/engine/resource.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resource.py 2017-10-27 07:35:49.000000000 +0000 @@ -45,10 +45,10 @@ from heat.engine import status from heat.engine import support from heat.engine import sync_point +from heat.engine import template from heat.objects import resource as resource_objects from heat.objects import resource_data as resource_data_objects from heat.objects import resource_properties_data as rpd_objects -from heat.objects import stack as stack_objects from heat.rpc import client as rpc_client cfg.CONF.import_opt('action_retry_limit', 'heat.common.config') @@ -237,7 +237,7 @@ self._data = None self._attr_data_id = None self._rsrc_metadata = None - self._rsrc_prop_data = None + self._rsrc_prop_data_id = None self._stored_properties_data = None self.created_time = stack.created_time self.updated_time = stack.updated_time @@ -287,7 +287,7 @@ self._attr_data_id = 
resource.attr_data_id self._rsrc_metadata = resource.rsrc_metadata self._stored_properties_data = resource.properties_data - self._rsrc_prop_data = resource.rsrc_prop_data + self._rsrc_prop_data_id = resource.rsrc_prop_data_id self.created_time = resource.created_at self.updated_time = resource.updated_at self.needed_by = resource.needed_by @@ -320,36 +320,45 @@ @classmethod def load(cls, context, resource_id, current_traversal, is_update, data): + """Load a specified resource from the database to check. + + Returns a tuple of the Resource, the StackDefinition corresponding to + the resource's ResourceDefinition (i.e. the one the resource was last + updated to if it has already been created, or the one it will be + created with if it hasn't been already), and the Stack containing the + latest StackDefinition (i.e. the one that the latest traversal is + updating to. + + The latter two must remain in-scope, because the Resource holds weak + references to them. + """ from heat.engine import stack as stack_mod db_res = resource_objects.Resource.get_obj(context, resource_id) curr_stack = stack_mod.Stack.load(context, stack_id=db_res.stack_id, cache_data=data) - resource_owning_stack = curr_stack + initial_stk_defn = latest_stk_defn = curr_stack.defn if (db_res.current_template_id != curr_stack.t.id and (db_res.action != cls.INIT or not is_update or current_traversal != curr_stack.current_traversal)): - # load stack with template owning the resource - db_stack = stack_objects.Stack.get_by_id(context, db_res.stack_id) - db_stack.raw_template = None - db_stack.raw_template_id = db_res.current_template_id - resource_owning_stack = stack_mod.Stack.load(context, - stack=db_stack) + # load the definition associated with the resource's template + current_template_id = db_res.current_template_id + current_template = template.Template.load(context, + current_template_id) + initial_stk_defn = curr_stack.defn.clone_with_new_template( + current_template, + curr_stack.identifier()) + 
curr_stack.defn = initial_stk_defn # Load only the resource in question; don't load all resources # by invoking stack.resources. Maintain light-weight stack. - res_defn = resource_owning_stack.defn.resource_definition(db_res.name) - resource = cls(db_res.name, res_defn, resource_owning_stack) + res_defn = initial_stk_defn.resource_definition(db_res.name) + resource = cls(db_res.name, res_defn, curr_stack) resource._load_data(db_res) - # assign current stack to the resource for updates - if is_update: - resource.stack = curr_stack - - # return resource owning stack so that it is not GCed since it - # is the only stack instance with a weak-ref from resource - return resource, resource_owning_stack, curr_stack + curr_stack.defn = latest_stk_defn + return resource, initial_stk_defn, curr_stack def make_replacement(self, new_tmpl_id): # 1. create the replacement with "replaces" = self.id @@ -942,7 +951,7 @@ old_props = self._stored_properties_data self._stored_properties_data = function.resolve(self.properties.data) if self._stored_properties_data != old_props: - self._rsrc_prop_data = None + self._rsrc_prop_data_id = None self.attributes.reset_resolved_values() def referenced_attrs(self, stk_defn=None, @@ -1043,9 +1052,12 @@ for e in get_attrs(out_attrs - dep_attrs, cacheable_only=True): pass + # Calculate attribute values *before* reference ID, to potentially + # save an extra RPC call in TemplateResource + attribute_values = dict(get_attrs(dep_attrs)) + return node_data.NodeData(self.id, self.name, self.uuid, - self.FnGetRefId(), - dict(get_attrs(dep_attrs)), + self.FnGetRefId(), attribute_values, self.action, self.status) def preview(self): @@ -1372,6 +1384,8 @@ runner(timeout=timeout, progress_callback=progress_callback) except UpdateReplace: raise + except exception.UpdateInProgress: + raise except BaseException: with excutils.save_and_reraise_exception(): update_templ_id_and_requires(persist=True) @@ -1391,6 +1405,8 @@ 
self.update_template_diff_properties(after_props, before_props) return True + else: + return False def _check_restricted_actions(self, actions, after, before, after_props, before_props, @@ -1414,6 +1430,8 @@ raise exception.ResourceActionRestricted(action='replace') raise + return False + def _prepare_update_props(self, after, before): before_props = before.properties(self.properties_schema, @@ -1503,29 +1521,27 @@ registry = self.stack.env.registry restr_actions = registry.get_rsrc_restricted_actions(self.name) if restr_actions: - if not self._check_restricted_actions(restr_actions, - after, before, - after_props, - before_props, - prev_resource): - if update_templ_func is not None: - update_templ_func(persist=True) - return + needs_update = self._check_restricted_actions(restr_actions, + after, before, + after_props, + before_props, + prev_resource) else: - if not self._needs_update(after, before, - after_props, before_props, - prev_resource): - if update_templ_func is not None: - update_templ_func(persist=True) - if self.status == self.FAILED: - status_reason = _('Update status to COMPLETE for ' - 'FAILED resource neither update ' - 'nor replace.') - lock = (self.LOCK_RESPECT if self.stack.convergence - else self.LOCK_NONE) - self.state_set(self.action, self.COMPLETE, - status_reason, lock=lock) - return + needs_update = self._needs_update(after, before, + after_props, before_props, + prev_resource) + if not needs_update: + if update_templ_func is not None: + update_templ_func(persist=True) + if self.status == self.FAILED: + status_reason = _('Update status to COMPLETE for ' + 'FAILED resource neither update ' + 'nor replace.') + lock = (self.LOCK_RESPECT if self.stack.convergence + else self.LOCK_NONE) + self.state_set(self.action, self.COMPLETE, + status_reason, lock=lock) + return if not self.stack.convergence: if (self.action, self.status) in ( @@ -1610,7 +1626,9 @@ exc = Exception(_('Resource %s not created yet.') % self.name) failure = 
exception.ResourceFailure(exc, self, action) raise failure - return self._do_action(action) + + with self.frozen_properties(): + return self._do_action(action) else: reason = '%s not supported for %s' % (action, self.type()) self.state_set(action, self.COMPLETE, reason) @@ -1649,7 +1667,8 @@ raise exception.ResourceFailure(exc, self, action) LOG.info('suspending %s', self) - return self._do_action(action) + with self.frozen_properties(): + return self._do_action(action) def resume(self): """Return a task to resume the resource. @@ -1667,13 +1686,16 @@ exc = exception.Error(_('State %s invalid for resume') % six.text_type(self.state)) raise exception.ResourceFailure(exc, self, action) + LOG.info('resuming %s', self) - return self._do_action(action) + with self.frozen_properties(): + return self._do_action(action) def snapshot(self): """Snapshot the resource and return the created data, if any.""" LOG.info('snapshotting %s', self) - return self._do_action(self.SNAPSHOT) + with self.frozen_properties(): + return self._do_action(self.SNAPSHOT) @scheduler.wrappertask def delete_snapshot(self, data): @@ -2015,8 +2037,8 @@ """Add a state change event to the database.""" physical_res_id = self.resource_id or self.physical_resource_name() ev = event.Event(self.context, self.stack, action, status, reason, - physical_res_id, self._rsrc_prop_data, - self.name, self.type()) + physical_res_id, self._rsrc_prop_data_id, + self._stored_properties_data, self.name, self.type()) ev.store() self.stack.dispatch_event(ev) @@ -2035,11 +2057,11 @@ else: self._store_with_lock({}, self.LOCK_RELEASE) - def _resolve_all_attributes(self, attr): - """Method for resolving all attributes. + def _resolve_any_attribute(self, attr): + """Method for resolving any attribute, including base attributes. This method uses basic _resolve_attribute method for resolving - specific attributes. Base attributes will be resolved with + plugin-specific attributes. 
Base attributes will be resolved with corresponding method, which should be defined in each resource class. @@ -2480,16 +2502,16 @@ return True def _create_or_replace_rsrc_prop_data(self): - if self._rsrc_prop_data is not None: - return self._rsrc_prop_data.id + if self._rsrc_prop_data_id is not None: + return self._rsrc_prop_data_id if not self._stored_properties_data: return None - self._rsrc_prop_data = \ + self._rsrc_prop_data_id = \ rpd_objects.ResourcePropertiesData(self.context).create( - self.context, self._stored_properties_data) - return self._rsrc_prop_data.id + self.context, self._stored_properties_data).id + return self._rsrc_prop_data_id def is_using_neutron(self): try: @@ -2511,5 +2533,5 @@ res = ref() if res is None: raise RuntimeError("Resource collected") - return res._resolve_all_attributes(attr) + return res._resolve_any_attribute(attr) return resolve diff -Nru heat-9.0.0/heat/engine/resources/alarm_base.py heat-10.0.0~b1/heat/engine/resources/alarm_base.py --- heat-9.0.0/heat/engine/resources/alarm_base.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/alarm_base.py 2017-10-27 07:35:34.000000000 +0000 @@ -163,7 +163,7 @@ TIMEZONE: properties.Schema( properties.Schema.STRING, _("Timezone for the time constraint " - "(eg. 'Taiwan/Taipei', 'Europe/Amsterdam')."), + "(eg. 
'Asia/Taipei', 'Europe/Amsterdam')."), constraints=[ constraints.CustomConstraint('timezone') ], diff -Nru heat-9.0.0/heat/engine/resources/aws/cfn/wait_condition.py heat-10.0.0~b1/heat/engine/resources/aws/cfn/wait_condition.py --- heat-9.0.0/heat/engine/resources/aws/cfn/wait_condition.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/aws/cfn/wait_condition.py 2017-10-27 07:35:34.000000000 +0000 @@ -94,11 +94,6 @@ raise ValueError(_("WaitCondition invalid Handle %s") % handle_id.resource_name) - def _get_handle_resource(self): - handle_url = self.properties[self.HANDLE] - handle_id = identifier.ResourceIdentifier.from_arn_url(handle_url) - return self.stack[handle_id.resource_name] - def handle_create(self): self._validate_handle_url() return super(WaitCondition, self).handle_create() diff -Nru heat-9.0.0/heat/engine/resources/openstack/barbican/order.py heat-10.0.0~b1/heat/engine/resources/openstack/barbican/order.py --- heat-9.0.0/heat/engine/resources/openstack/barbican/order.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/barbican/order.py 2017-10-27 07:35:34.000000000 +0000 @@ -155,7 +155,7 @@ ), PASS_PHRASE: properties.Schema( properties.Schema.STRING, - _('The passphrase the created key. Can be set only ' + _('The passphrase of the created key. 
Can be set only ' 'for asymmetric type of order.'), support_status=support.SupportStatus(version='5.0.0'), ), diff -Nru heat-9.0.0/heat/engine/resources/openstack/cinder/volume.py heat-10.0.0~b1/heat/engine/resources/openstack/cinder/volume.py --- heat-9.0.0/heat/engine/resources/openstack/cinder/volume.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/cinder/volume.py 2017-10-27 07:35:34.000000000 +0000 @@ -676,8 +676,8 @@ if (resource_data.get(self.METADATA) and resource_data.get(self.METADATA).get( - self.READ_ONLY) is not None): - read_only = resource_data.get(self.METADATA).pop(self.READ_ONLY) + 'readonly') is not None): + read_only = resource_data.get(self.METADATA).pop('readonly') volume_reality.update({self.READ_ONLY: read_only}) old_vt = self.data().get(self.VOLUME_TYPE) diff -Nru heat-9.0.0/heat/engine/resources/openstack/designate/domain.py heat-10.0.0~b1/heat/engine/resources/openstack/designate/domain.py --- heat-9.0.0/heat/engine/resources/openstack/designate/domain.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/designate/domain.py 2017-10-27 07:35:34.000000000 +0000 @@ -28,10 +28,13 @@ """ support_status = support.SupportStatus( - status=support.DEPRECATED, - version='8.0.0', + status=support.HIDDEN, + version='10.0.0', message=_('Use OS::Designate::Zone instead.'), - previous_status=support.SupportStatus(version='5.0.0')) + previous_status=support.SupportStatus( + status=support.DEPRECATED, + version='8.0.0', + previous_status=support.SupportStatus(version='5.0.0'))) entity = 'domains' diff -Nru heat-9.0.0/heat/engine/resources/openstack/designate/record.py heat-10.0.0~b1/heat/engine/resources/openstack/designate/record.py --- heat-9.0.0/heat/engine/resources/openstack/designate/record.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/designate/record.py 2017-10-27 07:35:34.000000000 +0000 @@ -29,10 +29,13 @@ """ support_status 
= support.SupportStatus( - status=support.DEPRECATED, - version='8.0.0', + status=support.HIDDEN, + version='10.0.0', message=_('Use OS::Designate::RecordSet instead.'), - previous_status=support.SupportStatus(version='5.0.0')) + previous_status=support.SupportStatus( + status=support.DEPRECATED, + version='8.0.0', + previous_status=support.SupportStatus(version='5.0.0'))) entity = 'records' diff -Nru heat-9.0.0/heat/engine/resources/openstack/heat/scaling_policy.py heat-10.0.0~b1/heat/engine/resources/openstack/heat/scaling_policy.py --- heat-9.0.0/heat/engine/resources/openstack/heat/scaling_policy.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/heat/scaling_policy.py 2017-10-27 07:35:34.000000000 +0000 @@ -209,9 +209,6 @@ elif name == self.SIGNAL_URL: return six.text_type(self._get_heat_signal_url()) - def get_reference_id(self): - return resource.Resource.get_reference_id(self) - def resource_mapping(): return { diff -Nru heat-9.0.0/heat/engine/resources/openstack/heat/wait_condition_handle.py heat-10.0.0~b1/heat/engine/resources/openstack/heat/wait_condition_handle.py --- heat-9.0.0/heat/engine/resources/openstack/heat/wait_condition_handle.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/heat/wait_condition_handle.py 2017-10-27 07:35:34.000000000 +0000 @@ -222,8 +222,8 @@ class UpdateWaitConditionHandle(aws_wch.WaitConditionHandle): """WaitConditionHandle that clears signals and changes handle on update. - This works identically to a regular WaitConditionHandle, except that - on update it clears all signals received and changes the handle. Using + This works similarly to an AWS::CloudFormation::WaitConditionHandle, except + that on update it clears all signals received and changes the handle. Using this handle means that you must setup the signal senders to send their signals again any time the update handle changes. 
This allows us to roll out new configurations and be confident that they are rolled out once diff -Nru heat-9.0.0/heat/engine/resources/openstack/keystone/project.py heat-10.0.0~b1/heat/engine/resources/openstack/keystone/project.py --- heat-9.0.0/heat/engine/resources/openstack/keystone/project.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/keystone/project.py 2017-10-27 07:35:34.000000000 +0000 @@ -12,6 +12,7 @@ # under the License. from heat.common.i18n import _ +from heat.engine import attributes from heat.engine import constraints from heat.engine import properties from heat.engine import resource @@ -77,6 +78,45 @@ ), } + ATTRIBUTES = ( + NAME_ATTR, PARENT_ATTR, DOMAIN_ATTR, ENABLED_ATTR, IS_DOMAIN_ATTR + ) = ( + 'name', 'parent_id', 'domain_id', 'enabled', 'is_domain' + ) + attributes_schema = { + NAME_ATTR: attributes.Schema( + _('Project name.'), + support_status=support.SupportStatus(version='10.0.0'), + type=attributes.Schema.STRING + ), + PARENT_ATTR: attributes.Schema( + _('Parent project id.'), + support_status=support.SupportStatus(version='10.0.0'), + type=attributes.Schema.STRING + ), + DOMAIN_ATTR: attributes.Schema( + _('Domain id for project.'), + support_status=support.SupportStatus(version='10.0.0'), + type=attributes.Schema.STRING + ), + ENABLED_ATTR: attributes.Schema( + _('Flag of enable project.'), + support_status=support.SupportStatus(version='10.0.0'), + type=attributes.Schema.BOOLEAN + ), + IS_DOMAIN_ATTR: attributes.Schema( + _('Indicates whether the project also acts as a domain.'), + support_status=support.SupportStatus(version='10.0.0'), + type=attributes.Schema.BOOLEAN + ), + } + + def _resolve_attribute(self, name): + if self.resource_id is None: + return + project = self.client().projects.get(self.resource_id) + return getattr(project, name, None) + def translation_rules(self, properties): return [ translation.TranslationRule( diff -Nru 
heat-9.0.0/heat/engine/resources/openstack/keystone/role.py heat-10.0.0~b1/heat/engine/resources/openstack/keystone/role.py --- heat-9.0.0/heat/engine/resources/openstack/keystone/role.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/keystone/role.py 2017-10-27 07:35:34.000000000 +0000 @@ -12,9 +12,11 @@ # under the License. from heat.common.i18n import _ +from heat.engine import constraints from heat.engine import properties from heat.engine import resource from heat.engine import support +from heat.engine import translation class KeystoneRole(resource.Resource): @@ -22,7 +24,8 @@ Roles dictate the level of authorization the end user can obtain. Roles can be granted at either the domain or project level. Role can be assigned to - the individual user or at the group level. Role names are globally unique. + the individual user or at the group level. Role name is unique within the + owning domain. """ support_status = support.SupportStatus( @@ -34,9 +37,9 @@ entity = 'roles' PROPERTIES = ( - NAME + NAME, DOMAIN, ) = ( - 'name' + 'name', 'domain', ) properties_schema = { @@ -44,17 +47,36 @@ properties.Schema.STRING, _('Name of keystone role.'), update_allowed=True + ), + DOMAIN: properties.Schema( + properties.Schema.STRING, + _('Name or id of keystone domain.'), + default='default', + constraints=[constraints.CustomConstraint('keystone.domain')], + support_status=support.SupportStatus(version='10.0.0') ) } + def translation_rules(self, properties): + return [ + translation.TranslationRule( + properties, + translation.TranslationRule.RESOLVE, + [self.DOMAIN], + client_plugin=self.client_plugin(), + finder='get_domain_id' + ) + ] + def client(self): return super(KeystoneRole, self).client().client def handle_create(self): role_name = (self.properties[self.NAME] or self.physical_resource_name()) - - role = self.client().roles.create(name=role_name) + domain = self.properties[self.DOMAIN] + role = 
self.client().roles.create(name=role_name, + domain=domain) self.resource_id_set(role.id) diff -Nru heat-9.0.0/heat/engine/resources/openstack/keystone/user.py heat-10.0.0~b1/heat/engine/resources/openstack/keystone/user.py --- heat-9.0.0/heat/engine/resources/openstack/keystone/user.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/keystone/user.py 2017-10-27 07:35:34.000000000 +0000 @@ -106,8 +106,8 @@ NAME_ATTR, DEFAULT_PROJECT_ATTR, DOMAIN_ATTR, ENABLED_ATTR, PASSWORD_EXPIRES_AT_ATTR ) = ( - 'name', 'default_project_id', 'domain_id', - 'enabled', 'password_expires_at' + 'name', 'default_project_id', 'domain_id', 'enabled', + 'password_expires_at' ) attributes_schema = { NAME_ATTR: attributes.Schema( diff -Nru heat-9.0.0/heat/engine/resources/openstack/mistral/workflow.py heat-10.0.0~b1/heat/engine/resources/openstack/mistral/workflow.py --- heat-9.0.0/heat/engine/resources/openstack/mistral/workflow.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/mistral/workflow.py 2017-10-27 07:35:34.000000000 +0000 @@ -48,10 +48,10 @@ PROPERTIES = ( NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS, - TASK_DEFAULTS, USE_REQUEST_BODY_AS_INPUT + TASK_DEFAULTS, USE_REQUEST_BODY_AS_INPUT, TAGS ) = ( 'name', 'type', 'description', 'input', 'output', 'tasks', 'params', - 'task_defaults', 'use_request_body_as_input' + 'task_defaults', 'use_request_body_as_input', 'tags' ) _TASKS_KEYS = ( @@ -109,6 +109,12 @@ update_allowed=True, support_status=support.SupportStatus(version='6.0.0') ), + TAGS: properties.Schema( + properties.Schema.LIST, + _('List of tags to set on the workflow.'), + update_allowed=True, + support_status=support.SupportStatus(version='10.0.0') + ), DESCRIPTION: properties.Schema( properties.Schema.STRING, _('Workflow description.'), @@ -532,6 +538,7 @@ defn_name: {self.TYPE: props.get(self.TYPE), self.DESCRIPTION: props.get( self.DESCRIPTION), + self.TAGS: props.get(self.TAGS), 
self.OUTPUT: props.get(self.OUTPUT)}} for key in list(definition[defn_name].keys()): if definition[defn_name][key] is None: diff -Nru heat-9.0.0/heat/engine/resources/openstack/neutron/floatingip.py heat-10.0.0~b1/heat/engine/resources/openstack/neutron/floatingip.py --- heat-9.0.0/heat/engine/resources/openstack/neutron/floatingip.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/neutron/floatingip.py 2017-10-27 07:35:34.000000000 +0000 @@ -198,6 +198,45 @@ ) ] + def _add_router_interface_dependencies(self, deps, resource): + def port_on_subnet(resource, subnet): + if not resource.has_interface('OS::Neutron::Port'): + return False + + fixed_ips = resource.properties.get(port.Port.FIXED_IPS) + if not fixed_ips: + # During create we have only unresolved value for + # functions, so can not use None value for building + # correct dependencies. Depend on all RouterInterfaces + # when the port has no fixed IP specified, since we + # can't safely assume that any are in different + # networks. 
+ if subnet is None: + return True + + p_net = (resource.properties.get(port.Port.NETWORK) or + resource.properties.get(port.Port.NETWORK_ID)) + if p_net: + network = self.client().show_network(p_net)['network'] + return subnet in network['subnets'] + else: + for fixed_ip in resource.properties.get( + port.Port.FIXED_IPS): + + port_subnet = (fixed_ip.get(port.Port.FIXED_IP_SUBNET) or + fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID)) + if subnet == port_subnet: + return True + return False + + interface_subnet = ( + resource.properties.get(router.RouterInterface.SUBNET) or + resource.properties.get(router.RouterInterface.SUBNET_ID)) + for d in deps.graph()[self]: + if port_on_subnet(d, interface_subnet): + deps += (self, resource) + break + def add_dependencies(self, deps): super(FloatingIP, self).add_dependencies(deps) @@ -216,46 +255,7 @@ # with the same subnet that this floating IP's port is assigned # to elif resource.has_interface('OS::Neutron::RouterInterface'): - - def port_on_subnet(resource, subnet): - if not resource.has_interface('OS::Neutron::Port'): - return False - - fixed_ips = resource.properties.get(port.Port.FIXED_IPS) - if not fixed_ips: - # During create we have only unresolved value for - # functions, so can not use None value for building - # correct dependencies. Depend on all RouterInterfaces - # when the port has no fixed IP specified, since we - # can't safely assume that any are in different - # networks. 
- if subnet is None: - return True - - p_net = (resource.properties.get(port.Port.NETWORK) or - resource.properties.get(port.Port.NETWORK_ID)) - if p_net: - subnets = self.client().show_network(p_net)[ - 'network']['subnets'] - return subnet in subnets - else: - for fixed_ip in resource.properties.get( - port.Port.FIXED_IPS): - - port_subnet = ( - fixed_ip.get(port.Port.FIXED_IP_SUBNET) - or fixed_ip.get(port.Port.FIXED_IP_SUBNET_ID)) - if subnet == port_subnet: - return True - return False - - interface_subnet = ( - resource.properties.get(router.RouterInterface.SUBNET) or - resource.properties.get(router.RouterInterface.SUBNET_ID)) - for d in deps.graph()[self]: - if port_on_subnet(d, interface_subnet): - deps += (self, resource) - break + self._add_router_interface_dependencies(deps, resource) # depend on Router with EXTERNAL_GATEWAY_NETWORK property # this template with the same network_id as this # floating_network_id diff -Nru heat-9.0.0/heat/engine/resources/openstack/neutron/port.py heat-10.0.0~b1/heat/engine/resources/openstack/neutron/port.py --- heat-9.0.0/heat/engine/resources/openstack/neutron/port.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/neutron/port.py 2017-10-27 07:35:34.000000000 +0000 @@ -123,14 +123,16 @@ DEVICE_ID: properties.Schema( properties.Schema.STRING, _('Device ID of this port.'), - update_allowed=True + update_allowed=True, + default='' ), DEVICE_OWNER: properties.Schema( properties.Schema.STRING, _('Name of the network owning the port. 
' 'The value is typically network:floatingip ' 'or network:router_interface or network:dhcp.'), - update_allowed=True + update_allowed=True, + default='' ), FIXED_IPS: properties.Schema( properties.Schema.LIST, @@ -284,7 +286,8 @@ 'direct-physical', 'baremetal']), ], support_status=support.SupportStatus(version='2015.1'), - update_allowed=True + update_allowed=True, + default='normal' ), PORT_SECURITY_ENABLED: properties.Schema( properties.Schema.BOOLEAN, @@ -562,8 +565,10 @@ self.set_tags(tags) self._prepare_port_properties(prop_diff, prepare_for_update=True) - LOG.debug('updating port with %s', prop_diff) - self.client().update_port(self.resource_id, {'port': prop_diff}) + if prop_diff: + LOG.debug('updating port with %s', prop_diff) + self.client().update_port(self.resource_id, + {'port': prop_diff}) def check_update_complete(self, *args): attributes = self._show_resource() diff -Nru heat-9.0.0/heat/engine/resources/openstack/nova/flavor.py heat-10.0.0~b1/heat/engine/resources/openstack/nova/flavor.py --- heat-9.0.0/heat/engine/resources/openstack/nova/flavor.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/nova/flavor.py 2017-10-27 07:35:34.000000000 +0000 @@ -149,7 +149,7 @@ properties, translation.TranslationRule.RESOLVE, [self.TENANTS], - client_plugin=self.client_plugin(), + client_plugin=self.client_plugin('keystone'), finder='get_project_id' ) ] @@ -167,10 +167,10 @@ if flavor_keys: flavor.set_keys(flavor_keys) - if not self.IS_PUBLIC: + if not self.properties[self.IS_PUBLIC]: if not tenants: - LOG.info('Tenant property is recommended if IS_PUBLIC ' - 'is false.') + LOG.info('Tenant property is recommended ' + 'for the private flavors.') tenant = self.stack.context.tenant_id self.client().flavor_access.add_tenant_access(flavor, tenant) else: @@ -189,7 +189,7 @@ if new_keys is not None: flavor.set_keys(new_keys) """Update tenant access list.""" - if self.TENANTS in prop_diff and not self.IS_PUBLIC: + if self.TENANTS 
in prop_diff and not self.properties[self.IS_PUBLIC]: kwargs = {'flavor': self.resource_id} old_tenants = [ x.tenant_id for x in self.client().flavor_access.list(**kwargs) diff -Nru heat-9.0.0/heat/engine/resources/openstack/nova/server.py heat-10.0.0~b1/heat/engine/resources/openstack/nova/server.py --- heat-9.0.0/heat/engine/resources/openstack/nova/server.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/nova/server.py 2017-10-27 07:35:34.000000000 +0000 @@ -1697,7 +1697,7 @@ def handle_delete_snapshot(self, snapshot): image_id = snapshot['resource_data'].get('snapshot_image_id') - with self.client_plugin().ignore_not_found: + with self.client_plugin('glance').ignore_not_found: self.client('glance').images.delete(image_id) def handle_restore(self, defn, restore_data): diff -Nru heat-9.0.0/heat/engine/resources/openstack/sahara/cluster.py heat-10.0.0~b1/heat/engine/resources/openstack/sahara/cluster.py --- heat-9.0.0/heat/engine/resources/openstack/sahara/cluster.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/resources/openstack/sahara/cluster.py 2017-10-27 07:35:34.000000000 +0000 @@ -126,6 +126,7 @@ MANAGEMENT_NETWORK: properties.Schema( properties.Schema.STRING, _('Name or UUID of network.'), + required=True, constraints=[ constraints.CustomConstraint('neutron.network') ], @@ -282,13 +283,6 @@ if res: return res - # check if running on neutron and MANAGEMENT_NETWORK missing - if (self.is_using_neutron() and - not self.properties[self.MANAGEMENT_NETWORK]): - msg = _("%s must be provided" - ) % self.MANAGEMENT_NETWORK - raise exception.StackValidationFailed(message=msg) - self.client_plugin().validate_hadoop_version( self.properties[self.PLUGIN_NAME], self.properties[self.HADOOP_VERSION] diff -Nru heat-9.0.0/heat/engine/resources/template_resource.py heat-10.0.0~b1/heat/engine/resources/template_resource.py --- heat-9.0.0/heat/engine/resources/template_resource.py 2017-08-30 11:08:12.000000000 +0000 
+++ heat-10.0.0~b1/heat/engine/resources/template_resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -25,11 +25,14 @@ from heat.engine import properties from heat.engine.resources import stack_resource from heat.engine import template +from heat.rpc import api as rpc_api REMOTE_SCHEMES = ('http', 'https') LOCAL_SCHEMES = ('file',) +STACK_ID_OUTPUT = 'OS::stack_id' + def generate_class_from_template(name, data, param_defaults): tmpl = template.Template(template_format.parse(data)) @@ -300,10 +303,25 @@ if self.resource_id is None: return six.text_type(self.name) + stack_identity = self.nested_identifier() try: - return self.get_output('OS::stack_id') - except exception.InvalidTemplateAttribute: - return self.nested_identifier().arn() + if self._outputs is not None: + return self.get_output(STACK_ID_OUTPUT) + + output = self.rpc_client().show_output(self.context, + dict(stack_identity), + STACK_ID_OUTPUT) + if rpc_api.OUTPUT_ERROR in output: + raise exception.TemplateOutputError( + resource=self.name, + attribute=STACK_ID_OUTPUT, + message=output[rpc_api.OUTPUT_ERROR]) + except (exception.InvalidTemplateAttribute, exception.NotFound): + pass + else: + return output[rpc_api.OUTPUT_VALUE] + + return stack_identity.arn() def get_attribute(self, key, *path): if self.resource_id is None: diff -Nru heat-9.0.0/heat/engine/rsrc_defn.py heat-10.0.0~b1/heat/engine/rsrc_defn.py --- heat-9.0.0/heat/engine/rsrc_defn.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/rsrc_defn.py 2017-10-27 07:35:34.000000000 +0000 @@ -255,7 +255,7 @@ """Return the Resource objects in given stack on which this depends.""" def get_resource(res_name): if res_name not in stack: - if res_name in stack.t.get(stack.t.RESOURCES): + if res_name in stack.defn.all_rsrc_names(): # The resource is conditionally defined, allow dependencies # on it return diff -Nru heat-9.0.0/heat/engine/scheduler.py heat-10.0.0~b1/heat/engine/scheduler.py --- heat-9.0.0/heat/engine/scheduler.py 2017-08-30 
11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/scheduler.py 2017-10-27 07:35:34.000000000 +0000 @@ -11,7 +11,6 @@ # License for the specific language governing permissions and limitations # under the License. -import functools import sys import types @@ -47,7 +46,6 @@ return encodeutils.safe_decode(repr(task)) -@functools.total_ordering class Timeout(BaseException): """Raised when task has exceeded its allotted (wallclock) running time. @@ -78,14 +76,11 @@ generator.close() return False - def __eq__(self, other): - if not isinstance(other, Timeout): - return NotImplemented - return not (self < other or other < self) - - def __lt__(self, other): - if not isinstance(other, Timeout): - return NotImplemented + def earlier_than(self, other): + if other is None: + return True + + assert isinstance(other, Timeout), "Invalid type for Timeout compare" return self._duration.endtime() < other._duration.endtime() @@ -245,7 +240,7 @@ else: if timeout is not None: new_timeout = Timeout(self, timeout) - if self._timeout is None or new_timeout < self._timeout: + if new_timeout.earlier_than(self._timeout): self._timeout = new_timeout done = self.step() if resuming else self.done() @@ -281,7 +276,7 @@ self._runner.close() else: timeout = TimedCancel(self, grace_period) - if self._timeout is None or timeout < self._timeout: + if timeout.earlier_than(self._timeout): self._timeout = timeout def started(self): diff -Nru heat-9.0.0/heat/engine/service.py heat-10.0.0~b1/heat/engine/service.py --- heat-9.0.0/heat/engine/service.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/service.py 2017-10-27 07:35:34.000000000 +0000 @@ -530,7 +530,9 @@ def show(stack): if resolve_outputs: for res in stack._explicit_dependencies(): - node_data = res.node_data(for_outputs=True) + ensure_cache = stack.convergence and res.id is not None + node_data = res.node_data(for_resources=ensure_cache, + for_outputs=True) stk_defn.update_resource_data(stack.defn, res.name, node_data) 
@@ -541,7 +543,7 @@ # * Near simultaneous updates (say by an update and a # signal) # * The first time resolving a pre-Pike stack - if stack.convergence and res.id is not None: + if ensure_cache: res.store_attributes() return api.format_stack(stack, resolve_outputs=resolve_outputs) @@ -1361,8 +1363,7 @@ s = self._get_stack(cntx, stack_identity) stack = parser.Stack.load(cntx, stack=s) - stack._update_all_resource_data(for_resources=False, for_outputs=True) - return api.format_stack_outputs(stack.outputs) + return api.format_stack_outputs(stack.outputs, resolve_value=False) @context.request_context def show_output(self, cntx, stack_identity, output_key): diff -Nru heat-9.0.0/heat/engine/stack.py heat-10.0.0~b1/heat/engine/stack.py --- heat-9.0.0/heat/engine/stack.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/stack.py 2017-10-27 07:35:34.000000000 +0000 @@ -57,6 +57,10 @@ LOG = logging.getLogger(__name__) +ConvergenceNode = collections.namedtuple('ConvergenceNode', + ['rsrc_id', 'is_update']) + + class ForcedCancel(Exception): """Exception raised to cancel task execution.""" @@ -396,7 +400,9 @@ except KeyError: return None - return resource.Resource(db_res.name, defn, self) + res = resource.Resource(db_res.name, defn, self) + res._load_data(db_res) + return res def resource_get(self, name): """Return a stack resource, even if not in the current template.""" @@ -918,7 +924,7 @@ def _add_event(self, action, status, reason): """Add a state change event to the database.""" ev = event.Event(self.context, self, action, status, reason, - self.id, None, + self.id, None, None, self.name, 'OS::Heat::Stack') ev.store() @@ -1440,27 +1446,32 @@ def _compute_convg_dependencies(self, existing_resources, current_template_deps, current_resources): def make_graph_key(rsrc): - return current_resources[rsrc.name].id, True + return ConvergenceNode(current_resources[rsrc.name].id, True) + dep = current_template_deps.translate(make_graph_key) if existing_resources: 
for rsrc_id, rsrc in existing_resources.items(): - dep += (rsrc_id, False), None + dep += ConvergenceNode(rsrc_id, False), None for requirement in rsrc.requires: if requirement in existing_resources: - dep += (requirement, False), (rsrc_id, False) + dep += (ConvergenceNode(requirement, False), + ConvergenceNode(rsrc_id, False)) if rsrc.replaces in existing_resources: - dep += (rsrc.replaces, False), (rsrc_id, False) + dep += (ConvergenceNode(rsrc.replaces, False), + ConvergenceNode(rsrc_id, False)) - if (rsrc.id, True) in dep: - dep += (rsrc_id, False), (rsrc_id, True) + if ConvergenceNode(rsrc.id, True) in dep: + dep += (ConvergenceNode(rsrc_id, False), + ConvergenceNode(rsrc_id, True)) self._convg_deps = dep @property def convergence_dependencies(self): if self._convg_deps is None: - current_deps = ([tuple(i), (tuple(j) if j is not None else None)] + current_deps = ((ConvergenceNode(*i), + ConvergenceNode(*j) if j is not None else None) for i, j in self.current_deps['edges']) self._convg_deps = dependencies.Dependencies(edges=current_deps) @@ -1935,7 +1946,8 @@ snapshot_data = snapshot.data if snapshot_data: data = snapshot.data['resources'].get(name) - scheduler.TaskRunner(rsrc.delete_snapshot, data)() + if data: + scheduler.TaskRunner(rsrc.delete_snapshot, data)() def restore_data(self, snapshot): env = environment.Environment(snapshot.data['environment']) diff -Nru heat-9.0.0/heat/engine/sync_point.py heat-10.0.0~b1/heat/engine/sync_point.py --- heat-9.0.0/heat/engine/sync_point.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/sync_point.py 2017-10-27 07:35:34.000000000 +0000 @@ -74,7 +74,7 @@ def str_pack_tuple(t): - return u'tuple:' + str(t) + return u'tuple:' + str(tuple(t)) def _str_unpack_tuple(s): diff -Nru heat-9.0.0/heat/engine/worker.py heat-10.0.0~b1/heat/engine/worker.py --- heat-9.0.0/heat/engine/worker.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/engine/worker.py 2017-10-27 07:35:34.000000000 +0000 @@ -145,7 
+145,7 @@ def _retrigger_replaced(self, is_update, rsrc, stack, check_resource): graph = stack.convergence_dependencies.graph() - key = (rsrc.id, is_update) + key = parser.ConvergenceNode(rsrc.id, is_update) if key not in graph and rsrc.replaces is not None: # This resource replaces old one and is not needed in # current traversal. You need to mark the resource as @@ -154,9 +154,9 @@ db_api.resource_update_and_save(stack.context, rsrc.id, values) # The old resource might be in the graph (a rollback case); # just re-trigger it. - key = (rsrc.replaces, is_update) + key = parser.ConvergenceNode(rsrc.replaces, is_update) check_resource.retrigger_check_resource(stack.context, is_update, - key[0], stack) + key.rsrc_id, stack) @context.request_context @log_exceptions @@ -170,8 +170,10 @@ in_data = sync_point.deserialize_input_data(data) resource_data = node_data.load_resources_data(in_data if is_update else {}) - rsrc, rsrc_owning_stack, stack = check_resource.load_resource( - cnxt, resource_id, resource_data, current_traversal, is_update) + rsrc, stk_defn, stack = check_resource.load_resource(cnxt, resource_id, + resource_data, + current_traversal, + is_update) if rsrc is None: return diff -Nru heat-9.0.0/heat/locale/de/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/de/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/de/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/de/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -9,9 +9,9 @@ # Robert Simai , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6446,12 +6446,6 @@ msgid "The parent URL of the container." 
msgstr "Die übergeordnete URL des Containers." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"Der Verschlüsselungstext des erstellen Schlüssel. Kann nur für Anfragen vom " -"Typ 'asymmetric' definiert werden." - msgid "The payload of the created certificate, if available." msgstr "Die Nutzdaten des erstellten Zertifikats, sofern verfügbar." @@ -6965,12 +6959,6 @@ "Zeitlimit in Sekunden für Stackaktion (d. h. erstellen oder aktualisieren)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Zeitzone für die Zeiteinschränkung (Beispiel: 'Taiwan/Taipei', 'Europe/" -"Amsterdam')." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/es/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/es/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/es/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/es/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -8,9 +8,9 @@ # Omar Rivera , 2017. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6250,12 +6250,6 @@ msgid "The parent URL of the container." msgstr "El URL padre del contenedor." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"La frase de contraseña de la clave creada. Solo se puede definir para " -"órdenes de tipo asimétrico." 
- msgid "The payload of the created certificate, if available." msgstr "La carga útil del certificado creado, si está disponible." @@ -6765,12 +6759,6 @@ "actualizár)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Zona horaria de la restricción de tiempo (p.e. 'Taiwan/Taipei', 'Europa/" -"Amsterdam')." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/fr/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/fr/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/fr/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/fr/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6205,12 +6205,6 @@ msgid "The parent URL of the container." msgstr "URL parente du conteneur." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"Phrase de passe de la clé créée. Peut uniquement être définie pour un type " -"de commande asymétrique." - msgid "The payload of the created certificate, if available." msgstr "Contenu du certificat créé, le cas échéant." @@ -6706,12 +6700,6 @@ "à jour)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Fuseau horaire de la contrainte de temps (par ex., 'Taïwan/Taipei', 'Europe/" -"Amsterdam')." 
- -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/it/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/it/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/it/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/it/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6184,12 +6184,6 @@ msgid "The parent URL of the container." msgstr "L'URL parent del contenitore." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"La passphrase della chiave creata. Può essere impostata sol per un tipo di " -"ordine asimmetrico." - msgid "The payload of the created certificate, if available." msgstr "Il payload del certificato creato, se disponibile." @@ -6689,12 +6683,6 @@ "Timeout in secondi per l'azione stack (ad esempio creare o aggiornare)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Fuso orario del vincolo di tempo (ad esempio, 'Taiwan/Taipei', 'Europa/" -"Amsterdam')." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. 
Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/ja/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/ja/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/ja/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/ja/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -10,9 +10,9 @@ # Yuko Fukuda , 2017. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6037,10 +6037,6 @@ msgid "The parent URL of the container." msgstr "コンテナーの親 URL。" -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "作成された鍵のパスフレーズ。非対称なタイプの注文にのみ設定できます。" - msgid "The payload of the created certificate, if available." msgstr "作成した証明書 (存在する場合) のペイロード。" @@ -6519,10 +6515,6 @@ msgstr "スタックアクション (作成または更新) のタイムアウト (秒)。" msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "時間制約のタイムゾーン ('Taiwan/Taipei' や 'Europe/Amsterdam' など)。" - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/ko_KR/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/ko_KR/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/ko_KR/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/ko_KR/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -9,9 +9,9 @@ # minwook-shin , 2017. 
#zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -5797,10 +5797,6 @@ msgid "The parent URL of the container." msgstr "컨테이너의 상위 URL입니다." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "작성된 키의 암호입니다. 비대칭 타입의 순서에만 설정할 수 있습니다." - msgid "The payload of the created certificate, if available." msgstr "사용 가능한 경우, 작성된 인증서의 페이로드입니다." @@ -6264,10 +6260,6 @@ msgstr "스택 조치(즉, 작성 또는 업데이트)에 대한 제한시간(초)입니다." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "시간 제한 조건의 시간대(예. '대만/타이페이', '유럽/암스테르담')입니다." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/pt_BR/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/pt_BR/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/pt_BR/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/pt_BR/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6111,12 +6111,6 @@ msgid "The parent URL of the container." msgstr "A URL pai do contêiner." 
-msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"A passphrase criada pela chave. Pode ser configurada somente para tipo de " -"pedido assimétrico." - msgid "The payload of the created certificate, if available." msgstr "A carga útil do certificado criado, se disponível." @@ -6608,12 +6602,6 @@ "atualização)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Fuso horário para a restrição de tempo (por exemplo 'Taiwan/Taipei', 'Europa/" -"Amsterdã')." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/ru/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/ru/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/ru/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/ru/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -6028,12 +6028,6 @@ msgid "The parent URL of the container." msgstr "Родительский URL контейнера." -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "" -"Пароль созданного ключа. Может быть задан только для заказов асимметричных " -"типов." - msgid "The payload of the created certificate, if available." msgstr "Полезная нагрузка созданного сертификата, если она задана." 
@@ -6522,12 +6516,6 @@ "Тайм-аут в секундах для действия над стеком (например, создать или обновить)." msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "" -"Часовой пояс ограничения времени, (например, 'Taiwan/Taipei', 'Europe/" -"Amsterdam')." - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/zh_CN/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/zh_CN/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/zh_CN/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/zh_CN/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -5559,10 +5559,6 @@ msgid "The parent URL of the container." msgstr "容器的父 URL。" -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "所生成密钥的密码。只能针对非对称类型的指令设置。" - msgid "The payload of the created certificate, if available." msgstr "所创建证书的有效内容(如果提供)。" @@ -6005,10 +6001,6 @@ msgstr "堆栈操作(即,创建或更新)的超时,以秒计。" msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "时间约束的时区(例如,“Taiwan/Taipei”,“Europe/Amsterdam”)。" - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. 
Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/locale/zh_TW/LC_MESSAGES/heat.po heat-10.0.0~b1/heat/locale/zh_TW/LC_MESSAGES/heat.po --- heat-9.0.0/heat/locale/zh_TW/LC_MESSAGES/heat.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/locale/zh_TW/LC_MESSAGES/heat.po 2017-10-27 07:35:34.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: heat 9.0.0.0b4.dev55\n" +"Project-Id-Version: heat 10.0.0.dev107\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2017-08-12 12:09+0000\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -5570,10 +5570,6 @@ msgid "The parent URL of the container." msgstr "儲存器的母項 URL。" -msgid "" -"The passphrase the created key. Can be set only for asymmetric type of order." -msgstr "所建立之金鑰的通行詞組。只能針對非對稱類型的順序進行設定。" - msgid "The payload of the created certificate, if available." msgstr "所建立之憑證的有效負載(如果有的話)。" @@ -6016,10 +6012,6 @@ msgstr "堆疊動作(例如,建立或更新)的逾時值(以秒為單位)。" msgid "" -"Timezone for the time constraint (eg. 'Taiwan/Taipei', 'Europe/Amsterdam')." -msgstr "時間限制的時區(例如:「台灣/台北」和「歐洲/阿姆斯特丹」)。" - -msgid "" "Toggle to enable/disable caching when Orchestration Engine looks for other " "OpenStack service resources using name or id. 
Please note that the global " "toggle for oslo.cache(enabled=True in [cache] group) must be enabled to use " diff -Nru heat-9.0.0/heat/objects/resource.py heat-10.0.0~b1/heat/objects/resource.py --- heat-9.0.0/heat/objects/resource.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/objects/resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -85,8 +85,6 @@ resource_data.ResourceData, nullable=True ), - 'rsrc_prop_data': fields.ObjectField( - rpd.ResourcePropertiesData, nullable=True), 'rsrc_prop_data_id': fields.ObjectField( fields.IntegerField(nullable=True)), 'engine_id': fields.StringField(nullable=True), @@ -114,12 +112,21 @@ elif field != 'attr_data': resource[field] = db_resource[field] - if db_resource['rsrc_prop_data'] is not None: - resource['rsrc_prop_data'] = \ - rpd.ResourcePropertiesData._from_db_object( - rpd.ResourcePropertiesData(context), context, - db_resource['rsrc_prop_data']) - resource._properties_data = resource['rsrc_prop_data'].data + if db_resource['rsrc_prop_data_id'] is not None: + if hasattr(db_resource, '__dict__'): + rpd_obj = db_resource.__dict__.get('rsrc_prop_data') + else: + rpd_obj = None + if rpd_obj is not None: + # Object is already eager loaded + rpd_obj = ( + rpd.ResourcePropertiesData._from_db_object( + rpd.ResourcePropertiesData(), + context, + rpd_obj)) + resource._properties_data = rpd_obj.data + else: + resource._properties_data = {} if db_resource['properties_data']: LOG.error( 'Unexpected condition where resource.rsrc_prop_data ' @@ -136,7 +143,7 @@ else: resource._properties_data = db_resource['properties_data'] else: - resource._properties_data = {} + resource._properties_data = None if db_resource['attr_data'] is not None: resource._attr_data = rpd.ResourcePropertiesData._from_db_object( @@ -155,6 +162,12 @@ @property def properties_data(self): + if (not self._properties_data and + self.rsrc_prop_data_id is not None): + LOG.info('rsrp_prop_data lazy load') + rpd_obj = rpd.ResourcePropertiesData.get_by_id( 
+ self._context, self.rsrc_prop_data_id) + self._properties_data = rpd_obj.data or {} return self._properties_data @classmethod diff -Nru heat-9.0.0/heat/tests/autoscaling/test_heat_scaling_group.py heat-10.0.0~b1/heat/tests/autoscaling/test_heat_scaling_group.py --- heat-9.0.0/heat/tests/autoscaling/test_heat_scaling_group.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/autoscaling/test_heat_scaling_group.py 2017-10-27 07:35:34.000000000 +0000 @@ -462,7 +462,7 @@ mock_members.return_value = members self.assertEqual(output[0], self.group.FnGetAtt('resource.0', 'Bar')) self.assertEqual(output[1], self.group.FnGetAtt('resource.1.Bar')) - self.assertRaises(exception.InvalidTemplateAttribute, + self.assertRaises(exception.NotFound, self.group.FnGetAtt, 'resource.2') diff -Nru heat-9.0.0/heat/tests/clients/test_clients.py heat-10.0.0~b1/heat/tests/clients/test_clients.py --- heat-9.0.0/heat/tests/clients/test_clients.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/clients/test_clients.py 2017-10-27 07:35:34.000000000 +0000 @@ -20,6 +20,7 @@ from keystoneauth1 import exceptions as keystone_exc from keystoneauth1.identity import generic from manilaclient import exceptions as manila_exc +from mistralclient.api import base as mistral_base import mock from neutronclient.common import exceptions as neutron_exc from openstack import exceptions @@ -74,7 +75,7 @@ obj._get_client_option.return_value = result self.assertEqual(result, obj.get_heat_url()) - def _client_cfn_url(self): + def _client_cfn_url(self, use_uwsgi=False, use_ipv6=False): con = mock.Mock() c = clients.Clients(con) con.clients = c @@ -82,7 +83,16 @@ obj._get_client_option = mock.Mock() obj._get_client_option.return_value = None obj.url_for = mock.Mock(name="url_for") - obj.url_for.return_value = "http://0.0.0.0:8000/v1/" + if use_ipv6: + if use_uwsgi: + obj.url_for.return_value = "http://[::1]/heat-api-cfn/v1/" + else: + obj.url_for.return_value = 
"http://[::1]:8000/v1/" + else: + if use_uwsgi: + obj.url_for.return_value = "http://0.0.0.0/heat-api-cfn/v1/" + else: + obj.url_for.return_value = "http://0.0.0.0:8000/v1/" return obj def test_clients_get_heat_cfn_url(self): @@ -91,7 +101,23 @@ def test_clients_get_watch_server_url(self): obj = self._client_cfn_url() - self.assertEqual("http://0.0.0.0:8003/v1/", obj.get_watch_server_url()) + self.assertEqual("http://0.0.0.0:8003/v1/", + obj.get_watch_server_url()) + + def test_clients_get_watch_server_url_ipv6(self): + obj = self._client_cfn_url(use_ipv6=True) + self.assertEqual("http://[::1]:8003/v1/", + obj.get_watch_server_url()) + + def test_clients_get_watch_server_url_use_uwsgi_ipv6(self): + obj = self._client_cfn_url(use_uwsgi=True, use_ipv6=True) + self.assertEqual("http://[::1]/heat-api-cloudwatch/v1/", + obj.get_watch_server_url()) + + def test_clients_get_watch_server_url_use_uwsgi(self): + obj = self._client_cfn_url(use_uwsgi=True) + self.assertEqual("http://0.0.0.0/heat-api-cloudwatch/v1/", + obj.get_watch_server_url()) def test_clients_get_heat_cfn_metadata_url(self): obj = self._client_cfn_url() @@ -792,6 +818,22 @@ plugin='manila', exception=lambda: manila_exc.Conflict(), )), + ('mistral_not_found1', dict( + is_not_found=True, + is_over_limit=False, + is_client_exception=False, + is_conflict=False, + plugin='mistral', + exception=lambda: mistral_base.APIException(404), + )), + ('mistral_not_found2', dict( + is_not_found=True, + is_over_limit=False, + is_client_exception=False, + is_conflict=False, + plugin='mistral', + exception=lambda: keystone_exc.NotFound(), + )), ] def test_is_not_found(self): diff -Nru heat-9.0.0/heat/tests/db/test_migrations.py heat-10.0.0~b1/heat/tests/db/test_migrations.py --- heat-9.0.0/heat/tests/db/test_migrations.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/db/test_migrations.py 2017-10-27 07:35:34.000000000 +0000 @@ -26,11 +26,13 @@ import uuid from migrate.versioning import repository -from 
oslo_db.sqlalchemy import test_base +from oslo_db.sqlalchemy import enginefacade +from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils from oslo_serialization import jsonutils from oslo_utils import timeutils +from oslotest import base as test_base import six import sqlalchemy import testtools @@ -738,18 +740,24 @@ 'attr_data_id') -class TestHeatMigrationsMySQL(HeatMigrationsCheckers, - test_base.MySQLOpportunisticTestCase): - pass +class DbTestCase(test_fixtures.OpportunisticDBTestMixin, + test_base.BaseTestCase): + def setUp(self): + super(DbTestCase, self).setUp() + self.engine = enginefacade.writer.get_engine() + self.sessionmaker = enginefacade.writer.get_sessionmaker() -class TestHeatMigrationsPostgreSQL(HeatMigrationsCheckers, - test_base.PostgreSQLOpportunisticTestCase): - pass + +class TestHeatMigrationsMySQL(DbTestCase, HeatMigrationsCheckers): + FIXTURE = test_fixtures.MySQLOpportunisticFixture -class TestHeatMigrationsSQLite(HeatMigrationsCheckers, - test_base.DbTestCase): +class TestHeatMigrationsPostgreSQL(DbTestCase, HeatMigrationsCheckers): + FIXTURE = test_fixtures.PostgresqlOpportunisticFixture + + +class TestHeatMigrationsSQLite(DbTestCase, HeatMigrationsCheckers): pass @@ -770,19 +778,19 @@ return True -class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.MySQLOpportunisticTestCase): - pass +class ModelsMigrationsSyncMysql(DbTestCase, + ModelsMigrationSyncMixin, + test_migrations.ModelsMigrationsSync): + FIXTURE = test_fixtures.MySQLOpportunisticFixture -class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.PostgreSQLOpportunisticTestCase): - pass +class ModelsMigrationsSyncPostgres(DbTestCase, + ModelsMigrationSyncMixin, + test_migrations.ModelsMigrationsSync): + FIXTURE = test_fixtures.PostgresqlOpportunisticFixture -class 
ModelsMigrationsSyncSQLite(ModelsMigrationSyncMixin, - test_migrations.ModelsMigrationsSync, - test_base.DbTestCase): +class ModelsMigrationsSyncSQLite(DbTestCase, + ModelsMigrationSyncMixin, + test_migrations.ModelsMigrationsSync): pass diff -Nru heat-9.0.0/heat/tests/engine/service/test_service_engine.py heat-10.0.0~b1/heat/tests/engine/service/test_service_engine.py --- heat-9.0.0/heat/tests/engine/service/test_service_engine.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_service_engine.py 2017-10-27 07:35:34.000000000 +0000 @@ -15,7 +15,6 @@ import mock from oslo_config import cfg -from oslo_service import threadgroup from oslo_utils import timeutils from heat.common import context @@ -309,6 +308,10 @@ self.eng.thread_group_mgr.groups['sample-uuid2'] = dtg2 self.eng.service_id = 'sample-service-uuid' + self.patchobject(self.eng.manage_thread_grp, 'stop', + new=mock.Mock(wraps=self.eng.manage_thread_grp.stop)) + self.patchobject(self.eng, '_stop_rpc_server', + new=mock.Mock(wraps=self.eng._stop_rpc_server)) orig_stop = self.eng.thread_group_mgr.stop with mock.patch.object(self.eng.thread_group_mgr, 'stop') as stop: @@ -328,7 +331,7 @@ mock.call('sample-uuid2', True)] self.eng.thread_group_mgr.stop.assert_has_calls(calls, True) - # # Manage Thread group + # Manage Thread group self.eng.manage_thread_grp.stop.assert_called_with() # Service delete @@ -339,12 +342,8 @@ self.eng.service_id ) - @mock.patch.object(service.EngineService, - '_stop_rpc_server') @mock.patch.object(worker.WorkerService, 'stop') - @mock.patch.object(threadgroup.ThreadGroup, - 'stop') @mock.patch('heat.common.context.get_admin_context', return_value=mock.Mock()) @mock.patch('heat.objects.service.Service.delete', @@ -353,17 +352,13 @@ self, service_delete_method, admin_context_method, - thread_group_stop, - worker_service_stop, - rpc_server_stop): + worker_service_stop): cfg.CONF.set_default('convergence_engine', True) self._test_engine_service_stop( 
service_delete_method, admin_context_method ) - @mock.patch.object(service.EngineService, '_stop_rpc_server') - @mock.patch.object(threadgroup.ThreadGroup, 'stop') @mock.patch('heat.common.context.get_admin_context', return_value=mock.Mock()) @mock.patch('heat.objects.service.Service.delete', @@ -371,9 +366,7 @@ def test_engine_service_stop_in_non_convergence_mode( self, service_delete_method, - admin_context_method, - thread_group_stop, - rpc_server_stop): + admin_context_method): cfg.CONF.set_default('convergence_engine', False) self._test_engine_service_stop( service_delete_method, @@ -385,8 +378,6 @@ self.eng.reset() setup_logging_mock.assert_called_once_with(cfg.CONF, 'heat') - @mock.patch('oslo_messaging.Target', - return_value=mock.Mock()) @mock.patch('heat.common.messaging.get_rpc_client', return_value=mock.Mock()) @mock.patch('heat.common.service_utils.generate_engine_id', @@ -406,8 +397,8 @@ engine_listener_class, thread_group_manager_class, sample_uuid_method, - rpc_client_class, - target_class): + rpc_client_class): + self.addCleanup(self.eng._stop_rpc_server) self.eng.start() self.assertEqual(cfg.CONF.executor_thread_pool_size, cfg.CONF.database.max_overflow) diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_action.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_action.py --- heat-9.0.0/heat/tests/engine/service/test_stack_action.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_action.py 2017-10-27 07:35:34.000000000 +0000 @@ -30,7 +30,7 @@ super(StackServiceActionsTest, self).setUp() self.ctx = utils.dummy_context() self.man = service.EngineService('a-host', 'a-topic') - self.man.create_periodic_tasks() + self.man.thread_group_mgr = service.ThreadGroupManager() @mock.patch.object(stack.Stack, 'load') @mock.patch.object(service.ThreadGroupManager, 'start') diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_adopt.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_adopt.py --- 
heat-9.0.0/heat/tests/engine/service/test_stack_adopt.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_adopt.py 2017-10-27 07:35:34.000000000 +0000 @@ -21,7 +21,6 @@ from heat.engine import stack as parser from heat.objects import stack as stack_object from heat.tests import common -from heat.tests.engine import tools from heat.tests import utils @@ -31,7 +30,7 @@ super(StackServiceAdoptTest, self).setUp() self.ctx = utils.dummy_context() self.man = service.EngineService('a-host', 'a-topic') - self.man.thread_group_mgr = tools.DummyThreadGroupManager() + self.man.thread_group_mgr = service.ThreadGroupManager() def _get_adopt_data_and_template(self, environment=None): template = { @@ -55,14 +54,20 @@ "metadata": {}}}} return template, adopt_data + def _do_adopt(self, stack_name, template, input_params, adopt_data): + result = self.man.create_stack(self.ctx, stack_name, + template, input_params, None, + {'adopt_stack_data': str(adopt_data)}) + self.man.thread_group_mgr.stop(result['stack_id'], graceful=True) + return result + def test_stack_adopt_with_params(self): cfg.CONF.set_override('enable_stack_adopt', True) cfg.CONF.set_override('convergence_engine', False) env = {'parameters': {"app_dbx": "test"}} template, adopt_data = self._get_adopt_data_and_template(env) - result = self.man.create_stack(self.ctx, "test_adopt_with_params", - template, {}, None, - {'adopt_stack_data': str(adopt_data)}) + result = self._do_adopt("test_adopt_with_params", template, {}, + adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) self.assertEqual(template, stack.raw_template.template) @@ -78,9 +83,8 @@ cfg.CONF.set_override('convergence_engine', True) env = {'parameters': {"app_dbx": "test"}} template, adopt_data = self._get_adopt_data_and_template(env) - result = self.man.create_stack(self.ctx, "test_adopt_with_params", - template, {}, None, - {'adopt_stack_data': str(adopt_data)}) + result = 
self._do_adopt("test_adopt_with_params", template, {}, + adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) self.assertEqual(template, stack.raw_template.template) @@ -96,9 +100,8 @@ "parameters": {"app_dbx": "bar"} } template, adopt_data = self._get_adopt_data_and_template(env) - result = self.man.create_stack(self.ctx, "test_adopt_saves_inputs", - template, input_params, None, - {'adopt_stack_data': str(adopt_data)}) + result = self._do_adopt("test_adopt_saves_inputs", template, + input_params, adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) self.assertEqual(template, stack.raw_template.template) @@ -116,9 +119,8 @@ "parameters": {"app_dbx": "bar"} } template, adopt_data = self._get_adopt_data_and_template(env) - result = self.man.create_stack(self.ctx, "test_adopt_saves_inputs", - template, input_params, None, - {'adopt_stack_data': str(adopt_data)}) + result = self._do_adopt("test_adopt_saves_inputs", template, + input_params, adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) self.assertEqual(template, stack.raw_template.template) @@ -131,12 +133,11 @@ cfg.CONF.set_override('convergence_engine', False) env = {'parameters': {"app_dbx": "test"}} template, adopt_data = self._get_adopt_data_and_template(env) - result = self.man.create_stack(self.ctx, "test_adopt_stack_state", - template, {}, None, - {'adopt_stack_data': str(adopt_data)}) + result = self._do_adopt("test_adopt_stack_state", template, {}, + adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) - self.assertEqual((parser.Stack.ADOPT, parser.Stack.IN_PROGRESS), + self.assertEqual((parser.Stack.ADOPT, parser.Stack.COMPLETE), (stack.action, stack.status)) @mock.patch.object(parser.Stack, '_converge_create_or_update') @@ -147,9 +148,8 @@ cfg.CONF.set_override('convergence_engine', True) env = {'parameters': {"app_dbx": "test"}} template, adopt_data = self._get_adopt_data_and_template(env) - 
result = self.man.create_stack(self.ctx, "test_adopt_stack_state", - template, {}, None, - {'adopt_stack_data': str(adopt_data)}) + result = self._do_adopt("test_adopt_stack_state", template, {}, + adopt_data) stack = stack_object.Stack.get_by_id(self.ctx, result['stack_id']) self.assertEqual((parser.Stack.ADOPT, parser.Stack.IN_PROGRESS), diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_create.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_create.py --- heat-9.0.0/heat/tests/engine/service/test_stack_create.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_create.py 2017-10-27 07:35:34.000000000 +0000 @@ -38,7 +38,7 @@ super(StackCreateTest, self).setUp() self.ctx = utils.dummy_context() self.man = service.EngineService('a-host', 'a-topic') - self.man.create_periodic_tasks() + self.man.thread_group_mgr = service.ThreadGroupManager() @mock.patch.object(threadgroup, 'ThreadGroup') @mock.patch.object(stack.Stack, 'validate') diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_delete.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_delete.py --- heat-9.0.0/heat/tests/engine/service/test_stack_delete.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_delete.py 2017-10-27 07:35:34.000000000 +0000 @@ -33,7 +33,7 @@ super(StackDeleteTest, self).setUp() self.ctx = utils.dummy_context() self.man = service.EngineService('a-host', 'a-topic') - self.man.create_periodic_tasks() + self.man.thread_group_mgr = service.ThreadGroupManager() @mock.patch.object(parser.Stack, 'load') def test_stack_delete(self, mock_load): @@ -104,7 +104,7 @@ mock_acquire, mock_try, mock_load): cfg.CONF.set_override('error_wait_time', 0) - self.man.start() + self.man.engine_id = service_utils.generate_engine_id() stack_name = 'service_delete_test_stack_current_active_lock' stack = tools.get_stack(stack_name, self.ctx) sid = stack.store() @@ -117,16 +117,20 @@ mock_load.return_value = 
stack mock_try.return_value = self.man.engine_id - mock_stop = self.patchobject(self.man.thread_group_mgr, 'stop') mock_send = self.patchobject(self.man.thread_group_mgr, 'send') mock_expired.side_effect = [False, True] - self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier())) - self.man.thread_group_mgr.groups[sid].wait() + with mock.patch.object(self.man.thread_group_mgr, 'stop') as mock_stop: + self.assertIsNone(self.man.delete_stack(self.ctx, + stack.identifier())) + self.man.thread_group_mgr.groups[sid].wait() + + mock_load.assert_called_with(self.ctx, stack=st) + mock_send.assert_called_once_with(stack.id, 'cancel') + mock_stop.assert_called_once_with(stack.id) + + self.man.thread_group_mgr.stop(sid, graceful=True) - mock_load.assert_called_with(self.ctx, stack=st) - mock_send.assert_called_once_with(stack.id, 'cancel') - mock_stop.assert_called_once_with(stack.id) self.assertEqual(2, len(mock_load.mock_calls)) mock_try.assert_called_with() mock_acquire.assert_called_once_with(True) @@ -140,7 +144,10 @@ mock_load): cfg.CONF.set_override('error_wait_time', 0) OTHER_ENGINE = "other-engine-fake-uuid" - self.man.start() + self.man.engine_id = service_utils.generate_engine_id() + self.man.listener = service.EngineListener(self.man.host, + self.man.engine_id, + self.man.thread_group_mgr) stack_name = 'service_delete_test_stack_other_engine_lock_fail' stack = tools.get_stack(stack_name, self.ctx) sid = stack.store() @@ -180,7 +187,10 @@ cfg.CONF.set_override('error_wait_time', 0) OTHER_ENGINE = "other-engine-fake-uuid" - self.man.start() + self.man.engine_id = service_utils.generate_engine_id() + self.man.listener = service.EngineListener(self.man.host, + self.man.engine_id, + self.man.thread_group_mgr) stack_name = 'service_delete_test_stack_other_engine_lock' stack = tools.get_stack(stack_name, self.ctx) sid = stack.store() @@ -197,7 +207,7 @@ return_value=None) self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier())) - 
self.man.thread_group_mgr.groups[sid].wait() + self.man.thread_group_mgr.stop(sid, graceful=True) self.assertEqual(2, len(mock_load.mock_calls)) mock_load.assert_called_with(self.ctx, stack=st) @@ -236,7 +246,7 @@ mock_expired.side_effect = [False, True] self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier())) - self.man.thread_group_mgr.groups[sid].wait() + self.man.thread_group_mgr.stop(sid, graceful=True) mock_load.assert_called_with(self.ctx, stack=st) mock_try.assert_called_with() diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_events.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_events.py --- heat-9.0.0/heat/tests/engine/service/test_stack_events.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_events.py 2017-10-27 07:35:34.000000000 +0000 @@ -32,7 +32,7 @@ self.ctx = utils.dummy_context(tenant_id='stack_event_test_tenant') self.eng = service.EngineService('a-host', 'a-topic') - self.eng.create_periodic_tasks() + self.eng.thread_group_mgr = service.ThreadGroupManager() @tools.stack_context('service_event_list_test_stack') @mock.patch.object(service.EngineService, '_get_stack') diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_resources.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_resources.py --- heat-9.0.0/heat/tests/engine/service/test_stack_resources.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_resources.py 2017-10-27 07:35:34.000000000 +0000 @@ -70,8 +70,8 @@ def _test_describe_stack_resource(self, mock_load): mock_load.return_value = self.stack - # Patch _resolve_all_attributes or it tries to call novaclient - self.patchobject(res.Resource, '_resolve_all_attributes', + # Patch _resolve_any_attribute or it tries to call novaclient + self.patchobject(res.Resource, '_resolve_any_attribute', return_value=None) r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(), diff -Nru 
heat-9.0.0/heat/tests/engine/service/test_stack_snapshot.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_snapshot.py --- heat-9.0.0/heat/tests/engine/service/test_stack_snapshot.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_snapshot.py 2017-10-27 07:35:34.000000000 +0000 @@ -34,7 +34,7 @@ self.ctx = utils.dummy_context() self.engine = service.EngineService('a-host', 'a-topic') - self.engine.create_periodic_tasks() + self.engine.thread_group_mgr = service.ThreadGroupManager() def _create_stack(self, stack_name, files=None): t = template_format.parse(tools.wp_template) diff -Nru heat-9.0.0/heat/tests/engine/service/test_stack_update.py heat-10.0.0~b1/heat/tests/engine/service/test_stack_update.py --- heat-9.0.0/heat/tests/engine/service/test_stack_update.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/service/test_stack_update.py 2017-10-27 07:35:34.000000000 +0000 @@ -1074,8 +1074,8 @@ mock_validate = self.patchobject(stk, 'validate', return_value=None) mock_merge = self.patchobject(env_util, 'merge_environments') - # Patch _resolve_all_attributes or it tries to call novaclient - self.patchobject(resource.Resource, '_resolve_all_attributes', + # Patch _resolve_any_attribute or it tries to call novaclient + self.patchobject(resource.Resource, '_resolve_any_attribute', return_value=None) # do preview_update_stack diff -Nru heat-9.0.0/heat/tests/engine/test_check_resource.py heat-10.0.0~b1/heat/tests/engine/test_check_resource.py --- heat-9.0.0/heat/tests/engine/test_check_resource.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/test_check_resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -476,15 +476,15 @@ mock_hf.assert_called_once_with(self.ctx, self.stack, u'Timed out') @mock.patch.object(check_resource.CheckResource, - '_handle_stack_timeout') + '_handle_failure') def test_do_check_resource_marks_stack_as_failed_if_stack_timesout( - self, mock_hst, 
mock_cru, mock_crc, mock_pcr, mock_csc): + self, mock_hf, mock_cru, mock_crc, mock_pcr, mock_csc): mock_cru.side_effect = scheduler.Timeout(None, 60) self.is_update = True self.cr._do_check_resource(self.ctx, self.stack.current_traversal, self.stack.t, {}, self.is_update, self.resource, self.stack, {}) - mock_hst.assert_called_once_with(self.ctx, self.stack) + mock_hf.assert_called_once_with(self.ctx, self.stack, u'Timed out') @mock.patch.object(check_resource.CheckResource, '_handle_stack_timeout') diff -Nru heat-9.0.0/heat/tests/engine/test_resource_type.py heat-10.0.0~b1/heat/tests/engine/test_resource_type.py --- heat-9.0.0/heat/tests/engine/test_resource_type.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/test_resource_type.py 2017-10-27 07:35:34.000000000 +0000 @@ -43,9 +43,7 @@ mock_is_service_available): mock_is_service_available.return_value = (True, None) resources = self.eng.list_resource_types(self.ctx, "DEPRECATED") - self.assertEqual(set(['OS::Designate::Domain', - 'OS::Designate::Record', - 'OS::Heat::HARestarter', + self.assertEqual(set(['OS::Heat::HARestarter', 'OS::Magnum::Bay', 'OS::Magnum::BayModel', 'OS::Glance::Image', @@ -77,12 +75,6 @@ resources = self.eng.list_resource_types(self.ctx, with_description=True) self.assertIsInstance(resources, list) - description = ("Heat Template Resource for Designate Domain.\n\n" - "Designate provides DNS-as-a-Service services for " - "OpenStack. 
So, domain\nis a realm with an " - "identification string, unique in DNS.") - self.assertIn({'resource_type': 'OS::Designate::Domain', - 'description': description}, resources) self.assertIn({'resource_type': 'AWS::RDS::DBInstance', 'description': 'Builtin AWS::RDS::DBInstance'}, resources) diff -Nru heat-9.0.0/heat/tests/engine/test_scheduler.py heat-10.0.0~b1/heat/tests/engine/test_scheduler.py --- heat-9.0.0/heat/tests/engine/test_scheduler.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/engine/test_scheduler.py 2017-10-27 07:35:34.000000000 +0000 @@ -1283,10 +1283,8 @@ eventlet.sleep(0.01) later = scheduler.Timeout(task, 10) - self.assertLess(earlier, later) - self.assertGreater(later, earlier) - self.assertEqual(earlier, earlier) - self.assertNotEqual(earlier, later) + self.assertTrue(earlier.earlier_than(later)) + self.assertFalse(later.earlier_than(earlier)) class DescriptionTest(common.HeatTestCase): diff -Nru heat-9.0.0/heat/tests/generic_resource.py heat-10.0.0~b1/heat/tests/generic_resource.py --- heat-9.0.0/heat/tests/generic_resource.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/generic_resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -382,6 +382,9 @@ props['a_string'] = value return defn.freeze(properties=props) + def handle_delete_snapshot(self, snapshot): + return snapshot['resource_data'].get('a_string') + class ResourceTypeUnSupportedLiberty(GenericResource): support_status = support.SupportStatus( diff -Nru heat-9.0.0/heat/tests/openstack/cinder/test_volume.py heat-10.0.0~b1/heat/tests/openstack/cinder/test_volume.py --- heat-9.0.0/heat/tests/openstack/cinder/test_volume.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/cinder/test_volume.py 2017-10-27 07:35:34.000000000 +0000 @@ -1318,7 +1318,7 @@ 'availability_zone': 'nova', 'snapshot_id': None, 'size': 1, - 'metadata': {'test': 'test_value', 'read_only': False}, + 'metadata': {'test': 'test_value', 'readonly': False}, 
'consistencygroup_id': '4444', 'volume_image_metadata': {'image_id': '1234', 'image_name': 'test'}, diff -Nru heat-9.0.0/heat/tests/openstack/heat/test_resource_group.py heat-10.0.0~b1/heat/tests/openstack/heat/test_resource_group.py --- heat-9.0.0/heat/tests/openstack/heat/test_resource_group.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/heat/test_resource_group.py 2017-10-27 07:35:34.000000000 +0000 @@ -865,8 +865,10 @@ resg = self._create_dummy_stack() self.assertEqual("ID-0", resg.FnGetAtt('resource.0')) self.assertEqual("ID-1", resg.FnGetAtt('resource.1')) - self.assertRaises(exception.InvalidTemplateAttribute, resg.FnGetAtt, - 'resource.2') + ex = self.assertRaises(exception.NotFound, resg.FnGetAtt, + 'resource.2') + self.assertIn("Member '2' not found in group resource 'group1'.", + six.text_type(ex)) @mock.patch.object(grouputils, 'get_rsrc_id') def test_get_attribute(self, mock_get_rsrc_id): diff -Nru heat-9.0.0/heat/tests/openstack/keystone/test_project.py heat-10.0.0~b1/heat/tests/openstack/keystone/test_project.py --- heat-9.0.0/heat/tests/openstack/keystone/test_project.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/keystone/test_project.py 2017-10-27 07:35:34.000000000 +0000 @@ -73,7 +73,11 @@ value = mock.MagicMock() project_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' value.id = project_id - + value.name = 'test_project_1' + value.domain_id = 'default' + value.enabled = True + value.parent_id = 'my_father' + value.is_domain = False return value def test_project_handle_create(self): @@ -377,3 +381,24 @@ self.assertEqual(set(expected.keys()), set(reality.keys())) for key in expected: self.assertEqual(expected[key], reality[key]) + + def test_resolve_attributes(self): + mock_project = self._get_mock_project() + self.test_project.resource_id = mock_project['id'] + self.projects.get.return_value = mock_project + self.assertEqual( + 'test_project_1', + self.test_project._resolve_attribute( + 
project.KeystoneProject.NAME_ATTR)) + self.assertEqual( + 'my_father', + self.test_project._resolve_attribute( + project.KeystoneProject.PARENT_ATTR)) + self.assertEqual( + 'default', + self.test_project._resolve_attribute( + project.KeystoneProject.DOMAIN_ATTR)) + self.assertTrue(self.test_project._resolve_attribute( + project.KeystoneProject.ENABLED_ATTR)) + self.assertFalse(self.test_project._resolve_attribute( + project.KeystoneProject.IS_DOMAIN_ATTR)) diff -Nru heat-9.0.0/heat/tests/openstack/keystone/test_role.py heat-10.0.0~b1/heat/tests/openstack/keystone/test_role.py --- heat-9.0.0/heat/tests/openstack/keystone/test_role.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/keystone/test_role.py 2017-10-27 07:35:34.000000000 +0000 @@ -11,6 +11,7 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import mock from heat.engine import resource @@ -41,14 +42,6 @@ super(KeystoneRoleTest, self).setUp() self.ctx = utils.dummy_context() - - self.stack = stack.Stack( - self.ctx, 'test_stack_keystone', - template.Template(keystone_role_template) - ) - - self.test_role = self.stack['test_role'] - # Mock client self.keystoneclient = mock.Mock() self.patchobject(resource.Resource, 'client', @@ -56,53 +49,75 @@ client=self.keystoneclient)) self.roles = self.keystoneclient.roles - def _get_mock_role(self): + def _get_rsrc(self, domain='default', without_name=False): + t = template.Template(keystone_role_template) + tmpl = copy.deepcopy(t) + tmpl['resources']['test_role']['Properties']['domain'] = domain + if without_name: + tmpl['resources']['test_role']['Properties'].pop('name') + test_stack = stack.Stack(self.ctx, 'test_keystone_role', tmpl) + test_role = test_stack['test_role'] + return test_role + + def _get_mock_role(self, domain='default'): value = mock.MagicMock() role_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' + domain_id = domain value.id = role_id - + value.domain_id = 
domain_id return value - def test_role_handle_create(self): - mock_role = self._get_mock_role() + def _test_handle_create(self, domain='default'): + test_role = self._get_rsrc(domain) + mock_role = self._get_mock_role(domain) self.roles.create.return_value = mock_role # validate the properties self.assertEqual('test_role_1', - self.test_role.properties.get(role.KeystoneRole.NAME)) + test_role.properties.get(role.KeystoneRole.NAME)) + self.assertEqual(domain, + test_role.properties.get(role.KeystoneRole.DOMAIN)) - self.test_role.handle_create() + test_role.handle_create() # validate role creation with given name - self.roles.create.assert_called_once_with(name='test_role_1') + self.roles.create.assert_called_once_with(name='test_role_1', + domain=domain) # validate physical resource id - self.assertEqual(mock_role.id, self.test_role.resource_id) + self.assertEqual(mock_role.id, test_role.resource_id) + + def test_role_handle_create(self): + self._test_handle_create() + + def test_role_handle_create_with_domain(self): + self._test_handle_create(domain='d_test') def test_role_handle_create_default_name(self): # reset the NAME value to None, to make sure role is # created with physical_resource_name - self.test_role.properties = mock.MagicMock() - self.test_role.properties.__getitem__.return_value = None - - self.test_role.handle_create() + test_role = self._get_rsrc(without_name=True) + test_role.physical_resource_name = mock.Mock( + return_value='phy_role_name') + test_role.handle_create() # validate role creation with default name - physical_resource_name = self.test_role.physical_resource_name() - self.roles.create.assert_called_once_with(name=physical_resource_name) + self.roles.create.assert_called_once_with(name='phy_role_name', + domain='default') def test_role_handle_update(self): - self.test_role.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' + test_role = self._get_rsrc() + test_role.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151' # update the 
name property prop_diff = {role.KeystoneRole.NAME: 'test_role_1_updated'} - self.test_role.handle_update(json_snippet=None, - tmpl_diff=None, - prop_diff=prop_diff) + test_role.handle_update(json_snippet=None, + tmpl_diff=None, + prop_diff=prop_diff) self.roles.update.assert_called_once_with( - role=self.test_role.resource_id, + role=test_role.resource_id, name=prop_diff[role.KeystoneRole.NAME] ) @@ -110,5 +125,6 @@ role = mock.Mock() role.to_dict.return_value = {'attr': 'val'} self.roles.get.return_value = role - res = self.test_role._show_resource() + test_role = self._get_rsrc() + res = test_role._show_resource() self.assertEqual({'attr': 'val'}, res) diff -Nru heat-9.0.0/heat/tests/openstack/mistral/test_workflow.py heat-10.0.0~b1/heat/tests/openstack/mistral/test_workflow.py --- heat-9.0.0/heat/tests/openstack/mistral/test_workflow.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/mistral/test_workflow.py 2017-10-27 07:35:34.000000000 +0000 @@ -44,7 +44,21 @@ publish: result: <% $.hello %> """ - +workflow_template_with_tags = """ +heat_template_version: queens +resources: + workflow: + type: OS::Mistral::Workflow + properties: + type: direct + tags: + - tagged + tasks: + - name: hello + action: std.echo output='Good morning!' 
+ publish: + result: <% $.hello %> +""" workflow_template_with_params = """ heat_template_version: 2013-05-23 resources: @@ -743,6 +757,22 @@ 'params': {'test': 'param_value', 'test1': 'param_value_1'}} execution = mock.Mock() execution.id = '12345' + self.mistral.executions.create.side_effect = ( + lambda *args, **kw: self.verify_params(*args, **kw)) + scheduler.TaskRunner(wf.signal, details)() + + def test_workflow_tags(self): + tmpl = template_format.parse(workflow_template_with_tags) + stack = utils.parse_stack(tmpl) + rsrc_defns = stack.t.resource_definitions(stack)['workflow'] + wf = workflow.Workflow('workflow', rsrc_defns, stack) + self.mistral.workflows.create.return_value = [ + FakeWorkflow('workflow')] + scheduler.TaskRunner(wf.create)() + details = {'tags': ['mytag'], + 'params': {'test': 'param_value', 'test1': 'param_value_1'}} + execution = mock.Mock() + execution.id = '12345' self.mistral.executions.create.side_effect = ( lambda *args, **kw: self.verify_params(*args, **kw)) scheduler.TaskRunner(wf.signal, details)() diff -Nru heat-9.0.0/heat/tests/openstack/neutron/test_neutron_floating_ip.py heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron_floating_ip.py --- heat-9.0.0/heat/tests/openstack/neutron/test_neutron_floating_ip.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron_floating_ip.py 2017-10-27 07:35:34.000000000 +0000 @@ -328,7 +328,11 @@ {'subnet_id': u'sub1234', 'ip_address': u'10.0.0.10'} ], 'name': utils.PhysName(stack.name, 'port_floating'), - 'admin_state_up': True}} + 'admin_state_up': True, + 'device_owner': '', + 'device_id': '', + 'binding:vnic_type': 'normal' + }} ).AndReturn({'port': { "status": "BUILD", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766" @@ -653,7 +657,10 @@ {'subnet_id': u'sub1234', 'ip_address': u'10.0.0.10'} ], 'name': utils.PhysName(stack.name, 'port_floating'), - 'admin_state_up': True}} + 'admin_state_up': True, + 'binding:vnic_type': 'normal', + 
'device_owner': '', + 'device_id': ''}} ).AndReturn({'port': { "status": "BUILD", "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766" diff -Nru heat-9.0.0/heat/tests/openstack/neutron/test_neutron_port.py heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron_port.py --- heat-9.0.0/heat/tests/openstack/neutron/test_neutron_port.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron_port.py 2017-10-27 07:35:34.000000000 +0000 @@ -114,7 +114,10 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'}}) + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + }}) self.port_show_mock.assert_called_once_with( 'fc68ea2c-b60b-4b4f-bd82-94ec81110766') @@ -140,7 +143,10 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'}}) + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + }}) self.port_show_mock.assert_called_once_with( 'fc68ea2c-b60b-4b4f-bd82-94ec81110766') @@ -168,7 +174,10 @@ 'network_id': u'net1234', 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'}}) + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + }}) def test_allowed_address_pair(self): t = template_format.parse(neutron_port_with_address_pair_template) @@ -193,7 +202,11 @@ 'mac_address': u'00-B0-D0-86-BB-F7' }], 'name': utils.PhysName(stack.name, 'port'), - 'admin_state_up': True}}) + 'admin_state_up': True, + 'binding:vnic_type': 'normal', + 'device_id': '', + 'device_owner': '' + }}) def test_port_security_enabled(self): t = template_format.parse(neutron_port_security_template) @@ -217,7 +230,11 @@ 'network_id': u'abcd1234', 'port_security_enabled': False, 'name': utils.PhysName(stack.name, 'port'), - 'admin_state_up': True}}) + 'admin_state_up': True, + 'binding:vnic_type': 'normal', + 'device_id': '', + 
'device_owner': '' + }}) def test_missing_mac_address(self): t = template_format.parse(neutron_port_with_address_pair_template) @@ -244,7 +261,10 @@ 'ip_address': u'10.0.3.21', }], 'name': utils.PhysName(stack.name, 'port'), - 'admin_state_up': True}}) + 'admin_state_up': True, + 'binding:vnic_type': 'normal', + 'device_owner': '', + 'device_id': ''}}) def test_ip_address_is_cidr(self): t = template_format.parse(neutron_port_with_address_pair_template) @@ -271,7 +291,11 @@ 'mac_address': u'00-B0-D0-86-BB-F7' }], 'name': utils.PhysName(stack.name, 'port'), - 'admin_state_up': True}}) + 'admin_state_up': True, + 'binding:vnic_type': 'normal', + 'device_owner': '', + 'device_id': '' + }}) def _mock_create_with_props(self): self.find_mock.return_value = 'net_or_sub' @@ -300,7 +324,10 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'} + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + } set_tag_mock = self.patchobject(neutronclient.Client, 'replace_tag') self._mock_create_with_props() @@ -328,7 +355,10 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'} + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + } self._mock_create_with_props() @@ -350,7 +380,10 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'} + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + } self._mock_create_with_props() port = stack['port'] @@ -373,7 +406,9 @@ ], 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp' + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' } self._mock_create_with_props() @@ -412,7 +447,9 @@ props = {'network_id': u'net1234', 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': 
u'network:dhcp'} + 'device_owner': u'network:dhcp', + 'device_id': '', + 'binding:vnic_type': 'normal'} self.find_mock.return_value = 'net1234' self.create_mock.return_value = {'port': { @@ -456,7 +493,10 @@ net2 = '0064eec9-5681-4ba7-a745-6f8e32db9503' props = {'network_id': net1, 'name': 'test_port', - 'device_owner': u'network:dhcp'} + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + } create_kwargs = props.copy() create_kwargs['admin_state_up'] = True @@ -555,7 +595,10 @@ 'network_id': u'net1234', 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'}}) + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': '' + }}) self.assertEqual('DOWN', port.FnGetAtt('status')) self.assertEqual([], port.FnGetAtt('allowed_address_pairs')) self.assertTrue(port.FnGetAtt('admin_state_up')) @@ -613,7 +656,9 @@ 'network_id': u'net1234', 'name': utils.PhysName(stack.name, 'port'), 'admin_state_up': True, - 'device_owner': u'network:dhcp'}} + 'device_owner': u'network:dhcp', + 'binding:vnic_type': 'normal', + 'device_id': ''}} ) def test_prepare_for_replace_port_not_created(self): @@ -867,7 +912,6 @@ def test_update_port(self): t = template_format.parse(neutron_port_template) - t['resources']['port']['properties'].pop('fixed_ips') stack = utils.parse_stack(t) self.patchobject(neutronV20, 'find_resourceid_by_name_or_id', @@ -940,6 +984,10 @@ self.assertIsNotNone( port.update_template_diff_properties(after_props, before_props)) + # With fixed_ips removed + scheduler.TaskRunner(port.handle_update, update_snippet, + {}, {'fixed_ips': None})() + # update with empty prop_diff scheduler.TaskRunner(port.handle_update, update_snippet, {}, {})() self.assertEqual(1, update_port.call_count) diff -Nru heat-9.0.0/heat/tests/openstack/neutron/test_neutron.py heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron.py --- heat-9.0.0/heat/tests/openstack/neutron/test_neutron.py 
2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/neutron/test_neutron.py 2017-10-27 07:35:34.000000000 +0000 @@ -98,7 +98,7 @@ {'attr2': attributes.Schema(type=attributes.Schema.STRING)}) res.attributes = attributes.Attributes(res.name, res.attributes_schema, - res._resolve_all_attributes) + res._resolve_any_attribute) side_effect = [{'attr1': 'val1', 'attr2': 'val2'}, {'attr1': 'val1', 'attr2': 'val2'}, {'attr1': 'val1', 'attr2': 'val2'}, @@ -108,8 +108,8 @@ self.assertEqual({'attr1': 'val1', 'attr2': 'val2'}, res.FnGetAtt('show')) self.assertEqual('val2', res.attributes['attr2']) - self.assertRaises(KeyError, res._resolve_all_attributes, 'attr3') - self.assertIsNone(res._resolve_all_attributes('attr1')) + self.assertRaises(KeyError, res._resolve_any_attribute, 'attr3') + self.assertIsNone(res._resolve_any_attribute('attr1')) res.resource_id = None # use local cached object for non-show attribute diff -Nru heat-9.0.0/heat/tests/openstack/nova/test_flavor.py heat-10.0.0~b1/heat/tests/openstack/nova/test_flavor.py --- heat-9.0.0/heat/tests/openstack/nova/test_flavor.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/nova/test_flavor.py 2017-10-27 07:35:34.000000000 +0000 @@ -118,7 +118,6 @@ flavor_id = '927202df-1afb-497f-8368-9c2d2f26e5db' value.id = flavor_id value.is_public = False - self.my_flavor.IS_PUBLIC = False self.flavors.create.return_value = value self.flavors.get.return_value = value self.my_flavor.handle_create() @@ -143,12 +142,11 @@ value.set_keys.assert_called_once_with(new_keys) def test_flavor_handle_update_add_tenants(self): - self.create_flavor() + self.create_flavor(is_public=False) value = mock.MagicMock() new_tenants = ["new_foo", "new_bar"] prop_diff = {'tenants': new_tenants} - self.my_flavor.IS_PUBLIC = False self.flavors.get.return_value = value self.my_flavor.handle_update(json_snippet=None, @@ -165,7 +163,6 @@ new_tenants = [] prop_diff = {'tenants': new_tenants} - 
self.my_flavor.IS_PUBLIC = False self.flavors.get.return_value = value itemFoo = mock.MagicMock() diff -Nru heat-9.0.0/heat/tests/openstack/nova/test_server.py heat-10.0.0~b1/heat/tests/openstack/nova/test_server.py --- heat-9.0.0/heat/tests/openstack/nova/test_server.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/nova/test_server.py 2017-10-27 07:35:34.000000000 +0000 @@ -786,14 +786,14 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_server_create_software_config(self, fake_url): - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip:8000/v1' server = self._server_create_software_config() self.assertEqual({ 'os-collect-config': { 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip:8000/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': '8901', 'stack_name': 'software_config_s' @@ -805,13 +805,13 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_resolve_attribute_os_collect_config(self, fake_url): - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip/heat-api-cfn/v1' server = self._server_create_software_config() self.assertEqual({ 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip/heat-api-cfn/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': '8901', 'stack_name': 'software_config_s' @@ -822,14 +822,14 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_server_create_software_config_metadata(self, fake_url): md = {'os-collect-config': {'polling_interval': 10}} - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip/heat-api-cfn/v1' server = self._server_create_software_config(md=md) self.assertEqual({ 'os-collect-config': { 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip/heat-api-cfn/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': 
'8901', 'stack_name': 'software_config_s' @@ -1650,7 +1650,7 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_server_update_metadata_software_config(self, fake_url): - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip:8000/v1' server, ud_tmpl = self._server_create_software_config( stack_name='update_meta_sc', ret_tmpl=True) @@ -1658,7 +1658,7 @@ 'os-collect-config': { 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip:8000/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': '8901', 'stack_name': 'update_meta_sc' @@ -1678,7 +1678,7 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_server_update_metadata_software_config_merge(self, fake_url): md = {'os-collect-config': {'polling_interval': 10}} - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip/heat-api-cfn/v1' server, ud_tmpl = self._server_create_software_config( stack_name='update_meta_sc', ret_tmpl=True, md=md) @@ -1687,7 +1687,7 @@ 'os-collect-config': { 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip/heat-api-cfn/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': '8901', 'stack_name': 'update_meta_sc' @@ -1708,7 +1708,7 @@ @mock.patch.object(heat_plugin.HeatClientPlugin, 'url_for') def test_server_update_software_config_transport(self, fake_url): md = {'os-collect-config': {'polling_interval': 10}} - fake_url.return_value = 'the-cfn-url' + fake_url.return_value = 'http://ip/heat-api-cfn/v1' server = self._server_create_software_config( stack_name='update_meta_sc', md=md) @@ -1716,7 +1716,7 @@ 'os-collect-config': { 'cfn': { 'access_key_id': '4567', - 'metadata_url': 'the-cfn-url/v1/', + 'metadata_url': 'http://ip/heat-api-cfn/v1/', 'path': 'WebServer.Metadata', 'secret_access_key': '8901', 'stack_name': 'update_meta_sc' @@ -3200,7 +3200,7 @@ server.resource_id = '1234' self.patchobject(self.fc.servers, 
'get', side_effect=fakes_nova.fake_exception()) - self.assertEqual('', server._resolve_all_attributes("accessIPv4")) + self.assertEqual('', server._resolve_any_attribute("accessIPv4")) def test_resolve_attribute_console_url(self): server = self.fc.servers.list()[0] @@ -3211,7 +3211,7 @@ 'WebServer', tmpl.resource_definitions(stack)['WebServer'], stack) ws.resource_id = server.id self.patchobject(self.fc.servers, 'get', return_value=server) - console_urls = ws._resolve_all_attributes('console_urls') + console_urls = ws._resolve_any_attribute('console_urls') self.assertIsInstance(console_urls, collections.Mapping) supported_consoles = ('novnc', 'xvpvnc', 'spice-html5', 'rdp-html5', 'serial', 'webmks') @@ -3232,7 +3232,7 @@ expect_networks = {"fake_uuid": ["10.0.0.3"], "fake_net": ["10.0.0.3"]} self.assertEqual(expect_networks, - server._resolve_all_attributes("networks")) + server._resolve_any_attribute("networks")) def test_empty_instance_user(self): """Test Nova server doesn't set instance_user in build_userdata diff -Nru heat-9.0.0/heat/tests/openstack/sahara/test_cluster.py heat-10.0.0~b1/heat/tests/openstack/sahara/test_cluster.py --- heat-9.0.0/heat/tests/openstack/sahara/test_cluster.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/openstack/sahara/test_cluster.py 2017-10-27 07:35:34.000000000 +0000 @@ -176,11 +176,11 @@ self.t['resources']['super-cluster']['properties'].pop( 'neutron_management_network') cluster = self._init_cluster(self.t) - self.patchobject(cluster, 'is_using_neutron', return_value=True) ex = self.assertRaises(exception.StackValidationFailed, cluster.validate) - self.assertEqual("neutron_management_network must be provided", - six.text_type(ex)) + error_msg = ('Property error: resources.super-cluster.properties: ' + 'Property neutron_management_network not assigned') + self.assertEqual(error_msg, six.text_type(ex)) def test_deprecated_properties_correctly_translates(self): tmpl = ''' diff -Nru 
heat-9.0.0/heat/tests/test_convg_stack.py heat-10.0.0~b1/heat/tests/test_convg_stack.py --- heat-9.0.0/heat/tests/test_convg_stack.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_convg_stack.py 2017-10-27 07:35:34.000000000 +0000 @@ -54,8 +54,8 @@ stack.converge_stack(template=stack.t, action=stack.CREATE) self.assertIsNone(stack.ext_rsrcs_db) - self.assertEqual('Dependencies([((1, True), None)])', - repr(stack.convergence_dependencies)) + self.assertEqual([((1, True), None)], + list(stack.convergence_dependencies._graph.edges())) stack_db = stack_object.Stack.get_by_id(stack.context, stack.id) self.assertIsNotNone(stack_db.current_traversal) @@ -82,12 +82,11 @@ stack.store() stack.converge_stack(template=stack.t, action=stack.CREATE) self.assertIsNone(stack.ext_rsrcs_db) - self.assertEqual('Dependencies([' - '((1, True), (3, True)), ' - '((2, True), (3, True)), ' - '((3, True), (4, True)), ' - '((3, True), (5, True))])', - repr(stack.convergence_dependencies)) + self.assertEqual([((1, True), (3, True)), + ((2, True), (3, True)), + ((3, True), (4, True)), + ((3, True), (5, True))], + sorted(stack.convergence_dependencies._graph.edges())) stack_db = stack_object.Stack.get_by_id(stack.context, stack.id) self.assertIsNotNone(stack_db.current_traversal) @@ -179,18 +178,18 @@ curr_stack.converge_stack(template=template2, action=stack.UPDATE) self.assertIsNotNone(curr_stack.ext_rsrcs_db) - self.assertEqual('Dependencies([' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((4, False), (3, False)), ' - '((4, False), (4, True)), ' - '((5, False), (3, False)), ' - '((5, False), (5, True)), ' - '((6, True), (8, True)), ' - '((7, True), (8, True)), ' - '((8, True), (4, True)), ' - '((8, True), (5, True))])', - repr(curr_stack.convergence_dependencies)) + deps = curr_stack.convergence_dependencies + self.assertEqual([((3, False), (1, False)), + ((3, False), (2, False)), + ((4, False), (3, False)), + ((4, False), (4, True)), + ((5, 
False), (3, False)), + ((5, False), (5, True)), + ((6, True), (8, True)), + ((7, True), (8, True)), + ((8, True), (4, True)), + ((8, True), (5, True))], + sorted(deps._graph.edges())) stack_db = stack_object.Stack.get_by_id(curr_stack.context, curr_stack.id) @@ -305,12 +304,12 @@ curr_stack.converge_stack(template=template2, action=stack.DELETE) self.assertIsNotNone(curr_stack.ext_rsrcs_db) - self.assertEqual('Dependencies([' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((4, False), (3, False)), ' - '((5, False), (3, False))])', - repr(curr_stack.convergence_dependencies)) + deps = curr_stack.convergence_dependencies + self.assertEqual([((3, False), (1, False)), + ((3, False), (2, False)), + ((4, False), (3, False)), + ((5, False), (3, False))], + sorted(deps._graph.edges())) stack_db = stack_object.Stack.get_by_id(curr_stack.context, curr_stack.id) @@ -722,12 +721,11 @@ self.stack._compute_convg_dependencies(self.stack.ext_rsrcs_db, self.stack.dependencies, self.current_resources) - self.assertEqual('Dependencies([' - '((1, True), (3, True)), ' - '((2, True), (3, True)), ' - '((3, True), (4, True)), ' - '((3, True), (5, True))])', - repr(self.stack._convg_deps)) + self.assertEqual([((1, True), (3, True)), + ((2, True), (3, True)), + ((3, True), (4, True)), + ((3, True), (5, True))], + sorted(self.stack._convg_deps._graph.edges())) def test_dependencies_update_same_template(self): t = template_format.parse(tools.string_template_five) @@ -740,21 +738,20 @@ self.stack._compute_convg_dependencies(db_resources, self.stack.dependencies, curr_resources) - self.assertEqual('Dependencies([' - '((1, False), (1, True)), ' - '((1, True), (3, True)), ' - '((2, False), (2, True)), ' - '((2, True), (3, True)), ' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((3, False), (3, True)), ' - '((3, True), (4, True)), ' - '((3, True), (5, True)), ' - '((4, False), (3, False)), ' - '((4, False), (4, True)), ' - '((5, False), (3, False)), ' - '((5, 
False), (5, True))])', - repr(self.stack._convg_deps)) + self.assertEqual([((1, False), (1, True)), + ((1, True), (3, True)), + ((2, False), (2, True)), + ((2, True), (3, True)), + ((3, False), (1, False)), + ((3, False), (2, False)), + ((3, False), (3, True)), + ((3, True), (4, True)), + ((3, True), (5, True)), + ((4, False), (3, False)), + ((4, False), (4, True)), + ((5, False), (3, False)), + ((5, False), (5, True))], + sorted(self.stack._convg_deps._graph.edges())) def test_dependencies_update_new_template(self): t = template_format.parse(tools.string_template_five_update) @@ -777,18 +774,17 @@ self.stack._compute_convg_dependencies(db_resources, self.stack.dependencies, curr_resources) - self.assertEqual('Dependencies([' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((4, False), (3, False)), ' - '((4, False), (4, True)), ' - '((5, False), (3, False)), ' - '((5, False), (5, True)), ' - '((6, True), (8, True)), ' - '((7, True), (8, True)), ' - '((8, True), (4, True)), ' - '((8, True), (5, True))])', - repr(self.stack._convg_deps)) + self.assertEqual([((3, False), (1, False)), + ((3, False), (2, False)), + ((4, False), (3, False)), + ((4, False), (4, True)), + ((5, False), (3, False)), + ((5, False), (5, True)), + ((6, True), (8, True)), + ((7, True), (8, True)), + ((8, True), (4, True)), + ((8, True), (5, True))], + sorted(self.stack._convg_deps._graph.edges())) def test_dependencies_update_replace_rollback(self): t = template_format.parse(tools.string_template_five) @@ -815,23 +811,22 @@ self.stack._compute_convg_dependencies(db_resources, self.stack.dependencies, curr_resources) - self.assertEqual('Dependencies([' - '((1, False), (1, True)), ' - '((1, False), (6, False)), ' - '((1, True), (3, True)), ' - '((2, False), (2, True)), ' - '((2, True), (3, True)), ' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((3, False), (3, True)), ' - '((3, False), (6, False)), ' - '((3, True), (4, True)), ' - '((3, True), (5, True)), ' - 
'((4, False), (3, False)), ' - '((4, False), (4, True)), ' - '((5, False), (3, False)), ' - '((5, False), (5, True))])', - repr(self.stack._convg_deps)) + self.assertEqual([((1, False), (1, True)), + ((1, False), (6, False)), + ((1, True), (3, True)), + ((2, False), (2, True)), + ((2, True), (3, True)), + ((3, False), (1, False)), + ((3, False), (2, False)), + ((3, False), (3, True)), + ((3, False), (6, False)), + ((3, True), (4, True)), + ((3, True), (5, True)), + ((4, False), (3, False)), + ((4, False), (4, True)), + ((5, False), (3, False)), + ((5, False), (5, True))], + sorted(self.stack._convg_deps._graph.edges())) def test_dependencies_update_delete(self): tmpl = templatem.Template.create_empty_template( @@ -844,12 +839,11 @@ self.stack._compute_convg_dependencies(db_resources, self.stack.dependencies, curr_resources) - self.assertEqual('Dependencies([' - '((3, False), (1, False)), ' - '((3, False), (2, False)), ' - '((4, False), (3, False)), ' - '((5, False), (3, False))])', - repr(self.stack._convg_deps)) + self.assertEqual([((3, False), (1, False)), + ((3, False), (2, False)), + ((4, False), (3, False)), + ((5, False), (3, False))], + sorted(self.stack._convg_deps._graph.edges())) class TestConvergenceMigration(common.HeatTestCase): diff -Nru heat-9.0.0/heat/tests/test_crypt.py heat-10.0.0~b1/heat/tests/test_crypt.py --- heat-9.0.0/heat/tests/test_crypt.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_crypt.py 2017-10-27 07:35:34.000000000 +0000 @@ -60,3 +60,17 @@ def test_encrypt_decrypt_dict_default_enc_key(self): self._test_encrypt_decrypt_dict() + + def test_decrypt_dict_invalid_key(self): + data = {'p1': u'happy', + '2': [u'a', u'little', u'blue'], + '6': 7} + encrypted_data = crypt.encrypted_dict( + data, '767c3ed056cbaa3b9dfedb8c6f825bf0') + ex = self.assertRaises(exception.InvalidEncryptionKey, + crypt.decrypted_dict, + encrypted_data, + '767c3ed056cbaa3b9dfedb8c6f825bf1') + self.assertEqual('Can not decrypt data with the 
auth_encryption_key ' + 'in heat config.', + six.text_type(ex)) diff -Nru heat-9.0.0/heat/tests/test_engine_api_utils.py heat-10.0.0~b1/heat/tests/test_engine_api_utils.py --- heat-9.0.0/heat/tests/test_engine_api_utils.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_engine_api_utils.py 2017-10-27 07:35:34.000000000 +0000 @@ -63,7 +63,9 @@ ev = event.Event(self.context, self.stack, 'CREATE', 'COMPLETE', 'state changed', 'z3455xyc-9f88-404d-a85b-5315293e67de', - res_properties, resource.name, resource.type(), + resource._rsrc_prop_data_id, + resource._stored_properties_data, + resource.name, resource.type(), uuid=ev_uuid) ev.store() return event_object.Event.get_all_by_stack( @@ -322,7 +324,8 @@ resource = self.stack['generic1'] resource._update_stored_properties() resource.store() - event = self._dummy_event(res_properties=resource._rsrc_prop_data) + event = self._dummy_event( + res_properties=resource._stored_properties_data) formatted = api.format_event(event, self.stack.identifier(), include_rsrc_prop_data=True) self.assertEqual({'k1': 'v1'}, formatted[rpc_api.EVENT_RES_PROPERTIES]) diff -Nru heat-9.0.0/heat/tests/test_event.py heat-10.0.0~b1/heat/tests/test_event.py --- heat-9.0.0/heat/tests/test_event.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_event.py 2017-10-27 07:35:34.000000000 +0000 @@ -85,14 +85,16 @@ self.resource.resource_id_set('resource_physical_id') e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'alabama', self.resource._rsrc_prop_data, + 'alabama', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type()) e.store() self.assertEqual(1, len(event_object.Event.get_all_by_stack( self.ctx, self.stack.id))) e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'arizona', self.resource._rsrc_prop_data, + 'arizona', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, 
self.resource.name, self.resource.type()) e.store() events = event_object.Event.get_all_by_stack(self.ctx, self.stack.id) @@ -105,7 +107,7 @@ self.resource.resource_id_set('resource_physical_id') e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'arkansas', self.resource._rsrc_prop_data, + 'arkansas', None, None, self.resource.name, self.resource.type()) e.store() @@ -114,7 +116,7 @@ mock_random_uniform.return_value = 2.0 / 100 - .0001 e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'alaska', self.resource._rsrc_prop_data, + 'alaska', None, None, self.resource.name, self.resource.type()) e.store() events = event_object.Event.get_all_by_stack(self.ctx, self.stack.id) @@ -126,7 +128,7 @@ mock_random_uniform.return_value = 2.0 / 100 + .0001 e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'aardvark', self.resource._rsrc_prop_data, + 'aardvark', None, None, self.resource.name, self.resource.type()) e.store() events = event_object.Event.get_all_by_stack(self.ctx, self.stack.id) @@ -138,16 +140,17 @@ self.resource.resource_id_set('resource_physical_id') e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'alabama', self.resource._rsrc_prop_data, + 'alabama', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type()) e.store() - rpd1_id = self.resource._rsrc_prop_data.id + rpd1_id = self.resource._rsrc_prop_data_id rpd2 = rpd_object.ResourcePropertiesData.create( self.ctx, {'encrypted': False, 'data': {'foo': 'bar'}}) rpd2_id = rpd2.id e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'arizona', rpd2, + 'arizona', rpd2_id, rpd2.data, self.resource.name, self.resource.type()) e.store() @@ -155,7 +158,7 @@ self.ctx, {'encrypted': False, 'data': {'foo': 'bar'}}) rpd3_id = rpd3.id e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'arkansas', rpd3, + 'arkansas', rpd3_id, 
rpd3.data, self.resource.name, self.resource.type()) e.store() @@ -163,7 +166,7 @@ self.ctx, {'encrypted': False, 'data': {'foo': 'bar'}}) rpd4_id = rpd4.id e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'arkansas', rpd4, + 'arkansas', rpd4_id, rpd4.data, self.resource.name, self.resource.type()) e.store() @@ -187,7 +190,8 @@ def test_identifier(self): event_uuid = 'abc123yc-9f88-404d-a85b-531529456xyz' e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'wibble', self.resource._rsrc_prop_data, + 'wibble', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type(), uuid=event_uuid) @@ -202,7 +206,7 @@ def test_identifier_is_none(self): e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'wibble', self.resource._rsrc_prop_data, + 'wibble', None, None, self.resource.name, self.resource.type()) self.assertIsNone(e.identifier()) @@ -211,7 +215,8 @@ def test_as_dict(self): e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'wibble', self.resource._rsrc_prop_data, + 'wibble', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type()) e.store() @@ -233,7 +238,8 @@ def test_load_deprecated_prop_data(self): e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'wibble', self.resource._rsrc_prop_data, + 'wibble', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type()) e.store() @@ -273,7 +279,8 @@ def test_props_encrypted(self): e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing', - 'wibble', self.resource._rsrc_prop_data, + 'wibble', self.resource._rsrc_prop_data_id, + self.resource._stored_properties_data, self.resource.name, self.resource.type()) e.store() diff -Nru heat-9.0.0/heat/tests/test_hot.py heat-10.0.0~b1/heat/tests/test_hot.py --- 
heat-9.0.0/heat/tests/test_hot.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_hot.py 2017-10-27 07:35:34.000000000 +0000 @@ -1413,6 +1413,15 @@ resolved = self.resolve(snippet, tmpl, stack) self.assertEqual('value_if_false', resolved) + def test_if_null_return(self): + snippet = {'if': [True, None, 'value_if_false']} + # when condition is true, if function resolve to value_if_true + tmpl = template.Template(hot_newton_tpl_empty) + stack = parser.Stack(utils.dummy_context(), + 'test_if_null_return', tmpl) + resolved = self.resolve(snippet, tmpl, stack) + self.assertIsNone(resolved) + def test_if_using_condition_function(self): tmpl_with_conditions = template_format.parse(''' heat_template_version: 2016-10-14 @@ -1429,6 +1438,54 @@ resolved = self.resolve(snippet, tmpl, stack) self.assertEqual('value_if_true', resolved) + def test_if_referenced_by_resource(self): + tmpl_with_conditions = template_format.parse(''' +heat_template_version: pike +conditions: + create_prod: False +resources: + AResource: + type: ResourceWithPropsType + properties: + Foo: + if: + - create_prod + - "one" + - "two" +''') + tmpl = template.Template(tmpl_with_conditions) + self.stack = parser.Stack(utils.dummy_context(), + 'test_if_referenced_by_resource', tmpl) + self.stack.store() + self.stack.create() + self.assertEqual((parser.Stack.CREATE, parser.Stack.COMPLETE), + self.stack.state) + self.assertEqual('two', self.stack['AResource'].properties['Foo']) + + def test_if_referenced_by_resource_null(self): + tmpl_with_conditions = template_format.parse(''' +heat_template_version: pike +conditions: + create_prod: True +resources: + AResource: + type: ResourceWithPropsType + properties: + Foo: + if: + - create_prod + - null + - "two" +''') + tmpl = template.Template(tmpl_with_conditions) + self.stack = parser.Stack(utils.dummy_context(), + 'test_if_referenced_by_resource_null', tmpl) + self.stack.store() + self.stack.create() + self.assertEqual((parser.Stack.CREATE, 
parser.Stack.COMPLETE), + self.stack.state) + self.assertEqual('', self.stack['AResource'].properties['Foo']) + def test_if_invalid_args(self): snippet = {'if': ['create_prod', 'one_value']} tmpl = template.Template(hot_newton_tpl_empty) @@ -1934,15 +1991,24 @@ foo: bar resource2: type: AWS::EC2::Instance + resource3: + type: AWS::EC2::Instance + depends_on: + - resource1 + - dummy + - resource2 ''') source = template.Template(hot_tpl) empty = template.Template(copy.deepcopy(hot_tpl_empty)) stack = parser.Stack(utils.dummy_context(), 'test_stack', source) - for defn in six.itervalues(source.resource_definitions(stack)): + for rname, defn in sorted(source.resource_definitions(stack).items()): empty.add_resource(defn) - self.assertEqual(hot_tpl['resources'], empty.t['resources']) + expected = copy.deepcopy(hot_tpl['resources']) + expected['resource1']['depends_on'] = [] + expected['resource3']['depends_on'] = ['resource1', 'resource2'] + self.assertEqual(expected, empty.t['resources']) def test_add_output(self): hot_tpl = template_format.parse(''' diff -Nru heat-9.0.0/heat/tests/test_resource.py heat-10.0.0~b1/heat/tests/test_resource.py --- heat-9.0.0/heat/tests/test_resource.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_resource.py 2017-10-27 07:35:34.000000000 +0000 @@ -655,6 +655,27 @@ self.assertEqual('word', res.attributes['string']) self.assertEqual(0, res_attr.call_count) + def test_store_attributes_fail(self): + res_def = rsrc_defn.ResourceDefinition('test_resource', + 'ResWithStringPropAndAttr') + res = generic_rsrc.ResWithStringPropAndAttr( + 'test_res_attr_store', res_def, self.stack) + + res.action = res.UPDATE + res.status = res.COMPLETE + res.store() + attr_data = {'string': 'word'} + # set the attr_data_id first + resource_objects.Resource.update_by_id(res.context, res.id, + {'attr_data_id': 99}) + new_attr_data_id = resource_objects.Resource.store_attributes( + res.context, res.id, res._atomic_key, attr_data, None) + # 
fail to store new attr data + self.assertIsNone(new_attr_data_id) + res._load_data(resource_objects.Resource.get_obj( + res.context, res.id)) + self.assertEqual({}, res.attributes._resolved_values) + def test_resource_object_resource_properties_data(self): cfg.CONF.set_override('encrypt_parameters_and_properties', True) data = {'p1': 'i see', @@ -680,13 +701,13 @@ ctx2, res_obj2.id) # verify the resource_properties_data association - # can be set by id or object - self.assertEqual(rpd_db_obj.id, res_obj1.rsrc_prop_data.id) - self.assertEqual(res_obj1.rsrc_prop_data.id, - res_obj2.rsrc_prop_data.id) + # can be set by id + self.assertEqual(rpd_db_obj.id, res_obj1.rsrc_prop_data_id) + self.assertEqual(res_obj1.rsrc_prop_data_id, + res_obj2.rsrc_prop_data_id) # properties data appears unencrypted to resource object - self.assertEqual(data, res_obj1.rsrc_prop_data.data) - self.assertEqual(data, res_obj2.rsrc_prop_data.data) + self.assertEqual(data, res_obj1.properties_data) + self.assertEqual(data, res_obj2.properties_data) def test_make_replacement(self): tmpl = rsrc_defn.ResourceDefinition('test_resource', 'Foo') @@ -921,7 +942,10 @@ res.state_set(res.CREATE, res.IN_PROGRESS, 'test_rpd') # Modernity, the data is where it belongs - self.assertEqual(res._rsrc_prop_data.data, {'Foo': 'lucky'}) + rsrc_prop_data_db_obj = db_api.resource_prop_data_get( + self.stack.context, res._rsrc_prop_data_id) + self.assertEqual(rsrc_prop_data_db_obj['data'], {'Foo': 'lucky'}) + # legacy locations aren't being used anymore self.assertFalse(hasattr(res, 'properties_data')) self.assertFalse(hasattr(res, 'properties_data_encrypted')) @@ -962,11 +986,14 @@ # Modernity, the data is where it belongs # The db object data is encrypted rsrc_prop_data_db_obj = db_api.resource_prop_data_get( - self.stack.context, res._rsrc_prop_data.id) + self.stack.context, res._rsrc_prop_data_id) self.assertNotEqual(rsrc_prop_data_db_obj['data'], {'Foo': 'lucky'}) - 
self.assertEqual(rsrc_prop_data_db_obj.encrypted, True) # But the objects/ rsrc_prop_data.data is always unencrypted - self.assertEqual(res._rsrc_prop_data.data, {'Foo': 'lucky'}) + rsrc_prop_data_obj = rpd_object.ResourcePropertiesData._from_db_object( + rpd_object.ResourcePropertiesData(), self.stack.context, + rsrc_prop_data_db_obj) + self.assertEqual(rsrc_prop_data_obj.data, {'Foo': 'lucky'}) + # legacy locations aren't being used anymore self.assertFalse(hasattr(res, 'properties_data')) self.assertFalse(hasattr(res, 'properties_data_encrypted')) @@ -1996,20 +2023,20 @@ # The properties data should be decrypted when the object is # loaded using get_obj res_obj = resource_objects.Resource.get_obj(res.context, res.id) - self.assertEqual('string', res_obj.rsrc_prop_data.data['prop1']) + self.assertEqual('string', res_obj.properties_data['prop1']) # _stored_properties_data should be decrypted when the object is # loaded using get_all_by_stack res_objs = resource_objects.Resource.get_all_by_stack(res.context, self.stack.id) res_obj = res_objs['test_res_enc'] - self.assertEqual('string', res_obj.rsrc_prop_data.data['prop1']) + self.assertEqual('string', res_obj.properties_data['prop1']) # The properties data should be decrypted when the object is # refreshed res_obj = resource_objects.Resource.get_obj(res.context, res.id) res_obj.refresh() - self.assertEqual('string', res_obj.rsrc_prop_data.data['prop1']) + self.assertEqual('string', res_obj.properties_data['prop1']) def test_properties_data_no_encryption(self): cfg.CONF.set_override('encrypt_parameters_and_properties', False) @@ -2040,16 +2067,16 @@ # is loaded using get_obj prev_rsrc_prop_data_id = db_res.rsrc_prop_data.id res_obj = resource_objects.Resource.get_obj(res.context, res.id) - self.assertEqual('string', res_obj.rsrc_prop_data.data['prop1']) - self.assertEqual(prev_rsrc_prop_data_id, res_obj.rsrc_prop_data.id) + self.assertEqual('string', res_obj.properties_data['prop1']) + 
self.assertEqual(prev_rsrc_prop_data_id, res_obj.rsrc_prop_data_id) # The properties data should not be modified when the object # is loaded using get_all_by_stack res_objs = resource_objects.Resource.get_all_by_stack(res.context, self.stack.id) res_obj = res_objs['test_res_enc'] - self.assertEqual('string', res_obj.rsrc_prop_data.data['prop1']) - self.assertEqual(prev_rsrc_prop_data_id, res_obj.rsrc_prop_data.id) + self.assertEqual('string', res_obj.properties_data['prop1']) + self.assertEqual(prev_rsrc_prop_data_id, res_obj.rsrc_prop_data_id) def _assert_resource_lock(self, res_id, engine_id, atomic_key): rs = resource_objects.Resource.get_obj(self.stack.context, res_id) @@ -4555,19 +4582,11 @@ def test_create_or_replace_rsrc_prop_data(self): res = self.res res._stored_properties_data = self.old_rpd - if self.replaced: - res._rsrc_prop_data = None res.store() - if res._rsrc_prop_data is None: - old_rpd_id = -1 - else: - old_rpd_id = res._rsrc_prop_data.id - res._stored_properties_data = self.new_rpd - if self.replaced: - res._rsrc_prop_data = None + old_rpd_id = res._rsrc_prop_data_id + with mock.patch("heat.engine.function.resolve") as mock_fr: + mock_fr.return_value = self.new_rpd + res._update_stored_properties() res.store() - if res._rsrc_prop_data is None: - new_rpd_id = -1 - else: - new_rpd_id = res._rsrc_prop_data.id + new_rpd_id = res._rsrc_prop_data_id self.assertEqual(self.replaced, old_rpd_id != new_rpd_id) diff -Nru heat-9.0.0/heat/tests/test_stack_delete.py heat-10.0.0~b1/heat/tests/test_stack_delete.py --- heat-9.0.0/heat/tests/test_stack_delete.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_stack_delete.py 2017-10-27 07:35:34.000000000 +0000 @@ -84,6 +84,46 @@ self.assertEqual([], snapshot_object.Snapshot.get_all( self.ctx, stack_id)) + def test_delete_with_snapshot_after_stack_add_resource(self): + tpl = {'heat_template_version': 'queens', + 'resources': + {'A': {'type': 'ResourceWithRestoreType'}}} + self.stack = 
stack.Stack(self.ctx, 'stack_delete_with_snapshot', + template.Template(tpl)) + stack_id = self.stack.store() + self.stack.create() + + data = copy.deepcopy(self.stack.prepare_abandon()) + data['resources']['A']['resource_data']['a_string'] = 'foo' + snapshot_fake = { + 'tenant': self.ctx.tenant_id, + 'name': 'Snapshot', + 'stack_id': stack_id, + 'status': 'COMPLETE', + 'data': data + } + snapshot_object.Snapshot.create(self.ctx, snapshot_fake) + + self.assertIsNotNone(snapshot_object.Snapshot.get_all( + self.ctx, stack_id)) + + new_tmpl = {'heat_template_version': 'queens', + 'resources': + {'A': {'type': 'ResourceWithRestoreType'}, + 'B': {'type': 'ResourceWithRestoreType'}}} + updated_stack = stack.Stack(self.ctx, 'update_stack_add_res', + template.Template(new_tmpl)) + self.stack.update(updated_stack) + self.assertEqual(2, len(self.stack.resources)) + + self.stack.delete() + db_s = stack_object.Stack.get_by_id(self.ctx, stack_id) + self.assertIsNone(db_s) + self.assertEqual((stack.Stack.DELETE, stack.Stack.COMPLETE), + self.stack.state) + self.assertEqual([], snapshot_object.Snapshot.get_all( + self.ctx, stack_id)) + def test_delete_user_creds(self): self.stack = stack.Stack(self.ctx, 'delete_test', self.tmpl) stack_id = self.stack.store() diff -Nru heat-9.0.0/heat/tests/test_stack.py heat-10.0.0~b1/heat/tests/test_stack.py --- heat-9.0.0/heat/tests/test_stack.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_stack.py 2017-10-27 07:35:34.000000000 +0000 @@ -1021,7 +1021,7 @@ loaded_stack._update_all_resource_data(False, True) self.assertEqual('AResource', loaded_stack.outputs['TestOutput'].get_value()) - self.assertEqual({}, loaded_stack['AResource']._stored_properties_data) + self.assertIsNone(loaded_stack['AResource']._stored_properties_data) def test_adopt_stack_fails(self): adopt_data = '''{ diff -Nru heat-9.0.0/heat/tests/test_template.py heat-10.0.0~b1/heat/tests/test_template.py --- heat-9.0.0/heat/tests/test_template.py 
2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat/tests/test_template.py 2017-10-27 07:35:34.000000000 +0000 @@ -734,7 +734,8 @@ valid_versions = ['2013-05-23', '2014-10-16', '2015-04-30', '2015-10-15', '2016-04-08', '2016-10-14', '2017-02-24', '2017-09-01', - 'newton', 'ocata', 'pike'] + '2018-03-02', + 'newton', 'ocata', 'pike', 'queens'] ex_error_msg = ('The template version is invalid: ' '"heat_template_version: 2012-12-12". ' '"heat_template_version" should be one of: %s' @@ -1420,15 +1421,24 @@ foo: bar resource2: Type: AWS::EC2::Instance + resource3: + Type: AWS::EC2::Instance + DependsOn: + - resource1 + - dummy + - resource2 ''') source = template.Template(cfn_tpl) empty = template.Template(copy.deepcopy(empty_template)) stk = stack.Stack(self.ctx, 'test_stack', source) - for defn in six.itervalues(source.resource_definitions(stk)): + for rname, defn in sorted(source.resource_definitions(stk).items()): empty.add_resource(defn) - self.assertEqual(cfn_tpl['Resources'], empty.t['Resources']) + expected = copy.deepcopy(cfn_tpl['Resources']) + del expected['resource1']['DependsOn'] + expected['resource3']['DependsOn'] = ['resource1', 'resource2'] + self.assertEqual(expected, empty.t['Resources']) def test_add_output(self): cfn_tpl = template_format.parse(''' diff -Nru heat-9.0.0/heat.egg-info/entry_points.txt heat-10.0.0~b1/heat.egg-info/entry_points.txt --- heat-9.0.0/heat.egg-info/entry_points.txt 2017-08-30 11:11:34.000000000 +0000 +++ heat-10.0.0~b1/heat.egg-info/entry_points.txt 2017-10-27 07:39:25.000000000 +0000 @@ -120,9 +120,11 @@ heat_template_version.2016-10-14 = heat.engine.hot.template:HOTemplate20161014 heat_template_version.2017-02-24 = heat.engine.hot.template:HOTemplate20170224 heat_template_version.2017-09-01 = heat.engine.hot.template:HOTemplate20170901 +heat_template_version.2018-03-02 = heat.engine.hot.template:HOTemplate20180302 heat_template_version.newton = heat.engine.hot.template:HOTemplate20161014 heat_template_version.ocata 
= heat.engine.hot.template:HOTemplate20170224 heat_template_version.pike = heat.engine.hot.template:HOTemplate20170901 +heat_template_version.queens = heat.engine.hot.template:HOTemplate20180302 [oslo.config.opts] heat.api.aws.ec2token = heat.api.aws.ec2token:list_opts @@ -130,9 +132,9 @@ heat.common.config = heat.common.config:list_opts heat.common.context = heat.common.context:list_opts heat.common.crypt = heat.common.crypt:list_opts -heat.common.heat_keystoneclient = heat.engine.clients.os.keystone.heat_keystoneclient:list_opts heat.common.wsgi = heat.common.wsgi:list_opts heat.engine.clients = heat.engine.clients:list_opts +heat.engine.clients.os.keystone.heat_keystoneclient = heat.engine.clients.os.keystone.heat_keystoneclient:list_opts heat.engine.notification = heat.engine.notification:list_opts heat.engine.resources = heat.engine.resources:list_opts heat_integrationtests.common.config = heat_integrationtests.common.config:list_opts diff -Nru heat-9.0.0/heat.egg-info/pbr.json heat-10.0.0~b1/heat.egg-info/pbr.json --- heat-9.0.0/heat.egg-info/pbr.json 2017-08-30 11:11:34.000000000 +0000 +++ heat-10.0.0~b1/heat.egg-info/pbr.json 2017-10-27 07:39:25.000000000 +0000 @@ -1 +1 @@ -{"git_version": "6b7b132", "is_release": true} \ No newline at end of file +{"git_version": "1f73478", "is_release": true} \ No newline at end of file diff -Nru heat-9.0.0/heat.egg-info/PKG-INFO heat-10.0.0~b1/heat.egg-info/PKG-INFO --- heat-9.0.0/heat.egg-info/PKG-INFO 2017-08-30 11:11:34.000000000 +0000 +++ heat-10.0.0~b1/heat.egg-info/PKG-INFO 2017-10-27 07:39:25.000000000 +0000 @@ -1,11 +1,12 @@ Metadata-Version: 1.1 Name: heat -Version: 9.0.0 +Version: 10.0.0.0b1 Summary: OpenStack Orchestration Home-page: http://docs.openstack.org/developer/heat/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN +Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== @@ -34,7 +35,7 @@ * Wiki: 
http://wiki.openstack.org/Heat - * Developer docs: http://docs.openstack.org/developer/heat + * Developer docs: http://docs.openstack.org/heat/latest * Template samples: https://git.openstack.org/cgit/openstack/heat-templates * Agents: https://git.openstack.org/cgit/openstack/heat-agents diff -Nru heat-9.0.0/heat.egg-info/requires.txt heat-10.0.0~b1/heat.egg-info/requires.txt --- heat-9.0.0/heat.egg-info/requires.txt 2017-08-30 11:11:34.000000000 +0000 +++ heat-10.0.0~b1/heat.egg-info/requires.txt 2017-10-27 07:39:25.000000000 +0000 @@ -1,55 +1,55 @@ pbr!=2.1.0,>=2.0.0 Babel!=2.4.0,>=2.3.4 croniter>=0.3.4 -cryptography!=2.0,>=1.6 +cryptography!=2.0,>=1.9 debtcollector>=1.2.0 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 -keystoneauth1>=3.1.0 -keystonemiddleware>=4.12.0 -lxml!=3.7.0,>=2.3 -netaddr!=0.7.16,>=0.7.13 -openstacksdk>=0.9.17 -oslo.cache>=1.5.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 -oslo.concurrency>=3.8.0 -oslo.context>=2.14.0 -oslo.db>=4.24.0 -oslo.i18n!=3.15.2,>=2.1.0 -oslo.log>=3.22.0 -oslo.messaging!=5.25.0,>=5.24.2 -oslo.middleware>=3.27.0 +keystoneauth1>=3.2.0 +keystonemiddleware>=4.17.0 +lxml!=3.7.0,>=3.4.1 +netaddr>=0.7.18 +openstacksdk>=0.9.18 +oslo.cache>=1.26.0 +oslo.config>=4.6.0 +oslo.concurrency>=3.20.0 +oslo.context!=2.19.1,>=2.14.0 +oslo.db>=4.27.0 +oslo.i18n>=3.15.3 +oslo.log>=3.30.0 +oslo.messaging>=5.29.0 +oslo.middleware>=3.31.0 oslo.policy>=1.23.0 -oslo.reports>=0.6.0 -oslo.serialization!=2.19.1,>=1.10.0 -oslo.service>=1.10.0 -oslo.utils>=3.20.0 +oslo.reports>=1.18.0 +oslo.serialization!=2.19.1,>=2.18.0 +oslo.service>=1.24.0 +oslo.utils>=3.28.0 osprofiler>=1.4.0 -oslo.versionedobjects>=1.17.0 +oslo.versionedobjects>=1.28.0 PasteDeploy>=1.5.0 pycrypto>=2.6 -aodhclient>=0.7.0 +aodhclient>=0.9.0 python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 python-ceilometerclient>=2.5.0 -python-cinderclient>=3.1.0 -python-designateclient>=1.5.0 +python-cinderclient>=3.2.0 +python-designateclient>=2.7.0 python-glanceclient>=2.8.0 -python-heatclient>=1.6.1 
+python-heatclient>=1.10.0 python-keystoneclient>=3.8.0 python-magnumclient>=2.0.0 -python-manilaclient>=1.12.0 +python-manilaclient>=1.16.0 python-mistralclient>=3.1.0 python-monascaclient>=1.7.0 python-neutronclient>=6.3.0 -python-novaclient>=9.0.0 -python-openstackclient!=3.10.0,>=3.3.0 -python-saharaclient>=1.1.0 +python-novaclient>=9.1.0 +python-openstackclient>=3.12.0 +python-saharaclient>=1.2.0 python-senlinclient>=1.1.0 python-swiftclient>=3.2.0 python-troveclient>=2.2.0 python-zaqarclient>=1.0.0 python-zunclient>=0.2.0 pytz>=2013.6 -PyYAML>=3.10.0 +PyYAML>=3.10 requests>=2.14.2 tenacity>=3.2.1 Routes>=2.3.1 @@ -58,4 +58,4 @@ sqlalchemy-migrate>=0.11.0 stevedore>=1.20.0 WebOb>=1.7.1 -yaql>=1.1.0 +yaql>=1.1.3 diff -Nru heat-9.0.0/heat.egg-info/SOURCES.txt heat-10.0.0~b1/heat.egg-info/SOURCES.txt --- heat-9.0.0/heat.egg-info/SOURCES.txt 2017-08-30 11:11:36.000000000 +0000 +++ heat-10.0.0~b1/heat.egg-info/SOURCES.txt 2017-10-27 07:39:26.000000000 +0000 @@ -365,6 +365,11 @@ heat/db/sqlalchemy/migrate_repo/versions/078_placeholder.py heat/db/sqlalchemy/migrate_repo/versions/079_resource_properties_data.py heat/db/sqlalchemy/migrate_repo/versions/080_resource_attrs_data.py +heat/db/sqlalchemy/migrate_repo/versions/081_placeholder.py +heat/db/sqlalchemy/migrate_repo/versions/082_placeholder.py +heat/db/sqlalchemy/migrate_repo/versions/083_placeholder.py +heat/db/sqlalchemy/migrate_repo/versions/084_placeholder.py +heat/db/sqlalchemy/migrate_repo/versions/085_placeholder.py heat/db/sqlalchemy/migrate_repo/versions/__init__.py heat/engine/__init__.py heat/engine/api.py @@ -1102,6 +1107,7 @@ heat_integrationtests/functional/test_resource_chain.py heat_integrationtests/functional/test_resource_group.py heat_integrationtests/functional/test_resources_list.py +heat_integrationtests/functional/test_simultaneous_update.py heat_integrationtests/functional/test_snapshot_restore.py heat_integrationtests/functional/test_software_config.py 
heat_integrationtests/functional/test_software_deployment_group.py @@ -1117,6 +1123,7 @@ heat_integrationtests/functional/test_update_restricted.py heat_integrationtests/functional/test_validation.py heat_integrationtests/functional/test_waitcondition.py +heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po heat_integrationtests/locale/ja/LC_MESSAGES/heat_integrationtests.po heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po heat_integrationtests/scenario/__init__.py @@ -1190,8 +1197,10 @@ releasenotes/notes/get-server-webmks-console-url-f7066a9e14429084.yaml releasenotes/notes/give-me-a-network-67e23600945346cd.yaml releasenotes/notes/glance-image-tag-6fa123ca30be01aa.yaml +releasenotes/notes/hidden-designate-domain-record-res-d445ca7f1251b63d.yaml releasenotes/notes/immutable-parameters-a13dc9bec7d6fa0f.yaml releasenotes/notes/keystone-domain-support-e06e2c65c5925ae5.yaml +releasenotes/notes/keystone-project-allow-get-attribute-b382fe97694e3987.yaml releasenotes/notes/keystone-region-ce3b435c73c81ce4.yaml releasenotes/notes/know-limit-releasenote-4d21fc4d91d136d9.yaml releasenotes/notes/legacy-stack-user-id-cebbad8b0f2ed490.yaml @@ -1221,6 +1230,7 @@ releasenotes/notes/set-tags-for-port-471155bb53436361.yaml releasenotes/notes/set-tags-for-subnet-17a97b88dd11de63.yaml releasenotes/notes/set-tags-for-subnetpool-d86ca0d7e35a05f1.yaml +releasenotes/notes/stack-definition-in-functions-3f7f172a53edf535.yaml releasenotes/notes/store-resource-attributes-8bcbedca2f86986e.yaml releasenotes/notes/subnet-pool-resource-c32ff97d4f956b73.yaml releasenotes/notes/support-rbac-for-qos-policy-a55434654e1dd953.yaml @@ -1233,9 +1243,11 @@ releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst +releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder +releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 
releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po diff -Nru heat-9.0.0/heat_integrationtests/api/gabbits/stacks.yaml heat-10.0.0~b1/heat_integrationtests/api/gabbits/stacks.yaml --- heat-9.0.0/heat_integrationtests/api/gabbits/stacks.yaml 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/api/gabbits/stacks.yaml 2017-10-27 07:35:34.000000000 +0000 @@ -45,3 +45,118 @@ DELETE: $LAST_URL redirects: True status: 204 + +- name: create stack + POST: /stacks + request_headers: + content-type: application/json + data: + files: {} + disable_rollback: true + parameters: {'test_val': value} + stack_name: $ENVIRON['PREFIX']-stack + template: + heat_template_version: pike + parameters: + test_val: + type: string + resources: + test: + type: OS::Heat::TestResource + properties: + value: {get_param: test_val} + outputs: + output_value: + value: {get_attr: [test, output]} + + status: 201 + response_headers: + location: //stacks/$ENVIRON['PREFIX']-stack/[a-f0-9-]+/ + +- name: poll for stack CREATE_COMPLETE + GET: $LOCATION + redirects: True + poll: + count: 5 + delay: 1.0 + response_json_paths: + $.stack.stack_status: CREATE_COMPLETE + +- name: show stack + GET: $LAST_URL + redirects: True + status: 200 + +- name: update stack + PUT: $LAST_URL + request_headers: + content-type: application/json + data: + files: {} + disable_rollback: true + parameters: {'test_val': new_value} + stack_name: $ENVIRON['PREFIX']-stack + template: + heat_template_version: pike + parameters: + test_val: + type: string + resources: + test: + type: OS::Heat::TestResource + properties: + value: {get_param: test_val} + action_wait_secs: + update: 1 + outputs: + output_value: + value: {get_attr: [test, output]} + + status: 202 + +- name: poll for stack UPDATE_COMPLETE + GET: $LAST_URL + redirects: True + poll: + count: 5 + delay: 1.0 + response_json_paths: + 
$.stack.stack_status: UPDATE_COMPLETE + +- name: patch update stack + PATCH: $LAST_URL + request_headers: + content-type: application/json + data: + parameters: {'test_val': new_patched_value} + + status: 202 + +- name: poll for stack patch UPDATE_COMPLETE + GET: $LAST_URL + redirects: True + poll: + count: 5 + delay: 1.0 + response_json_paths: + $.stack.stack_status: UPDATE_COMPLETE + $.stack.updated_time: /^(?!$HISTORY['poll for stack UPDATE_COMPLETE'].$RESPONSE['$.stack.updated_time'])/ + +- name: list stack outputs + GET: $LAST_URL/outputs + redirects: True + status: 200 + response_json_paths: + $.outputs[0].output_key: output_value + +- name: get stack output + GET: $LAST_URL/output_value + redirects: True + status: 200 + response_json_paths: + $.output.output_value: new_patched_value + +- name: delete stack + DELETE: /stacks/$ENVIRON['PREFIX']-stack + redirects: True + status: 204 diff -Nru heat-9.0.0/heat_integrationtests/cleanup_test_env.sh heat-10.0.0~b1/heat_integrationtests/cleanup_test_env.sh --- heat-9.0.0/heat_integrationtests/cleanup_test_env.sh 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/cleanup_test_env.sh 2017-10-27 07:35:34.000000000 +0000 @@ -30,4 +30,4 @@ openstack flavor delete m1.heat_micro # delete the image created -openstack image delete Fedora-Cloud-Base-24-1.2.x86_64 +openstack image delete Fedora-Cloud-Base-26-1.5.x86_64 diff -Nru heat-9.0.0/heat_integrationtests/common/config.py heat-10.0.0~b1/heat_integrationtests/common/config.py --- heat-9.0.0/heat_integrationtests/common/config.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/common/config.py 2017-10-27 07:35:34.000000000 +0000 @@ -140,6 +140,10 @@ cfg.ListOpt('skip_test_stack_action_list', help="List of stack actions in tests to skip " "ex. 
ABANDON, ADOPT, SUSPEND, RESUME"), + cfg.BoolOpt('convergence_engine_enabled', + default=True, + help="Test features that are only present for stacks with " + "convergence enabled."), cfg.IntOpt('volume_size', default=1, help='Default size in GB for volumes created by volumes tests'), diff -Nru heat-9.0.0/heat_integrationtests/common/test.py heat-10.0.0~b1/heat_integrationtests/common/test.py --- heat-9.0.0/heat_integrationtests/common/test.py 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/common/test.py 2017-10-27 07:35:34.000000000 +0000 @@ -68,6 +68,17 @@ return randbits +def requires_convergence(test_method): + '''Decorator for convergence-only tests. + + The decorated test will be skipped when convergence is disabled. + ''' + convergence_enabled = config.CONF.heat_plugin.convergence_engine_enabled + skipper = testtools.skipUnless(convergence_enabled, + "Convergence-only tests are disabled") + return skipper(test_method) + + class HeatIntegrationTest(testscenarios.WithScenarios, testtools.TestCase): diff -Nru heat-9.0.0/heat_integrationtests/functional/test_event_sinks.py heat-10.0.0~b1/heat_integrationtests/functional/test_event_sinks.py --- heat-9.0.0/heat_integrationtests/functional/test_event_sinks.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/functional/test_event_sinks.py 2017-10-27 07:35:34.000000000 +0000 @@ -12,7 +12,7 @@ import uuid -from zaqarclient.queues.v1 import client as zaqarclient +from zaqarclient.queues.v2 import client as zaqarclient from heat_integrationtests.common import test from heat_integrationtests.functional import functional_base @@ -53,7 +53,7 @@ } } - zaqar = zaqarclient.Client(conf=conf, version=1.1) + zaqar = zaqarclient.Client(conf=conf) queue = zaqar.queue(queue_id) def validate_messages(): diff -Nru heat-9.0.0/heat_integrationtests/functional/test_simultaneous_update.py heat-10.0.0~b1/heat_integrationtests/functional/test_simultaneous_update.py --- 
heat-9.0.0/heat_integrationtests/functional/test_simultaneous_update.py 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/functional/test_simultaneous_update.py 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import copy +import time + +from heat_integrationtests.common import test +from heat_integrationtests.functional import functional_base + +_test_template = { + 'heat_template_version': 'pike', + 'description': 'Test template to create two resources.', + 'resources': { + 'test1': { + 'type': 'OS::Heat::TestResource', + 'properties': { + 'value': 'Test1', + 'fail': False, + 'update_replace': False, + 'wait_secs': 0, + } + }, + 'test2': { + 'type': 'OS::Heat::TestResource', + 'properties': { + 'value': 'Test1', + 'fail': False, + 'update_replace': False, + 'wait_secs': 0, + 'action_wait_secs': { + 'create': 30, + } + }, + 'depends_on': ['test1'] + } + } +} + + +def get_templates(fail=False, delay_s=None): + before = copy.deepcopy(_test_template) + + after = copy.deepcopy(before) + for r in after['resources'].values(): + r['properties']['value'] = 'Test2' + + before_props = before['resources']['test2']['properties'] + before_props['fail'] = fail + if delay_s is not None: + before_props['action_wait_secs']['create'] = delay_s + + return before, after + + +class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase): + + @test.requires_convergence + def 
test_retrigger_success(self): + before, after = get_templates() + stack_id = self.stack_create(template=before, + expected_status='CREATE_IN_PROGRESS') + time.sleep(10) + + self.update_stack(stack_id, after) + + @test.requires_convergence + def test_retrigger_failure(self): + before, after = get_templates(fail=True) + stack_id = self.stack_create(template=before, + expected_status='CREATE_IN_PROGRESS') + time.sleep(10) + + self.update_stack(stack_id, after) + + @test.requires_convergence + def test_retrigger_timeout(self): + before, after = get_templates(delay_s=70) + stack_id = self.stack_create(template=before, + expected_status='CREATE_IN_PROGRESS', + timeout=1) + time.sleep(50) + + self.update_stack(stack_id, after) diff -Nru heat-9.0.0/heat_integrationtests/functional/test_templates.py heat-10.0.0~b1/heat_integrationtests/functional/test_templates.py --- heat-9.0.0/heat_integrationtests/functional/test_templates.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/functional/test_templates.py 2017-10-27 07:35:34.000000000 +0000 @@ -53,7 +53,8 @@ "2012-12-12", "2010-09-09", "2016-04-08", "2016-10-14", "newton", "2017-02-24", "ocata", - "2017-09-01", "pike"] + "2017-09-01", "pike", + "2018-03-02", "queens"] for template in template_versions: self.assertIn(template.version.split(".")[1], supported_template_versions) diff -Nru heat-9.0.0/heat_integrationtests/functional/test_waitcondition.py heat-10.0.0~b1/heat_integrationtests/functional/test_waitcondition.py --- heat-9.0.0/heat_integrationtests/functional/test_waitcondition.py 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/functional/test_waitcondition.py 2017-10-27 07:35:34.000000000 +0000 @@ -13,7 +13,7 @@ import json from keystoneclient.v3 import client as keystoneclient -from zaqarclient.queues.v1 import client as zaqarclient +from zaqarclient.queues.v2 import client as zaqarclient from heat_integrationtests.functional import functional_base @@ -63,7 
+63,7 @@ } } - zaqar = zaqarclient.Client(endpoint, conf=conf, version=1.1) + zaqar = zaqarclient.Client(endpoint, conf=conf) queue = zaqar.queue(signal['queue_id']) queue.post({'body': {'data': 'here!', 'id': 'data_id'}, 'ttl': 600}) diff -Nru heat-9.0.0/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po heat-10.0.0~b1/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po --- heat-9.0.0/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,18 @@ +# Andi Chandler , 2017. #zanata +msgid "" +msgstr "" +"Project-Id-Version: heat 10.0.0.dev107\n" +"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" +"POT-Creation-Date: 2017-10-06 21:39+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2017-10-06 07:42+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en-GB\n" +"X-Generator: Zanata 3.9.6\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "Please specify version in auth_url or auth_version in config." +msgstr "Please specify version in auth_url or auth_version in config." 
diff -Nru heat-9.0.0/heat_integrationtests/prepare_test_env.sh heat-10.0.0~b1/heat_integrationtests/prepare_test_env.sh --- heat-9.0.0/heat_integrationtests/prepare_test_env.sh 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/prepare_test_env.sh 2017-10-27 07:35:49.000000000 +0000 @@ -51,19 +51,23 @@ openstack flavor create m1.heat_int --ram 512 openstack flavor create m1.heat_micro --ram 128 -iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-24-1.2.x86_64 +iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-26-1.5.x86_64 iniset $conf_file heat_plugin boot_config_env $DEST/heat-templates/hot/software-config/boot-config/test_image_env.yaml iniset $conf_file heat_plugin heat_config_notify_script $DEST/heat-templates/hot/software-config/elements/heat-config/bin/heat-config-notify iniset $conf_file heat_plugin minimal_image_ref cirros-0.3.5-x86_64-disk # Skip test_cancel_update_server_with_port till bug #1607714 is fixed in nova # Skip ReloadOnSighupTest. 
Most jobs now run with apache+uwsgi, so the test has no significance -iniset $conf_file heat_plugin skip_functional_test_list 'CancelUpdateTest.test_cancel_update_server_with_port, ReloadOnSighupTest, StackSnapshotRestoreTest' +# Skip NotificationTest till bug #1721202 is fixed +iniset $conf_file heat_plugin skip_functional_test_list 'CancelUpdateTest.test_cancel_update_server_with_port, ReloadOnSighupTest, NotificationTest' # Add scenario tests to skip # VolumeBackupRestoreIntegrationTest skipped until failure rate can be reduced ref bug #1382300 # test_server_signal_userdata_format_software_config is skipped untill bug #1651768 is resolved -# StackSnapshotRestoreTest skipped until bug #1694371 is resolved iniset $conf_file heat_plugin skip_scenario_test_list 'SoftwareConfigIntegrationTest, VolumeBackupRestoreIntegrationTest' +if [ "$DISABLE_CONVERGENCE" == "true" ]; then + iniset $conf_file heat_plugin convergence_engine_enabled false +fi + cat $conf_file diff -Nru heat-9.0.0/heat_integrationtests/pre_test_hook.sh heat-10.0.0~b1/heat_integrationtests/pre_test_hook.sh --- heat-9.0.0/heat_integrationtests/pre_test_hook.sh 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/heat_integrationtests/pre_test_hook.sh 2017-10-27 07:35:34.000000000 +0000 @@ -39,13 +39,16 @@ echo -e '[oslo_messaging_notifications]\ndriver=messagingv2\n' >> $localconf -# Disable nova quota check. 
-echo -e '[[post-config|$NOVA_CONF]]\n[DEFAULT]\n' >> $localconf -echo -e 'quota_driver=nova.quota.NoopQuotaDriver\n' >> $localconf - echo "[[local|localrc]]" >> $localconf -# Create the images required for testing -echo "IMAGE_URLS+=http://fedora.bhs.mirrors.ovh.net/linux/releases/24/CloudImages/x86_64/images/Fedora-Cloud-Base-24-1.2.x86_64.qcow2" >> $localconf + +# NOTE(mnaser): This will use the region local mirrors to avoid going out +# to network +if [[ -e /etc/ci/mirror_info.sh ]]; then + source /etc/ci/mirror_info.sh + echo "IMAGE_URLS+=${NODEPOOL_FEDORA_MIRROR}/releases/26/CloudImages/x86_64/images/Fedora-Cloud-Base-26-1.5.x86_64.qcow2" >> $localconf +else + echo "IMAGE_URLS+=https://download.fedoraproject.org/pub/fedora/linux/releases/26/CloudImages/x86_64/images/Fedora-Cloud-Base-26-1.5.x86_64.qcow2" >> $localconf +fi echo "CEILOMETER_PIPELINE_INTERVAL=60" >> $localconf echo "HEAT_ENABLE_ADOPT_ABANDON=True" >> $localconf diff -Nru heat-9.0.0/PKG-INFO heat-10.0.0~b1/PKG-INFO --- heat-9.0.0/PKG-INFO 2017-08-30 11:11:36.000000000 +0000 +++ heat-10.0.0~b1/PKG-INFO 2017-10-27 07:39:27.000000000 +0000 @@ -1,11 +1,12 @@ Metadata-Version: 1.1 Name: heat -Version: 9.0.0 +Version: 10.0.0.0b1 Summary: OpenStack Orchestration Home-page: http://docs.openstack.org/developer/heat/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN +Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== @@ -34,7 +35,7 @@ * Wiki: http://wiki.openstack.org/Heat - * Developer docs: http://docs.openstack.org/developer/heat + * Developer docs: http://docs.openstack.org/heat/latest * Template samples: https://git.openstack.org/cgit/openstack/heat-templates * Agents: https://git.openstack.org/cgit/openstack/heat-agents diff -Nru heat-9.0.0/README.rst heat-10.0.0~b1/README.rst --- heat-9.0.0/README.rst 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/README.rst 2017-10-27 07:35:34.000000000 
+0000 @@ -26,7 +26,7 @@ * Wiki: http://wiki.openstack.org/Heat -* Developer docs: http://docs.openstack.org/developer/heat +* Developer docs: http://docs.openstack.org/heat/latest * Template samples: https://git.openstack.org/cgit/openstack/heat-templates * Agents: https://git.openstack.org/cgit/openstack/heat-agents diff -Nru heat-9.0.0/releasenotes/notes/hidden-designate-domain-record-res-d445ca7f1251b63d.yaml heat-10.0.0~b1/releasenotes/notes/hidden-designate-domain-record-res-d445ca7f1251b63d.yaml --- heat-9.0.0/releasenotes/notes/hidden-designate-domain-record-res-d445ca7f1251b63d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/notes/hidden-designate-domain-record-res-d445ca7f1251b63d.yaml 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,6 @@ +--- +deprecations: + - | + Hidden Designate resource plugins ``OS::Designate::Domain`` and + ``OS::Designate::Record``. To use ``OS::Designate::Zone`` and + ``OS::Designate::RecordSet`` instead. \ No newline at end of file diff -Nru heat-9.0.0/releasenotes/notes/keystone-project-allow-get-attribute-b382fe97694e3987.yaml heat-10.0.0~b1/releasenotes/notes/keystone-project-allow-get-attribute-b382fe97694e3987.yaml --- heat-9.0.0/releasenotes/notes/keystone-project-allow-get-attribute-b382fe97694e3987.yaml 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/notes/keystone-project-allow-get-attribute-b382fe97694e3987.yaml 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,4 @@ +--- +fixes: + - Add attribute schema to `OS::Keystone::Project`. This allow get_attr + function can work with project resource. 
diff -Nru heat-9.0.0/releasenotes/notes/stack-definition-in-functions-3f7f172a53edf535.yaml heat-10.0.0~b1/releasenotes/notes/stack-definition-in-functions-3f7f172a53edf535.yaml --- heat-9.0.0/releasenotes/notes/stack-definition-in-functions-3f7f172a53edf535.yaml 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/notes/stack-definition-in-functions-3f7f172a53edf535.yaml 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,11 @@ +--- +other: + - Intrinsic function plugins will now be passed a StackDefinition object + instead of a Stack object. When accessing resources, the StackDefinition + will return ResourceProxy objects instead of Resource objects. These + classes replicate the parts of the Stack and Resource APIs that are used by + the built-in Function plugins, but authors of custom third-party + Template/Function plugins should audit them to ensure they do not depend on + unstable parts of the API that are no longer accessible. The + StackDefinition and ResourceProxy APIs are considered stable and any future + changes to them will go through the standard deprecation process. diff -Nru heat-9.0.0/releasenotes/source/index.rst heat-10.0.0~b1/releasenotes/source/index.rst --- heat-9.0.0/releasenotes/source/index.rst 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/source/index.rst 2017-10-27 07:35:34.000000000 +0000 @@ -6,6 +6,7 @@ :maxdepth: 1 unreleased + pike ocata newton mitaka diff -Nru heat-9.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po heat-10.0.0~b1/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po --- heat-9.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,1106 @@ +# Andi Chandler , 2017. 
#zanata +msgid "" +msgstr "" +"Project-Id-Version: Heat Release Notes 10.0.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-10-20 16:28+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2017-10-21 02:18+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en-GB\n" +"X-Generator: Zanata 3.9.6\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "" +"'CEPHFS' can be used as a share protocol when using OS::Manila::Share " +"resource." +msgstr "" +"'CEPHFS' can be used as a share protocol when using OS::Manila::Share " +"resource." + +msgid "5.0.1" +msgstr "5.0.1" + +msgid "6.0.0" +msgstr "6.0.0" + +msgid "7.0.0" +msgstr "7.0.0" + +msgid "8.0.0" +msgstr "8.0.0" + +msgid "9.0.0" +msgstr "9.0.0" + +msgid "9.0.0.0rc1-85" +msgstr "9.0.0.0rc1-85" + +msgid "" +"A new 'parameter_merge_strategies' section can be added to the environment " +"file, where 'default' and/or parameter specific merge strategies can be " +"specified." +msgstr "" +"A new 'parameter_merge_strategies' section can be added to the environment " +"file, where 'default' and/or parameter specific merge strategies can be " +"specified." + +msgid "" +"A new OS::Mistral::ExternalResource is added that allows users to manage " +"resources that are not known to Heat by specifying in the template Mistral " +"workflows to handle actions such as create, update and delete." +msgstr "" +"A new OS::Mistral::ExternalResource is added that allows users to manage " +"resources that are not known to Heat by specifying in the template Mistral " +"workflows to handle actions such as create, update and delete." + +msgid "" +"A new OS::Zun::Container resource is added that allows users to manage " +"docker containers powered by Zun. This resource will have an 'addresses' " +"attribute that contains various networking information including the neutron " +"port id. 
This allows users to orchestrate containers with other networking " +"resources (i.e. floating ip)." +msgstr "" +"A new OS::Zun::Container resource is added that allows users to manage " +"Docker containers powered by Zun. This resource will have an 'addresses' " +"attribute that contains various networking information including the Neutron " +"port id. This allows users to orchestrate containers with other networking " +"resources (i.e. floating ip)." + +msgid "" +"A new ``OS::Barbican::CertificateContainer`` resource for storing the " +"secrets that are relevant to certificates." +msgstr "" +"A new ``OS::Barbican::CertificateContainer`` resource for storing the " +"secrets that are relevant to certificates." + +msgid "" +"A new ``OS::Keystone::Region`` resource that helps in managing the lifecycle " +"of keystone region." +msgstr "" +"A new ``OS::Keystone::Region`` resource that helps in managing the lifecycle " +"of Keystone region." + +msgid "" +"A new ``OS::Neutron:AddressScope`` resource that helps in managing the " +"lifecycle of neutron address scope. Availability of this resource depends on " +"availability of neutron ``address-scope`` API extension. This resource can " +"be associated with multiple subnet pools in a one-to-many relationship. The " +"subnet pools under an address scope must not overlap." +msgstr "" +"A new ``OS::Neutron:AddressScope`` resource that helps in managing the " +"lifecycle of Neutron address scope. Availability of this resource depends on " +"availability of Neutron ``address-scope`` API extension. This resource can " +"be associated with multiple subnet pools in a one-to-many relationship. The " +"subnet pools under an address scope must not overlap." + +msgid "" +"A new ``OS::Neutron:Segment`` resource to create routed networks. " +"Availability of this resource depends on availability of neutron ``segment`` " +"API extension." +msgstr "" +"A new ``OS::Neutron:Segment`` resource to create routed networks. 
" +"Availability of this resource depends on availability of Neutron ``segment`` " +"API extension." + +msgid "" +"A new ``OS::Neutron:SubnetPool`` resource that helps in managing the " +"lifecycle of neutron subnet pool. Availability of this resource depends on " +"availability of neutron ``subnet_allocation`` API extension." +msgstr "" +"A new ``OS::Neutron:SubnetPool`` resource that helps in managing the " +"lifecycle of Neutron subnet pool. Availability of this resource depends on " +"availability of neutron ``subnet_allocation`` API extension." + +msgid "" +"A new ``openstack`` client plugin to use python-openstacksdk library and a " +"``neutron.segment`` custom constraint." +msgstr "" +"A new ``openstack`` client plugin to use python-openstacksdk library and a " +"``neutron.segment`` custom constraint." + +msgid "" +"A new property, deployment_swift_data is added to the OS::Nova::Server and " +"OS::Heat::DeployedServer resources. The property is used to define the Swift " +"container and object name that is used for deployment data for the server. " +"If unset, the fallback is the previous behavior where these values will be " +"automatically generated." +msgstr "" +"A new property, deployment_swift_data is added to the OS::Nova::Server and " +"OS::Heat::DeployedServer resources. The property is used to define the Swift " +"container and object name that is used for deployment data for the server. " +"If unset, the fallback is the previous behaviour where these values will be " +"automatically generated." + +msgid "" +"A new resource ``OS::Sahara::Job`` has been added, which allows to create " +"and launch sahara jobs. Job can be launched with resource-signal." +msgstr "" +"A new resource ``OS::Sahara::Job`` has been added, which allows to create " +"and launch Sahara jobs. Job can be launched with resource-signal." + +msgid "" +"A new resource plugin ``OS::Keystone::Domain`` is added to support the " +"lifecycle of keystone domain." 
+msgstr "" +"A new resource plugin ``OS::Keystone::Domain`` is added to support the " +"lifecycle of Keystone domain." + +msgid "" +"A stack can be searched for resources based on their name, status, type, " +"action, id and physcial_resource_id. And this feature is enabled both in " +"REST API and CLI. For more details, please refer orchestration API document " +"and heat CLI user guide." +msgstr "" +"A stack can be searched for resources based on their name, status, type, " +"action, id and physical_resource_id. And this feature is enabled both in " +"REST API and CLI. For more details, please refer orchestration API document " +"and Heat CLI user guide." + +msgid "" +"Add ``map_replace`` function, that takes 2 arguments an input map and a map " +"containing a ``keys`` and/or ``values`` map. key/value substitutions on the " +"input map are performed based on the mappings passed in ``keys`` and " +"``values``." +msgstr "" +"Add ``map_replace`` function, that takes 2 arguments an input map and a map " +"containing a ``keys`` and/or ``values`` map. key/value substitutions on the " +"input map are performed based on the mappings passed in ``keys`` and " +"``values``." + +msgid "" +"Add ``yaql`` function, that takes 2 arguments ``expression`` of type string " +"and ``data`` of type map and evaluates ``expression`` on a given ``data``." +msgstr "" +"Add ``yaql`` function, that takes 2 arguments ``expression`` of type string " +"and ``data`` of type map and evaluates ``expression`` on a given ``data``." + +msgid "" +"Add `converge` parameter for stack update (and update preview) API. This " +"parameter will force resources to observe the reality of resources before " +"actually update it. The value of this parameter can be any boolean value. " +"This will replace config flag `observe_on_update` in near future." +msgstr "" +"Add `converge` parameter for stack update (and update preview) API. 
This " +"parameter will force resources to observe the reality of resources before " +"actually update it. The value of this parameter can be any boolean value. " +"This will replace config flag `observe_on_update` in near future." + +msgid "" +"Add `external_id` attribute for resource to reference on an exists external " +"resource. The resource (with `external_id` attribute) will not able to be " +"updated. This will keep management rights stay externally." +msgstr "" +"Add `external_id` attribute for resource to reference on an exists external " +"resource. The resource (with `external_id` attribute) will not able to be " +"updated. This will keep management rights stay externally." + +msgid "" +"Add `template_dir` to config. Normally heat has template directory `/etc/" +"heat/templates`. This change makes it more official. In the future, it is " +"possible to implement features like access templates directly from global " +"template environment." +msgstr "" +"Add `template_dir` to config. Normally Heat has template directory `/etc/" +"heat/templates`. This change makes it more official. In the future, it is " +"possible to implement features like access templates directly from global " +"template environment." + +msgid "" +"Add attribute schema to `OS::Keystone::Project`. This allow get_attr " +"function can work with project resource." +msgstr "" +"Add attribute schema to `OS::Keystone::Project`. This allow get_attr " +"function can work with project resource." + +msgid "" +"Add new ``OS::Barbican::GenericContainer`` resource for storing arbitrary " +"barbican secrets." +msgstr "" +"Add new ``OS::Barbican::GenericContainer`` resource for storing arbitrary " +"Barbican secrets." + +msgid "" +"Add new ``OS::Barbican::RSAContainer`` resource for storing RSA public keys, " +"private keys, and private key pass phrases." +msgstr "" +"Add new ``OS::Barbican::RSAContainer`` resource for storing RSA public keys, " +"private keys, and private key pass phrases." 
+ +msgid "" +"Add optional 'period' property for Monasca Notification resource. The new " +"added property will now allow the user to tell Monasca the interval in " +"seconds to periodically invoke a webhook until the ALARM state transitions " +"back to an OK state or vice versa. This is useful when the user wants to " +"create a stack which will automatically scale up or scale down more than " +"once if the alarm state continues to be in the same state. To conform to " +"the existing Heat autoscaling behaviour, we manually create the monasca " +"notification resource in Heat with a default interval value of 60." +msgstr "" +"Add optional 'period' property for Monasca Notification resource. The new " +"added property will now allow the user to tell Monasca the interval in " +"seconds to periodically invoke a webhook until the ALARM state transitions " +"back to an OK state or vice versa. This is useful when the user wants to " +"create a stack which will automatically scale up or scale down more than " +"once if the alarm state continues to be in the same state. To conform to " +"the existing Heat autoscaling behaviour, we manually create the Monasca " +"notification resource in Heat with a default interval value of 60." + +msgid "" +"Added a new ``event-sinks`` element to the environment which allows " +"specifying a target where events from the stack are sent. It supports the " +"``zaqar-queue`` element for now." +msgstr "" +"Added a new ``event-sinks`` element to the environment which allows " +"specifying a target where events from the stack are sent. It supports the " +"``zaqar-queue`` element for now." + +msgid "" +"Added new API calls for showing and listing stack outputs ``/stack/outputs`` " +"and ``/stack/outputs/output_key``." +msgstr "" +"Added new API calls for showing and listing stack outputs ``/stack/outputs`` " +"and ``/stack/outputs/output_key``." 
+ +msgid "" +"Added new functionality for showing and listing stack outputs without " +"resolving all outputs during stack initialisation." +msgstr "" +"Added new functionality for showing and listing stack outputs without " +"resolving all outputs during stack initialisation." + +msgid "" +"Added new section ``permutations`` for ``repeat`` function, to decide " +"whether to iterate nested the over all the permutations of the elements in " +"the given lists. If 'permutations' is not specified, we set the default " +"value to true to compatible with before behavior. The args have to be lists " +"instead of dicts if 'permutations' is False because keys in a dict are " +"unordered, and the list args all have to be of the same length." +msgstr "" +"Added new section ``permutations`` for ``repeat`` function, to decide " +"whether to iterate nested the over all the permutations of the elements in " +"the given lists. If 'permutations' is not specified, we set the default " +"value to true to compatible with before behaviour. The args have to be lists " +"instead of dicts if 'permutations' is False because keys in a dict are " +"unordered, and the list args all have to be of the same length." + +msgid "" +"Added using of new API in python-heatclient for ``output_show`` and " +"``output_list``. Now, if version of Heat API is 1.19 or above, Heat client " +"will use API calls ``output_show`` and ``output_list`` instead of parsing of " +"stack get response. If version of Heat API is lower than 1.19, outputs " +"resolve in Heat client as well as before." +msgstr "" +"Added using of new API in python-heatclient for ``output_show`` and " +"``output_list``. Now, if version of Heat API is 1.19 or above, Heat client " +"will use API calls ``output_show`` and ``output_list`` instead of parsing of " +"stack get response. If version of Heat API is lower than 1.19, outputs " +"resolve in Heat client as well as before." 
+ +msgid "" +"Adds a new \"immutable\" boolean field to the parameters section in a HOT " +"template. This gives template authors the ability to mark template " +"parameters as immutable to restrict updating parameters which have " +"destructive effects on the application. A value of True results in the " +"engine rejecting stack-updates that include changes to that parameter. When " +"not specified in the template, \"immutable\" defaults to False to ensure " +"backwards compatibility with old templates." +msgstr "" +"Adds a new \"immutable\" boolean field to the parameters section in a HOT " +"template. This gives template authors the ability to mark template " +"parameters as immutable to restrict updating parameters which have " +"destructive effects on the application. A value of True results in the " +"engine rejecting stack-updates that include changes to that parameter. When " +"not specified in the template, \"immutable\" defaults to False to ensure " +"backwards compatibility with old templates." + +msgid "" +"Adds a new feature to restrict update or replace of a resource when a stack " +"is being updated. Template authors can set ``restricted_actions`` in the " +"``resources`` section of ``resource_registry`` in an environment file to " +"restrict update or replace." +msgstr "" +"Adds a new feature to restrict update or replace of a resource when a stack " +"is being updated. Template authors can set ``restricted_actions`` in the " +"``resources`` section of ``resource_registry`` in an environment file to " +"restrict update or replace." + +msgid "" +"Adds function ``if`` to return corresponding value based on condition " +"evaluation. This function can be used to conditionally set the value of " +"resource properties and outputs." +msgstr "" +"Adds function ``if`` to return corresponding value based on condition " +"evaluation. This function can be used to conditionally set the value of " +"resource properties and outputs." 
+ +msgid "" +"Adds new 'max_server_name_length' configuration option which defaults to the " +"prior upper bound (53) and can be lowered by users (if they need to, for " +"example due to ldap or other internal name limit restrictions)." +msgstr "" +"Adds new 'max_server_name_length' configuration option which defaults to the " +"prior upper bound (53) and can be lowered by users (if they need to, for " +"example due to LDAP or other internal name limit restrictions)." + +msgid "" +"Adds optional section ``condition`` for resource and output definitions. " +"Condition name defined in ``conditions`` and condition functions can be " +"referenced in this section, in order to conditionally create resources or " +"conditionally give outputs of a stack." +msgstr "" +"Adds optional section ``condition`` for resource and output definitions. " +"Condition name defined in ``conditions`` and condition functions can be " +"referenced in this section, in order to conditionally create resources or " +"conditionally give outputs of a stack." + +msgid "" +"Adds optional section ``conditions`` for hot template " +"( heat_template_version.2016-10-14) and ``Conditions`` for cfn template " +"(AWSTemplateFormatVersion.2010-09-09)." +msgstr "" +"Adds optional section ``conditions`` for hot template " +"( heat_template_version.2016-10-14) and ``Conditions`` for cfn template " +"(AWSTemplateFormatVersion.2010-09-09)." + +msgid "" +"Adds some condition functions, like ``equals``, ``not``, ``and`` and ``or``, " +"these condition functions can be used in ``conditions`` section to define " +"one or more conditions which are evaluated based on input parameter values " +"provided when a user creates or updates a stack." 
+msgstr "" +"Adds some condition functions, like ``equals``, ``not``, ``and`` and ``or``, " +"these condition functions can be used in ``conditions`` section to define " +"one or more conditions which are evaluated based on input parameter values " +"provided when a user creates or updates a stack." + +msgid "" +"All developer, contributor, and user content from various guides in " +"openstack-manuals has been moved in-tree and are published at `https://docs." +"openstack.org/heat/pike/`." +msgstr "" +"All developer, contributor, and user content from various guides in " +"openstack-manuals has been moved in-tree and are published at `https://docs." +"openstack.org/heat/pike/`." + +msgid "" +"Allow to configure Heat service to forbid creation of stacks containing " +"Volume resources with ``deletion_policy`` set to ``Snapshot`` when there is " +"no Cinder backup service available." +msgstr "" +"Allow to configure Heat service to forbid creation of stacks containing " +"Volume resources with ``deletion_policy`` set to ``Snapshot`` when there is " +"no Cinder backup service available." + +msgid "Allow to set or update the tags for OS::Neutron::Net resource." +msgstr "Allow to set or update the tags for OS::Neutron::Net resource." + +msgid "Allow to set or update the tags for OS::Neutron::Port resource." +msgstr "Allow to set or update the tags for OS::Neutron::Port resource." + +msgid "Allow to set or update the tags for OS::Neutron::Router resource." +msgstr "Allow to set or update the tags for OS::Neutron::Router resource." + +msgid "Allow to set or update the tags for OS::Neutron::Subnet resource." +msgstr "Allow to set or update the tags for OS::Neutron::Subnet resource." + +msgid "Allow to set or update the tags for OS::Neutron::SubnetPool resource." +msgstr "Allow to set or update the tags for OS::Neutron::SubnetPool resource." 
+ +msgid "Bug Fixes" +msgstr "Bug Fixes" + +msgid "Critical Issues" +msgstr "Critical Issues" + +msgid "Current Series Release Notes" +msgstr "Current Series Release Notes" + +msgid "" +"Custom constraints for all sahara resources added - sahara.cluster, sahara." +"cluster_template, sahara.data_source, sahara.job_binary, sahara.job_type." +msgstr "" +"Custom constraints for all sahara resources added - sahara.cluster, sahara." +"cluster_template, sahara.data_source, sahara.job_binary, sahara.job_type." + +msgid "Deprecation Notes" +msgstr "Deprecation Notes" + +msgid "" +"Designate v1 resource plugins OS::Designate::Domain and OS::Designate::" +"Record are deprecated." +msgstr "" +"Designate v1 resource plugins OS::Designate::Domain and OS::Designate::" +"Record are deprecated." + +msgid "" +"Designate v2 resource plugins OS::Designate::Zone and OS::Designate::" +"RecordSet are newly added." +msgstr "" +"Designate v2 resource plugins OS::Designate::Zone and OS::Designate::" +"RecordSet are newly added." + +msgid "Heat Release Notes" +msgstr "Heat Release Notes" + +msgid "" +"Heat does not work with keystone identity federation. This is a known " +"limitation as heat uses keystone trusts for deferred authentication and " +"trusts don't work with federated keystone. For more details check `https://" +"etherpad.openstack.org/p/pike-ptg-cross-project-federation`." +msgstr "" +"Heat does not work with Keystone identity federation. This is a known " +"limitation as Heat uses keystone trusts for deferred authentication and " +"trusts don't work with federated Keystone. For more details check `https://" +"etherpad.openstack.org/p/pike-ptg-cross-project-federation`." + +msgid "" +"Hidden Designate resource plugins ``OS::Designate::Domain`` and ``OS::" +"Designate::Record``. To use ``OS::Designate::Zone`` and ``OS::Designate::" +"RecordSet`` instead." +msgstr "" +"Hidden Designate resource plugins ``OS::Designate::Domain`` and ``OS::" +"Designate::Record``. 
To use ``OS::Designate::Zone`` and ``OS::Designate::" +"RecordSet`` instead." + +msgid "" +"If upgrading with pre-icehouse stacks which contain resources that create " +"users (such as OS::Nova::Server, OS::Heat::SoftwareDeployment, and OS::Heat::" +"WaitConditionHandle), it is possible that the users will not be removed upon " +"stack deletion due to the removal of a legacy fallback code path. In such a " +"situation, these users will require manual removal." +msgstr "" +"If upgrading with pre-Icehouse stacks which contain resources that create " +"users (such as OS::Nova::Server, OS::Heat::SoftwareDeployment, and OS::Heat::" +"WaitConditionHandle), it is possible that the users will not be removed upon " +"stack deletion due to the removal of a legacy fallback code path. In such a " +"situation, these users will require manual removal." + +msgid "" +"Intrinsic function plugins will now be passed a StackDefinition object " +"instead of a Stack object. When accessing resources, the StackDefinition " +"will return ResourceProxy objects instead of Resource objects. These classes " +"replicate the parts of the Stack and Resource APIs that are used by the " +"built-in Function plugins, but authors of custom third-party Template/" +"Function plugins should audit them to ensure they do not depend on unstable " +"parts of the API that are no longer accessible. The StackDefinition and " +"ResourceProxy APIs are considered stable and any future changes to them will " +"go through the standard deprecation process." +msgstr "" +"Intrinsic function plugins will now be passed a StackDefinition object " +"instead of a Stack object. When accessing resources, the StackDefinition " +"will return ResourceProxy objects instead of Resource objects. 
These classes " +"replicate the parts of the Stack and Resource APIs that are used by the " +"built-in Function plugins, but authors of custom third-party Template/" +"Function plugins should audit them to ensure they do not depend on unstable " +"parts of the API that are no longer accessible. The StackDefinition and " +"ResourceProxy APIs are considered stable and any future changes to them will " +"go through the standard deprecation process." + +msgid "" +"Introduce a Zun client plugin module that will be used by the Zun's " +"resources that are under development." +msgstr "" +"Introduce a Zun client plugin module that will be used by the Zun's " +"resources that are under development." + +msgid "Known Issues" +msgstr "Known Issues" + +msgid "Liberty Series Release Notes" +msgstr "Liberty Series Release Notes" + +msgid "" +"Magnum recently changed terminology to more intuitively convey key concepts " +"in order to align with industry standards. \"Bay\" is now \"Cluster\" and " +"\"BayModel\" is now \"ClusterTemplate\". This release deprecates the old " +"names in favor of the new." +msgstr "" +"Magnum recently changed terminology to more intuitively convey key concepts " +"in order to align with industry standards. \"Bay\" is now \"Cluster\" and " +"\"BayModel\" is now \"ClusterTemplate\". This release deprecates the old " +"names in favour of the new." + +msgid "" +"Magnum terminology deprecations * `OS::Magnum::Bay` is now deprecated, " +"should use `OS::Magnum::Cluster` instead * `OS::Magnum::BayModel` is now " +"deprecated, should use `OS::Magnum::ClusterTemplate` instead Deprecation " +"warnings are printed for old usages." +msgstr "" +"Magnum terminology deprecations * `OS::Magnum::Bay` is now deprecated, " +"should use `OS::Magnum::Cluster` instead * `OS::Magnum::BayModel` is now " +"deprecated, should use `OS::Magnum::ClusterTemplate` instead Deprecation " +"warnings are printed for old usages." 
+ +msgid "Mitaka Series Release Notes" +msgstr "Mitaka Series Release Notes" + +msgid "" +"Multiple environment files may be passed to the server in the files " +"dictionary along with an ordered list of the environment file names. The " +"server will generate the stack's environment from the provided files rather " +"than requiring the client to merge the environments together. This is " +"optional; the existing interface to pass in the already resolved environment " +"is still present." +msgstr "" +"Multiple environment files may be passed to the server in the files " +"dictionary along with an ordered list of the environment file names. The " +"server will generate the stack's environment from the provided files rather " +"than requiring the client to merge the environments together. This is " +"optional; the existing interface to pass in the already resolved environment " +"is still present." + +msgid "New Features" +msgstr "New Features" + +msgid "" +"New ``OS::Zaqar::Notification`` and ``OS::Zaqar::MistralTrigger`` resource " +"types allow users to attach to Zaqar queues (respectively) notifications in " +"general, and notifications that trigger Mistral workflow executions in " +"particular." +msgstr "" +"New ``OS::Zaqar::Notification`` and ``OS::Zaqar::MistralTrigger`` resource " +"types allow users to attach to Zaqar queues (respectively) notifications in " +"general, and notifications that trigger Mistral workflow executions in " +"particular." + +msgid "" +"New ``OS::Zaqar::Subscription`` and ``OS::Zaqar::MistralTrigger`` resource " +"types allow users to attach to Zaqar queues (respectively) notifications in " +"general, and notifications that trigger Mistral workflow executions in " +"particular." +msgstr "" +"New ``OS::Zaqar::Subscription`` and ``OS::Zaqar::MistralTrigger`` resource " +"types allow users to attach to Zaqar queues (respectively) notifications in " +"general, and notifications that trigger Mistral workflow executions in " +"particular." 
+ +msgid "" +"New config section ``volumes`` with new config option " +"``[volumes]backups_enabled`` (defaults to ``True``). Operators that do not " +"have Cinder backup service deployed in their cloud are encouraged to set " +"this option to ``False``." +msgstr "" +"New config section ``volumes`` with new config option " +"``[volumes]backups_enabled`` (defaults to ``True``). Operators that do not " +"have Cinder backup service deployed in their cloud are encouraged to set " +"this option to ``False``." + +msgid "" +"New item key 'allocate_network' of 'networks' with allowed values 'auto' and " +"'none' for OS::Nova::Server, to support 'Give Me a Network' nova feature. " +"Specifying 'auto' would auto allocate a network topology for the project if " +"there is no existing network available; Specifying 'none' means no " +"networking will be allocated for the created server. This feature requires " +"nova API micro version 2.37 or later and the ``auto-allocated-topology`` API " +"is available in the Neutron networking service." +msgstr "" +"New item key 'allocate_network' of 'networks' with allowed values 'auto' and " +"'none' for OS::Nova::Server, to support 'Give Me a Network' nova feature. " +"Specifying 'auto' would auto allocate a network topology for the project if " +"there is no existing network available; Specifying 'none' means no " +"networking will be allocated for the created server. This feature requires " +"nova API micro version 2.37 or later and the ``auto-allocated-topology`` API " +"is available in the Neutron networking service." + +msgid "" +"New resource ``OS::Cinder::Quota`` is added to manage cinder quotas. Cinder " +"quotas are operational limits to projects on cinder block storage resources. " +"These include gigabytes, snapshots, and volumes." +msgstr "" +"New resource ``OS::Cinder::Quota`` is added to manage cinder quotas. Cinder " +"quotas are operational limits to projects on cinder block storage resources. 
" +"These include gigabytes, snapshots, and volumes." + +msgid "" +"New resource ``OS::Neutron::LBaaS::HealthMonitor`` is added to create and " +"manage Health Monitors which watch status of the Load Balanced servers." +msgstr "" +"New resource ``OS::Neutron::LBaaS::HealthMonitor`` is added to create and " +"manage Health Monitors which watch status of the Load Balanced servers." + +msgid "" +"New resource ``OS::Neutron::LBaaS::Listener`` is added to create and manage " +"Listeners which represent a listening endpoint for the Load Balancer." +msgstr "" +"New resource ``OS::Neutron::LBaaS::Listener`` is added to create and manage " +"Listeners which represent a listening endpoint for the Load Balancer." + +msgid "" +"New resource ``OS::Neutron::LBaaS::LoadBalancer`` is added to create and " +"manage Load Balancers which allow traffic to be directed between servers." +msgstr "" +"New resource ``OS::Neutron::LBaaS::LoadBalancer`` is added to create and " +"manage Load Balancers which allow traffic to be directed between servers." + +msgid "" +"New resource ``OS::Neutron::LBaaS::PoolMember`` is added to create and " +"manage Pool members which represent a single backend node." +msgstr "" +"New resource ``OS::Neutron::LBaaS::PoolMember`` is added to create and " +"manage Pool members which represent a single backend node." + +msgid "" +"New resource ``OS::Neutron::LBaaS::Pool`` is added to create and manage " +"Pools which represent a group of nodes. Pools define the subnet where nodes " +"reside, the balancing algorithm, and the nodes themselves." +msgstr "" +"New resource ``OS::Neutron::LBaaS::Pool`` is added to create and manage " +"Pools which represent a group of nodes. Pools define the subnet where nodes " +"reside, the balancing algorithm, and the nodes themselves." + +msgid "New resource ``OS::Neutron::Quota`` is added to manage neutron quotas." +msgstr "New resource ``OS::Neutron::Quota`` is added to manage Neutron quotas." 
+ +msgid "New resource ``OS::Neutron::Trunk`` is added to manage Neutron Trunks." +msgstr "New resource ``OS::Neutron::Trunk`` is added to manage Neutron Trunks." + +msgid "" +"New resource ``OS::Nova::Quota`` is added to enable an admin to manage " +"Compute service quotas for a specific project." +msgstr "" +"New resource ``OS::Nova::Quota`` is added to enable an admin to manage " +"Compute service quotas for a specific project." + +msgid "" +"New resource ``OS::Senlin::Cluster`` is added to create a cluster in senlin. " +"A cluster is a group of homogeneous nodes." +msgstr "" +"New resource ``OS::Senlin::Cluster`` is added to create a cluster in Senlin. " +"A cluster is a group of homogeneous nodes." + +msgid "" +"New resource ``OS::Senlin::Node`` is added to create a node in senlin. Node " +"represents a physical object exposed by other OpenStack services." +msgstr "" +"New resource ``OS::Senlin::Node`` is added to create a node in Senlin. Node " +"represents a physical object exposed by other OpenStack services." + +msgid "" +"New resource ``OS::Senlin::Policy`` is added to create a policy in senlin. " +"Policy is a set of rules that can be checked and/or enforced when an Action " +"is performed on a Cluster." +msgstr "" +"New resource ``OS::Senlin::Policy`` is added to create a policy in Senlin. " +"Policy is a set of rules that can be checked and/or enforced when an Action " +"is performed on a Cluster." + +msgid "" +"New resource ``OS::Senlin::Profile`` is added to create a profile in senlin. " +"Profile is a module used for creating nodes, it's the definition of a node." +msgstr "" +"New resource ``OS::Senlin::Profile`` is added to create a profile in Senlin. " +"Profile is a module used for creating nodes, it's the definition of a node." + +msgid "" +"New resource ``OS::Senlin::Receiver`` is added to create a receiver in " +"senlin. Receiver can be used to hook the engine to some external event/alarm " +"sources." 
+msgstr ""
+"New resource ``OS::Senlin::Receiver`` is added to create a receiver in "
+"Senlin. Receiver can be used to hook the engine to some external event/alarm "
+"sources."
+
+msgid ""
+"New resources for Neutron Load Balancer version 2. These are unique for "
+"version 2 and do not support or mix with existing version 1 resources."
+msgstr ""
+"New resources for Neutron Load Balancer version 2. These are unique for "
+"version 2 and do not support or mix with existing version 1 resources."
+
+msgid "Newton Series Release Notes"
+msgstr "Newton Series Release Notes"
+
+msgid ""
+"Now heat keystone user name charaters limit increased from 64 to 255. Any "
+"extra charaters will lost when truncate the name to the last 255 charaters."
+msgstr ""
+"Now the Heat Keystone user name character limit has increased from 64 to 255. "
+"Any extra characters will be lost when truncating the name to the last 255 characters."
+
+msgid ""
+"OS::Aodh::CompositeAlarm resource plugin is added to manage Aodh composite "
+"alarm, aim to replace OS::Aodh::CombinationAlarm which has been deprecated "
+"in Newton release."
+msgstr ""
+"OS::Aodh::CompositeAlarm resource plugin is added to manage Aodh composite "
+"alarm, aim to replace OS::Aodh::CombinationAlarm which has been deprecated "
+"in Newton release."
+
+msgid ""
+"OS::Cinder::QoSAssociation resource plugin is added to support cinder QoS "
+"Specs Association with Volume Types, which is provided by cinder ``qos-"
+"specs`` API extension."
+msgstr ""
+"OS::Cinder::QoSAssociation resource plugin is added to support cinder QoS "
+"Specs Association with Volume Types, which is provided by Cinder ``qos-"
+"specs`` API extension."
+
+msgid ""
+"OS::Cinder::QoSSpecs resource plugin added to support cinder QoS Specs, "
+"which is provided by cinder ``qos-specs`` API extension."
+msgstr ""
+"OS::Cinder::QoSSpecs resource plugin added to support cinder QoS Specs, "
+"which is provided by cinder ``qos-specs`` API extension."
+ +msgid "" +"OS::Glance::Image resource plug-in is updated to support tagging when image " +"is created or updated as part of stack." +msgstr "" +"OS::Glance::Image resource plug-in is updated to support tagging when image " +"is created or updated as part of stack." + +msgid "" +"OS::Magnum::Cluster resource plugin added to support magnum cluster feature, " +"which is provided by magnum ``cluster`` API." +msgstr "" +"OS::Magnum::Cluster resource plugin added to support magnum cluster feature, " +"which is provided by Magnum ``cluster`` API." + +msgid "" +"OS::Magnum::ClusterTemplate resource plugin added to support magnum cluster " +"template feature, which is provided by magnum ``clustertemplates`` API." +msgstr "" +"OS::Magnum::ClusterTemplate resource plugin added to support magnum cluster " +"template feature, which is provided by Magnum ``clustertemplates`` API." + +msgid "" +"OS::Monasca::AlarmDefinition and OS::Monasca::Notification resource plug-ins " +"are now supported by heat community as monasca became offcial OpenStack " +"project." +msgstr "" +"OS::Monasca::AlarmDefinition and OS::Monasca::Notification resource plug-ins " +"are now supported by Heat community as Monasca became official OpenStack " +"project." + +msgid "" +"OS::Neutron::QoSBandwidthLimitRule resource plugin is added to support " +"neutron QoS bandwidth limit rule, which is provided by neutron ``qos`` API " +"extension." +msgstr "" +"OS::Neutron::QoSBandwidthLimitRule resource plugin is added to support " +"Neutron QoS bandwidth limit rule, which is provided by Neutron ``qos`` API " +"extension." + +msgid "" +"OS::Neutron::QoSPolicy resource plugin is added to support QoS policy, which " +"is provided by neutron ``qos`` API extension." +msgstr "" +"OS::Neutron::QoSPolicy resource plugin is added to support QoS policy, which " +"is provided by Neutron ``qos`` API extension." 
+ +msgid "" +"OS::Neutron::RBACPolicy resource plugin is added to support RBAC policy, " +"which is used to manage RBAC policy in Neutron. This resource creates and " +"manages Neutron RBAC policy, which allows to share Neutron networks to " +"subsets of tenants." +msgstr "" +"OS::Neutron::RBACPolicy resource plugin is added to support RBAC policy, " +"which is used to manage RBAC policy in Neutron. This resource creates and " +"manages Neutron RBAC policy, which allows to share Neutron networks to " +"subsets of tenants." + +msgid "" +"OS::Nova::HostAggregate resource plugin is added to support host aggregate, " +"which is provided by nova ``aggregates`` API extension." +msgstr "" +"OS::Nova::HostAggregate resource plugin is added to support host aggregate, " +"which is provided by nova ``aggregates`` API extension." + +msgid "" +"OS::Nova::Server now supports ephemeral_size and ephemeral_format properties " +"for block_device_mapping_v2 property. Property ephemeral_size is integer, " +"that require flavor with ephemeral disk size greater that 0. Property " +"ephemeral_format is string with allowed values ext2, ext3, ext4, xfs and " +"ntfs for Windows guests; it is optional and if has no value, uses default, " +"defined in nova config file." +msgstr "" +"OS::Nova::Server now supports ephemeral_size and ephemeral_format properties " +"for block_device_mapping_v2 property. Property ephemeral_size is an integer, " +"that requires a flavour with an ephemeral disk size greater that 0. Property " +"ephemeral_format is a string with allowed values ext2, ext3, ext4, xfs and " +"ntfs for Windows guests; it is optional and if it has no value, it uses " +"defaults defined in nova config file." + +msgid "Ocata Series Release Notes" +msgstr "Ocata Series Release Notes" + +msgid "Other Notes" +msgstr "Other Notes" + +msgid "" +"ParameterGroups section is added to the nested stacks, for the output of the " +"stack validate templates." 
+msgstr "" +"ParameterGroups section is added to the nested stacks, for the output of the " +"stack validate templates." + +msgid "" +"Parameters and parameter defaults specified in the environment file would be " +"merged as per their specified strategies." +msgstr "" +"Parameters and parameter defaults specified in the environment file would be " +"merged as per their specified strategies." + +msgid "Pike Series Release Notes" +msgstr "Pike Series Release Notes" + +msgid "Prelude" +msgstr "Prelude" + +msgid "" +"Previously 'parameters' and 'parameter_defaults' specified in an environment " +"file used to overwrite their existing values." +msgstr "" +"Previously 'parameters' and 'parameter_defaults' specified in an environment " +"file used to overwrite their existing values." + +msgid "" +"Previously the event list REST API call only returned events for the " +"specified stack even when that stack contained nested stack resources. This " +"meant that fetching all nested events required an inefficient recursive " +"client-side implementation." +msgstr "" +"Previously the event list REST API call only returned events for the " +"specified stack even when that stack contained nested stack resources. This " +"meant that fetching all nested events required an inefficient recursive " +"client-side implementation." + +msgid "" +"Resource ``OS::Neutron::Net`` now supports ``l2_adjacency`` atribute on " +"whether L2 connectivity is available across the network or not." +msgstr "" +"Resource ``OS::Neutron::Net`` now supports ``l2_adjacency`` attribute on " +"whether L2 connectivity is available across the network or not." + +msgid "" +"Resource ``OS::Neutron::Subnet`` now supports ``segment`` optional property " +"to specify a segment." +msgstr "" +"Resource ``OS::Neutron::Subnet`` now supports ``segment`` optional property " +"to specify a segment." 
+ +msgid "" +"Resource ``OS::neutron::Subnet`` now supports ``subnetpool`` optional " +"property, that will automate the allocation of CIDR for the subnet from the " +"specified subnet pool." +msgstr "" +"Resource ``OS::neutron::Subnet`` now supports ``subnetpool`` optional " +"property, that will automate the allocation of CIDR for the subnet from the " +"specified subnet pool." + +msgid "" +"Resource attributes are now stored at the time a resource is created or " +"updated, allowing for fast resolution of outputs without having to retrieve " +"live data from the underlying physical resource. To minimise compatibility " +"problems, the behaviour of the `show` attribute, the `with_attr` option to " +"the resource show API, and stacks that do not yet use the convergence " +"architecture (due to the convergence_engine being disabled at the time they " +"were created) is unchanged - in each of these cases live data will still be " +"returned." +msgstr "" +"Resource attributes are now stored at the time a resource is created or " +"updated, allowing for fast resolution of outputs without having to retrieve " +"live data from the underlying physical resource. To minimise compatibility " +"problems, the behaviour of the `show` attribute, the `with_attr` option to " +"the resource show API, and stacks that do not yet use the convergence " +"architecture (due to the convergence_engine being disabled at the time they " +"were created) is unchanged - in each of these cases live data will still be " +"returned." + +msgid "" +"Resources ``OS::Neutron::Port`` and ``OS::Neutron::Net`` now support " +"``qos_policy`` optional property, that will associate with QoS policy to " +"offer different service levels based on the policy rules." +msgstr "" +"Resources ``OS::Neutron::Port`` and ``OS::Neutron::Net`` now support " +"``qos_policy`` optional property, that will associate with QoS policy to " +"offer different service levels based on the policy rules." 
+
+msgid ""
+"Since Aodh drop support for combination alarm, therefore OS::Aodh::"
+"CombinationAlarm is now mark as hidden resource with directly inheriting "
+"from None resource which will make the resource do nothing when handling any "
+"actions (other than delete). And please don't use it. Old resource which "
+"created with that resource type still able to delete. It's recommand to "
+"switch that resource type ASAP, since we will remove that resource soon."
+msgstr ""
+"Since Aodh dropped support for combination alarms, OS::Aodh::"
+"CombinationAlarm is now marked as a hidden resource, directly inheriting "
+"from the None resource, which makes the resource do nothing when handling any "
+"actions (other than delete). Please don't use it. Old resources which were "
+"created with that resource type can still be deleted. It is recommended to "
+"switch from that resource type ASAP, since we will remove that resource soon."
+
+msgid "Start using reno to manage release notes."
+msgstr "Start using Reno to manage release notes."
+
+msgid "Support external resource reference in template."
+msgstr "Support external resource reference in template."
+
+msgid ""
+"Support to managing rbac policy for 'qos_policy' resource, which allows to "
+"share Neutron qos policy to subsets of tenants."
+msgstr ""
+"Support to managing RBAC policy for 'qos_policy' resource, which allows to "
+"share Neutron QoS policy to subsets of tenants."
+
+msgid ""
+"Supports internal DNS resolution and integration with external DNS services "
+"for neutron resources. Template authors can use the ``dns_name`` and "
+"``dns_domain`` properties of neutron resource plugins for this functionality."
+msgstr ""
+"Supports internal DNS resolution and integration with external DNS services "
+"for Neutron resources. Template authors can use the ``dns_name`` and "
+"``dns_domain`` properties of Neutron resource plugins for this functionality."
+ +msgid "" +"Supports to get the webmks console url for OS::Nova::Server resource. And " +"this requires nova api version equal or greater than 2.8." +msgstr "" +"Supports to get the WebMKS console URL for OS::Nova::Server resource. And " +"this requires Nova api version equal or greater than 2.8." + +msgid "" +"Template validation is improved to ignore the given set of error codes. For " +"example, heat will report template as invalid one, if it does not find any " +"required OpenStack services in the cloud deployment and while authoring the " +"template, user might wants to avoid such scenarios, so that (s)he could " +"create valid template without bothering about run-time environments. Please " +"refer the API documentation of validate template for more details." +msgstr "" +"Template validation is improved to ignore the given set of error codes. For " +"example, Heat will report template as invalid one, if it does not find any " +"required OpenStack services in the cloud deployment and while authoring the " +"template, user might wants to avoid such scenarios, so that (s)he could " +"create valid template without bothering about run-time environments. Please " +"refer the API documentation of validate template for more details." + +msgid "" +"The 'attachments' attribute of OS::Cinder::Volume has been deprecated in " +"favor of 'attachments_list', which has the correct type of LIST. This makes " +"this data easier for end users to process." +msgstr "" +"The 'attachments' attribute of OS::Cinder::Volume has been deprecated in " +"favour of 'attachments_list', which has the correct type of LIST. This makes " +"this data easier for end users to process." + +msgid "" +"The 'contains' function was added, which checks whether the specified value " +"is in a sequence. In addition, the new function can be used as a condition " +"function." +msgstr "" +"The 'contains' function was added, which checks whether the specified value " +"is in a sequence. 
In addition, the new function can be used as a condition " +"function." + +msgid "" +"The AWS::EC2::EIP domain is always assumed to be 'vpc', since nova-network " +"is not supported in OpenStack any longer." +msgstr "" +"The AWS::EC2::EIP domain is always assumed to be 'vpc', since nova-network " +"is not supported in OpenStack any longer." + +msgid "" +"The OS::Nova::Server now supports a new property user_data_update_policy, " +"which may be set to either 'REPLACE' (default) or 'IGNORE' if you wish to " +"allow user_data updates to be ignored on stack update. This is useful when " +"managing a group of servers where changed user_data should apply to new " +"servers without replacing existing servers." +msgstr "" +"The OS::Nova::Server now supports a new property user_data_update_policy, " +"which may be set to either 'REPLACE' (default) or 'IGNORE' if you wish to " +"allow user_data updates to be ignored on stack update. This is useful when " +"managing a group of servers where changed user_data should apply to new " +"servers without replacing existing servers." + +msgid "" +"The Pike version of HOT (2017-09-01) adds a make_url function to simplify " +"combining data from different sources into a URL with correct handling for " +"escaping and IPv6 addresses." +msgstr "" +"The Pike version of HOT (2017-09-01) adds a make_url function to simplify " +"combining data from different sources into a URL with correct handling for " +"escaping and IPv6 addresses." + +msgid "" +"The ``resource mark unhealthy`` command now accepts either a logical " +"resource name (as it did previously) or a physical resource ID to identify " +"the resource to be marked unhealthy." +msgstr "" +"The ``resource mark unhealthy`` command now accepts either a logical " +"resource name (as it did previously) or a physical resource ID to identify " +"the resource to be marked unhealthy." 
+ +msgid "" +"The event list GET REST API call now has a different behaviour when the " +"'nested_depth' parameter is set to an integer greater than zero. The " +"response will contain all events down to the requested nested depth." +msgstr "" +"The event list GET REST API call now has a different behaviour when the " +"'nested_depth' parameter is set to an integer greater than zero. The " +"response will contain all events down to the requested nested depth." + +msgid "" +"The list_concat function was added, which concats several lists using " +"python's extend function." +msgstr "" +"The list_concat function was added, which concats several lists using " +"python's extend function." + +msgid "" +"The list_concat_unique function was added, which behaves identically to the " +"function ``list_concat`` to concat several lists using python's extend " +"function and make sure without repeating items." +msgstr "" +"The list_concat_unique function was added, which behaves identically to the " +"function ``list_concat`` to concat several lists using python's extend " +"function and make sure without repeating items." + +msgid "This feature only supports templates with version over `2016-10-14`." +msgstr "This feature only supports templates with version over `2016-10-14`." + +msgid "" +"Two new policies soft-affinity and soft-anti-affinity have been supported " +"for the OS::Nova::ServerGroup resource." +msgstr "" +"Two new policies soft-affinity and soft-anti-affinity have been supported " +"for the OS::Nova::ServerGroup resource." + +msgid "Upgrade Notes" +msgstr "Upgrade Notes" + +msgid "" +"When 'nested_depth' is set the response also includes an extra entry in the " +"'links' list with 'rel' set to 'root_stack'. This can be used by client side " +"implementations to detect whether it is necessary to fall back to client-" +"side recurisive event fetching." 
+msgstr "" +"When 'nested_depth' is set the response also includes an extra entry in the " +"'links' list with 'rel' set to 'root_stack'. This can be used by client side " +"implementations to detect whether it is necessary to fall back to client-" +"side recursive event fetching." + +msgid "" +"cinder.qos_specs constraint added to support to validate QoS Specs attribute." +msgstr "" +"cinder.qos_specs constraint added to support to validate QoS Specs attribute." + +msgid "" +"nova-network is no longer supported in OpenStack. Please use OS::Neutron::" +"FloatingIPAssociation and OS::Neutron::FloatingIP in place of OS::Nova::" +"FloatingIPAssociation and OS::Nova::FloatingIP" +msgstr "" +"nova-network is no longer supported in OpenStack. Please use OS::Neutron::" +"FloatingIPAssociation and OS::Neutron::FloatingIP in place of OS::Nova::" +"FloatingIPAssociation and OS::Nova::FloatingIP" + +msgid "" +"nova.host constraint is added to support to validate host attribute which is " +"provided by nova ``host`` API extension." +msgstr "" +"nova.host constraint is added to support to validate host attribute which is " +"provided by Nova ``host`` API extension." diff -Nru heat-9.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po heat-10.0.0~b1/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po --- heat-9.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po 2017-10-27 07:35:34.000000000 +0000 @@ -2,13 +2,13 @@ # Yuko Fukuda , 2017. 
#zanata msgid "" msgstr "" -"Project-Id-Version: Heat Release Notes 9.0.0\n" +"Project-Id-Version: Heat Release Notes 10.0.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-05-01 21:00+0000\n" +"POT-Creation-Date: 2017-10-13 18:39+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2017-04-28 05:25+0000\n" +"PO-Revision-Date: 2017-10-12 05:59+0000\n" "Last-Translator: Yuko Fukuda \n" "Language-Team: Japanese\n" "Language: ja\n" @@ -34,13 +34,37 @@ msgid "8.0.0" msgstr "8.0.0" +msgid "9.0.0" +msgstr "9.0.0" + msgid "" "A new 'parameter_merge_strategies' section can be added to the environment " "file, where 'default' and/or parameter specific merge strategies can be " "specified." msgstr "" -"新規 'parameter_merge_strategies' 節が環境ファイルに追加可能になりまし" -"た。'default' やパラメタ固有のマージ方針(または両方)が指定できます。" +"新規セクション 'parameter_merge_strategies' が環境ファイルに追加可能になりま" +"した。'default' やパラメタ固有のマージ方針(または両方)が指定できます。" + +msgid "" +"A new OS::Mistral::ExternalResource is added that allows users to manage " +"resources that are not known to Heat by specifying in the template Mistral " +"workflows to handle actions such as create, update and delete." +msgstr "" +"新規リソースOS::Mistral::ExternalResource が追加されました。Mistralのワークフ" +"ローで作成、更新、削除などのアクションを処理させることで、Heatが認識していな" +"いリソースの管理が可能になります。" + +msgid "" +"A new OS::Zun::Container resource is added that allows users to manage " +"docker containers powered by Zun. This resource will have an 'addresses' " +"attribute that contains various networking information including the neutron " +"port id. This allows users to orchestrate containers with other networking " +"resources (i.e. floating ip)." 
+msgstr "" +"新規リソースOS::Zun::Container が追加されました。Zunで実行されるDockerコンテ" +"ナの管理が可能になります。本リソースの'addresses'属性では、NeutronのポートID" +"などのネットワーク情報を保持します。これによって、他のネットワークリソースと" +"コンテナのオーケストレーションが可能になります(例:フローティングIP)。" msgid "" "A new ``OS::Barbican::CertificateContainer`` resource for storing the " @@ -95,6 +119,18 @@ "クライアントプラグイン ``openstack`` が新たに追加されました" msgid "" +"A new property, deployment_swift_data is added to the OS::Nova::Server and " +"OS::Heat::DeployedServer resources. The property is used to define the Swift " +"container and object name that is used for deployment data for the server. " +"If unset, the fallback is the previous behavior where these values will be " +"automatically generated." +msgstr "" +"OS::Nova::Server and OS::Heat::DeployedServerリソースに新規プロパティ" +"deployment_swift_data が追加されました。サーバの配備データで使用するSwiftコン" +"テナとオブジェクト名を本プロパティで定義できます。未設定の場合、過去の動作に" +"フォールバックします(値の自動生成)。" + +msgid "" "A new resource ``OS::Sahara::Job`` has been added, which allows to create " "and launch sahara jobs. Job can be launched with resource-signal." msgstr "" @@ -137,6 +173,17 @@ "``expression`` が検証されます。" msgid "" +"Add `converge` parameter for stack update (and update preview) API. This " +"parameter will force resources to observe the reality of resources before " +"actually update it. The value of this parameter can be any boolean value. " +"This will replace config flag `observe_on_update` in near future." +msgstr "" +"スタック更新(と更新プレビュー)APIに`converge`パラメーターが追加されました。本" +"パラメーターを指定することで、更新前にリソースの利用可否を強制的に確認しま" +"す。本パラメーターには、任意のブール値を指定できます。将来的に、" +"`observe_on_update`フラグは本パラメーターで置き換えられる予定です。" + +msgid "" "Add `external_id` attribute for resource to reference on an exists external " "resource. The resource (with `external_id` attribute) will not able to be " "updated. This will keep management rights stay externally." 
@@ -209,6 +256,20 @@ "参照・一覧表示するための機能が新規に追加されました。" msgid "" +"Added new section ``permutations`` for ``repeat`` function, to decide " +"whether to iterate nested the over all the permutations of the elements in " +"the given lists. If 'permutations' is not specified, we set the default " +"value to true to compatible with before behavior. The args have to be lists " +"instead of dicts if 'permutations' is False because keys in a dict are " +"unordered, and the list args all have to be of the same length." +msgstr "" +"``repeat``関数に新しく``permutations``セクションが追加されました。特定のリス" +"ト内の要素の順列をネストして反復するかを選択できます。'permutations'が指定さ" +"れていない場合、過去の動作に従います。'permutations' がFalseの場合は、引数は" +"dictsではなくlistsでなければなりません。理由は、dictsは整列されていないこと" +"と、lists変数はすべて同じ長さでなければならないためです。" + +msgid "" "Added using of new API in python-heatclient for ``output_show`` and " "``output_list``. Now, if version of Heat API is 1.19 or above, Heat client " "will use API calls ``output_show`` and ``output_list`` instead of parsing of " @@ -295,6 +356,15 @@ "ラメタ値が判定されます。" msgid "" +"All developer, contributor, and user content from various guides in " +"openstack-manuals has been moved in-tree and are published at `https://docs." +"openstack.org/heat/pike/`." +msgstr "" +"すべての開発者、コントリビューター、ユーザー向けコンテンツはopenstack-manuals" +"からツリー内部に移動しており、`https://docs.openstack.org/heat/pike/`で公開さ" +"れています。" + +msgid "" "Allow to configure Heat service to forbid creation of stacks containing " "Volume resources with ``deletion_policy`` set to ``Snapshot`` when there is " "no Cinder backup service available." @@ -306,9 +376,24 @@ msgid "Allow to set or update the tags for OS::Neutron::Net resource." msgstr "OS::Neutron::Net リソースのタグの定義または更新が可能になりました。" +msgid "Allow to set or update the tags for OS::Neutron::Port resource." +msgstr "OS::Neutron::Portリソースのタグの設定と更新が可能になりました。" + +msgid "Allow to set or update the tags for OS::Neutron::Router resource."
+msgstr "OS::Neutron::Routerリソースのタグの設定と更新が可能になりました。" + +msgid "Allow to set or update the tags for OS::Neutron::Subnet resource." +msgstr "OS::Neutron::Subnetリソースのタグの設定と更新が可能になりました。" + +msgid "Allow to set or update the tags for OS::Neutron::SubnetPool resource." +msgstr "OS::Neutron::SubnetPoolリソースのタグの設定と更新が可能になりました。" + msgid "Bug Fixes" msgstr "バグ修正" +msgid "Critical Issues" +msgstr "重要な問題" + msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" @@ -359,6 +444,9 @@ "開発中のZunリソースが将来的に使うZunクライアントプラグインモジュールが追加さ" "れました。" +msgid "Known Issues" +msgstr "既知の問題" + msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" @@ -594,12 +682,25 @@ msgstr "その他の注意点" msgid "" +"ParameterGroups section is added to the nested stacks, for the output of the " +"stack validate templates." +msgstr "" +"ネストされたスタックにParameterGroupsセクションが追加されました。スタック検証" +"テンプレートの出力に使用されます。" + +msgid "" "Parameters and parameter defaults specified in the environment file would be " "merged as per their specified strategies." msgstr "" "環境ファイルに指定されたパラメタとパラメタのデフォルト値は指定された方針に" "従ってマージされます。" +msgid "Pike Series Release Notes" +msgstr "Pike バージョンのリリースノート" + +msgid "Prelude" +msgstr "前置" + msgid "" "Previously 'parameters' and 'parameter_defaults' specified in an environment " "file used to overwrite their existing values." diff -Nru heat-9.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po heat-10.0.0~b1/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po --- heat-9.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po 2017-08-30 11:08:12.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po 2017-10-27 07:35:34.000000000 +0000 @@ -1,9 +1,9 @@ # minwook-shin , 2017. 
#zanata msgid "" msgstr "" -"Project-Id-Version: Heat Release Notes 9.0.0\n" +"Project-Id-Version: Heat Release Notes 10.0.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2017-08-12 12:08+0000\n" +"POT-Creation-Date: 2017-10-06 21:37+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -26,15 +26,6 @@ msgid "8.0.0" msgstr "8.0.0" -msgid "9.0.0.0b1" -msgstr "9.0.0.0b1" - -msgid "9.0.0.0b2" -msgstr "9.0.0.0b2" - -msgid "9.0.0.0b3" -msgstr "9.0.0.0b3" - msgid "Bug Fixes" msgstr "버그 수정" diff -Nru heat-9.0.0/releasenotes/source/pike.rst heat-10.0.0~b1/releasenotes/source/pike.rst --- heat-9.0.0/releasenotes/source/pike.rst 1970-01-01 00:00:00.000000000 +0000 +++ heat-10.0.0~b1/releasenotes/source/pike.rst 2017-10-27 07:35:34.000000000 +0000 @@ -0,0 +1,6 @@ +=================================== + Pike Series Release Notes +=================================== + +.. release-notes:: + :branch: stable/pike diff -Nru heat-9.0.0/requirements.txt heat-10.0.0~b1/requirements.txt --- heat-9.0.0/requirements.txt 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/requirements.txt 2017-10-27 07:35:34.000000000 +0000 @@ -5,55 +5,55 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0 Babel!=2.4.0,>=2.3.4 # BSD croniter>=0.3.4 # MIT License -cryptography!=2.0,>=1.6 # BSD/Apache-2.0 +cryptography!=2.0,>=1.9 # BSD/Apache-2.0 debtcollector>=1.2.0 # Apache-2.0 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT -keystoneauth1>=3.1.0 # Apache-2.0 -keystonemiddleware>=4.12.0 # Apache-2.0 -lxml!=3.7.0,>=2.3 # BSD -netaddr!=0.7.16,>=0.7.13 # BSD -openstacksdk>=0.9.17 # Apache-2.0 -oslo.cache>=1.5.0 # Apache-2.0 -oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.context>=2.14.0 # Apache-2.0 -oslo.db>=4.24.0 # Apache-2.0 -oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 -oslo.log>=3.22.0 # Apache-2.0 -oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 -oslo.middleware>=3.27.0 # Apache-2.0 +keystoneauth1>=3.2.0 # Apache-2.0 
+keystonemiddleware>=4.17.0 # Apache-2.0 +lxml!=3.7.0,>=3.4.1 # BSD +netaddr>=0.7.18 # BSD +openstacksdk>=0.9.18 # Apache-2.0 +oslo.cache>=1.26.0 # Apache-2.0 +oslo.config>=4.6.0 # Apache-2.0 +oslo.concurrency>=3.20.0 # Apache-2.0 +oslo.context!=2.19.1,>=2.14.0 # Apache-2.0 +oslo.db>=4.27.0 # Apache-2.0 +oslo.i18n>=3.15.3 # Apache-2.0 +oslo.log>=3.30.0 # Apache-2.0 +oslo.messaging>=5.29.0 # Apache-2.0 +oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.23.0 # Apache-2.0 -oslo.reports>=0.6.0 # Apache-2.0 -oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.20.0 # Apache-2.0 +oslo.reports>=1.18.0 # Apache-2.0 +oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 +oslo.service>=1.24.0 # Apache-2.0 +oslo.utils>=3.28.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 -oslo.versionedobjects>=1.17.0 # Apache-2.0 +oslo.versionedobjects>=1.28.0 # Apache-2.0 PasteDeploy>=1.5.0 # MIT pycrypto>=2.6 # Public Domain -aodhclient>=0.7.0 # Apache-2.0 +aodhclient>=0.9.0 # Apache-2.0 python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 # Apache-2.0 python-ceilometerclient>=2.5.0 # Apache-2.0 -python-cinderclient>=3.1.0 # Apache-2.0 -python-designateclient>=1.5.0 # Apache-2.0 +python-cinderclient>=3.2.0 # Apache-2.0 +python-designateclient>=2.7.0 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 -python-heatclient>=1.6.1 # Apache-2.0 +python-heatclient>=1.10.0 # Apache-2.0 python-keystoneclient>=3.8.0 # Apache-2.0 python-magnumclient>=2.0.0 # Apache-2.0 -python-manilaclient>=1.12.0 # Apache-2.0 +python-manilaclient>=1.16.0 # Apache-2.0 python-mistralclient>=3.1.0 # Apache-2.0 python-monascaclient>=1.7.0 # Apache-2.0 python-neutronclient>=6.3.0 # Apache-2.0 -python-novaclient>=9.0.0 # Apache-2.0 -python-openstackclient!=3.10.0,>=3.3.0 # Apache-2.0 -python-saharaclient>=1.1.0 # Apache-2.0 +python-novaclient>=9.1.0 # Apache-2.0 +python-openstackclient>=3.12.0 # Apache-2.0 +python-saharaclient>=1.2.0 # Apache-2.0 python-senlinclient>=1.1.0 # Apache-2.0 
python-swiftclient>=3.2.0 # Apache-2.0 python-troveclient>=2.2.0 # Apache-2.0 python-zaqarclient>=1.0.0 # Apache-2.0 python-zunclient>=0.2.0 # Apache-2.0 pytz>=2013.6 # MIT -PyYAML>=3.10.0 # MIT +PyYAML>=3.10 # MIT requests>=2.14.2 # Apache-2.0 tenacity>=3.2.1 # Apache-2.0 Routes>=2.3.1 # MIT @@ -62,4 +62,4 @@ sqlalchemy-migrate>=0.11.0 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0 WebOb>=1.7.1 # MIT -yaql>=1.1.0 # Apache 2.0 License +yaql>=1.1.3 # Apache 2.0 License diff -Nru heat-9.0.0/setup.cfg heat-10.0.0~b1/setup.cfg --- heat-9.0.0/setup.cfg 2017-08-30 11:11:36.000000000 +0000 +++ heat-10.0.0~b1/setup.cfg 2017-10-27 07:39:27.000000000 +0000 @@ -19,6 +19,12 @@ Programming Language :: Python :: 3.5 [files] +data_files = + etc/heat = + etc/heat/api-paste.ini + etc/heat/policy.json + etc/heat/environment.d = etc/heat/environment.d/* + etc/heat/templates = etc/heat/templates/* packages = heat heat_integrationtests @@ -43,7 +49,7 @@ heat.common.config = heat.common.config:list_opts heat.common.context = heat.common.context:list_opts heat.common.crypt = heat.common.crypt:list_opts - heat.common.heat_keystoneclient = heat.engine.clients.os.keystone.heat_keystoneclient:list_opts + heat.engine.clients.os.keystone.heat_keystoneclient = heat.engine.clients.os.keystone.heat_keystoneclient:list_opts heat.common.wsgi = heat.common.wsgi:list_opts heat.engine.clients = heat.engine.clients:list_opts heat.engine.notification = heat.engine.notification:list_opts @@ -167,6 +173,8 @@ heat_template_version.ocata = heat.engine.hot.template:HOTemplate20170224 heat_template_version.2017-09-01 = heat.engine.hot.template:HOTemplate20170901 heat_template_version.pike = heat.engine.hot.template:HOTemplate20170901 + heat_template_version.2018-03-02 = heat.engine.hot.template:HOTemplate20180302 + heat_template_version.queens = heat.engine.hot.template:HOTemplate20180302 tempest.test_plugins = heat_tests = heat_integrationtests.plugin:HeatTempestPlugin diff -Nru heat-9.0.0/test-requirements.txt 
heat-10.0.0~b1/test-requirements.txt --- heat-9.0.0/test-requirements.txt 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/test-requirements.txt 2017-10-27 07:35:34.000000000 +0000 @@ -8,23 +8,23 @@ coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD kombu!=4.0.2,>=4.0.0 # BSD -mock>=2.0 # BSD -mox3!=0.19.0,>=0.7.0 # Apache-2.0 +mock>=2.0.0 # BSD +mox3>=0.20.0 # Apache-2.0 PyMySQL>=0.7.6 # MIT License -openstackdocstheme>=1.16.0 # Apache-2.0 -os-api-ref>=1.0.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 +openstackdocstheme>=1.17.0 # Apache-2.0 +os-api-ref>=1.4.0 # Apache-2.0 +os-testr>=1.0.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 -qpid-python;python_version=='2.7' # Apache-2.0 -psycopg2>=2.5 # LGPL/ZPL +qpid-python>=0.26;python_version=='2.7' # Apache-2.0 +psycopg2>=2.6.2 # LGPL/ZPL sphinx>=1.6.2 # BSD testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT -testresources>=0.2.4 # Apache-2.0/BSD -reno!=2.3.1,>=1.8.0 # Apache-2.0 +testresources>=2.0.0 # Apache-2.0/BSD +reno>=2.5.0 # Apache-2.0 # Next are used in integration tests only -os-collect-config # Apache-2.0 -paramiko>=2.0 # LGPLv2.1+ +os-collect-config>=0.1.35 # Apache-2.0 +paramiko>=2.0.0 # LGPLv2.1+ tempest>=16.1.0 # Apache-2.0 gabbi>=1.35.0 # Apache-2.0 diff -Nru heat-9.0.0/tox.ini heat-10.0.0~b1/tox.ini --- heat-9.0.0/tox.ini 2017-08-30 11:08:16.000000000 +0000 +++ heat-10.0.0~b1/tox.ini 2017-10-27 07:35:34.000000000 +0000 @@ -9,7 +9,7 @@ OS_TEST_PATH=heat/tests TESTR_START_DIR=heat/tests usedevelop = True -install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/pike} {opts} {packages} +install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands =