diff -Nru mistral-4.0.0/api-ref/source/conf.py mistral-5.0.0~b2/api-ref/source/conf.py --- mistral-4.0.0/api-ref/source/conf.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/api-ref/source/conf.py 2017-06-09 12:48:26.000000000 +0000 @@ -99,8 +99,8 @@ # html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] -html_last_updated_fmt = subprocess.Popen( - git_cmd, stdout=subprocess.PIPE).communicate()[0] +html_last_updated_fmt = subprocess.check_output( + git_cmd).decode('utf-8') # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff -Nru mistral-4.0.0/AUTHORS mistral-5.0.0~b2/AUTHORS --- mistral-4.0.0/AUTHORS 2017-02-22 13:45:26.000000000 +0000 +++ mistral-5.0.0~b2/AUTHORS 2017-06-09 12:52:03.000000000 +0000 @@ -1,4 +1,5 @@ Abhishek Chanda +Adriano Petrich Alexander Kuznetsov Anastasia Kuznetsova Andras Kovi @@ -10,6 +11,8 @@ Bertrand Lallau Bhaskar Duvvuri Bob HADDLETON +Bob Haddleton +Boris Bobrov Boris Pavlovic Brad P. 
Crochet Béla Vancsics @@ -38,6 +41,7 @@ Istvan Imre Jeff Peeler Jeffrey Zhang +Jeremy Liu Jeremy Stanley Ji zhaoxuan Ji-Wei @@ -67,6 +71,7 @@ Moshe Elisha Márton Csuha Nguyen Hung Phuong +Nikolay Mahotkin Nikolay Mahotkin Nina Goradia Nishant Kumar @@ -85,6 +90,7 @@ Renato Recio Rinat Sabitov Roman Dobosz +Ryan Brady Sergey Kolekonov Sergey Murashov Shaik Apsar @@ -92,16 +98,19 @@ Sharat Sharma Shuquan Huang Spencer Yu +Steven Hardy Thierry Carrez Thomas Goirand Thomas Herve Timur Nurlygayanov TimurNurlygayanov +TuanLuong Venkata Mahesh Jonnalagadda W Chan Winson Chan Winson Chan Xavier Hardy +XieYingYun Yaroslav Lobankov Zane Bitter Zhao Lei @@ -111,18 +120,21 @@ avnish bhavenst caoyue +chenaidong1 cheneydc dzimine fengchaoyang gecong1973 gengchc2 hardik +hnyang hparekh keliang kennedda kong liu-sheng lixinhui +loooosy lvdongbing manasdk noakoffman @@ -130,11 +142,14 @@ rakhmerov ravikiran rico.lin +ricolin syed ahsan shamim zaidi tengqm venkatamahesh wangzhh xpress +yong sheng gong +ypbao yushangbin zhangdetong zhangguoqing diff -Nru mistral-4.0.0/ChangeLog mistral-5.0.0~b2/ChangeLog --- mistral-4.0.0/ChangeLog 2017-02-22 13:45:25.000000000 +0000 +++ mistral-5.0.0~b2/ChangeLog 2017-06-09 12:52:03.000000000 +0000 @@ -1,36 +1,185 @@ CHANGES ======= -4.0.0 ------ +5.0.0.0b2 +--------- + +* Minor nits to README +* Added style enfore checks for assert statements +* Make "triggered\_by" work in case of "join" tasks +* Stop using abbreviation DSL in document +* Update python-neutronclient version +* [Trusts] Fixing trusts deletion +* Updated from global requirements +* Remove 'sphinxcontrib.autohttp.flask' from sphinx config +* Fixing indentation in docs +* Updated from global requirements +* Updated from global requirements +* Fix doc generation for python 3 +* Propagate "evaluate\_env" workflow parameter to subworkflows +* [Regions] Fixing determining keystone for actions +* Add one more test for task() function used in on-success +* Add 'runtime\_context' to task 
execution REST resource +* Add 'triggered\_by' into task execution runtime context +* Refactor rest\_utils +* Optimize API layer: using from\_db\_model() instead of from\_dict() +* Get rid of ambiguity in region\_name +* Update AdHoc Actions to support context data references +* Adding mistral\_lib actions to mistral +* Update Docker README +* Updated from global requirements +* Refactor db model methods +* Updated from global requirements +* Add release note for "action\_region" support +* Adding log to db\_sync +* Add "action\_region" param for OpenStack actions +* Updated from global requirements +* Release notes for "evaluate\_env" +* Add 'evaluate\_env' workflow parameter +* Add hide\_args=True to @profiler.trace() where it may cause problems +* Remove unused logging import +* Fix WSGI script for gunicorn +* Revert "Support transition to keystone auth plugin" +* Change service name to workflowv2 in docs +* Support transition to keystone auth plugin +* Fix a typo +* Force Python 2 for pep8 linting +* Add support for mistral-lib to Mistral +* Updated from global requirements +* Refactor Kombu-based RPC +* Make rpc\_backend not engine specific +* Add option to run actions locally on the engine +* Don't save @property methods with other attributes +* Fix the keystone auth url problem +* Optimize the link address + +5.0.0.0b1 +--------- +* Enable WSGI under Apache in devstack +* Add "Project Goals 2017" to README.rst +* Fix the doc for 'concurrency' policy +* Add documentation for the engine commands +* Optimizing lang schema validation +* Advanced publishing: add 'global' function to access global variables +* Advanced publishing: add publishing of global variables +* Advanced publishing: change workflow lang schema +* Fix serialization issue +* Fix a description of 'executor\_thread\_pool\_size' option in Kombu RPC +* Changed the README.rst and added debug guide +* Updated from global requirements +* Disable pbrs auto python-api generation +* Set the basepython 
for the venv tox environment +* Use Jinja2 sandbox environment +* Limit the number of finished executions +* Add Apache License Content in index.rst +* Fix gate failure +* Add release note for resource RBAC feature +* Updated from global requirements +* Rework the CLI Guide +* Allow admin user to get workflow of other tenants +* Role based resource access control - delete executions +* Use the Mistral syntax highlighting on the dsl v2 page +* Updated from global requirements +* Replace six.iteritems() with .items() +* Role based resource access control - update executions +* Add sem-ver flag so pbr generates correct version +* Remove the empty using\_yaql gude +* Use the plain syntax highlighting in the webapi example +* Remove the highlighting choice 'HTTP' +* Add a Mistral lexer for pygments +* Don't create actions when inspection fails +* Change Http action result content encoding +* Updated from global requirements +* Role based resource access control - get executions +* Remove unnecessary setUp function in testcase +* Add check for idempotent id in tempest tests +* Remove unnecessary tearDown function in testcase +* Fix work of task() without task name within on-clause cases +* Explicitly set charset to UTF-8 in rest\_utils for webob.Response +* Updated from global requirements +* Replaces uuid.uuid4 with uuidutils.generate\_uuid() +* Surpress log with context data and db data +* Add missing schema validation and unit tests for 'publish-on-error' +* Add release note for 'created\_at' support in execution() +* Add 'created\_at' to execution() yaql function +* Change some 3rd party package default log levels +* Remove log translations +* Trim yaql/jinja operation log +* Fix cinder/heat base import +* Add missing swift actions +* Use LOG.exception when adding an OpenStack action fails +* Updated from global requirements +* Add hacking for code style checks +* Fix multi\_vim tempest test failure +* Updated from global requirements +* Add unit test for deleting 
workflows by admin +* Improve database object access checking +* Updated from global requirements +* Log stack trace if action initialization faild +* Updated from global requirements +* Refactor methods in utils related to dicts +* Refactor workflow/action input validation +* Fully override default json values with user input +* Add head\_object action mapping for swift +* Updated from global requirements +* Deleting the expired execution with batch size +* Allow users to set the test run concurrency +* Include the missing lines in the coverage report +* Don't use 'master' as that isn't always true +* [doc] Changed the output fields in quickstart guide +* Improve the CONTRIBUTING.rst +* Add \`coverage erase\` to the cover report +* Fix update workflow by admin +* Rename package 'workbook' to 'lang' +* Fix get\_next\_execution\_time +* Add idempotent\_id decorator to tempest testcases +* Use utcnow() in expired executions policy test +* Every unit test creates and registers every OpenStack action +* Updated from global requirements +* Add idempotent\_id decorator to tempest testcases +* Verify the retry policy when passed in via variables +* Reduce the number of with-items and retried in the concurrency test +* Remove the delay from the direct workflow rerun tests +* External OpenStack action mapping file support +* Update docs for tasks function +* Remove output from list action executions API +* Update test requirement +* Updated from global requirements +* Correction in workflow state change handling +* Update Dockerfile to use Xenial +* Force Python 2 for documentation builds +* Fix memory leak related to cached lookups * Fix for coverage job showing 0% coverage for kombu -* Refactor RPC serialization: add polymophic serializer * Add Keycloak authentication doc for client side * Add details into docs about semantics of 'on-XXX' clauses * Add Keycloak authentication doc for server side +* Refactor RPC serialization: add polymophic serializer +* Updated from 
global requirements * Add reno for tasks function -* Remove '__task_execution' from task outbound context +* Updated from global requirements +* Remove '\_\_task\_execution' from task outbound context +* Updated from global requirements +* Revert "External OpenStack action mapping file support" +* Prepare for using standard python tests * Fix for failing services on py3 with kombu driver * Remove support for py34 +* External OpenStack action mapping file support * Remove wrong licensing * Refactor RPC serialization: remove JsonPayloadSerializer class * Update reno for stable/ocata -* Enforce style check for xrange() -* Fix doc build if git is absent -* Prepare for using standard python tests -* Update UPPER_CONSTRAINTS_FILE for stable/ocata -* Update .gitreview for stable/ocata 4.0.0.0rc1 ---------- * Fix for failing gates +* Enforce style check for xrange() * Fix for failing services on py3 with kombu driver * Fix try import of openstack client modules * Remove some profiler traces, logs, use utils.cut() where needed * Remove unnecessary evaluation of outbound context * Optimizing utils.cut() for big dictionaries and lists +* Fix doc build if git is absent 4.0.0.0b3 --------- @@ -48,7 +197,7 @@ * Make 'task' function work w/o a task name * using utcnow instead of now in expiration policy * Enforce style check for assertIsNone -* Add action "std.test_dict" +* Add action "std.test\_dict" * Register Javascript action additionally as 'js' action * Role based resource access control - update workflows * Remove insecure flag from the Baremetal Introspection client @@ -69,19 +218,19 @@ * Using sys.exit(main()) instead of main() * Use i18n for help text * Added gnocchi action pack -* Add 'retry_on_deadlock' decorator -* Fix two failing test cases in test_tasks +* Add 'retry\_on\_deadlock' decorator +* Fix two failing test cases in test\_tasks * Add the "has" DB filter * Use assertGreater() or assertLess() * Fix version response from root controller * Adding releasenotes 
for aodh action support * Updated from global requirements -* Refactor 'stress_test' to fit the current layout better +* Refactor 'stress\_test' to fit the current layout better * Add rally tests for 'join': 100 and 500 parallel tasks * Add a test for 'with-items' task: count=100, concurrency=10 * Add aodh actions to mistral * Disable invalid API test till it's fixed -* Copy _50_mistral.py file from enabled folder +* Copy \_50\_mistral.py file from enabled folder * Fix doc for missing dashboard config file * Role based resource access control - get workflows * Make body of std.email optional @@ -90,7 +239,7 @@ * Fix 'with-items' task completion condition * Apply locking to control 'with-items' concurrency * Slightly improve 'with-items' tests -* Get rid of with_items.py module in favor of WithItemsTask class +* Get rid of with\_items.py module in favor of WithItemsTask class * Refactor and improve 'with-items' algorithms * Fix docs in README.rst * Fix configuration generator @@ -110,18 +259,18 @@ * change the cron-trigger execution time from localtime to UTC * Use the with keyword dealing with file objects * Modify the link in 'README.rst' -* Modify the function "_get_spec_version(spec_dict)" +* Modify the function "\_get\_spec\_version(spec\_dict)" * Update the wording in the actions terminology docs * Remove commented-out Apache 2 classifier from setup.cfg * Updated from global requirements * Fix for failing kombu gate -* modify something in 'dsl_v2.rst' +* modify something in 'dsl\_v2.rst' * Fix two errors in YAML example and a error in action doc * Handling MistralException in default executor * Fix a syntax error in yaml example -* std.email action requires a smtp_password +* std.email action requires a smtp\_password * Change version '1.0' to '2.0' -* Add descriptions for on_task_state_change parameters +* Add descriptions for on\_task\_state\_change parameters * Updated from global requirements * Added releasenote for retry policy update * Cleanup obvious 
issues in 'with-items' tests @@ -140,9 +289,9 @@ * Make CI gate for unit tests on mysql work * Fix the default configuration file path * Updated from global requirements -* Mock the HTTP action in the with_items tests +* Mock the HTTP action in the with\_items tests * Fix devstack plugin compatibility -* Updated the retries_remain statement +* Updated the retries\_remain statement * Updated from global requirements * Add Ironic RAID actions * Revert "Remove unused scripts in tools" @@ -170,16 +319,16 @@ * Fix REST API dangling transactions * Fix error message format in action handler * Fix error message format in other task handler methods -* Migrate mistral task_type +* Migrate mistral task\_type * Fix error message format for task run and continue * Fix missing exception decorators in REST API * Remove unused scripts in tools -* Replace uuid4() with generate_uuid() from oslo_utils +* Replace uuid4() with generate\_uuid() from oslo\_utils * Updated from global requirements * Add type to tasks API -* Handle region_name in openstack actions +* Handle region\_name in openstack actions * Add more tests to mistral rally -* Replace oslo_utils.timeutils.isotime +* Replace oslo\_utils.timeutils.isotime * Adding Variables to Log Messages * Updated from global requirements * cors: update default configuration @@ -196,7 +345,7 @@ * Added additional info in devstack/readme.rst * Fixing 'join' task completion logic * Updated from global requirements -* Removal of unneccessary directory in run_tests.sh +* Removal of unneccessary directory in run\_tests.sh * Get service catalog from token info * Add one more test for YAQL error message format * Change format of YAQL errors @@ -232,7 +381,7 @@ * Make deafult executor use async messaging when returning action results * Disable Client Caching * Updated from global requirements -* Revert "Update UPPER_CONSTRAINTS_FILE for stable/newton" +* Revert "Update UPPER\_CONSTRAINTS\_FILE for stable/newton" * Remove environment data from task 
inbound context * Use parenthesis to wrap strings over multiple lines * Updated from global requirements @@ -241,16 +390,16 @@ * Updated from global requirements * Add tests to check deletion of delayed calls on WF execution delete * Delete all necessary delayed calls on WF stop -* Update UPPER_CONSTRAINTS_FILE for stable/newton +* Update UPPER\_CONSTRAINTS\_FILE for stable/newton * Fix for timeouting actions on run-action -* Fix a typo in access_control.py +* Fix a typo in access\_control.py * Adding a script for fast mistralclient help generation * Make Javascript implementation configurable * Add unit test case for deletion of execution in case of (error and cancelled) * Avoid storing workflow input in task inbound context -* Replace assertEqual(None, *) with assertIsNone in tests +* Replace assertEqual(None, \*) with assertIsNone in tests * Updated from global requirements -* Add __ne__ built-in function +* Add \_\_ne\_\_ built-in function * Update reno for stable/newton * Remove context.spawn * Correct documentation about task attributes 'action' and 'workflow' @@ -268,10 +417,10 @@ * Add functional tests for event engine functions * Added unit tests for Workbook and Workflow filtering * Delete unnecessary comma -* Fixed task in_bound context when retrying +* Fixed task in\_bound context when retrying * Enable changing of rpc driver from devstack -* Take os_actions_endpoint_type into use -* Fix mistral API docs Fixing v2.rst to refer to new module paths, and adding the cron trigger param to POST v2/cron_triggers/ documentation +* Take os\_actions\_endpoint\_type into use +* Fix mistral API docs Fixing v2.rst to refer to new module paths, and adding the cron trigger param to POST v2/cron\_triggers/ documentation * Add event trigger REST API * Using count() instead of all() for getting incompleted tasks * Fix for raising exception directly to kombu @@ -285,7 +434,7 @@ * cast to str for allowable types * Raise NotImplementedError instead of NotImplemented * 
Optionally include the output when retrieving all executions -* Add __ne__ built-in function +* Add \_\_ne\_\_ built-in function * Fix getting URLs / and /v2 * Add event configuration for event trigger @@ -293,9 +442,9 @@ --------- * Add 'uuid' YAQL function -* Sync tools/tox_install.sh +* Sync tools/tox\_install.sh * Updated from global requirements -* Fix for 'Cannot authenticate without an auth_url' +* Fix for 'Cannot authenticate without an auth\_url' * Add client caching for OpenStack actions * Add setuptools to requirements.txt * Task publish does not overwrite variable in context Edit @@ -303,7 +452,7 @@ * Clean imports in code * TrivialFix: Remove logging import unused * Add a note to the documentation about std.fail -* Some minor code optimization in post_test_hook.sh +* Some minor code optimization in post\_test\_hook.sh * Updated from global requirements * Fix for not working 'run-action' on kombu driver * Updated from global requirements @@ -322,11 +471,11 @@ * Updated from global requirements * Updated from global requirements * Fix task post completion scheduling -* Fix _possible_route() method to account for not completed tasks +* Fix \_possible\_route() method to account for not completed tasks * Add 'wait-before' policy test with two chained tasks * Fix task 'defer' * Filtering support for actions -* Increase size of 'task_executions_v2.unique_key' column +* Increase size of 'task\_executions\_v2.unique\_key' column * Add 'join after join' test * Slightly improve workflow trace logging * Fix workflow and join completion logic @@ -344,8 +493,8 @@ * Use actual session for ironic-inspector action population * Added support for SSL connection in mistra-api server * Towards non-locking model: decouple WF completion check via scheduler -* Towards non-locking model: use insert_or_ignore() for delayed calls -* Towards non-locking model: add insert_or_ignore() on DB API +* Towards non-locking model: use insert\_or\_ignore() for delayed calls +* Towards 
non-locking model: add insert\_or\_ignore() on DB API * Fix the use of both adhoc actions and "with-items" in workflows * Towards non-locking model: removing env update from WF controller * Updated from global requirements @@ -353,8 +502,8 @@ * Updated from global requirements * Add state info for synchronous actions run from CLI * Towards non-locking model: fix obvious workflow controller issues -* Towards non-locking model: Add 'unique_key' for delayed calls -* Add _get_fake_client to ironic-inspector actions +* Towards non-locking model: Add 'unique\_key' for delayed calls +* Add \_get\_fake\_client to ironic-inspector actions * Add target parameters to REST API * Update docs and add release not for safe-rerun flag * Invalidate workflow spec cache on workflow definition updates @@ -362,24 +511,24 @@ * Splitting executions into different tables * Added releasenote for https support * Add cancelled state to executions -* Enable user to use transport_url in kombu driver +* Enable user to use transport\_url in kombu driver * Fixed trivial issue in exception message * Updated from global requirements * Fix DSLv2 example according to Mistral Neuton * Updated from global requirements -* Use 'rpc_response_timeout' in kombu driver +* Use 'rpc\_response\_timeout' in kombu driver * Use Paginate query even if 'limit'or 'marker' is not set * Remove task result for collection REST requests * Allow to use both name and id to update action definitions * Remove some inconsistency in DB api -* Get rid of oslo_db warning about "id" not being in "sort_keys" +* Get rid of oslo\_db warning about "id" not being in "sort\_keys" * Add event engine service * Error handling test: error in 'publish' for a task with 'on-error' * Added 'pip install -r requirements.txt' instruction * Executor fails actions if they are redelivered * Move the remainder of REST resources to resources.py * Move REST resources action, action execution and task to resources.py -* Add the new endpoint 
/v2/tasks//workflow_executions +* Add the new endpoint /v2/tasks//workflow\_executions * Allow to use both name and id to access action definitions * Pass 'safe-rerun' param to RPC layer * Initialize RPC-related flag when starting API @@ -390,7 +539,7 @@ * Updated from global requirements * Fix SPAG errors in Quickstart and Main Features docs * Fix some trivial SPAG errors in docs -* Rename package mistral.engine.rpc to mistral.engine.rpc_backend +* Rename package mistral.engine.rpc to mistral.engine.rpc\_backend * Fixing filtering in task controller * Add Python 3.5 classifier and venv * Updated from global requirements @@ -398,7 +547,7 @@ 3.0.0.0b2 --------- -* Fix for YaqlEvaluationException in std.create_instance workflow +* Fix for YaqlEvaluationException in std.create\_instance workflow * Updated from global requirements * Add tests for Kombu driver * Release note for KeyCloak OIDC support @@ -408,10 +557,10 @@ * Add proper error handling for task continuation * Add error handling tests: invalid workflow input, error in first task * Add more tests for error handling -* Fix utility print_executions method +* Fix utility print\_executions method * Log warn openstack action generation failures -* Fix Magnum action _get_fake_class -* Fix Murano action _get_fake_class +* Fix Magnum action \_get\_fake\_class +* Fix Murano action \_get\_fake\_class * Stylistic cleanups to lazy loading patch * Add configuration option for endpoint type * Add filters to all collections listing functions (tags included) @@ -420,7 +569,7 @@ * Make RPC implementation configurable * Adding OsloRPC server and client * Add support for custom YAQL functions -* Remove obsolete config option "use_mistral_rpc" +* Remove obsolete config option "use\_mistral\_rpc" * Add tacker actions in mistral * Update Expiration Policy Documentation * New RPC layer implementation @@ -435,15 +584,15 @@ * Updated from global requirements * Updated from global requirements * Use client credentials to retrieve 
service list -* Remove std.mistral_http action from tests -* Doc updated for oslo_policy configuration +* Remove std.mistral\_http action from tests +* Doc updated for oslo\_policy configuration * Updated from global requirements * Remove .mailmap file * Fix mysql driver installation section in readme * Fix API inconsistencies with GET /v2/workflows * Fixed fake clients of glance and designate -* Fixed get_actions_list script to get glance actions -* Fixed get_actions_list script to get designate actions +* Fixed get\_actions\_list script to get glance actions +* Fixed get\_actions\_list script to get designate actions * Example Mistral docker container broke due to oslo.policy update * Refactored tempest tests * Release note for magnum actions support @@ -457,11 +606,11 @@ * Added murano actions * Add magnum bays actions * Enable osprofiler to measure performance -* Rename the to_string method to to_json to clarify it's purpose +* Rename the to\_string method to to\_json to clarify it's purpose * Support JSON data in JSON API type * Add Magnum actions * Updated from global requirements -* Removing redundant wf_ex_id parameter for rerun across the code +* Removing redundant wf\_ex\_id parameter for rerun across the code * Add explicit preconditions for methods of Action, Task and Workflow * Add a test that verifies an old bug with join * Refactoring workflow handler @@ -492,8 +641,8 @@ * Add Python 3.4 to the classifiers * Remove unnecessary executable permissions * Updated from global requirements -* Add baremetal.wait_for_finish action to mapping -* Update get_arg_list_as_str to skip func params +* Add baremetal.wait\_for\_finish action to mapping +* Update get\_arg\_list\_as\_str to skip func params * Updated from global requirements * Enforcing upper constraints for tox test jobs * Fix get task list on YAQL error in with-items @@ -515,33 +664,33 @@ * Rename base API test class * Disable cron trigger thread for API unit tests * Disabled ssl warnings while runing 
tempest tests -* Add extra checks for the existance of executor_callback +* Add extra checks for the existance of executor\_callback * Updated from global requirements * Updated from global requirements * Added script to create docker image * Switch to auto-generated cron trigger names in unit tests -* tempest: fix dir_path +* tempest: fix dir\_path * Leave more relevant comment in engine race condition test * Add utility methods to test action executions more conveniently * Fixing failing functional tests for Cinder and Heat actions * Update OpenStack actions mapping * Updated from global requirements * Unblock skipped test -* Replace self._await(lamdba: ..) constructs with more readable calls -* Add auth_enabled=False to a cron trigger test +* Replace self.\_await(lamdba: ..) constructs with more readable calls +* Add auth\_enabled=False to a cron trigger test * Updated from global requirements * Updated from global requirements * Updated from global requirements -* Unblock skipped tests in test_action_defaults.py +* Unblock skipped tests in test\_action\_defaults.py * Updated from global requirements -* Fixing issue with different versions of oslo_messaging +* Fixing issue with different versions of oslo\_messaging * Getting rid of task result proxies in workflow context * Fix typos in Mistral files * Hacking log for warning * Fixing engine transaction model and error handling -* Refactor workflow controller and fix a bug in _fail_workflow() +* Refactor workflow controller and fix a bug in \_fail\_workflow() * Fixing a bug in DB API method that acquires entity lock -* Also package mistral_tempest_tests +* Also package mistral\_tempest\_tests * module docs are not being generated * Update reno for stable/mitaka @@ -570,7 +719,7 @@ * Add release notes for M-3 * Updated from global requirements * Updated from global requirements -* Fixed 'workflow_name' key error +* Fixed 'workflow\_name' key error * Change for synchronous Mistral actions from CLI * Updated from 
global requirements * Delete workflow members when deleting workflow @@ -586,11 +735,11 @@ * Fix occasional test failure by SSHActions * Reduce spec parsing in workflow lifecycle * Support workflow id in execution operations -* Add workflow id column to executions_v2 table +* Add workflow id column to executions\_v2 table * Fix occasional test failure by assertListEqual * Added CORS support to Mistral * Fix spellings for two words -* BaremetalIntrospectionAction get endpoint by service_type +* BaremetalIntrospectionAction get endpoint by service\_type * Implement basic Zaqar queue operations * Fix with-items concurrency for sub-workflows * Mistral tests will run from tempest plugin @@ -612,7 +761,7 @@ --------- * Add release note for swift action support -* Add task_execution_id to workflow execution in API +* Add task\_execution\_id to workflow execution in API * Support workflow sharing API * Change LOG.warn to LOG.warning * Add db operations for resource members @@ -625,7 +774,7 @@ * Replace assertTrue(isinstance()) with assertIsInstance() * Updated from global requirements * Support workflow UUID when creating cron trigger -* "test_ssh_actions" failed test has been fix +* "test\_ssh\_actions" failed test has been fix * Fix db error when running python34 unit tests * Updated dynamic credential support for funtional test * Trival: Remove unused logging import @@ -634,12 +783,12 @@ * Added README.rst file for tempest plugin * Added base.py to tempest plugin * Added engine to tempest plugin -* Added test_mistral_basic_v2.py to tempest plugin +* Added test\_mistral\_basic\_v2.py to tempest plugin * Initial layout for mistral tempest plugin * Added mistral default actions * If task fails on timeout - there is no clear message of failure * devstack/plugin.sh: stop using deprecated option group for rabbit -* Fix client name in setUpClass's method in 'test_ssh_actions' +* Fix client name in setUpClass's method in 'test\_ssh\_actions' * Documentation for Mistral and 
Docker * Added Dockerfile to create docker image * Fix example for workbook in doc @@ -656,17 +805,17 @@ * Add support for OpenStack Ironic Inspector actions * Updated from global requirements * Refactor action generator -* Fix concurrency issues by using READ_COMMITTED +* Fix concurrency issues by using READ\_COMMITTED * Ignored PEP257 errors * Fix example for ad-hoc action in doc * Numerous debug messages due to iso8601 log level * Fixing execution saved in wrong tenant * Updated from global requirements * Pass environment variables of proxy to tox -* Make test_expiration_policy_for_executions stable +* Make test\_expiration\_policy\_for\_executions stable * Delete python bytecode before every test run -* Fix state_info details for with-items task error -* Reset task state_info on task re-run +* Fix state\_info details for with-items task error +* Reset task state\_info on task re-run * Run pep8 on some tools python files * Remove version from setup.cfg @@ -674,8 +823,8 @@ --------- * Add support for OpenStack Ironic actions -* Fix tools/get_action_list.py -* Update install_venv.py so it says 'Mistral' +* Fix tools/get\_action\_list.py +* Update install\_venv.py so it says 'Mistral' * Add etc/mistral.conf.sample to .gitignore * Add database indices to improve query performance * Result will be [], if list for with-items is empty @@ -685,7 +834,7 @@ * Add release notes for Cinder v2 support * Updated from global requirements * Force releasenotes warnings to be treated as errors -* Send mail to mutli to_addrs failed +* Send mail to mutli to\_addrs failed * Correct heatclient comment in mapping.json * Remove running of CLI tests on commit to mistral repo * Change installation of python-mistralclient in the gates @@ -698,19 +847,19 @@ * Add the CONTRIBUTING.rst file * Fix with-items concurrency greater than the number of items * Adding releasenotes management to Mistral -* Use setup_develop instead of setup_package in plugin.sh +* Use setup\_develop instead of 
setup\_package in plugin.sh * Add Trove to mistral actions * Fix cron-trigger's execution with pattern and first time * Pass creds into the clients.Manager() in functional tests * Move base.py and config.py under unit/ folder * Add ceilometer action support -* Increased size of "state_info" column to 64kb +* Increased size of "state\_info" column to 64kb * Skipped some tests in py3 environment -* Fixing reference of floating_ips_client in tests +* Fixing reference of floating\_ips\_client in tests * OpenStack typo * Updated from global requirements * Ensure only one WF execution for every CT cycle -* Wrap sync_db operations in transactions +* Wrap sync\_db operations in transactions * Remove iso8601 dependency * Fix all H405 pep8 errors * Adding callback url to action context @@ -719,7 +868,7 @@ * Move the default directories into settings file * Removing wait() when initializing notification listener * Updated from global requirements -* Do not use len() in log_exec decorator +* Do not use len() in log\_exec decorator * Fixing wf execution creation at initial stage * remove default=None for config options * Fixing workflow execution state calculation @@ -737,13 +886,13 @@ * devstack: add support for mistraldashboard * Fixing SSH actions to use names of private keys * [Docs] Add 'Cookbooks' page -* Use oslo_config new type PortOpt for port options +* Use oslo\_config new type PortOpt for port options * Add decode() function for string comparison * Refactored filter implementation * mistral-documentation: dashboard documentation regarding debug known issue * Fix mistral dsvm gate * Updated from global requirements -* Adding 'json_pp' function in YAQL +* Adding 'json\_pp' function in YAQL * Added home-page value with mistral docs * filter() is wrapped around list() * Updated from global requirements @@ -754,7 +903,7 @@ * Adding functional tests for SSH actions * Fixing "Task result / Data Flow" section of "Main Features" in docs * Fixing terminoloty/actions section 
in documentation -* Fixing description of "mistral_http" action in DSL spec +* Fixing description of "mistral\_http" action in DSL spec * Adding section about validation into API v2 spec * Adding "Cron triggers" section into API v2 specification * Action definition updated, when workbook is created @@ -776,24 +925,24 @@ * mistral-documentation: dashboard documentation regarding debug * Fix more unit tests in py34 job * Fixing scheduler tests -* Remove usage of expandtabs() in get_workflow_definition -* Renaming state DELAYED to RUNNING_DELAYED in doc +* Remove usage of expandtabs() in get\_workflow\_definition +* Renaming state DELAYED to RUNNING\_DELAYED in doc 1.0.0.0rc1 ---------- -* Renaming state DELAYED to RUNNING_DELAYED +* Renaming state DELAYED to RUNNING\_DELAYED * Support JSON and arrays in JavaScript action in Mistral * Fix some spelling typo in manual and program output * Fix order of arguments in assertEqual * Fix more tests in python34 gate * Using six.iteritems() to avoid some python3 tests failure * Fixing run action when error occurs -* Fixing std.create_instance workflow +* Fixing std.create\_instance workflow * Adding devstack installation doc * Fixing searching errors in mistral.exceptions * Check for trigger before delete wf -* Change ignore-errors to ignore_errors +* Change ignore-errors to ignore\_errors * Removing "skip" decorators for some OpenStack actions tests * Workflow definition updated, when workbook is created * Fail task on publish error @@ -804,13 +953,13 @@ * Updated from global requirements * Adding validation of workflow graph * Mistral documentation: CLI operations -* Adding 'is_system' to definition model +* Adding 'is\_system' to definition model * Fixing uploading public workflow or action * Fixing DSL documentation * Initial commit that fix py34 tests run -* Refactor get_task_spec using mechanism of polymorphic DSL entities -* get_action_list: improve generated JSON output -* get_action_list: use novaclient.client.Client 
+* Refactor get\_task\_spec using mechanism of polymorphic DSL entities +* get\_action\_list: improve generated JSON output +* get\_action\_list: use novaclient.client.Client * Adding test where with-items evaluates 'env' * Fixing indentation in 'create action' tutorial * Minor changes to Mistral docs @@ -831,10 +980,10 @@ * Mistral docs terminology: executions * The Link for plugin samples is added * Mistral documentation: mistralclient -* Support action_execution deletion +* Support action\_execution deletion * Use default devstack functional for Mistral user/service/endpoint creation * Fix timing in expired execution unit test -* Fix execution update where state_info is unset +* Fix execution update where state\_info is unset * Fix creation of Mistral service and endpoints * Removes unused posix-ipc requirement * Mistral documentation: architecture @@ -842,8 +991,8 @@ * Updated from global requirements * Small adjustements and fixes for execution expiration policy * Mistral docs terminology: workbooks and workflows -* Fixing occasional fail of test_create_action_execution -* Adding project_id to expiration-policy for executions ctx +* Fixing occasional fail of test\_create\_action\_execution +* Adding project\_id to expiration-policy for executions ctx * Fixing 2 typos in comments * Mistral documentation: adding configuration guide * Refactor task controller with new json type @@ -869,7 +1018,7 @@ * Moving to YAQL 1.0 * Fixing cron trigger test * Update the gitingore file and tox.ini -* Enabling direct workflow cycles: fixing find_task_execution() function +* Enabling direct workflow cycles: fixing find\_task\_execution() function * Enabling direct workflow cycles: adding a test that now doesn't pass * Add pagination support for actions query API * Add functional tests for workflow query @@ -901,13 +1050,13 @@ * Fixing std.http action * Add coordination util for service management * Support large datasets for execution objects -* Fixing execution state_info -* 
Fixing import error in sync_db.py +* Fixing execution state\_info +* Fixing import error in sync\_db.py * Error result: fix std.http action * Error result: doc explaining error result in base action class * Error result: adding more tests * Making / and /v2 URLs allowed without auth -* Error result: allow actions to return instance of wf_utils.Result +* Error result: allow actions to return instance of wf\_utils.Result * Error result: adding a test for error result * Remove explicit requirements.txt occurrence from tox.ini * Remove H803, H305 @@ -921,7 +1070,7 @@ * Fix cron triggers * Fix mistralclient errors when reinstalling devstack * use task.spec to result always a list for with-items remove redundant 'if' Change-Id: Id656685c45856e628ded2686d1f44dac8aa491de Closes-Bug: #1468419 -* Modify run_tests.sh to support PostgreSQL +* Modify run\_tests.sh to support PostgreSQL * Add Mistral service and endpoint registration to README.rst * Fix inappropriate condition for retry policy * Fix invalid workflow completion in case of "join" @@ -932,16 +1081,16 @@ --------- * Removing redundant header from setup.py -* Simplifying a few data_flow methods +* Simplifying a few data\_flow methods * Workflow variables: modifying engine so that variables work * Workflow variables: adding "vars" property in to workflow specification * Fixing devstack gate failure * Bug fix with-items tasks should always have result of list type * Set default log level of loopingcall module to 'INFO' -* Implementing action_execution POST API -* Implementing 'start_action' on engine side -* Fix wrong zuul_project name in mistral gate script -* Creating action_handler to separate action functionality +* Implementing action\_execution POST API +* Implementing 'start\_action' on engine side +* Fix wrong zuul\_project name in mistral gate script +* Creating action\_handler to separate action functionality * Get rid of openstack/common package * Improving devstack docs * Drop use of 'oslo' namespace 
package @@ -951,7 +1100,7 @@ * Fix wrong db connection string in README.rst file * Add description param to execution creation API * Update .gitreview file for project rename -* Add description field to executions_v2 table +* Add description field to executions\_v2 table * Make use of graduated oslo.log module * Implementing 'continue-on' retry policy property * Adding some more constraints to cron trigger @@ -969,11 +1118,11 @@ * Adjust docs API to last changes * Fixing YAQL related errors * Skip test on heat action -* Removing incorrect 2015.* tags for client in devstack script +* Removing incorrect 2015.\* tags for client in devstack script * Adding migrations README * Fix dsvm gate failure * Fixing YAQL len() function in Mistral -* Adding 'workflow_params' to cron triggers +* Adding 'workflow\_params' to cron triggers * Allowing a single string value for "requires" clause * Adding "requires" to "task-defaults" clause * Updating requirements to master @@ -985,7 +1134,7 @@ * Retry policy one line syntax * Fixing yaql version * Fix yaql error caused by the ply dependency -* Fixing action_executions API +* Fixing action\_executions API * Adding script for retrieving OpenStack action list * Adding tests on 'break-on' of retry policy * Update mapping.json for OpenStack actions @@ -1004,7 +1153,7 @@ * Fixing result ordering in 'with-items' * Fixing tags of wf as part of wb * Fixing variable names in db/v2/sqlalchemy/api.py -* Fix a logging issue in ssh_utils +* Fix a logging issue in ssh\_utils * Pin oslo pip requirements * Add YAQL parsing to DSL validation * Fixing engine concurrent issues @@ -1012,7 +1161,7 @@ * Add schema for workflow input with default value support * Remove transport from WSGI script * Fixing API 500 errors on Engine side -* Fix typo in wf_v2.yaml +* Fix typo in wf\_v2.yaml * Moving to YAQL 1.0 * Get rid of v1 in installation scripts * Fixing exception type that workbook negative tests expect @@ -1023,9 +1172,9 @@ * Add workbook and workflow 
validation endpoints * Deleting all v1 related stuff * Fixing docs on target task property in README -* Rename 'wf_db' to 'wf_def' to keep consistency -* Provide 'output' in action_execution API correctly -* Small data_flow refactoring, added TODOs to think about design +* Rename 'wf\_db' to 'wf\_def' to keep consistency +* Provide 'output' in action\_execution API correctly +* Small data\_flow refactoring, added TODOs to think about design * Fixing version info in server title * Fixing 'with-items' with plain input * Add 'keep-result' property to task-spec @@ -1034,9 +1183,9 @@ * Expanding generators when evaluating yaql expressions * Add mistral-db-manage script * Small refactoring in engine, task handler and workflow utils -* Fixing big type column for output and in_context +* Fixing big type column for output and in\_context * Harden v2 DSL schema for validation -* Fix bug with redundant task_id in part of logs +* Fix bug with redundant task\_id in part of logs * Fixing 'with-items' functionality * Fixing task API (published vars) * Support subclass iteration for Workflow controller @@ -1045,10 +1194,10 @@ ---------- * Fixing tasks API endpoint -* Add action_execution API +* Add action\_execution API * Fixing pause-before policy * Fixing timeout policy -* Implementing 'acquire_lock' method and fixing workflow completion +* Implementing 'acquire\_lock' method and fixing workflow completion * Fix retry policy * Fixing wait-after policy * Fixing wait-before policy @@ -1056,28 +1205,28 @@ * Refactor task output: full engine redesign * Fix DSL schema in test workbook * Fixing scheduler work -* Small refactoring in test_javascript +* Small refactoring in test\_javascript * Add WSGI script for API server * Fix list of upstream tasks for task with no join * Fixing finishing workflow in case DELAYED task state * Adding validation in policies * Refactor task output: DB API methods for action executions -* Refactor task output: 'db_tasks'->'task_execs', 
'db_execs'->'wf_execs' -* Refactoring task output: 'task_db' -> 'task_ex', 'exec_db' -> 'wf_ex' +* Refactor task output: 'db\_tasks'->'task\_execs', 'db\_execs'->'wf\_execs' +* Refactoring task output: 'task\_db' -> 'task\_ex', 'exec\_db' -> 'wf\_ex' * Refactoring task output: full redesign of DB models * Adding string() YAQL function registered at Mistral level * Fixing published vars for parallel tasks (and join) -* Limit WorkflowExecution.state_info size +* Limit WorkflowExecution.state\_info size * Fixing YAQL in policies * Default workflow type to 'direct' * Fix wrong log task changing state * Fix mismatch to new YAQL syntax * Adjust standard actions and workflows * Changing YAQL syntax delimeters -* Remove eventlet monkey patch in mistral __init__ +* Remove eventlet monkey patch in mistral \_\_init\_\_ * Refactoring task output: renaming DB models for better consistency * Fix OS action client initialization -* Expose stop_workflow in API +* Expose stop\_workflow in API * Add simple integration tests for OpenStack actions * Fix formatting endpoint urls in OS actions * Fixing a bug in logging logic and small refactoring @@ -1087,14 +1236,14 @@ * Add support for auth against keystone on https * Support ssl cert verification on outgoing https * Make spec object more readable in logging -* Fix test_nova_actions after changes in tempest +* Fix test\_nova\_actions after changes in tempest * Task specification improvement -* Renaming _find_completed_tasks to _find_successful_tasks +* Renaming \_find\_completed\_tasks to \_find\_successful\_tasks * Adding more tests for parallel tasks publishing * Fixing bug with context publishing of parallel tasks * Fix keystone actions * Fix tempest gate, add tempest import to our script -* Fix the wrong project name in run_tests.sh usage +* Fix the wrong project name in run\_tests.sh usage * Track execution and task IDs in WF trace log * Changing InlineYAQLEvaluator: treat only {yaql} as YAQL * Fix H904 pep8 error @@ -1107,8 
+1256,8 @@ * JavaScript action: part 2 * Allowing multiple hosts for ssh action * Catch workflow errors -* Rename environment to env in start_workflow -* Fix action_context in with_items +* Rename environment to env in start\_workflow +* Fix action\_context in with\_items * Fix sequential tasks publishing the same variable * fix doc dsl v2 * JavaScript action: part 1 @@ -1124,7 +1273,7 @@ * Working on secure DB access (part 2) * Working on secure DB access (part 1) * Concurrency: part 2 -* Adding assertions for "updated_at" field in DB tests +* Adding assertions for "updated\_at" field in DB tests * Fix imports due to changes in tempest * Fixing environment tests * Concurrency: part 1 @@ -1183,7 +1332,7 @@ * Working on "join": making "full join" work with incoming errors * Adding "std.fail" action that always throws ActionException * Adding "std.noop" action (can be useful for testing) -* Raise human-readable exception if workflow_name is not a dict +* Raise human-readable exception if workflow\_name is not a dict * Working on "join": first basic implementation of full join * Working on "join": add "join" property into task specification * Working on "join": implement basic test for full join @@ -1193,15 +1342,15 @@ * Make able to resume workflow * Refactor API tests for v2 * Fix creating std actions -* Renaming trusts.py to security.py and adding method add_security_info +* Renaming trusts.py to security.py and adding method add\_security\_info * Refactoring workbooks service to be symmetric with other services * Use YAML text instead of JSON in HTTP body * Renaming "commands" to "cmds" in engine to avoid name conflicts * Refactor std.email action * Update README files * Sort executions and tasks by time -* Add 'project_id' to Execution and Task -* Fill 'wf_name' task_db field +* Add 'project\_id' to Execution and Task +* Fill 'wf\_name' task\_db field * Add cinder actions * Add possibility to pass variables from context to for-each * Implement for-each task 
property @@ -1213,7 +1362,7 @@ 0.1.1 ----- -* Construct and pass action_context to action +* Construct and pass action\_context to action * Add passing auth info to std.http * Adding print out of server information into launch script * Adding method for authentication based on config keystone properties @@ -1250,11 +1399,11 @@ * Add documentation - part 2 * Add documentation - part 1 * Update tearDown methods in API integration tests -* Use $(COMMAND) instead of `COMMAND` +* Use $(COMMAND) instead of \`COMMAND\` * Making execution context immutable * Add workflow trace logging in engine v2 * Fix scheduler test -* Fix providing 'is_system' property in /actions +* Fix providing 'is\_system' property in /actions * Fix tasks in order of execution * Stop using intersphinx * Style changes in Scheduler and its tests @@ -1280,7 +1429,7 @@ * Making workflow endpoint able to upload multiple workflows * Fixing v2 workbooks controller not to deal with 'name' * Modifying workbook service to infer name and tags from definition -* Adding 'name' to reverse_workflow.yaml workbook +* Adding 'name' to reverse\_workflow.yaml workbook * Add workflow service module * Fix providing result (task-update API) * Add param 'name' to the test definition @@ -1303,14 +1452,14 @@ * Improving exceptions for OpenStack actions * Getting rid of explicit 'start-task' property in workflow DSL * Implementing workflow 'on-task-XXX' clauses -* Fix wrong passing parameter 'workflow_input' +* Fix wrong passing parameter 'workflow\_input' * Fixing workflow specification to support 'on-task-XXX' clauses * Fixing workflow handlers to return all possible commands * Refactoring engine using abstraction of command * Delete explicit raising DBError from transaction -* Fixing passing raw_result in v1 +* Fixing passing raw\_result in v1 * Register v2 API on keystone by default -* Renaming 'stop_workflow' to 'pause_workflow' +* Renaming 'stop\_workflow' to 'pause\_workflow' * Adding unit for tests engine instructions 
* Fixing task v2 specification * Fix run workflow in case task state == ERROR @@ -1320,11 +1469,11 @@ * Implement short syntax for passing base-parameters into adhoc-action * Changing all DSL keywords to lower case * Additional testing of reverse workflow -* Pass output from task API to convey_task_result +* Pass output from task API to convey\_task\_result * Moving all API tests under 'mistral.tests.unit' package * Fixing workbook definition upload for v1 -* Add check on config file in sync_db script -* Fixed Execution WSME model and to_dict() +* Add check on config file in sync\_db script +* Fixed Execution WSME model and to\_dict() * Saving description from definition in actions endpoint * Fixing workflows controller to fill 'spec' property based on definition * Adding actions endpoint @@ -1339,36 +1488,36 @@ * Creating ad-hoc actions engine test * Removing obsolete namespace related methods from task v2 spec * Fixing subworkflow resolution algorithm -* Removing 'workflow_parameters' from workflow spec -* Switching to using 'with db_api.transaction()' +* Removing 'workflow\_parameters' from workflow spec +* Switching to using 'with db\_api.transaction()' * Removing redundant parameters from methods of policies * Add 'description' field to specifications * Add serializers to scheduler call * Implement Wait-before policy * Refactoring engine to build and call task policies * Provide executor info about action -* Create action_factory without access to DB +* Create action\_factory without access to DB * Delete code related to Namespaces * Change instruction how to start Mistral -* Dividing get_action_class on two separate methods -* Rename action_factory to action_manager -* Modify action_factory to store actions in DB +* Dividing get\_action\_class on two separate methods +* Rename action\_factory to action\_manager +* Modify action\_factory to store actions in DB * Work toward Python 3.4 support and testing * Renaming 'on-finish' to 'on-complete' in task spec * 
Adding "wait-before" and "wait-after" to task policies * Fixing workflow spec to return start task spec instead its name * Including "policies" into task spec * Adjusting policy interfaces -* Renaming 'workflow_parameters' to 'workflow-parameters' +* Renaming 'workflow\_parameters' to 'workflow-parameters' * Small optimizations and fixes * Fixing processing subworkflow result * Renaming 'class' to 'base' in action spec -* Renaming 'start_task' to 'start-task' in workflow spec -* Fix execution state ERROR if task_spec has on-finish +* Renaming 'start\_task' to 'start-task' in workflow spec +* Fix execution state ERROR if task\_spec has on-finish * Additional changes in Delayed calls -* Fixing services/workbooks.py to use create_or_update_workflow() +* Fixing services/workbooks.py to use create\_or\_update\_workflow() * Implement REST API v2.0 -* Adding new methods to DB API v2 (load_xxx and create_or_update_xxx) +* Adding new methods to DB API v2 (load\_xxx and create\_or\_update\_xxx) * Adding unit tests for workflow DB model * Add service for delayed calls * Improving services/workbooks @@ -1382,11 +1531,11 @@ * Adding transaction context manager function for db transactions * Fail workflow if any task fails * Fixing validation for action specifications ('output' property) -* Working on linear workflow: on_task_result() -* Working on linear workflow: start_workflow() -* Working on engine implementation: on_task_result() +* Working on linear workflow: on\_task\_result() +* Working on linear workflow: start\_workflow() +* Working on engine implementation: on\_task\_result() * Renaming base class for Mistral DB models -* Working on engine implementation: start_workflow() +* Working on engine implementation: start\_workflow() * Fix small issues in tests * Cosmetic changes in integration tests * Rename resource directory @@ -1398,9 +1547,9 @@ * Add neutron actions * Small fixes in openstack-actions * Moving TaskResult and states to 'workflow' package -* Adding 
implementation of method __repr__ for DB models -* Working on reverse workflow: on_task_result() -* Working on reverse workflow: implementing method start_workflow() +* Adding implementation of method \_\_repr\_\_ for DB models +* Working on reverse workflow: on\_task\_result() +* Working on reverse workflow: implementing method start\_workflow() * Replacing NotImplemented with NotImplementedError * Working on reverse workflow: fixing specification version injection * Unit tests for v2 DB model @@ -1429,11 +1578,11 @@ * Fixing wrong access to Mistral security context in engine * Make OpenStack related data available in actions -* Add project_id to the workbook and filter by it +* Add project\_id to the workbook and filter by it * Make sure the context is correctly passed through the rpc * Add Executions and Tasks root API endpoints * Removing obsolete folder "scripts" -* Remove redundant convey_task_results arguments +* Remove redundant convey\_task\_results arguments * Remove redundant DB API arguments * 'requires' should take a string or list * Fix get task list of nonexistent execution @@ -1443,30 +1592,30 @@ * Fix hacking rule H236 * Fix Hacking rule H302 (import only modules) * Expose Task's output and parameters through API -* Make the service_type more consistent +* Make the service\_type more consistent * Switch from unittest2 to oslotest(testtools) * Fix hacking rules H101 and E265 * Temporarily disable the new hacking rules -* Renaming all example config files from *.conf.example to *.conf.sample +* Renaming all example config files from \*.conf.example to \*.conf.sample * Fixing obsolete file name in README.rst * Fix devstack gate * Add upload definition action in test * Do a better job of quietening the logs * All tests should call the base class setUp() * Move all tests to use base.BaseTest -* Add OS_LOG_CAPTURE to testr.conf +* Add OS\_LOG\_CAPTURE to testr.conf * Fix create execution when workbook does not exist -* Fix getting action_spec in create 
tasks +* Fix getting action\_spec in create tasks * Added information about automated tests -* Refactor test_task_retry to not rely on start_task +* Refactor test\_task\_retry to not rely on start\_task * Clean up configuration settings -* Refactor test_engine to not rely on start_task +* Refactor test\_engine to not rely on start\_task * Fix update nonexistent task * Fix get execution list when workbook does not exist * Fix keystone config group for trust creation * fix mistral devstack scripts * Fix bug with getting nonexistent task -* Fix duplicate keystone auth_token config options +* Fix duplicate keystone auth\_token config options * Move tests to testr * Add negative functional tests * Add new tests for executions and tasks @@ -1474,9 +1623,9 @@ * Implement new mistral tests * Remove unneccesary oslo modules * Making "Namespaces" section truly optional -* Restore script update_env_deps in tools +* Restore script update\_env\_deps in tools * Fix devstack integration scripts -* Remove unused function get_state_by_http_status_code +* Remove unused function get\_state\_by\_http\_status\_code * Sync code with oslo-incubator * Small engine bugfixing/refactoring * Make field 'Namespaces' optional @@ -1525,7 +1674,7 @@ * Fixing setup.cfg * Fix work on MySQL backend * Replace rabbit config to 'default' section -* Additional workflow trace logging in abstract_engine.py +* Additional workflow trace logging in abstract\_engine.py * Fixing wrong comparison in retry.py * Engine as a standalone process * Improved README file @@ -1536,11 +1685,11 @@ * Add workflow logging * Fixing inline expressions evaluation * Making execution data available in data flow context -* Fixing initialization of variable 'action_spec' in abstract_engine.py +* Fixing initialization of variable 'action\_spec' in abstract\_engine.py * Remove redundant update task operation * Fix convert params and result in AdHocAction * Adding parameters to adhoc action namespaces -* Removing 'base_output' from 
ad-hoc actions specification +* Removing 'base\_output' from ad-hoc actions specification * Temporarily commenting assertions in task retry tests * Temporarily commenting assertions in task retry tests * Fix result of HTTP action @@ -1571,13 +1720,13 @@ * Implements: blueprint mistral-std-repeat-action * Correct fake action test name * Remove unneeded declarations in unit tests -* Add keystone auth_token in context +* Add keystone auth\_token in context * Fix keystone config group name * Add script to allow update dependencies in all envs * Fixing ordering bugs in local engine tests * Fixing ordering bugs in workbook model and tests * Fixing executor launch script -* Fix getting task on-* properties +* Fix getting task on-\* properties * Rename 'events' to 'triggers' * Implement new object-model specification * Use oslo.messaging for AMQP communications @@ -1592,11 +1741,11 @@ * Working on Data Flow (step 1) * Add scheduling specific task on sucess/error * Send email action, part 2 -* Rename "target_task" to "task" +* Rename "target\_task" to "task" * Send email action, step 1 * Add negative tests to api * Fixing access to task "parameters" property in DSL -* Fix getting task on-* properties in DSL +* Fix getting task on-\* properties in DSL * Fix task keys properties in DSL parser * Add YAQL expression evaluation * Modified Rest action for process 'input' property diff -Nru mistral-4.0.0/CONTRIBUTING.rst mistral-5.0.0~b2/CONTRIBUTING.rst --- mistral-4.0.0/CONTRIBUTING.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/CONTRIBUTING.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,22 +1,66 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: +======================= +Contributing to Mistral +======================= - http://docs.openstack.org/infra/manual/developers.html +If you're interested in contributing to the Mistral project, +the following will help get you started. 
-Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: +Contributor License Agreement +============================= - http://docs.openstack.org/infra/manual/developers.html#development-workflow +In order to contribute to the Mistral project, you need to have +signed OpenStack's contributor's agreement: -You can get the mistral documentation at: +* https://docs.openstack.org/infra/manual/developers.html +* https://wiki.openstack.org/CLA - http://docs.openstack.org/developer/mistral -Pull requests submitted through GitHub will be ignored. +Project Hosting Details +======================= -Bugs should be filed on Launchpad, not GitHub: +* Bug trackers + * General mistral tracker: https://launchpad.net/mistral - https://bugs.launchpad.net/mistral + * Python client tracker: https://launchpad.net/python-mistralclient +* Mailing list (prefix subjects with ``[Mistral]`` for faster responses) + https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev +* Documentation + * https://docs.openstack.org/developer/mistral/ + +* IRC channel + * #openstack-mistral at FreeNode + + * https://wiki.openstack.org/wiki/Mistral/Meetings_Meetings + +* Code Hosting + * https://github.com/openstack/mistral + + * https://github.com/openstack/python-mistralclient + + * https://github.com/openstack/mistral-dashboard + + * https://github.com/openstack/mistral-lib + + * https://github.com/openstack/mistral-specs + + * https://github.com/openstack/mistral-specs + +* Code Review + * https://review.openstack.org/#/q/mistral + + * https://review.openstack.org/#/q/python-mistralclient + + * https://review.openstack.org/#/q/mistral-dashboard + + * https://review.openstack.org/#/q/mistral-lib + + * https://review.openstack.org/#/q/mistral-extra + + * https://review.openstack.org/#/q/mistral-specs + + * https://docs.openstack.org/infra/manual/developers.html#development-workflow + +* Mistral Design 
Specifications + * https://specs.openstack.org/openstack/mistral-specs/ diff -Nru mistral-4.0.0/debian/changelog mistral-5.0.0~b2/debian/changelog --- mistral-4.0.0/debian/changelog 2017-02-22 15:15:43.000000000 +0000 +++ mistral-5.0.0~b2/debian/changelog 2017-06-13 12:09:27.000000000 +0000 @@ -1,3 +1,17 @@ +mistral (5.0.0~b2-0ubuntu1) artful; urgency=medium + + * New upstream milestone for OpenStack Pike. + * d/control: Align (Build-)Depends with upstream. + + -- James Page Tue, 13 Jun 2017 13:09:27 +0100 + +mistral (5.0.0~b1-0ubuntu1) artful; urgency=medium + + * New upstream milestone for OpenStack Pike. + * Align (Build-)Depends with upstream milestone. + + -- James Page Fri, 28 Apr 2017 14:34:24 +0100 + mistral (4.0.0-0ubuntu1) zesty; urgency=medium * New upstream release for OpenStack Ocata. diff -Nru mistral-4.0.0/debian/control mistral-5.0.0~b2/debian/control --- mistral-4.0.0/debian/control 2017-02-22 15:15:43.000000000 +0000 +++ mistral-5.0.0~b2/debian/control 2017-06-13 12:09:27.000000000 +0000 @@ -10,23 +10,23 @@ openstack-pkg-tools (>= 53~), po-debconf, python-all, - python-pbr (>= 1.8), + python-pbr (>= 2.0.0), python-setuptools (>= 16.0), - python-sphinx, + python-sphinx (>= 1.5.1), Build-Depends-Indep: python-alembic (>= 0.8.10), python-aodhclient (>= 0.7.0), python-babel (>= 2.3.4), python-barbicanclient (>= 4.0.0), python-cachetools (>= 1.1.0), python-ceilometerclient (>= 2.5.0), - python-cinderclient (>= 1.6.0), + python-cinderclient (>= 1:2.0.1), python-coverage (>= 4.0), python-croniter (>= 0.3.4), python-designateclient (>= 1.5.0), python-docutils, python-eventlet (>= 0.18.2), python-fixtures (>= 3.0.0), - python-glanceclient (>= 1:2.5.0), + python-glanceclient (>= 1:2.7.0), python-gnocchiclient (>= 2.7.0), python-hacking (>= 0.10.0), python-heatclient (>= 1.6.1), @@ -36,26 +36,26 @@ python-keystoneclient (>= 1:3.8.0), python-keystonemiddleware (>= 4.12.0), python-magnumclient (>= 2.0.0), - python-mistralclient (>= 1:2.0.0), + 
python-mistralclient (>= 1:3.1.0), python-mock (>= 2.0), python-muranoclient (>= 0.8.2), python-networkx (>= 1.10), - python-neutronclient (>= 1:5.1.0), + python-neutronclient (>= 1:6.3.0), python-nose, - python-novaclient (>= 2:6.0.0), + python-novaclient (>= 2:7.1.0), python-openstackdocstheme (>= 1.5.0), python-os-api-ref (>= 1.0.0), python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:3.14.0), - python-oslo.db (>= 4.15.0), + python-oslo.config (>= 1:4.0.0), + python-oslo.db (>= 4.21.1), python-oslo.i18n (>= 2.1.0), - python-oslo.log (>= 3.11.0), - python-oslo.messaging (>= 5.14.0), - python-oslo.middleware (>= 3.0.0), + python-oslo.log (>= 3.22.0), + python-oslo.messaging (>= 5.25.0), + python-oslo.middleware (>= 3.27.0), python-oslo.policy (>= 1.17.0), python-oslo.serialization (>= 1.10.0), python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.18.0), + python-oslo.utils (>= 3.20.0), python-oslosphinx (>= 4.7.0), python-oslotest (>= 1.10.0), python-osprofiler (>= 1.4.0), @@ -71,7 +71,7 @@ python-sphinxcontrib-httpdomain, python-sphinxcontrib-pecanwsme (>= 0.8), python-sqlalchemy (>= 1.0.10), - python-stevedore (>= 1.17.1), + python-stevedore (>= 1.20.0), python-swiftclient (>= 1:3.2.0), python-tenacity (>= 3.2.1), python-testrepository (>= 0.0.18), @@ -101,11 +101,11 @@ python-barbicanclient (>= 4.0.0), python-cachetools (>= 1.1.0), python-ceilometerclient (>= 2.5.0), - python-cinderclient (>= 1.6.0), + python-cinderclient (>= 1:2.0.1), python-croniter (>= 0.3.4), python-designateclient (>= 1.5.0), python-eventlet (>= 0.18.2), - python-glanceclient (>= 1:2.5.0), + python-glanceclient (>= 1:2.7.0), python-gnocchiclient (>= 2.7.0), python-heatclient (>= 1.6.1), python-ironicclient (>= 1.6.0), @@ -114,23 +114,23 @@ python-keystoneclient (>= 1:3.8.0), python-keystonemiddleware (>= 4.2.0), python-magnumclient (>= 2.0.0), - python-mistralclient (>= 1:2.0.0), + python-mistralclient (>= 1:3.1.0), python-mock (>= 2.0), python-muranoclient (>= 0.8.2), 
python-networkx (>= 1.10), - python-neutronclient (>= 1:5.1.0), - python-novaclient (>= 2:2.29.0), + python-neutronclient (>= 1:6.3.0), + python-novaclient (>= 2:7.1.0), python-oslo.concurrency (>= 3.8.0), - python-oslo.config (>= 1:3.14.0), - python-oslo.db (>= 4.11.0), + python-oslo.config (>= 1:4.0.0), + python-oslo.db (>= 4.21.1), python-oslo.i18n (>= 2.1.0), - python-oslo.log (>= 3.11.0), - python-oslo.messaging (>= 5.14.0), - python-oslo.middleware (>= 3.0.0), + python-oslo.log (>= 3.22.0), + python-oslo.messaging (>= 5.25.0), + python-oslo.middleware (>= 3.27.0), python-oslo.policy (>= 1.17.0), python-oslo.serialization (>= 1.10.0), python-oslo.service (>= 1.10.0), - python-oslo.utils (>= 3.18.0), + python-oslo.utils (>= 3.20.0), python-osprofiler (>= 1.4.0), python-paramiko (>= 2.0), python-pecan (>= 1.0.0), @@ -142,10 +142,10 @@ python-setuptools (>= 16.0), python-six (>= 1.9.0), python-sqlalchemy (>= 1.0.10), - python-stevedore (>= 1.17.1), + python-stevedore (>= 1.20.0), python-swiftclient (>= 1:3.2.0), python-tenacity (>= 3.2.1), - python-tooz (>= 1.28.0), + python-tooz (>= 1.47.0), python-troveclient (>= 1:2.2.0), python-wsme (>= 0.8), python-yaml (>= 3.10.0), diff -Nru mistral-4.0.0/devstack/files/apache-mistral-api.template mistral-5.0.0~b2/devstack/files/apache-mistral-api.template --- mistral-4.0.0/devstack/files/apache-mistral-api.template 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/devstack/files/apache-mistral-api.template 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,28 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess mistral-api processes=%API_WORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup mistral-api + WSGIScriptAlias / %MISTRAL_BIN_DIR%/mistral-wsgi-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + AllowEncodedSlashes On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/mistral_api.log + CustomLog /var/log/%APACHE_NAME%/mistral_api_access.log combined 
+ %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff -Nru mistral-4.0.0/devstack/plugin.sh mistral-5.0.0~b2/devstack/plugin.sh --- mistral-4.0.0/devstack/plugin.sh 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/devstack/plugin.sh 2017-06-09 12:48:26.000000000 +0000 @@ -21,6 +21,11 @@ MISTRAL_BIN_DIR=$(get_python_exec_prefix) fi +# Toggle for deploying Mistral API under HTTPD + mod_wsgi +MISTRAL_USE_MOD_WSGI=${MISTRAL_USE_MOD_WSGI:-True} + +MISTRAL_FILES_DIR=$MISTRAL_DIR/devstack/files + # create_mistral_accounts - Set up common required mistral accounts # # Tenant User Roles @@ -76,7 +81,8 @@ iniset $MISTRAL_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $MISTRAL_CONF_FILE keystone_authtoken admin_user $MISTRAL_ADMIN_USER iniset $MISTRAL_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $MISTRAL_CONF_FILE keystone_authtoken auth_uri "http://${KEYSTONE_AUTH_HOST}:5000/v3" + iniset $MISTRAL_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_AUTH_URI_V3 + iniset $MISTRAL_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI # Setup RabbitMQ credentials iniset $MISTRAL_CONF_FILE oslo_messaging_rabbit rabbit_userid $RABBIT_USERID @@ -100,6 +106,10 @@ if [ "$MISTRAL_RPC_IMPLEMENTATION" ]; then iniset $MISTRAL_CONF_FILE DEFAULT rpc_implementation $MISTRAL_RPC_IMPLEMENTATION fi + + if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then + _config_mistral_apache_wsgi + fi } @@ -121,6 +131,10 @@ if is_service_enabled horizon; then _install_mistraldashboard fi + + if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then + install_apache_wsgi + fi } @@ -145,9 +159,19 @@ # start_mistral - Start running processes, including screen function start_mistral { + # If the site is not enabled then we are in a grenade scenario + local enabled_site_file + enabled_site_file=$(apache_site_config_for mistral-api) + if is_service_enabled mistral-api && is_service_enabled 
mistral-engine && is_service_enabled mistral-executor && is_service_enabled mistral-event-engine ; then echo_summary "Installing all mistral services in separate processes" - run_process mistral-api "$MISTRAL_BIN_DIR/mistral-server --server api --config-file $MISTRAL_CONF_DIR/mistral.conf" + if [ -f ${enabled_site_file} ] && [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then + enable_apache_site mistral-api + restart_apache_server + tail_log mistral-api /var/log/$APACHE_NAME/mistral_api.log + else + run_process mistral-api "$MISTRAL_BIN_DIR/mistral-server --server api --config-file $MISTRAL_CONF_DIR/mistral.conf" + fi run_process mistral-engine "$MISTRAL_BIN_DIR/mistral-server --server engine --config-file $MISTRAL_CONF_DIR/mistral.conf" run_process mistral-executor "$MISTRAL_BIN_DIR/mistral-server --server executor --config-file $MISTRAL_CONF_DIR/mistral.conf" run_process mistral-event-engine "$MISTRAL_BIN_DIR/mistral-server --server event-engine --config-file $MISTRAL_CONF_DIR/mistral.conf" @@ -161,9 +185,17 @@ # stop_mistral - Stop running processes function stop_mistral { # Kill the Mistral screen windows - for serv in mistral mistral-api mistral-engine mistral-executor mistral-event-engine; do + local serv + for serv in mistral mistral-engine mistral-executor mistral-event-engine; do stop_process $serv done + + if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then + disable_apache_site mistral-api + restart_apache_server + else + stop_process mistral-api + fi } @@ -171,6 +203,11 @@ if is_service_enabled horizon; then _mistral_cleanup_mistraldashboard fi + + if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then + _mistral_cleanup_apache_wsgi + fi + sudo rm -rf $MISTRAL_CONF_DIR } @@ -178,6 +215,33 @@ rm -f $HORIZON_DIR/openstack_dashboard/local/enabled/_50_mistral.py } +function _mistral_cleanup_apache_wsgi { + sudo rm -f $(apache_site_config_for mistral-api) +} + +# _config_mistral_apache_wsgi() - Set WSGI config files for Mistral +function _config_mistral_apache_wsgi { + local 
mistral_apache_conf + mistral_apache_conf=$(apache_site_config_for mistral-api) + local mistral_ssl="" + local mistral_certfile="" + local mistral_keyfile="" + local mistral_api_port=$MISTRAL_SERVICE_PORT + local venv_path="" + + sudo cp $MISTRAL_FILES_DIR/apache-mistral-api.template $mistral_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$mistral_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%MISTRAL_BIN_DIR%|$MISTRAL_BIN_DIR|g; + s|%API_WORKERS%|$API_WORKERS|g; + s|%SSLENGINE%|$mistral_ssl|g; + s|%SSLCERTFILE%|$mistral_certfile|g; + s|%SSLKEYFILE%|$mistral_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $mistral_apache_conf +} if is_service_enabled mistral; then if [[ "$1" == "stack" && "$2" == "install" ]]; then diff -Nru mistral-4.0.0/doc/source/conf.py mistral-5.0.0~b2/doc/source/conf.py --- mistral-4.0.0/doc/source/conf.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/conf.py 2017-06-09 12:48:26.000000000 +0000 @@ -30,7 +30,6 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', - 'sphinxcontrib.autohttp.flask', 'sphinxcontrib.pecanwsme.rest', 'wsmeext.sphinxext', ] @@ -100,8 +99,8 @@ git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] try: - html_last_updated_fmt = subprocess.Popen( - git_cmd, stdout=subprocess.PIPE).communicate()[0] + html_last_updated_fmt = subprocess.check_output( + git_cmd).decode('utf-8') except Exception: warnings.warn('Cannot get last updated time from git repository. ' 'Not setting "html_last_updated_fmt".') diff -Nru mistral-4.0.0/doc/source/developer/asynchronous_actions.rst mistral-5.0.0~b2/doc/source/developer/asynchronous_actions.rst --- mistral-4.0.0/doc/source/developer/asynchronous_actions.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/developer/asynchronous_actions.rst 2017-06-09 12:48:26.000000000 +0000 @@ -129,9 +129,7 @@ corresponding id. 
That way Mistral will know what the result of this action is and decide how to proceed with workflow execution. -Using raw HTTP: - - .. code-block:: HTTP +Using raw HTTP:: POST /v2/action-executions/ diff -Nru mistral-4.0.0/doc/source/developer/debug.rst mistral-5.0.0~b2/doc/source/developer/debug.rst --- mistral-4.0.0/doc/source/developer/debug.rst 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/developer/debug.rst 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,65 @@ +Mistral Debugging Guide +======================= + +To debug using a local engine and executor without dependencies such as +RabbitMQ, make sure your ``/etc/mistral/mistral.conf`` has the following +settings:: + + [DEFAULT] + rpc_backend = fake + + [pecan] + auth_enable = False + +and run the following command in *pdb*, *PyDev* or *PyCharm*:: + + mistral/cmd/launch.py --server all --config-file /etc/mistral/mistral.conf --use-debugger + +.. note:: + + In PyCharm, you also need to enable the Gevent compatibility flag in + Settings -> Build, Execution, Deployment -> Python Debugger -> Gevent + compatible. Without this setting, PyCharm will not show variable values + and become unstable during debugging. + + +Running unit tests in PyCharm +----------------------------- + +In order to be able to conveniently run unit tests, you need to: + +1. Set unit tests as the default runner: + + Settings -> Tools -> Python Integrated Tools -> + Default test runner: Unittests + +2. Enable test detection for all classes: + + Run/Debug Configurations -> Defaults -> Python tests -> Unittests -> uncheck + Inspect only subclasses of unittest.TestCase + +Running examples +---------------- + +To run the examples find them in mistral-extra repository +(https://github.com/openstack/mistral-extra) and follow the instructions on +each example. + + +Tests +----- + +You can run some of the functional tests in non-openstack mode locally. To do +this: + +#. 
set ``auth_enable = False`` in the ``mistral.conf`` and restart Mistral +#. execute:: + + $ ./run_functional_tests.sh + +To run tests for only one version need to specify it:: + + $ bash run_functional_tests.sh v1 + +More information about automated tests for Mistral can be found on +`Mistral Wiki `_. diff -Nru mistral-4.0.0/doc/source/developer/index.rst mistral-5.0.0~b2/doc/source/developer/index.rst --- mistral-4.0.0/doc/source/developer/index.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/developer/index.rst 2017-06-09 12:48:26.000000000 +0000 @@ -8,4 +8,5 @@ extending_yaql asynchronous_actions devstack + debug troubleshooting diff -Nru mistral-4.0.0/doc/source/developer/webapi/v2.rst mistral-5.0.0~b2/doc/source/developer/webapi/v2.rst --- mistral-4.0.0/doc/source/developer/webapi/v2.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/developer/webapi/v2.rst 2017-06-09 12:48:26.000000000 +0000 @@ -169,8 +169,7 @@ Example of creating action defaults -.. code-block:: yaml - +:: ...ENV... "variables": { diff -Nru mistral-4.0.0/doc/source/dsl/dsl_v2.rst mistral-5.0.0~b2/doc/source/dsl/dsl_v2.rst --- mistral-4.0.0/doc/source/dsl/dsl_v2.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/dsl/dsl_v2.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,28 +1,27 @@ -Mistral DSL v2 specification -============================ +Mistral Workflow Language v2 specification +========================================== Introduction ------------ -This document fully describes Domain Specific Language (DSL) version 2 -of Mistral Workflow Service. Since version 1 issued in May 2014 Mistral -team completely reworked the language pursuing the goal in mind to make -it easier to understand while more consistent and flexible. 
- -Unlike Mistral DSLv1 DSL v2 assumes that all -entities that Mistral works with like workflows and actions are -completely independent in terms of how they're referenced and accessed -through API (and also Python Client API and CLI). Workbook, the entity -that can combine workflows and actions still exists in the -language but only for namespacing and convenience purposes. See +This document fully describes Mistral Workflow Language version 2 of Mistral +Workflow Service. Since version 1 issued in May 2014 Mistral team completely +reworked the language pursuing the goal in mind to make it easier to understand +while more consistent and flexible. + +Unlike Mistral Workflow Language v1, v2 assumes that all entities that Mistral +works with like workflows and actions are completely independent in terms of +how they're referenced and accessed through API (and also Python Client API and +CLI). Workbook, the entity that can combine workflows and actions still exists +in the language but only for namespacing and convenience purposes. See `Workbooks section <#workbooks>`__ for more details. -**NOTE**: DSL and API of version 1 has not been supported since April 2015 and -DSL and API of version 2 is now the only way to interact with Mistral +**NOTE**: Mistral Workflow Language and API of version 1 has not been supported +since April 2015 and version 2 is now the only way to interact with Mistral service. -Mistral DSL consists of the following main object(entity) types that -will be described in details below: +Mistral Workflow Language consists of the following main object(entity) types +that will be described in details below: - `Workflows <#workflows>`__ - `Actions <#actions>`__ @@ -30,13 +29,14 @@ Prerequisites ------------- -Mistral DSL supports `YAQL `__ and -`Jinja2 `__ expression languages to -reference workflow context variables and thereby implements passing data -between workflow tasks. It's also referred to as Data Flow mechanism. 
-YAQL is a simple but powerful query language that allows to extract -needed information from JSON structured data. It is allowed to use YAQL -in the following sections of DSL: +Mistral Workflow Language supports +`YAQL `__ and +`Jinja2 `__ expression languages to reference +workflow context variables and thereby implements passing data between workflow +tasks. It's also referred to as Data Flow mechanism. YAQL is a simple but +powerful query language that allows to extract needed information from JSON +structured data. It is allowed to use YAQL in the following sections of +Mistral Workflow Language: - Workflow `'output' attribute <#common-workflow-attributes>`__ - Workflow `'task-defaults' attribute <#common-workflow-attributes>`__ @@ -49,8 +49,8 @@ - Action `'base-input' attribute <#attributes>`__ - Action `'output' attribute <#attributes>`__ -Mistral DSL is fully based on YAML and knowledge of YAML is a plus for -better understanding of the material in this specification. It also +Mistral Workflow Language is fully based on YAML and knowledge of YAML is a +plus for better understanding of the material in this specification. It also takes advantage of supported query languages to define expressions in workflow and action definitions. @@ -61,16 +61,16 @@ Workflows --------- -Workflow is the main building block of Mistral DSL, the reason why the -project exists. Workflow represents a process that can be described in a -various number of ways and that can do some job interesting to the end -user. Each workflow consists of tasks (at least one) describing what -exact steps should be made during workflow execution. +Workflow is the main building block of Mistral Workflow Language, the reason +why the project exists. Workflow represents a process that can be described in +a various number of ways and that can do some job interesting to the end user. +Each workflow consists of tasks (at least one) describing what exact steps +should be made during workflow execution. 
YAML example ^^^^^^^^^^^^ -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -107,11 +107,11 @@ Workflow types ^^^^^^^^^^^^^^ -Mistral DSL v2 introduces different workflow types and the structure of -each workflow type varies according to its semantics. Basically, -workflow type encapsulates workflow processing logic, a set of meta -rules defining how all workflows of this type should work. Currently, -Mistral provides two workflow types: +Mistral Workflow Language v2 introduces different workflow types and the +structure of each workflow type varies according to its semantics. Basically, +workflow type encapsulates workflow processing logic, a set of meta rules +defining how all workflows of this type should work. Currently, Mistral +provides two workflow types: - `Direct workflow <#direct-workflow>`__ - `Reverse workflow <#reverse-workflow>`__ @@ -163,11 +163,11 @@ Task is what a workflow consists of. It defines a specific computational step in the workflow. Each task can optionally take input data and -produce output. In Mistral DSL v2 task can be associated with an action -or a workflow. In the example below there are two tasks of different +produce output. In Mistral Workflow Language v2, task can be associated with an +action or a workflow. In the example below there are two tasks of different types: -.. code-block:: yaml +.. code-block:: mistral action_based_task:   action: std.http url='openstack.org' @@ -234,7 +234,7 @@ YAML example -.. code-block:: yaml +.. code-block:: mistral my_task:   action: my_action @@ -276,7 +276,9 @@ **concurrency** Defines a max number of actions running simultaneously in a task. *Applicable* -only for tasks that have *with-items*. +only for tasks that have *with-items*. If *concurrency* task property is not +set then actions (or workflows in case of nested workflows) of the task will +be scheduled for execution all at once. 
**retry** @@ -297,7 +299,7 @@ Retry policy can also be configured on a single line as: -.. code-block:: yaml +.. code-block:: mistral task1:   action: my_action @@ -313,7 +315,7 @@ Full syntax: -.. code-block:: yaml +.. code-block:: mistral my_task:   action: std.http @@ -323,7 +325,7 @@ Simplified syntax: -.. code-block:: yaml +.. code-block:: mistral my_task:   action: std.http url="http://mywebsite.org" method="GET" @@ -332,7 +334,7 @@ Full syntax: -.. code-block:: yaml +.. code-block:: mistral my_task:   workflow: some_nested_workflow @@ -342,25 +344,24 @@ Simplified syntax: -.. code-block:: yaml +.. code-block:: mistral my_task:   workflow: some_nested_workflow param1='val1' param2='val2' -**NOTE**: It's also possible to merge these two approaches and specify a -part of parameters using simplified key-value pairs syntax and using -keyword *input*. In this case all the parameters will be effectively -merged. If the same parameter is specified in both ways then the one -under *input* keyword takes precedence. +**NOTE**: It's also possible to merge these two approaches and specify a part +of parameters using simplified key-value pairs syntax and using keyword *input*. +In this case all the parameters will be effectively merged. If the same +parameter is specified in both ways then the one under *input* keyword takes +precedence. Direct workflow ^^^^^^^^^^^^^^^ -Direct workflow consists of tasks combined in a graph where every next -task starts after another one depending on produced result. So direct -workflow has a notion of transition. Direct workflow is considered to be -completed if there aren't any transitions left that could be used to -jump to next tasks. +Direct workflow consists of tasks combined in a graph where every next task +starts after another one depending on produced result. So direct workflow has a +notion of transition. Direct workflow is considered to be completed if there +aren't any transitions left that could be used to jump to next tasks. 
.. image:: /img/Mistral_direct_workflow.png @@ -369,7 +370,7 @@ YAML example '''''''''''' -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -426,25 +427,78 @@ should only be understood as a language construction that allows to define some clean up actions. +Engine Commands +''''''''''''''' + +Mistral has a number of engine commands that can be called within direct +workflows. These commands are used to change the Workflow state. + +- **succeed** - will end the current workflow with the state SUCCESS. +- **pause** - will end the current workflow with the state PAUSED. +- **fail** - will end the current workflow with the state ERROR. + +Each of the engine commands accepts a ``msg`` input. This is optional, but if +provided will be stored in the state info on the workflow execution. + +Workflows that have been ended with ``succeed`` or ``fail`` may not be resumed +later, but workflows that have been ended with ``pause`` may be. + +YAML example +'''''''''''' + +.. code-block:: mistral + + --- + version: '2.0' + + send_error_mail: + tasks: + create_server: + action: nova.servers_create name=<% $.vm_name %> + publish: + vm_id: <% task(create_server).result.id %> + on-complete: + - fail: <% not $.vm_id %> + +In this example we have a short workflow with one task that creates a server +in Nova. The task publishes the ID of the virtual machine, but if this value +is empty then it will fail the workflow. + +.. code-block:: mistral + + on-complete: + - taskA + - fail + - taskB + +When the engine commands are used with task names in a single list, they are +processed one at a time until the workflow reaches a terminal state. In the +above example, the ``on-complete`` has three steps to complete - these are +executed in order until the workflow reaches a terminal state. So in this case +``taskA`` is called first, then the ``fail`` engine command and ``taskB`` would +never be called. 
``taskB`` would not be called if ``succeed`` was used in this +example either, but if ``pause`` was used ``taskB`` would be called after the +workflow is resumed. + Transitions with YAQL expressions ''''''''''''''''''''''''''''''''' Task transitions can be determined by success/error/completeness of the -previous tasks and also by additional guard expressions that can -access any data produced by upstream tasks. So in the example above task -'create_vm' could also have a YAQL expression on transition to task -'send_success_email' as follows: +previous tasks and also by additional guard expressions that can access any +data produced by upstream tasks. So in the example above task 'create_vm' could +also have a YAQL expression on transition to task 'send_success_email' as +follows: -.. code-block:: yaml +.. code-block:: mistral create_vm:  ...  on-success:    - send_success_email: <% $.vm_id != null %> -And this would tell Mistral to run 'send_success_email' task only if -'vm_id' variable published by task 'create_vm' is not empty. -Expressions can also be applied to 'on-error' and 'on-complete'. +And this would tell Mistral to run 'send_success_email' task only if 'vm_id' +variable published by task 'create_vm' is not empty. Expressions can also be +applied to 'on-error' and 'on-complete'. Fork '''' @@ -452,7 +506,7 @@ There are situations when we need to be able to run more than one task after some task has completed. -.. code-block:: yaml +.. code-block:: mistral create_vm:   ... @@ -460,19 +514,18 @@     - register_vm_in_load_balancer     - register_vm_in_dns -In this case Mistral will run both "register_xxx" tasks simultaneously -and this will lead to multiple independent workflow routes being -processed in parallel. +In this case Mistral will run both "register_xxx" tasks simultaneously and this +will lead to multiple independent workflow routes being processed in parallel. 
Join '''' -Join flow control allows to synchronize multiple parallel workflow -branches and aggregate their data. +Join flow control allows to synchronize multiple parallel workflow branches and +aggregate their data. Full Join (join: all) -.. code-block:: yaml +.. code-block:: mistral register_vm_in_load_balancer:   ... @@ -493,16 +546,15 @@   join: all   action: send_email -When a task has property "join" assigned with value "all" the task will -run only if all upstream tasks (ones that lead to this task) are -completed and corresponding conditions have triggered. Task A is -considered an upstream task of Task B if Task A has Task B mentioned in -any of its "on-success", "on-error" and "on-complete" clauses regardless -of guard expressions. +When a task has property "join" assigned with value "all" the task will run +only if all upstream tasks (ones that lead to this task) are completed and +corresponding conditions have triggered. Task A is considered an upstream task +of Task B if Task A has Task B mentioned in any of its "on-success", "on-error" +and "on-complete" clauses regardless of guard expressions. Partial Join (join: 2) -.. code-block:: yaml +.. code-block:: mistral register_vm_in_load_balancer:  ... @@ -523,43 +575,42 @@   join: 2   action: send_email -When a task has property "join" assigned with a numeric value then the -task will run once at least this number of upstream tasks are completed -and corresponding conditions have triggered. In the example above task +When a task has property "join" assigned with a numeric value then the task +will run once at least this number of upstream tasks are completed and +corresponding conditions have triggered. In the example above task "wait_for_two_registrations" will run if two any of "register_vm_xxx" tasks complete. Discriminator (join: one) -Discriminator is a special case of Partial Join when "join" property has -value 1. It means Mistral will wait for any completed task. 
-In this case instead of 1 it is possible to specify special -string value "one" which is introduced for symmetry with "all". However, -it's up to the user whether to use "1" or "one". +Discriminator is a special case of Partial Join when "join" property has value +1. It means Mistral will wait for any completed task. In this case instead of 1 +it is possible to specify special string value "one" which is introduced for +symmetry with "all". However, it's up to the user whether to use "1" or "one". Reverse workflow ^^^^^^^^^^^^^^^^ -In reverse workflow all relationships in workflow task graph are -dependencies. In order to run this type of workflow we need to specify a -task that needs to be completed, it can be conventionally called 'target -task'. When Mistral Engine starts a workflow it recursively identifies -all the dependencies that need to be completed first. +In reverse workflow all relationships in workflow task graph are dependencies. +In order to run this type of workflow we need to specify a task that needs to +be completed, it can be conventionally called 'target task'. When Mistral +Engine starts a workflow it recursively identifies all the dependencies that +need to be completed first. .. image:: /img/Mistral_reverse_workflow.png -Figure 2 explains how reverse workflow works. In the example, task -**T1** is chosen a target task. So when the workflow starts Mistral will -run only tasks **T7**, **T8**, **T5**, **T6**, **T2** and **T1** in the -specified order (starting from tasks that have no dependencies). Tasks -**T3** and **T4** won't be a part of this workflow because there's no -route in the directed graph from **T1** to **T3** or **T4**. +Figure 2 explains how reverse workflow works. In the example, task **T1** is +chosen a target task. So when the workflow starts Mistral will run only tasks +**T7**, **T8**, **T5**, **T6**, **T2** and **T1** in the specified order +(starting from tasks that have no dependencies). 
Tasks **T3** and **T4** won't +be a part of this workflow because there's no route in the directed graph from +**T1** to **T3** or **T4**. YAML example '''''''''''' -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -606,7 +657,7 @@ YAML example '''''''''''' -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -638,17 +689,17 @@        delay: 5        count: <% $.vm_names.len() * 10 %> -Workflow "create_vms" in this example creates as many virtual servers -as we provide in "vm_names" input parameter. E.g., if we specify -vm_names=["vm1", "vm2"] then it'll create servers with these names -based on same image and flavor. It is possible because of using -"with-items" keyword that makes an action or a workflow associated with -a task run multiple times. Value of "with-items" task property contains -an expression in the form: in <% YAQL_expression %>. +Workflow "create_vms" in this example creates as many virtual servers as we +provide in "vm_names" input parameter. E.g., if we specify +vm_names=["vm1", "vm2"] then it'll create servers with these names based on +same image and flavor. It is possible because of using "with-items" keyword +that makes an action or a workflow associated with a task run multiple times. +Value of "with-items" task property contains an expression in the form: in +<% YAQL_expression %>. The most common form is: -.. code-block:: yaml +.. code-block:: mistral with-items:   - var1 in <% YAQL_expression_1 %> @@ -657,18 +708,18 @@   - varN in <% YAQL_expression_N %> where collections expressed as YAQL_expression_1, YAQL_expression_2, -YAQL_expression_N must have equal sizes. When a task gets started -Mistral will iterate over all collections in parallel, i.e. number of -iterations will be equal to length of any collections. - -Note that in case of using "with-items" task result accessible in -workflow context as <% task(task_name).result %> will be a list containing results -of corresponding action/workflow calls. 
If at least one action/workflow -call has failed then the whole task will get into ERROR state. It's also -possible to apply retry policy for tasks with "with-items" property. In -this case retry policy will be relaunching all action/workflow calls -according to "with-items" configuration. Other policies can also be used -the same way as with regular non "with-items" tasks. +YAQL_expression_N must have equal sizes. When a task gets started Mistral will +iterate over all collections in parallel, i.e. number of iterations will be +equal to length of any collections. + +Note that in case of using "with-items" task result accessible in workflow +context as <% task(task_name).result %> will be a list containing results of +corresponding action/workflow calls. If at least one action/workflow call has +failed then the whole task will get into ERROR state. It's also possible to +apply retry policy for tasks with "with-items" property. In this case retry +policy will be relaunching all action/workflow calls according to "with-items" +configuration. Other policies can also be used the same way as with regular non +"with-items" tasks. .. _actions-dsl: @@ -676,17 +727,16 @@ ------- Action defines what exactly needs to be done when task starts. Action is -similar to a regular function in general purpose programming language -like Python. It has a name and parameters. Mistral distinguishes 'system -actions' and 'Ad-hoc actions'. +similar to a regular function in general purpose programming language like +Python. It has a name and parameters. Mistral distinguishes 'system actions' +and 'Ad-hoc actions'. System actions ^^^^^^^^^^^^^^ -System actions are provided by Mistral out of the box and can be used by -anyone. It is also possible to add system actions for specific Mistral -installation via a special plugin mechanism. Currently, built-in system -actions are: +System actions are provided by Mistral out of the box and can be used by anyone. 
+It is also possible to add system actions for specific Mistral installation via +a special plugin mechanism. Currently, built-in system actions are: std.fail '''''''' @@ -696,7 +746,7 @@ Example: -.. code-block:: yaml +.. code-block:: mistral manual_fail: action: std.fail @@ -730,7 +780,7 @@ Example: -.. code-block:: yaml +.. code-block:: mistral http_task:   action: std.http url='google.com' @@ -738,8 +788,8 @@ std.mistral_http '''''''''''''''' -This action works just like 'std.http' with the only exception: when -sending a request it inserts the following HTTP headers: +This action works just like 'std.http' with the only exception: when sending a +request it inserts the following HTTP headers: - **Mistral-Workflow-Name** - Name of the workflow that the current action execution is associated with. @@ -750,14 +800,13 @@ - **Mistral-Action-Execution-Id** - Identifier of the current action execution. -Using this action makes it possible to do any work in asynchronous -manner triggered via HTTP protocol. That means that Mistral can send a -request using 'std.mistral_http' and then any time later whatever -system that received this request can notify Mistral back (using its -public API) with the result of this action. Header -**Mistral-Action-Execution-Id** is required for this operation because -it is used a key to find corresponding action execution in Mistral -to attach the result to. +Using this action makes it possible to do any work in asynchronous manner +triggered via HTTP protocol. That means that Mistral can send a request using +'std.mistral_http' and then any time later whatever system that received this +request can notify Mistral back (using its public API) with the result of this +action. Header **Mistral-Action-Execution-Id** is required for this operation +because it is used a key to find corresponding action execution in Mistral to +attach the result to. std.email ''''''''' @@ -773,7 +822,7 @@ Example: -.. code-block:: yaml +.. 
code-block:: mistral send_email_task:   action: std.email @@ -802,11 +851,13 @@ executed. *Required*. - **host** - Host name that the command needs to be executed on. *Required*. -- **username** - User name to authenticate on the host. *Required*. +- **username** - User name to authenticate on the host. *Required*. - **password** - User password to to authenticate on the host. *Optional*. -- **private_key_filename** - Private key file name which will be used for authentication on remote host. +- **private_key_filename** - Private key file name which will be used for + authentication on remote host. All private keys should be on executor host in **/.ssh/**. -**** should refer to user directory under which service is running. *Optional*. +**** should refer to user directory under which service is +running. *Optional*. **NOTE**: Authentication using key pairs is supported, key should be on Mistral Executor server machine. @@ -814,8 +865,8 @@ std.echo '''''''' -Simple action mostly needed for testing purposes that returns a -predefined result. +Simple action mostly needed for testing purposes that returns a predefined +result. Input parameters: @@ -833,10 +884,11 @@ executed. *Required*. **To use std.javascript, it is needed to install a number of -dependencies and JS engine.** Currently Mistral uses only V8 Engine and -its wrapper - PyV8. For installing it, do the next steps: +dependencies and JS engine.** Currently Mistral uses only V8 Engine and its +wrapper - PyV8. For installing it, do the next steps: -1. Install required libraries - boost, g++, libtool, autoconf, subversion, libv8-legacy-dev: On Ubuntu:: +1. Install required libraries - boost, g++, libtool, autoconf, subversion, +libv8-legacy-dev: On Ubuntu:: $ sudo apt-get install libboost-all-dev g++ libtool autoconf libv8-legacy-dev subversion make @@ -855,7 +907,7 @@ Example: -.. code-block:: yaml +.. 
code-block:: mistral --- version: '2.0' @@ -886,7 +938,7 @@ Another example for getting the current date and time: -.. code-block:: yaml +.. code-block:: mistral   ---   version: '2.0' @@ -913,18 +965,18 @@ Ad-hoc actions ^^^^^^^^^^^^^^ -Ad-hoc action is a special type of action that can be created by user. -Ad-hoc action is always created as a wrapper around any other existing -system action and its main goal is to simplify using same actions many -times with similar pattern. +Ad-hoc action is a special type of action that can be created by user. Ad-hoc +action is always created as a wrapper around any other existing system action +and its main goal is to simplify using same actions many times with similar +pattern. -**NOTE**: Nested ad-hoc actions currently are not supported (i.e. ad-hoc -action around another ad-hoc action). +**NOTE**: Nested ad-hoc actions currently are not supported (i.e. ad-hoc action +around another ad-hoc action). YAML example '''''''''''' -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -945,10 +997,10 @@    smtp_server: 'smtp.google.com'    smtp_password: 'SECRET' -Once this action is uploaded to Mistral any workflow will be able to use -it as follows: +Once this action is uploaded to Mistral any workflow will be able to use it as +follows: -.. code-block:: yaml +.. code-block:: mistral my_workflow:  tasks: @@ -961,54 +1013,52 @@ - **base** - Name of base action that this action is built on top of. *Required*. -- **base-input** - Actual input parameters provided to base action. - Look at the example above. *Optional*. -- **input** - List of declared action parameters which should be - specified as corresponding task input. This attribute is optional and - used only for documenting purposes. Mistral now does not enforce - actual input parameters to exactly correspond to this list. 
Based - parameters will be calculated based on provided actual parameters - with using expressions so what's used in expressions implicitly - define real input parameters. Dictionary of actual input parameters - (expression context) is referenced as '$.' in YAQL and as '_.' in Jinja. - Redundant parameters will be simply ignored. -- **output** - Any data structure defining how to calculate output of - this action based on output of base action. It can optionally have - expressions to access properties of base action output through expression - context. +- **base-input** - Actual input parameters provided to base action. Look at the + example above. *Optional*. +- **input** - List of declared action parameters which should be specified as + corresponding task input. This attribute is optional and used only for + documenting purposes. Mistral now does not enforce actual input parameters to + exactly correspond to this list. Based parameters will be calculated based on + provided actual parameters with using expressions so what's used in + expressions implicitly define real input parameters. Dictionary of actual + input parameters (expression context) is referenced as '$.' in YAQL and as + '_.' in Jinja. Redundant parameters will be simply ignored. +- **output** - Any data structure defining how to calculate output of this + action based on output of base action. It can optionally have expressions to + access properties of base action output through expression context. Workbooks --------- -As mentioned before, workbooks still exist in Mistral DSL version 2 but -purely for convenience. Using workbooks users can combine multiple -entities of any type (workflows, actions and triggers) into one document -and upload to Mistral service. When uploading a workbook Mistral will -parse it and save its workflows, actions and triggers as independent -objects which will be accessible via their own API endpoints -(/workflows, /actions and /triggers/). 
Once it's done the workbook comes -out of the game. User can just start workflows and use references to -workflows/actions/triggers as if they were uploaded without workbook in -the first place. However, if we want to modify these individual objects -we can modify the same workbook definition and re-upload it to Mistral -(or, of course, we can do it independently). +As mentioned before, workbooks still exist in Mistral Workflow Language version +2 but purely for convenience. Using workbooks users can combine multiple +entities of any type (workflows, actions and triggers) into one document and +upload to Mistral service. When uploading a workbook Mistral will parse it and +save its workflows, actions and triggers as independent objects which will be +accessible via their own API endpoints (/workflows, /actions and /triggers/). +Once it's done the workbook comes out of the game. User can just start workflows +and use references to workflows/actions/triggers as if they were uploaded +without workbook in the first place. However, if we want to modify these +individual objects we can modify the same workbook definition and re-upload it +to Mistral (or, of course, we can do it independently). Namespacing ^^^^^^^^^^^ -One thing that's worth noting is that when using a workbook Mistral uses -its name as a prefix for generating final names of workflows, actions -and triggers included into the workbook. To illustrate this principle -let's take a look at the figure below. +One thing that's worth noting is that when using a workbook Mistral uses its +name as a prefix for generating final names of workflows, actions and triggers +included into the workbook. To illustrate this principle let's take a look at +the figure below. .. image:: /img/Mistral_workbook_namespacing.png -So after a workbook has been uploaded its workflows and actions become independent objects but with slightly different names. 
+So after a workbook has been uploaded its workflows and actions become +independent objects but with slightly different names. YAML example '''''''''''' -.. code-block:: yaml +.. code-block:: mistral --- version: '2.0' @@ -1049,9 +1099,9 @@      - str2    base: std.echo output="<% $.str1 %><% $.str2 %>" -**NOTE**: Even though names of objects inside workbooks change upon -uploading Mistral allows referencing between those objects using local -names declared in the original workbook. +**NOTE**: Even though names of objects inside workbooks change upon uploading +Mistral allows referencing between those objects using local names declared in +the original workbook. Attributes ^^^^^^^^^^ @@ -1068,7 +1118,8 @@ Predefined values/Functions in execution data context ----------------------------------------------------- -Using expressions it is possible to use some predefined values in Mistral DSL. +Using expressions it is possible to use some predefined values in Mistral +Workflow Language. - **OpenStack context** - **Task result** @@ -1078,21 +1129,160 @@ OpenStack context ^^^^^^^^^^^^^^^^^ -OpenStack context is available by **$.openstack**. It contains -**auth_token,** **project_id**, **user_id**, **service_catalog**, -**user_name**, **project_name**, **roles**, **is_admin** properties. +OpenStack context is available by **$.openstack**. It contains **auth_token**, +**project_id**, **user_id**, **service_catalog**, **user_name**, +**project_name**, **roles**, **is_admin** properties. + + +Builtin functions in expressions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In addition to the current context (i.e. $ in YAQL and _ in Jinja2) expressions +have access to a set of predefined functions. + + +The expression languages come with their own individual included functions and +operations. Mistral adds the following functions that are available in all the +supported languages. + +This section will describe builtin functions added by Mistral. 
+ +Tasks function +'''''''''''''' + +Signature: + **tasks(workflow_execution_id=null, recursive=false, state=null, flat=false)** + +Description: + + This function allows users to filter all tasks by workflow execution id + and/or state. In addition, it is possible to get task executions recursively + and flatten the task executions list. + +Parameters: + + #. **workflow_execution_id** - If provided the tasks function will return + task executions for a specific workflow execution (either the current + execution or a different one). Otherwise it will return all task + executions that match the other parameters. *Optional.* + #. **recursive** - This parameter is a boolean value, if it is true then all + task executions within nested workflow executions will be returned. This + is usually used in combination with a specific workflow_execution_id + where you still want to see nested workflow's task executions. *Optional.* + False by default. + #. **state** - If provided, the task executions will be filtered by their + current state. If state isn't provided, all task executions that match the + other parameters will be returned . *Optional.* + #. **flat** - if true, only list the task executions that match at least one + of the next conditions: + + * task executions of type action + * task executions of type workflow that have a different state from the + workflow execution they triggered. For example, if used with a + specific workflow_execution_id and the state ERROR it will return + tasks that erred despite the workflow succeeding. This can mean that + there was an error in the task itself, like an invalid expression in + publish. + + *Optional.* False by default. + +Example: + +Workflow definition: + +.. 
code-block:: mistral + + --- + version: "v2.0" + wf: + tasks: + task: + action: std.noop + publish: + all_tasks_in_this_wf_yaql: <% tasks(execution().id) %> + all_tasks_in_this_wf_jinja: "{{ tasks(execution().id) }}" + + all_tasks_in_error_yaql: <% tasks(null, false, ERROR) %> + all_tasks_in_error_jinja: "{{ tasks(None, false, 'ERROR') }}" + all_tasks_in_error_yaql_with_kw: <% tasks(state => ERROR) %> + all_tasks_in_error_jinja_with_kw: "{{ tasks(state='ERROR') }}" + + all_tasks_yaql_option1: <% tasks() %> + all_tasks_yaql_option2: <% tasks(null, false, null, false) %> + all_tasks_jinja_option1: "{{ tasks() }}" + all_tasks_jinja_option2: "{{ tasks(None, false, None, false) }}" + +Task publish result (partial to keep the documentation short): + +.. warning:: + The return value for each task execution hasn't been finalized and isn't + considered stable. It may change in a future Mistral release. + +.. code-block:: json + + { + "all_tasks_in_error_yaql": [ + { + "id": "3d363d4b-8c19-48fa-a9a0-8721dc5469f2", + "name": "fail_task", + "type": "ACTION", + "workflow_execution_id": "c0a4d2ff-0127-4826-8370-0570ef8cad80", + "state": "ERROR", + "state_info": "Failed to run action [action_ex_id=bcb04b28-6d50-458e-9b7e-a45a5ff1ca01, action_cls='', attributes='{}', params='{}']\n Fail action expected exception.", + "result": "Failed to run action [action_ex_id=bcb04b28-6d50-458e-9b7e-a45a5ff1ca01, action_cls='', attributes='{}', params='{}']\n Fail action expected exception.", + "published": {}, + "spec": { + "action": "std.fail", + "version": "2.0", + "type": "direct", + "name": "fail_task" + } + } + ], + "all_tasks_in_this_wf_jinja": [ + { + "id": "83a34bfe-268c-46f5-9e5c-c16900540084", + "name": "task", + "type": "ACTION", + "workflow_execution_id": "899a3318-b5c0-4860-82b4-a5bd147a4643", + "state": "SUCCESS", + "state_info": null, + "result": null, + "published": {}, + "spec": { + "action": "std.noop", + "version": "2.0", + "type": "direct", + "name": "task", + "publish": { 
+ "all_tasks_in_error_yaql": "<% tasks(null, false, ERROR) %>", + "all_tasks_in_error_jinja": "{{ tasks(None, false, 'ERROR') }}", + "all_tasks_yaql_option2": "<% tasks(null, false, false, false) %>", + "all_tasks_yaql_option1": "<% tasks() %>", + "all_tasks_jinja_option1": "{{ tasks() }}", + "all_tasks_in_error_jinja_with_kw": "{{ tasks(state='ERROR') }}", + "all_tasks_jinja_option2": "{{ tasks(None, false, None, false) }}", + "all_tasks_in_this_wf_jinja": "{{ tasks(execution().id) }}", + "all_tasks_in_this_wf_yaql": "<% tasks(execution().id) %>" + } + } + } + ], + "_comment": "other fields were dropped to keep docs short" + } + Task result -^^^^^^^^^^^ +''''''''''' -Task result is available by **task().result**. It contains task result -and directly depends on action output structure. Note that the *task()* -function itself returns more than only task result. It returns the following -fields of task executions: +Task result is available by **task().result**. It contains task +result and directly depends on action output structure. Note that the +*task()* function itself returns more than only task result. It +returns the following fields of task executions: * **id** - task execution UUID. * **name** - task execution name. -* **spec** - task execution spec dict (loaded from DSL). +* **spec** - task execution spec dict (loaded from Mistral Workflow Language). * **state** - task execution state. * **state_info** - task execution state info. * **result** - task execution result. @@ -1108,5 +1298,5 @@ Environment ^^^^^^^^^^^ -Environment info is available by **env()**. It is passed when user submit workflow execution. -It contains variables specified by user. +Environment info is available by **env()**. It is passed when user submit +workflow execution. It contains variables specified by user. 
diff -Nru mistral-4.0.0/doc/source/dsl/index.rst mistral-5.0.0~b2/doc/source/dsl/index.rst --- mistral-4.0.0/doc/source/dsl/index.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/dsl/index.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,7 +1,7 @@ -DSL Specification -================= +Mistral Workflow Language Specification +======================================= .. toctree:: :maxdepth: 1 - DSL v2 + Mistral Workflow Language v2 diff -Nru mistral-4.0.0/doc/source/guides/cli_guide.rst mistral-5.0.0~b2/doc/source/guides/cli_guide.rst --- mistral-4.0.0/doc/source/guides/cli_guide.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/guides/cli_guide.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,658 +1,87 @@ Mistral Client Commands Guide ============================= -Workbooks -^^^^^^^^^ +The Mistral CLI can be used with ``mistral`` command or via `OpenStackClient +`_. -**workbook-create**: :: - mistral workbook-create +Mistral Client +-------------- - Create new workbook. +The best way to learn about all the commands and arguments that are expected +is to use the ``mistral help`` command. +.. code-block:: bash + + $ mistral help + usage: mistral [--version] [-v] [--log-file LOG_FILE] [-q] [-h] [--debug] + [--os-mistral-url MISTRAL_URL] + [--os-mistral-version MISTRAL_VERSION] + [--os-mistral-service-type SERVICE_TYPE] + ... + +It can also be used with the name of a sub-command. + +.. code-block:: bash + + $ mistral help execution-create + usage: mistral execution-create [-h] [-f {json,shell,table,value,yaml}] + [-c COLUMN] [--max-width ] + [--print-empty] [--noindent] [--prefix PREFIX] + [-d DESCRIPTION] + workflow_identifier [workflow_input] [params] + + Create new execution. + + positional arguments: + workflow_identifier Workflow ID or name. Workflow name will be deprecated + sinceMitaka. + ... 
+ + +OpenStack Client +---------------- + +OpenStack client works in a similar way, the command ``openstack help`` shows +all the available commands and then ``openstack help `` will show +the detailed usage. + +The full list of Mistral commands that are registered with OpenStack client +can be listed with ``openstack command list``. By default it will list all +commands grouped togehter, but we can specify only the Mistral command group. + +.. code-block:: bash + + $ openstack command list --group openstack.workflow_engine.v2 + +------------------------------+-----------------------------------+ + | Command Group | Commands | + +------------------------------+-----------------------------------+ + | openstack.workflow_engine.v2 | action definition create | + | | action definition definition show | + | | action definition delete | + | | action definition list | + | | action definition show | + | | action definition update | + | | action execution delete | + ... + +Then detailed help output can be requested for an individual command. + +.. code-block:: bash + + $ openstack help workflow execution create + usage: openstack workflow execution create [-h] + [-f {json,shell,table,value,yaml}] + [-c COLUMN] [--max-width ] + [--print-empty] [--noindent] + [--prefix PREFIX] [-d DESCRIPTION] + workflow_identifier + [workflow_input] [params] + + Create new execution. + + positional arguments: + workflow_identifier Workflow ID or name. Workflow name will be deprecated + sinceMitaka. + workflow_input Workflow input + params Workflow additional parameters -positional arguments: - definition - Workbook definition file. - -**workbook-delete**: -:: - - mistral workbook-delete [ ...] - -Delete workbook. - -positional arguments: - name - Name of workbook(s). - -**workbook-get**: -:: - - mistral workbook-get - -Show specific workbook. - -positional arguments: - name - Workbook name. - -**workbook-get-definition**: -:: - - mistral workbook-get-definition - -Show workbook definition. 
- -positional arguments: - workbook_identifier - Workbook name or ID. - -**workbook-list**: -:: - - mistral workbook-list - -List all workbooks. - -**workbook-update**: -:: - - mistral workbook-update - -Update workbook. - -positional arguments: - definition - Workbook definition file. - -**workbook-validate**: -:: - - mistral workbook-validate - -Validate workbook. - -positional arguments: - definition - Workbook definition file. - -Workflows -^^^^^^^^^ - -**workflow-create**: -:: - - mistral workflow-create [--public] - -Create new workflow. - -positional arguments: - definition - Workflow definition file. - -optional arguments: - --public - With this flag workflow will be marked as "public". - -**workflow-delete**: -:: - - mistral workflow-delete [ ...] - -Delete workflow. - -positional arguments: - name - Name of workflow(s). - -**workflow-get**: -:: - - mistral workflow-get - -Show specific workflow. - -positional arguments: - name - Workflow name. - -**workflow-get-definition**: -:: - - mistral workflow-get-definition - -Show workflow definition. - -positional arguments: - name - Workflow name. - -**workflow-list**: -:: - - mistral workflow-list - -List all workflows. - -**workflow-update**: -:: - - mistral workflow-update - -Update workflow. - -positional arguments: - definition - Workflow definition. - -**workflow-validate**: -:: - - mistral workflow-validate - -Validate workflow. - -positional arguments: - definition - Workflow definition file. - -Actions -^^^^^^^ - -**action-create**: -:: - - mistral action-create [--public] - -Create new action. - -positional arguments: - definition - Action definition file. - -optional arguments: - --public - With this flag action will be marked as "public". - -**action-delete**: -:: - - mistral action-delete action [action ...] - -Delete action. - -positional arguments: - action - Name or ID of action(s). - -**action-get**: -:: - - mistral action-get - -Show specific action. 
- -positional arguments: - action - Action (name or ID). - -**action-get-definition**: -:: - - mistral action-get-definition - -Show action definition. - -positional arguments: - name - Action name. - -**action-list**: -:: - - mistral action-list - -List all actions. - -**action-update**: -:: - - mistral action-update [--public] [--id ID] - -Update action. - -positional arguments: - definition - Action definition file. - -optional arguments: - --id ID Action ID. - --public With this flag, action will be marked as "public". - -**action-validate**: -:: - - mistral action-validate - -Validate action. - -positional arguments: - definition - Action definition file. - -Workflow executions -^^^^^^^^^^^^^^^^^^^ - -**execution-create**: -:: - - mistral execution-create [-d DESCRIPTION] - [] [] - -Create a new execution. - -positional arguments: - workflow_identifier - Workflow ID or name. Workflow name has been deprecated - since Mitaka. - workflow_input - Workflow input. - params - Workflow additional parameters. - -optional arguments: - -d DESCRIPTION, --description DESCRIPTION - Execution description - -**execution-delete**: -:: - - mistral execution-delete [ ...] - -Delete execution. - -positional arguments: - execution - Id of execution identifier(s). - -**execution-get**: -:: - - mistral execution-get - -Show specific execution. - -positional arguments: - execution - Execution identifier. - -**execution-get-input**: -:: - - mistral execution-get-input - -Show execution input data. - -positional arguments: - id - Execution ID. - -**execution-get-output**: -:: - - mistral execution-get-output [-h] id - -Show execution output data. - -positional arguments: - id - Execution ID. - -**execution-list**: -:: - - mistral execution-list [--marker [MARKER]] [--limit [LIMIT]] - [--sort_keys [SORT_KEYS]] - [--sort_dirs [SORT_DIRS]] - -List all executions. 
- -optional arguments: - --marker [MARKER] The last execution uuid of the previous page, displays - list of executions after "marker". - --limit [LIMIT] Maximum number of executions to return in a single - result. - --sort_keys [SORT_KEYS] - Comma-separated list of sort keys to sort results by. - Default: created_at. Example: mistral execution-list - --sort_keys=id,description - --sort_dirs [SORT_DIRS] - Comma-separated list of sort directions. Default: asc. - Example: mistral execution-list - --sort_keys=id,description --sort_dirs=asc,desc - -**execution-update**: -:: - - mistral execution-update [-s {RUNNING,PAUSED,SUCCESS,ERROR,CANCELLED}] - [-e ENV] [-d DESCRIPTION] - -Update execution. - -positional arguments: - id - Execution identifier. - -optional arguments: - -s {RUNNING,PAUSED,SUCCESS,ERROR,CANCELLED}, --state {RUNNING,PAUSED,SUCCESS,ERROR,CANCELLED} - Execution state - -e ENV, --env ENV Environment variables - -d DESCRIPTION, --description DESCRIPTION - Execution description - -Task executions -^^^^^^^^^^^^^^^ - -**task-get**: -:: - - mistral task-get - -Show specific task. - -positional arguments: - id - Task identifier. - -**task-get-published**: -:: - - mistral task-get-published - -Show task published variables. - -positional arguments: - id - Task ID. - -**task-get-result**: -:: - - mistral task-get-result - -Show task output data. - -positional arguments: - id - Task ID. - -**task-list**: -:: - - mistral task-list [] - -List all tasks. - -positional arguments: - workflow_execution - Workflow execution ID associated with list of Tasks. - -**task-rerun**: -:: - - mistral task-rerun [--resume] [-e ENV] - -Rerun an existing task. - -positional arguments: - id - Task identifier. - -optional arguments: - --resume rerun only failed or unstarted action executions for - with-items task. - -e ENV, --env ENV Environment variables. - -Action executions -^^^^^^^^^^^^^^^^^ - -**action-execution-delete**: -:: - - mistral action-execution-delete [ ...] 
- -Delete action execution. - -positional arguments: - action_execution - Action execution ID. - -**action-execution-get**: -:: - - mistral action-execution-get - -Show specific Action execution. - -positional arguments: - action_execution - Action execution ID. - -**action-execution-get-input**: -:: - - mistral action-execution-get-input - -Show Action execution input data. - -positional arguments: - id - Action execution ID. - -**action-execution-get-output**: -:: - - mistral action-execution-get-output - -Show Action execution output data. - -positional arguments: - id - Action execution ID. - -**action-execution-list**: -:: - - mistral action-execution-list [] - -List all Action executions. - -positional arguments: - task-execution-id - Task execution ID. - -**action-execution-update**: -:: - - mistral action-execution-update [--state {IDLE,RUNNING,SUCCESS,ERROR}] [--output ] - -Update specific Action execution. - -positional arguments: - id - Action execution ID. - -optional arguments: - --state {IDLE,RUNNING,SUCCESS,ERROR} - Action execution state - --output OUTPUT - Action execution output - -**run-action**: -:: - - mistral run-action [] [-t ] - -Create new Action execution or just run specific action. - -positional arguments: - name - Action name to execute. - input - Action input. - -optional arguments: - -s, --save-result - Save the result into DB. - -t TARGET, --target TARGET - Action will be executed on executor. - -Cron-triggers -^^^^^^^^^^^^^ - -**cron-trigger-create**: -:: - - mistral cron-trigger-create [--params ] [--pattern <* * * * *>] - [--first-time ] - [--count ] - [] - -Create new trigger. - -positional arguments: - name - Cron trigger name. - workflow_identifier - Workflow name or ID. - workflow_input - Workflow input. - -optional arguments: - --params PARAMS - Workflow params. - --pattern <* * * * *> - Cron trigger pattern. - --first-time - Date and time of the first execution. - --count Number of wanted executions. 
- -**cron-trigger-delete**: -:: - - mistral cron-trigger-delete [ ...] - -Delete trigger. - -positional arguments: - name - Name of cron trigger(s). - -**cron-trigger-get**: -:: - - mistral cron-trigger-get - -Show specific cron trigger. - -positional arguments: - name - Cron trigger name. - -**cron-trigger-list**: -:: - - mistral cron-trigger-list - -List all cron triggers. - -Environments -^^^^^^^^^^^^ - -**environment-create**: -:: - - mistral environment-create - -Create new environment. - -positional arguments: - file - Environment configuration file in JSON or YAML. - -**environment-delete**: -:: - - mistral environment-delete [ ...] - -Delete environment. - -positional arguments: - environment - Name of environment(s). - -**environment-get**: -:: - - mistral environment-get - -Show specific environment. - -positional arguments: - name - Environment name. - -**environment-list**: -:: - - mistral environment-list - -List all environments. - -**environment-update**: -:: - - mistral environment-update - -Update environment. - -positional arguments: - file - Environment configuration file in JSON or YAML. - - -Members -^^^^^^^ - -**member-create**: -:: - - mistral member-create - -Shares a resource to another tenant. - -positional arguments: - resource_id - Resource ID to be shared. - resource_type - Resource type. - member_id - Project ID to whom the resource is shared to. - -**member-delete**: -:: - - mistral member-delete - -Delete a resource sharing relationship. - -positional arguments: - resource - Resource ID to be shared. - resource_type - Resource type. - member_id - Project ID to whom the resource is shared to. - -**member-get**: -:: - - mistral member-get [-m MEMBER_ID] - - -Show specific member information. - -positional arguments: - resource - Resource ID to be shared. - resource_type - Resource type. - -optional arguments: - -m MEMBER_ID, --member-id MEMBER_ID - Project ID to whom the resource is shared to. 
No need - to provide this param if you are the resource member. - -**member-list**: -:: - - mistral member-list - -List all members. - -positional arguments: - resource_id - Resource id to be shared. - resource_type - Resource type. - -**member-update**: -:: - - mistral member-update [-m MEMBER_ID] - [-s {pending,accepted,rejected}] - - -Update resource sharing status. - -positional arguments: - resource_id - Resource ID to be shared. - resource_type - Resource type. - -optional arguments: - -m MEMBER_ID, --member-id MEMBER_ID - Project ID to whom the resource is shared to. No need - to provide this param if you are the resource member. - -s {pending,accepted,rejected}, --status {pending,accepted,rejected} - status of the sharing. - -Services API -^^^^^^^^^^^^ - -**service-list**: -:: - - mistral service-list - -List all services. - -.. seealso:: - `Workflow service command-line client `_. diff -Nru mistral-4.0.0/doc/source/guides/configuration_guide.rst mistral-5.0.0~b2/doc/source/guides/configuration_guide.rst --- mistral-4.0.0/doc/source/guides/configuration_guide.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/guides/configuration_guide.rst 2017-06-09 12:48:26.000000000 +0000 @@ -57,8 +57,8 @@ and Mistral endpoints on Keystone:: $ MISTRAL_URL="http://[host]:[port]/v2" - $ openstack service create workflow --name mistral --description 'OpenStack Workflow service' - $ openstack endpoint create workflow --publicurl $MISTRAL_URL --adminurl $MISTRAL_URL --internalurl $MISTRAL_URL + $ openstack service create workflowv2 --name mistral --description 'OpenStack Workflow service' + $ openstack endpoint create workflowv2 --publicurl $MISTRAL_URL --adminurl $MISTRAL_URL --internalurl $MISTRAL_URL #. 
Configure transport properties in the corresponding config section: for RabbitMQ it is **oslo_messaging_rabbit**:: diff -Nru mistral-4.0.0/doc/source/guides/using_yaql.rst mistral-5.0.0~b2/doc/source/guides/using_yaql.rst --- mistral-4.0.0/doc/source/guides/using_yaql.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/guides/using_yaql.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -How to use YAQL in Mistral -========================== - -TBD diff -Nru mistral-4.0.0/doc/source/index.rst mistral-5.0.0~b2/doc/source/index.rst --- mistral-4.0.0/doc/source/index.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/index.rst 2017-06-09 12:48:26.000000000 +0000 @@ -39,7 +39,7 @@ developer/webapi/index -**DSL** +**Mistral Workflow Language** .. toctree:: :maxdepth: 2 @@ -65,6 +65,5 @@ ================== * :ref:`genindex` -* :ref:`modindex` * :ref:`search` diff -Nru mistral-4.0.0/doc/source/main_features.rst mistral-5.0.0~b2/doc/source/main_features.rst --- mistral-4.0.0/doc/source/main_features.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/main_features.rst 2017-06-09 12:48:26.000000000 +0000 @@ -7,17 +7,17 @@ Mistral supports transferring data from one task to another. In other words, if *taskA* produces a value then *taskB* which follows *taskA* can use it. -In order to use this data Mistral relies on a query language called `YAQL `_. +In order to use this data Mistral relies on a query language called `YAQL `_. YAQL is a powerful yet simple tool that allows the user to filter information, transform data and call functions. Find more information about it in the `YAQL official documentation `_ . This mechanism for transferring data plays a central role in the workflow concept and is referred to as Data Flow. 
-Below is a simple example of how Mistral Data Flow looks like from the DSL -(workflow language) perspective: +Below is a simple example of how Mistral Data Flow looks like from the Mistral +Workflow Language perspective: -.. code-block:: yaml +.. code-block:: mistral version: '2.0' @@ -59,7 +59,7 @@ host = my_favorite_executor Then start (restart) the executor. Use the "target" task property to specify -this executor in Workflow DSL:: +this executor in Mistral Workflow Language:: ... Workflow YAML ... task1: @@ -288,27 +288,37 @@ **By default this feature is disabled.** -When enabled, the policy will define the maximum age of an execution in -minutes since the last updated time. To enable and set a policy, edit the -Mistral configuration file and specify ``older_than`` and -``evaluation_interval`` in minutes. +This policy defines the maximum age of an execution since the last updated time +(in minutes) and the maximum number of finished executions. Each evaluation will +satisfy these conditions, so the expired executions (older than specified) will +be deleted, and the number of executions in finished state (regardless of expiration) +will be limited to max_finished_executions. + +To enable the policy, edit the Mistral configuration file and specify +``evaluation_interval`` and at least one of the ``older_than`` +or ``max_finished_executions`` options. .. code-block:: cfg [execution_expiration_policy] - older_than = 10080 # 1 week evaluation_interval = 120 # 2 hours + older_than = 10080 # 1 week + max_finished_executions = 500 -For the expiration policy to be enabled, both of these configuration options -must be set. +- **evaluation_interval** + + The evaluation interval defines how frequently Mistral will check and ensure + the above mentioned constraints. In the above example it is set to two hours, + so every two hours Mistral will remove executions older than 1 week, and + keep only the 500 latest finished executions. 
- **older_than** - This defines the maximum age of an execution in minutes since it was last + Defines the maximum age of an execution in minutes since it was last updated. It must be greater or equal to ``1``. -- **evaluation_interval** +- **max_finished_executions** + + Defines the maximum number of finished executions. + It must be greater or equal to ``1``. - The evaluation interval defines how frequently Mistral will check and expire - old executions. In the above example it is set to two hours, so every two - hours Mistral will clean up and look for expired executions. diff -Nru mistral-4.0.0/doc/source/quickstart.rst mistral-5.0.0~b2/doc/source/quickstart.rst --- mistral-4.0.0/doc/source/quickstart.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/quickstart.rst 2017-06-09 12:48:26.000000000 +0000 @@ -37,7 +37,7 @@ For example, we have the following workflow. -.. code-block:: yaml +.. code-block:: mistral --- version: "2.0" @@ -62,7 +62,7 @@ then stores the word "Done" as a result of the second task (`task2`). 
To learn more about the Mistral Workflows and what you can do, read the -:doc:`Mistral DSL specification ` +:doc:`Mistral Workflow Language specification ` Upload the workflow ------------------- @@ -73,11 +73,11 @@ The output should look similar to this:: - +-------------+--------+---------+---------------------+------------+ - | Name | Tags | Input | Created at | Updated at | - +-------------+--------+---------+---------------------+------------+ - | my_workflow | | names | 2015-08-13 08:44:49 | None | - +-------------+--------+---------+---------------------+------------+ + +------------------------------------+-------------+--------+---------+---------------------+------------+ + |ID | Name | Tags | Input | Created at | Updated at | + +------------------------------------+-------------+--------+---------+---------------------+------------+ + |9b719d62-2ced-47d3-b500-73261bb0b2ad| my_workflow | | names | 2015-08-13 08:44:49 | None | + +------------------------------------+-------------+--------+---------+---------------------+------------+ Run the workflow and check the result @@ -90,49 +90,53 @@ Make sure the output is like the following:: - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | ID | 056c2ed1-695f-4ccd-92af-e31bc6153784 | - | Workflow | my_workflow | - | Description | | - | State | RUNNING | - | State info | None | - | Created at | 2015-08-28 09:05:00.065917 | - | Updated at | 2015-08-28 09:05:00.844990 | - +-------------+--------------------------------------+ + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 49213eb5-196c-421f-b436-775849b55040 | + | Workflow ID | 9b719d62-2ced-47d3-b500-73261bb0b2ad | + | Workflow name | my_workflow | + | Description | | + | Task Execution ID | | + | State | RUNNING | + | State info | None | + | Created at | 2017-03-06 
11:24:10 | + | Updated at | 2017-03-06 11:24:10 | + +-------------------+--------------------------------------+ After a moment, check the status of the workflow execution (replace the example execution id with the ID output above):: - $ mistral execution-get 056c2ed1-695f-4ccd-92af-e31bc6153784 + $ mistral execution-get 49213eb5-196c-421f-b436-775849b55040 - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | ID | 056c2ed1-695f-4ccd-92af-e31bc6153784 | - | Workflow | my_workflow | - | Description | | - | State | SUCCESS | - | State info | None | - | Created at | 2015-08-28 09:05:00 | - | Updated at | 2015-08-28 09:05:03 | - +-------------+--------------------------------------+ + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | ID | 49213eb5-196c-421f-b436-775849b55040 | + | Workflow ID | 9b719d62-2ced-47d3-b500-73261bb0b2ad | + | Workflow name | my_workflow | + | Description | | + | Task Execution ID | | + | State | SUCCESS | + | State info | None | + | Created at | 2017-03-06 11:24:10 | + | Updated at | 2017-03-06 11:24:20 | + +-------------------+--------------------------------------+ The status of each **task** also can be checked:: - $ mistral task-list 056c2ed1-695f-4ccd-92af-e31bc6153784 + $ mistral task-list 49213eb5-196c-421f-b436-775849b55040 - +--------------------------------------+-------+---------------+--------------------------------------+---------+ - | ID | Name | Workflow name | Execution ID | State | - +--------------------------------------+-------+---------------+--------------------------------------+---------+ - | 91874635-dcd4-4718-a864-ac90408c1085 | task1 | my_workflow | 056c2ed1-695f-4ccd-92af-e31bc6153784 | SUCCESS | - | 3bf82863-28cb-4148-bfb8-1a6c3c115022 | task2 | my_workflow | 056c2ed1-695f-4ccd-92af-e31bc6153784 | SUCCESS | - 
+--------------------------------------+-------+---------------+--------------------------------------+---------+ + +--------------------------------------+-------+---------------+--------------------------------------+---------+------------+---------------------+---------------------+ + | ID | Name | Workflow name | Execution ID | State | State info | Created at | Updated at | + +--------------------------------------+-------+---------------+--------------------------------------+---------+------------+---------------------+---------------------+ + | f639e7a9-9609-468e-aa08-7650e1472efe | task1 | my_workflow | 49213eb5-196c-421f-b436-775849b55040 | SUCCESS | None | 2017-03-06 11:24:11 | 2017-03-06 11:24:17 | + | d565c5a0-f46f-4ebe-8655-9eb6796307a3 | task2 | my_workflow | 49213eb5-196c-421f-b436-775849b55040 | SUCCESS | None | 2017-03-06 11:24:17 | 2017-03-06 11:24:18 | + +--------------------------------------+-------+---------------+--------------------------------------+---------+------------+---------------------+---------------------+ Check the result of task *'task1'*:: - $ mistral task-get-result 91874635-dcd4-4718-a864-ac90408c1085 + $ mistral task-get-result f639e7a9-9609-468e-aa08-7650e1472efe [ "John", @@ -144,20 +148,20 @@ If needed, we can go deeper and look at a list of the results of the **action_executions** of a single task:: - $ mistral action-execution-list 91874635-dcd4-4718-a864-ac90408c1085 + $ mistral action-execution-list f639e7a9-9609-468e-aa08-7650e1472efe - +--------------------------------------+----------+---------------+-----------+---------+------------+-------------+ - | ID | Name | Workflow name | Task name | State | State info | Is accepted | - +--------------------------------------+----------+---------------+-----------+---------+------------+-------------+ - | 20c2b65d-b899-437f-8e1b-50fe477fbf4b | std.echo | my_workflow | task1 | SUCCESS | None | True | - | 6773c887-6eff-46e6-bed9-d6b67d77813b | std.echo | my_workflow | task1 
| SUCCESS | None | True | - | 753a9e39-d93e-4751-a3c1-569d1b4eac64 | std.echo | my_workflow | task1 | SUCCESS | None | True | - | 9872ddbc-61c5-4511-aa7e-dc4016607822 | std.echo | my_workflow | task1 | SUCCESS | None | True | - +--------------------------------------+----------+---------------+-----------+---------+------------+-------------+ + +--------------------------------------+----------+---------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ + | ID | Name | Workflow name | Task name | Task ID | State | Accepted | Created at | Updated at | + +--------------------------------------+----------+---------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ + | 4e0a60be-04df-42d7-aa59-5107e599d079 | std.echo | my_workflow | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | + | 5bd95da4-9b29-4a79-bcb1-298abd659bd6 | std.echo | my_workflow | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | + | 6ae6c19e-b51b-4910-9e0e-96c788093715 | std.echo | my_workflow | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | + | bed5a6a2-c1d8-460f-a2a5-b36f72f85e19 | std.echo | my_workflow | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:17 | + +--------------------------------------+----------+---------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ Check the result of the first **action_execution**:: - $ mistral action-execution-get-output 20c2b65d-b899-437f-8e1b-50fe477fbf4b + $ mistral action-execution-get-output 4e0a60be-04df-42d7-aa59-5107e599d079 { "result": "John" diff -Nru mistral-4.0.0/doc/source/terminology/workbooks.rst 
mistral-5.0.0~b2/doc/source/terminology/workbooks.rst --- mistral-4.0.0/doc/source/terminology/workbooks.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/terminology/workbooks.rst 2017-06-09 12:48:26.000000000 +0000 @@ -79,4 +79,5 @@ * **workflows** - Dictionary containing workflow definitions. *Optional*. * **actions** - Dictionary containing ad-hoc action definitions. *Optional*. -For more details about DSL itself, please see :doc:`Mistral DSL specification ` +For more details about Mistral Workflow Language itself, please see +:doc:`Mistral Workflow Language specification ` diff -Nru mistral-4.0.0/doc/source/terminology/workflows.rst mistral-5.0.0~b2/doc/source/terminology/workflows.rst --- mistral-4.0.0/doc/source/terminology/workflows.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/doc/source/terminology/workflows.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,11 +1,11 @@ Mistral Workflows ================= -Workflow is the main building block of Mistral DSL, the reason why the -project exists. Workflow represents a process that can be described in a -various number of ways and that can do some job interesting to the end -user. Each workflow consists of tasks (at least one) describing what -exact steps should be made during workflow execution. +Workflow is the main building block of Mistral Workflow Language, the reason +why the project exists. Workflow represents a process that can be described in +a various number of ways and that can do some job interesting to the end user. +Each workflow consists of tasks (at least one) describing what exact steps +should be made during workflow execution. YAML example ^^^^^^^^^^^^ @@ -38,9 +38,9 @@ Workflow types -------------- -Mistral DSL v2 introduces different workflow types and the structure of -each workflow type varies according to its semantics. 
Currently, Mistral -provides two workflow types: +Mistral Workflow Language v2 introduces different workflow types and the +structure of each workflow type varies according to its semantics. Currently, +Mistral provides two workflow types: - `Direct workflow <#direct-workflow>`__ - `Reverse workflow <#reverse-workflow>`__ @@ -136,4 +136,5 @@       action: send_email to='admin@mysite.org' body='Vm is created and id <% $.vm_id %> and ip address <% $.vm_ip %>'       requires: [create_vm, associate_ip] -For more details about DSL itself, please see :doc:`Mistral DSL specification ` +For more details about Mistral Workflow Language itself, please see +:doc:`Mistral Workflow Language specification ` diff -Nru mistral-4.0.0/HACKING.rst mistral-5.0.0~b2/HACKING.rst --- mistral-4.0.0/HACKING.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/HACKING.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,12 +1,18 @@ Style Commandments ================== -Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ +Read the OpenStack Style Commandments https://docs.openstack.org/developer/hacking/ Mistral Specific Commandments ----------------------------- +- [M001] Use LOG.warning(). LOG.warn() is deprecated. - [M318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like assertIsNone(A) +- [M319] Enforce use of assertTrue/assertFalse +- [M320] Enforce use of assertIs/assertIsNot - [M327] Do not use xrange(). xrange() is not compatible with Python 3. Use range() or six.moves.range() instead. +- [M328] Python 3: do not use dict.iteritems. +- [M329] Python 3: do not use dict.iterkeys. +- [M330] Python 3: do not use dict.itervalues. diff -Nru mistral-4.0.0/mistral/actions/base.py mistral-5.0.0~b2/mistral/actions/base.py --- mistral-4.0.0/mistral/actions/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/actions/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,12 @@ # limitations under the License. 
import abc +import warnings + +warnings.warn( + "mistral.actions.Action is deprecated as of the 5.0.0 release in favor of" + "mistral_lib. It will be removed in a future release.", DeprecationWarning +) class Action(object): diff -Nru mistral-4.0.0/mistral/actions/openstack/action_generator/base.py mistral-5.0.0~b2/mistral/actions/openstack/action_generator/base.py --- mistral-4.0.0/mistral/actions/openstack/action_generator/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/actions/openstack/action_generator/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -23,12 +23,8 @@ from mistral.utils import inspect_utils as i_u from mistral import version -os_actions_mapping_path = cfg.StrOpt('openstack_actions_mapping_path', - default='actions/openstack/mapping.json') - - CONF = cfg.CONF -CONF.register_opt(os_actions_mapping_path) + LOG = logging.getLogger(__name__) @@ -72,6 +68,56 @@ base_action_class = None @classmethod + def prepare_action_inputs(cls, origin_inputs, added=[]): + """Modify action input string. + + Sometimes we need to change the default action input definition for + OpenStack actions in order to make the workflow more powerful. + + Examples:: + + >>> prepare_action_inputs('a,b,c', added=['region=RegionOne']) + a, b, c, region=RegionOne + >>> prepare_action_inputs('a,b,c=1', added=['region=RegionOne']) + a, b, region=RegionOne, c=1 + >>> prepare_action_inputs('a,b,c=1,**kwargs', + added=['region=RegionOne']) + a, b, region=RegionOne, c=1, **kwargs + >>> prepare_action_inputs('**kwargs', added=['region=RegionOne']) + region=RegionOne, **kwargs + >>> prepare_action_inputs('', added=['region=RegionOne']) + region=RegionOne + + :param origin_inputs: A string consists of action inputs, separated by + comma. + :param added: (Optional) A list of params to add to input string. + :return: The new action input string. 
+ """ + if not origin_inputs: + return ", ".join(added) + + inputs = [i.strip() for i in origin_inputs.split(',')] + kwarg_index = None + + for index, input in enumerate(inputs): + if "=" in input: + kwarg_index = index + if "**" in input: + kwarg_index = index - 1 + + kwarg_index = len(inputs) if kwarg_index is None else kwarg_index + kwarg_index = kwarg_index + 1 if kwarg_index < 0 else kwarg_index + + for a in added: + if "=" not in a: + inputs.insert(0, a) + kwarg_index += 1 + else: + inputs.insert(kwarg_index, a) + + return ", ".join(inputs) + + @classmethod def create_action_class(cls, method_name): if not method_name: return None @@ -84,30 +130,35 @@ @classmethod def create_actions(cls): mapping = get_mapping() - method_dict = mapping[cls.action_namespace] + method_dict = mapping.get(cls.action_namespace, {}) action_classes = [] for action_name, method_name in method_dict.items(): - clazz = cls.create_action_class(method_name) + class_ = cls.create_action_class(method_name) try: - client_method = clazz.get_fake_client_method() - except Exception as e: - LOG.warning("Failed to create action: %s.%s %s" % - (cls.action_namespace, action_name, e)) - client_method = None - - if client_method: - arg_list = i_u.get_arg_list_as_str(client_method) - description = i_u.get_docstring(client_method) - else: - arg_list = '' - description = None + client_method = class_.get_fake_client_method() + except Exception: + LOG.exception("Failed to create action: %s.%s" % + (cls.action_namespace, action_name)) + continue + + arg_list = i_u.get_arg_list_as_str(client_method) + + # Support specifying region for OpenStack actions. 
+ modules = CONF.openstack_actions.modules_support_region + if cls.action_namespace in modules: + arg_list = cls.prepare_action_inputs( + arg_list, + added=['action_region=""'] + ) + + description = i_u.get_docstring(client_method) action_classes.append( { - 'class': clazz, + 'class': class_, 'name': "%s.%s" % (cls.action_namespace, action_name), 'description': description, 'arg_list': arg_list, diff -Nru mistral-4.0.0/mistral/actions/openstack/actions.py mistral-5.0.0~b2/mistral/actions/openstack/actions.py --- mistral-4.0.0/mistral/actions/openstack/actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/actions/openstack/actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,7 +22,6 @@ from keystoneclient import httpclient from mistral.actions.openstack import base -from mistral import context from mistral.utils import inspect_utils from mistral.utils.openstack import keystone as keystone_utils @@ -73,30 +72,32 @@ class NovaAction(base.OpenStackAction): - def _create_client(self): - ctx = context.ctx() + _service_name = 'nova' + _service_type = 'compute' - LOG.debug("Nova action security context: %s" % ctx) + def _create_client(self, context): + + LOG.debug("Nova action security context: %s" % context) keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() - nova_endpoint = keystone_utils.get_endpoint_for_project('nova') + nova_endpoint = self.get_service_endpoint() client = novaclient.Client( 2, username=None, api_key=None, - endpoint_type=CONF.os_actions_endpoint_type, + endpoint_type=CONF.openstack_actions.os_actions_endpoint_type, service_type='compute', - auth_token=ctx.auth_token, - tenant_id=ctx.project_id, - region_name=keystone_endpoint.region, + auth_token=context.auth_token, + tenant_id=context.project_id, + region_name=nova_endpoint.region, auth_url=keystone_endpoint.url, - insecure=ctx.insecure + insecure=context.insecure ) client.client.management_url = keystone_utils.format_url( nova_endpoint.url, - {'tenant_id': 
ctx.project_id} + {'tenant_id': context.project_id} ) return client @@ -107,23 +108,23 @@ class GlanceAction(base.OpenStackAction): + _service_name = 'glance' @classmethod def _get_client_class(cls): return glanceclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Glance action security context: %s" % ctx) + LOG.debug("Glance action security context: %s" % context) - glance_endpoint = keystone_utils.get_endpoint_for_project('glance') + glance_endpoint = self.get_service_endpoint() return self._get_client_class()( glance_endpoint.url, region_name=glance_endpoint.region, - token=ctx.auth_token, - insecure=ctx.insecure + token=context.auth_token, + insecure=context.insecure ) @classmethod @@ -137,30 +138,29 @@ def _get_client_class(cls): return keystoneclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Keystone action security context: %s" % ctx) + LOG.debug("Keystone action security context: %s" % context) # TODO(akovi) cacert is deprecated in favor of session # TODO(akovi) this piece of code should be refactored # TODO(akovi) to follow the new guide lines kwargs = { - 'token': ctx.auth_token, - 'auth_url': ctx.auth_uri, - 'project_id': ctx.project_id, - 'cacert': ctx.auth_cacert, - 'insecure': ctx.insecure + 'token': context.auth_token, + 'auth_url': context.auth_uri, + 'project_id': context.project_id, + 'cacert': context.auth_cacert, + 'insecure': context.insecure } # In case of trust-scoped token explicitly pass endpoint parameter. 
- if (ctx.is_trust_scoped - or keystone_utils.is_token_trust_scoped(ctx.auth_token)): - kwargs['endpoint'] = ctx.auth_uri + if (context.is_trust_scoped + or keystone_utils.is_token_trust_scoped(context.auth_token)): + kwargs['endpoint'] = context.auth_uri client = self._get_client_class()(**kwargs) - client.management_url = ctx.auth_uri + client.management_url = context.auth_uri return client @@ -179,31 +179,29 @@ class CeilometerAction(base.OpenStackAction): + _service_name = 'ceilometer' @classmethod def _get_client_class(cls): return ceilometerclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Ceilometer action security context: %s" % ctx) + LOG.debug("Ceilometer action security context: %s" % context) - ceilometer_endpoint = keystone_utils.get_endpoint_for_project( - 'ceilometer' - ) + ceilometer_endpoint = self.get_service_endpoint() endpoint_url = keystone_utils.format_url( ceilometer_endpoint.url, - {'tenant_id': ctx.project_id} + {'tenant_id': context.project_id} ) return self._get_client_class()( endpoint_url, region_name=ceilometer_endpoint.region, - token=ctx.auth_token, - username=ctx.user_name, - insecure=ctx.insecure + token=context.auth_token, + username=context.user_name, + insecure=context.insecure ) @classmethod @@ -212,32 +210,32 @@ class HeatAction(base.OpenStackAction): + _service_name = 'heat' @classmethod def _get_client_class(cls): return heatclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Heat action security context: %s" % ctx) + LOG.debug("Heat action security context: %s" % context) - heat_endpoint = keystone_utils.get_endpoint_for_project('heat') + heat_endpoint = self.get_service_endpoint() endpoint_url = keystone_utils.format_url( heat_endpoint.url, { - 'tenant_id': ctx.project_id, - 'project_id': ctx.project_id + 'tenant_id': context.project_id, + 'project_id': context.project_id } ) return 
self._get_client_class()( endpoint_url, region_name=heat_endpoint.region, - token=ctx.auth_token, - username=ctx.user_name, - insecure=ctx.insecure + token=context.auth_token, + username=context.user_name, + insecure=context.insecure ) @classmethod @@ -246,60 +244,58 @@ class NeutronAction(base.OpenStackAction): + _service_name = 'neutron' @classmethod def _get_client_class(cls): return neutronclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Neutron action security context: %s" % ctx) + LOG.debug("Neutron action security context: %s" % context) - neutron_endpoint = keystone_utils.get_endpoint_for_project('neutron') + neutron_endpoint = self.get_service_endpoint() return self._get_client_class()( endpoint_url=neutron_endpoint.url, region_name=neutron_endpoint.region, - token=ctx.auth_token, - auth_url=ctx.auth_uri, - insecure=ctx.insecure + token=context.auth_token, + auth_url=context.auth_uri, + insecure=context.insecure ) class CinderAction(base.OpenStackAction): + _service_type = 'volumev2' @classmethod def _get_client_class(cls): return cinderclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Cinder action security context: %s" % ctx) + LOG.debug("Cinder action security context: %s" % context) - cinder_endpoint = keystone_utils.get_endpoint_for_project( - service_type='volumev2' - ) + cinder_endpoint = self.get_service_endpoint() cinder_url = keystone_utils.format_url( cinder_endpoint.url, { - 'tenant_id': ctx.project_id, - 'project_id': ctx.project_id + 'tenant_id': context.project_id, + 'project_id': context.project_id } ) client = self._get_client_class()( - ctx.user_name, - ctx.auth_token, - project_id=ctx.project_id, + context.user_name, + context.auth_token, + project_id=context.project_id, auth_url=cinder_url, region_name=cinder_endpoint.region, - insecure=ctx.insecure + insecure=context.insecure ) - client.client.auth_token = 
ctx.auth_token + client.client.auth_token = context.auth_token client.client.management_url = cinder_url return client @@ -315,14 +311,13 @@ def _get_client_class(cls): return mistralclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Mistral action security context: %s" % ctx) + LOG.debug("Mistral action security context: %s" % context) # Check for trust scope token. This may occur if the action is # called from a workflow triggered by a Mistral cron trigger. - if ctx.is_trust_scoped: + if context.is_trust_scoped: auth_url = None mistral_endpoint = keystone_utils.get_endpoint_for_project( 'mistral' @@ -335,11 +330,11 @@ return self._get_client_class()( mistral_url=mistral_url, - auth_token=ctx.auth_token, - project_id=ctx.project_id, - user_id=ctx.user_id, + auth_token=context.auth_token, + project_id=context.project_id, + user_id=context.user_id, auth_url=auth_url, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -348,35 +343,33 @@ class TroveAction(base.OpenStackAction): + _service_type = 'database' @classmethod def _get_client_class(cls): return troveclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Trove action security context: %s" % ctx) + LOG.debug("Trove action security context: %s" % context) - trove_endpoint = keystone_utils.get_endpoint_for_project( - service_type='database' - ) + trove_endpoint = self.get_service_endpoint() trove_url = keystone_utils.format_url( trove_endpoint.url, - {'tenant_id': ctx.project_id} + {'tenant_id': context.project_id} ) client = self._get_client_class()( - ctx.user_name, - ctx.auth_token, - project_id=ctx.project_id, + context.user_name, + context.auth_token, + project_id=context.project_id, auth_url=trove_url, region_name=trove_endpoint.region, - insecure=ctx.insecure + insecure=context.insecure ) - client.client.auth_token = ctx.auth_token + client.client.auth_token = 
context.auth_token client.client.management_url = trove_url return client @@ -387,24 +380,24 @@ class IronicAction(base.OpenStackAction): + _service_name = 'ironic' @classmethod def _get_client_class(cls): return ironicclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Ironic action security context: %s" % ctx) + LOG.debug("Ironic action security context: %s" % context) - ironic_endpoint = keystone_utils.get_endpoint_for_project('ironic') + ironic_endpoint = self.get_service_endpoint() return self._get_client_class()( ironic_endpoint.url, - token=ctx.auth_token, + token=context.auth_token, region_name=ironic_endpoint.region, os_ironic_api_version=IRONIC_API_VERSION, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -440,10 +433,10 @@ return cls._get_client_class()() - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Baremetal introspection action security context: %s" % ctx) + LOG.debug( + "Baremetal introspection action security context: %s" % context) inspector_endpoint = keystone_utils.get_endpoint_for_project( service_type='baremetal-introspection' @@ -452,7 +445,7 @@ return self._get_client_class()( api_version=1, inspector_url=inspector_endpoint.url, - auth_token=ctx.auth_token, + auth_token=context.auth_token, ) @@ -462,17 +455,18 @@ def _get_client_class(cls): return swift_client.Connection - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Swift action security context: %s" % ctx) + LOG.debug("Swift action security context: %s" % context) swift_endpoint = keystone_utils.get_endpoint_for_project('swift') kwargs = { - 'preauthurl': swift_endpoint.url % {'tenant_id': ctx.project_id}, - 'preauthtoken': ctx.auth_token, - 'insecure': ctx.insecure + 'preauthurl': swift_endpoint.url % { + 'tenant_id': context.project_id + }, + 'preauthtoken': context.auth_token, + 'insecure': 
context.insecure } return self._get_client_class()(**kwargs) @@ -484,20 +478,19 @@ def _get_client_class(cls): return zaqarclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Zaqar action security context: %s" % ctx) + LOG.debug("Zaqar action security context: %s" % context) zaqar_endpoint = keystone_utils.get_endpoint_for_project( service_type='messaging') keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() opts = { - 'os_auth_token': ctx.auth_token, + 'os_auth_token': context.auth_token, 'os_auth_url': keystone_endpoint.url, - 'os_project_id': ctx.project_id, - 'insecure': ctx.insecure, + 'os_project_id': context.project_id, + 'insecure': context.insecure, } auth_opts = {'backend': 'keystone', 'options': opts} conf = {'auth_opts': auth_opts} @@ -581,26 +574,25 @@ def _get_client_class(cls): return barbicanclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Barbican action security context: %s" % ctx) + LOG.debug("Barbican action security context: %s" % context) barbican_endpoint = keystone_utils.get_endpoint_for_project('barbican') keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() auth = identity.v2.Token( auth_url=keystone_endpoint.url, - tenant_name=ctx.user_name, - token=ctx.auth_token, - tenant_id=ctx.project_id + tenant_name=context.user_name, + token=context.auth_token, + tenant_id=context.project_id ) return self._get_client_class()( - project_id=ctx.project_id, + project_id=context.project_id, endpoint=barbican_endpoint.url, auth=auth, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -679,35 +671,33 @@ class DesignateAction(base.OpenStackAction): + _service_type = 'dns' @classmethod def _get_client_class(cls): return designateclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Designate action security context: %s" % ctx) + 
LOG.debug("Designate action security context: %s" % context) - designate_endpoint = keystone_utils.get_endpoint_for_project( - service_type='dns' - ) + designate_endpoint = self.get_service_endpoint() designate_url = keystone_utils.format_url( designate_endpoint.url, - {'tenant_id': ctx.project_id} + {'tenant_id': context.project_id} ) client = self._get_client_class()( endpoint=designate_url, - tenant_id=ctx.project_id, - auth_url=ctx.auth_uri, + tenant_id=context.project_id, + auth_url=context.auth_uri, region_name=designate_endpoint.region, service_type='dns', - insecure=ctx.insecure + insecure=context.insecure ) - client.client.auth_token = ctx.auth_token + client.client.auth_token = context.auth_token client.client.management_url = designate_url return client @@ -723,10 +713,9 @@ def _get_client_class(cls): return magnumclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Magnum action security context: %s" % ctx) + LOG.debug("Magnum action security context: %s" % context) keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() auth_url = keystone_endpoint.url @@ -734,11 +723,11 @@ return self._get_client_class()( magnum_url=magnum_url, - auth_token=ctx.auth_token, - project_id=ctx.project_id, - user_id=ctx.user_id, + auth_token=context.auth_token, + project_id=context.project_id, + user_id=context.user_id, auth_url=auth_url, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -747,26 +736,26 @@ class MuranoAction(base.OpenStackAction): + _service_name = 'murano' @classmethod def _get_client_class(cls): return muranoclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Murano action security context: %s" % ctx) + LOG.debug("Murano action security context: %s" % context) keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() - murano_endpoint = keystone_utils.get_endpoint_for_project('murano') + murano_endpoint 
= self.get_service_endpoint() return self._get_client_class()( endpoint=murano_endpoint.url, - token=ctx.auth_token, - tenant=ctx.project_id, + token=context.auth_token, + tenant=context.project_id, region_name=murano_endpoint.region, auth_url=keystone_endpoint.url, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -775,26 +764,26 @@ class TackerAction(base.OpenStackAction): + _service_name = 'tacker' @classmethod def _get_client_class(cls): return tackerclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Tacker action security context: %s" % ctx) + LOG.debug("Tacker action security context: %s" % context) keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() - tacker_endpoint = keystone_utils.get_endpoint_for_project('tacker') + tacker_endpoint = self.get_service_endpoint() return self._get_client_class()( endpoint_url=tacker_endpoint.url, - token=ctx.auth_token, - tenant_id=ctx.project_id, + token=context.auth_token, + tenant_id=context.project_id, region_name=tacker_endpoint.region, auth_url=keystone_endpoint.url, - insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -803,26 +792,26 @@ class SenlinAction(base.OpenStackAction): + _service_name = 'senlin' @classmethod def _get_client_class(cls): return senlinclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Senlin action security context: %s" % ctx) + LOG.debug("Senlin action security context: %s" % context) keystone_endpoint = keystone_utils.get_keystone_endpoint_v2() - senlin_endpoint = keystone_utils.get_endpoint_for_project('senlin') + senlin_endpoint = self.get_service_endpoint() return self._get_client_class()( endpoint_url=senlin_endpoint.url, - token=ctx.auth_token, - tenant_id=ctx.project_id, + token=context.auth_token, + tenant_id=context.project_id, region_name=senlin_endpoint.region, auth_url=keystone_endpoint.url, - 
insecure=ctx.insecure + insecure=context.insecure ) @classmethod @@ -831,31 +820,29 @@ class AodhAction(base.OpenStackAction): + _service_name = 'aodh' @classmethod def _get_client_class(cls): return aodhclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Aodh action security context: %s" % ctx) + LOG.debug("Aodh action security context: %s" % context) - aodh_endpoint = keystone_utils.get_endpoint_for_project( - 'aodh' - ) + aodh_endpoint = self.get_service_endpoint() endpoint_url = keystone_utils.format_url( aodh_endpoint.url, - {'tenant_id': ctx.project_id} + {'tenant_id': context.project_id} ) return self._get_client_class()( endpoint_url, region_name=aodh_endpoint.region, - token=ctx.auth_token, - username=ctx.user_name, - insecure=ctx.insecure + token=context.auth_token, + username=context.user_name, + insecure=context.insecure ) @classmethod @@ -864,30 +851,28 @@ class GnocchiAction(base.OpenStackAction): + _service_name = 'gnocchi' @classmethod def _get_client_class(cls): return gnocchiclient.Client - def _create_client(self): - ctx = context.ctx() + def _create_client(self, context): - LOG.debug("Gnocchi action security context: %s" % ctx) + LOG.debug("Gnocchi action security context: %s" % context) - gnocchi_endpoint = keystone_utils.get_endpoint_for_project( - 'gnocchi' - ) + gnocchi_endpoint = self.get_service_endpoint() endpoint_url = keystone_utils.format_url( gnocchi_endpoint.url, - {'tenant_id': ctx.project_id} + {'tenant_id': context.project_id} ) return self._get_client_class()( endpoint_url, region_name=gnocchi_endpoint.region, - token=ctx.auth_token, - username=ctx.user_name + token=context.auth_token, + username=context.user_name ) @classmethod diff -Nru mistral-4.0.0/mistral/actions/openstack/base.py mistral-5.0.0~b2/mistral/actions/openstack/base.py --- mistral-4.0.0/mistral/actions/openstack/base.py 2017-02-22 13:40:59.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/actions/openstack/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -20,17 +20,16 @@ from oslo_log import log -from mistral.actions import base -from mistral import context from mistral import exceptions as exc from mistral.utils.openstack import keystone as keystone_utils +from mistral_lib import actions from threading import Lock LOG = log.getLogger(__name__) -class OpenStackAction(base.Action): +class OpenStackAction(actions.Action): """OpenStack Action. OpenStack Action is the basis of all OpenStack-specific actions, @@ -40,9 +39,12 @@ client_method_name = None _clients = LRUCache(100) _lock = Lock() + _service_name = None + _service_type = None def __init__(self, **kwargs): self._kwargs_for_run = kwargs + self.action_region = self._kwargs_for_run.pop('action_region', None) @abc.abstractmethod def _create_client(self): @@ -77,7 +79,7 @@ def get_fake_client_method(cls): return cls._get_client_method(cls._get_fake_client()) - def _get_client(self): + def _get_client(self, context): """Returns python-client instance via cache or creation Gets client instance according to specific OpenStack Service @@ -89,16 +91,15 @@ # regressions in Mistral. It is disabled for now and # will be revisited in Ocata. See: # https://bugs.launchpad.net/mistral/+bug/1627689 - return self._create_client() + return self._create_client(context) - ctx = context.ctx() client_class = self.__class__.__name__ # Colon character is reserved (rfc3986) which avoids key collisions. - key = client_class + ':' + ctx.project_id + key = client_class + ':' + context.project_id def create_cached_client(): - new_client = self._create_client() - new_client._mistral_ctx_expires_at = ctx.expires_at + new_client = self._create_client(context) + new_client._mistral_ctx_expires_at = context.expires_at with self._lock: self._clients[key] = new_client @@ -120,9 +121,23 @@ return client - def run(self): + def get_service_endpoint(self): + """Get OpenStack service endpoint. 
+ + 'service_name' and 'service_type' are defined in specific OpenStack + service action. + """ + endpoint = keystone_utils.get_endpoint_for_project( + service_name=self._service_name, + service_type=self._service_type, + region_name=self.action_region + ) + + return endpoint + + def run(self, context): try: - method = self._get_client_method(self._get_client()) + method = self._get_client_method(self._get_client(context)) result = method(**self._kwargs_for_run) @@ -142,7 +157,7 @@ (self.__class__.__name__, self.client_method_name, e_str) ) - def test(self): + def test(self, context): return dict( zip(self._kwargs_for_run, ['test'] * len(self._kwargs_for_run)) ) diff -Nru mistral-4.0.0/mistral/actions/openstack/mapping.json mistral-5.0.0~b2/mistral/actions/openstack/mapping.json --- mistral-4.0.0/mistral/actions/openstack/mapping.json 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/actions/openstack/mapping.json 2017-06-09 12:48:26.000000000 +0000 @@ -411,14 +411,6 @@ "roles_list": "roles.list", "roles_revoke": "roles.revoke", "roles_update": "roles.update", - "service_catalog_catalog": "service_catalog.catalog", - "service_catalog_factory": "service_catalog.factory", - "service_catalog_get_data": "service_catalog.get_data", - "service_catalog_get_endpoints": "service_catalog.get_endpoints", - "service_catalog_get_token": "service_catalog.get_token", - "service_catalog_get_urls": "service_catalog.get_urls", - "service_catalog_is_valid": "service_catalog.is_valid", - "service_catalog_url_for": "service_catalog.url_for", "services_create": "services.create", "services_delete": "services.delete", "services_find": "services.find", @@ -1097,9 +1089,12 @@ "put_container": "put_container", "post_container": "post_container", "delete_container": "delete_container", + "head_object": "head_object", "get_object": "get_object", "put_object": "put_object", "post_object": "post_object", + "delete_object": "delete_object", + "copy_object": "copy_object", 
"get_capabilities": "get_capabilities" }, "zaqar": { diff -Nru mistral-4.0.0/mistral/actions/std_actions.py mistral-5.0.0~b2/mistral/actions/std_actions.py --- mistral-4.0.0/mistral/actions/std_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/actions/std_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,17 +22,17 @@ import smtplib import time -from mistral.actions import base from mistral import exceptions as exc from mistral.utils import javascript from mistral.utils import ssh_utils from mistral.workflow import utils as wf_utils +from mistral_lib import actions from oslo_log import log as logging LOG = logging.getLogger(__name__) -class EchoAction(base.Action): +class EchoAction(actions.Action): """Echo action. This action just returns a configured value as a result without doing @@ -45,16 +45,16 @@ def __init__(self, output): self.output = output - def run(self): + def run(self, context): LOG.info('Running echo action [output=%s]' % self.output) return self.output - def test(self): + def test(self, context): return 'Echo' -class NoOpAction(base.Action): +class NoOpAction(actions.Action): """No-operation action. This action does nothing. It can be mostly useful for testing and @@ -63,12 +63,12 @@ def __init__(self): pass - def run(self): + def run(self, context): LOG.info('Running no-op action') return None - def test(self): + def test(self, context): return None @@ -78,7 +78,7 @@ return False -class FailAction(base.Action): +class FailAction(actions.Action): """'Always fail' action. This action just always throws an instance of ActionException. @@ -89,16 +89,16 @@ def __init__(self): pass - def run(self): + def run(self, context): LOG.info('Running fail action.') raise exc.ActionException('Fail action expected exception.') - def test(self): + def test(self, context): raise exc.ActionException('Fail action expected exception.') -class HTTPAction(base.Action): +class HTTPAction(actions.Action): """Constructs an HTTP action. 
:param url: URL for the new HTTP request. @@ -158,7 +158,7 @@ self.proxies = proxies self.verify = verify - def run(self): + def run(self, context): LOG.info("Running HTTP action " "[url=%s, method=%s, params=%s, body=%s, headers=%s," " cookies=%s, auth=%s, timeout=%s, allow_redirects=%s," @@ -196,12 +196,19 @@ "HTTP action response:\n%s\n%s" % (resp.status_code, resp.content) ) + # TODO(akuznetsova): Need to refactor Mistral serialiser and + # deserializer to have an ability to pass needed encoding and work + # with it. Now it can process only default 'utf-8' encoding. + # Appropriate bug #1676411 was created. + # Represent important resp data as a dictionary. try: - content = resp.json() + content = resp.json(encoding=resp.encoding) except Exception as e: LOG.debug("HTTP action response is not json.") content = resp.content + if resp.encoding != 'utf-8': + content = content.decode(resp.encoding).encode('utf-8') _result = { 'content': content, @@ -220,12 +227,13 @@ return _result - def test(self): + def test(self, context): # TODO(rakhmerov): Implement. return None class MistralHTTPAction(HTTPAction): + def __init__(self, action_context, url, @@ -268,11 +276,11 @@ def is_sync(self): return False - def test(self): + def test(self, context): return None -class SendEmailAction(base.Action): +class SendEmailAction(actions.Action): def __init__(self, from_addr, to_addrs, smtp_server, smtp_password=None, subject=None, body=None): # TODO(dzimine): validate parameters @@ -287,7 +295,7 @@ self.sender = from_addr self.password = smtp_password - def run(self): + def run(self, context): LOG.info("Sending email message " "[from=%s, to=%s, subject=%s, using smtp=%s, body=%s...]" % (self.sender, self.to, self.subject, @@ -315,7 +323,7 @@ raise exc.ActionException("Failed to send an email message: %s" % e) - def test(self): + def test(self, context): # Just logging the operation since this action is not supposed # to return a result. 
LOG.info("Sending email message " @@ -324,7 +332,7 @@ self.smtp_server, self.body[:128])) -class SSHAction(base.Action): +class SSHAction(actions.Action): """Runs Secure Shell (SSH) command on provided single or multiple hosts. It is allowed to provide either a single host or a list of hosts in @@ -352,7 +360,7 @@ 'private_key_filename': self.private_key_filename } - def run(self): + def run(self, context): def raise_exc(parent_exc=None): message = ("Failed to execute ssh cmd " "'%s' on %s" % (self.cmd, self.host)) @@ -383,7 +391,7 @@ except Exception as e: return raise_exc(parent_exc=e) - def test(self): + def test(self, context): # TODO(rakhmerov): Implement. return None @@ -416,30 +424,36 @@ ) -class JavaScriptAction(base.Action): +class JavaScriptAction(actions.Action): """Evaluates given JavaScript. """ + def __init__(self, script, context=None): + """Context here refers to a javasctript context + + Not the usual mistral context. That is passed during the run method + """ + self.script = script - self.context = context + self.js_context = context - def run(self): + def run(self, context): try: script = """function f() { %s } f() """ % self.script - return javascript.evaluate(script, self.context) + return javascript.evaluate(script, self.js_context) except Exception as e: raise exc.ActionException("JavaScriptAction failed: %s" % str(e)) - def test(self): + def test(self, context): return self.script -class SleepAction(base.Action): +class SleepAction(actions.Action): """Sleep action. This action sleeps for given amount of seconds. 
It can be mostly useful @@ -452,20 +466,20 @@ except ValueError: self._seconds = 0 - def run(self): + def run(self, context): LOG.info('Running sleep action [seconds=%s]' % self._seconds) time.sleep(self._seconds) return None - def test(self): + def test(self, context): time.sleep(1) return None -class TestDictAction(base.Action): +class TestDictAction(actions.Action): """Generates test dict.""" def __init__(self, size=0, key_prefix='', val=''): @@ -473,7 +487,7 @@ self.key_prefix = key_prefix self.val = val - def run(self): + def run(self, context): LOG.info( 'Running test_dict action [size=%s, key_prefix=%s, val=%s]' % (self.size, self.key_prefix, self.val) @@ -486,5 +500,5 @@ return res - def test(self): + def test(self, context): return {} diff -Nru mistral-4.0.0/mistral/api/app.py mistral-5.0.0~b2/mistral/api/app.py --- mistral-4.0.0/mistral/api/app.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/app.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,7 +22,7 @@ from mistral import config as m_config from mistral import context as ctx from mistral.db.v2 import api as db_api_v2 -from mistral.engine.rpc_backend import rpc +from mistral.rpc import base as rpc from mistral.service import coordination from mistral.services import periodic @@ -83,3 +83,12 @@ # Create a CORS wrapper, and attach mistral-specific defaults that must be # included in all CORS responses. return cors_middleware.CORS(app, cfg.CONF) + + +def init_wsgi(): + # By default, oslo.config parses the CLI args if no args is provided. + # As a result, invoking this wsgi script from gunicorn leads to the error + # with argparse complaining that the CLI options have already been parsed. 
+ m_config.parse_args(args=[]) + + return setup_app() diff -Nru mistral-4.0.0/mistral/api/controllers/resource.py mistral-5.0.0~b2/mistral/api/controllers/resource.py --- mistral-4.0.0/mistral/api/controllers/resource.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/resource.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,6 +16,8 @@ from wsme import types as wtypes +from mistral import utils + class Resource(wtypes.Base): """REST API Resource.""" @@ -27,21 +29,31 @@ for attr in self._wsme_attributes: attr_val = getattr(self, attr.name) + if not isinstance(attr_val, wtypes.UnsetType): d[attr.name] = attr_val return d @classmethod - def from_dict(cls, d): + def from_tuples(cls, tuple_iterator): obj = cls() - for key, val in d.items(): - if hasattr(obj, key): - setattr(obj, key, val) + for col_name, col_val in tuple_iterator: + if hasattr(obj, col_name): + # Convert all datetime values to strings. + setattr(obj, col_name, utils.datetime_to_str(col_val)) return obj + @classmethod + def from_dict(cls, d): + return cls.from_tuples(d.items()) + + @classmethod + def from_db_model(cls, db_model): + return cls.from_tuples(db_model.iter_columns()) + def __str__(self): """WSME based implementation of __str__.""" @@ -81,18 +93,18 @@ @classmethod def convert_with_links(cls, resources, limit, url=None, fields=None, **kwargs): - resource_collection = cls() + resource_list = cls() - setattr(resource_collection, resource_collection._type, resources) + setattr(resource_list, resource_list._type, resources) - resource_collection.next = resource_collection.get_next( + resource_list.next = resource_list.get_next( limit, url=url, fields=fields, **kwargs ) - return resource_collection + return resource_list def has_next(self, limit): """Return whether resources has more items.""" diff -Nru mistral-4.0.0/mistral/api/controllers/v2/action_execution.py mistral-5.0.0~b2/mistral/api/controllers/v2/action_execution.py --- 
mistral-4.0.0/mistral/api/controllers/v2/action_execution.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/action_execution.py 2017-06-09 12:48:26.000000000 +0000 @@ -24,8 +24,8 @@ from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import clients as rpc from mistral.utils import filter_utils from mistral.utils import rest_utils from mistral.workflow import states @@ -43,17 +43,20 @@ def _get_action_execution(id): with db_api.transaction(): - action_ex = db_api.get_action_execution(id) - - return _get_action_execution_resource(action_ex) + return _get_action_execution_resource(db_api.get_action_execution(id)) def _get_action_execution_resource(action_ex): _load_deferred_output_field(action_ex) + return _get_action_execution_resource_for_list(action_ex) + + +def _get_action_execution_resource_for_list(action_ex): + # TODO(nmakhotkin): Get rid of using dicts for constructing resources. # TODO(nmakhotkin): Use db_model for this instead. - res = resources.ActionExecution.from_dict(action_ex.to_dict()) + res = resources.ActionExecution.from_db_model(action_ex) task_name = (action_ex.task_execution.name if action_ex.task_execution else None) @@ -64,7 +67,7 @@ def _get_action_executions(task_execution_id=None, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', - fields='', **filters): + fields='', include_output=False, **filters): """Return all action executions. 
Where project_id is the same as the requester or @@ -89,12 +92,17 @@ if task_execution_id: filters['task_execution_id'] = task_execution_id + if include_output: + resource_function = _get_action_execution_resource + else: + resource_function = _get_action_execution_resource_for_list + return rest_utils.get_all( resources.ActionExecutions, resources.ActionExecution, db_api.get_action_executions, db_api.get_action_execution, - resource_function=_get_action_execution_resource, + resource_function=resource_function, marker=marker, limit=limit, sort_keys=sort_keys, @@ -134,14 +142,14 @@ "Please provide at least action name to run action." ) - action_ex = rpc.get_engine_client().start_action( + values = rpc.get_engine_client().start_action( name, action_input, description=description, **params ) - return resources.ActionExecution.from_dict(action_ex) + return resources.ActionExecution.from_dict(values) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( @@ -186,13 +194,13 @@ wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, types.uuid, wtypes.text, wtypes.text, bool, types.jsontype, - types.jsontype, types.jsontype, wtypes.text) + types.jsontype, types.jsontype, wtypes.text, bool) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', created_at=None, name=None, tags=None, updated_at=None, workflow_name=None, task_name=None, task_execution_id=None, state=None, state_info=None, accepted=None, input=None, output=None, - params=None, description=None): + params=None, description=None, include_output=False): """Return all tasks within the execution. Where project_id is the same as the requester or @@ -234,6 +242,8 @@ time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. + :param include_output: Optional. 
Include the output for all executions + in the list """ acl.enforce('action_executions:list', context.ctx()) @@ -264,6 +274,7 @@ sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, + include_output=include_output, **filters ) @@ -302,13 +313,14 @@ wtypes.text, types.uniquelist, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, bool, types.jsontype, types.jsontype, - types.jsontype, wtypes.text) + types.jsontype, wtypes.text, bool) def get_all(self, task_execution_id, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', created_at=None, name=None, tags=None, updated_at=None, workflow_name=None, task_name=None, state=None, state_info=None, accepted=None, input=None, - output=None, params=None, description=None): + output=None, params=None, description=None, + include_output=None): """Return all tasks within the execution. Where project_id is the same as the requester or @@ -350,6 +362,8 @@ time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. + :param include_output: Optional. 
Include the output for all executions + in the list """ acl.enforce('action_executions:list', context.ctx()) @@ -380,6 +394,7 @@ sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, + include_output=include_output, **filters ) diff -Nru mistral-4.0.0/mistral/api/controllers/v2/action.py mistral-5.0.0~b2/mistral/api/controllers/v2/action.py --- mistral-4.0.0/mistral/api/controllers/v2/action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/action.py 2017-06-09 12:48:26.000000000 +0000 @@ -28,10 +28,10 @@ from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import actions from mistral.utils import filter_utils from mistral.utils import rest_utils -from mistral.workbook import parser as spec_parser LOG = logging.getLogger(__name__) @@ -54,11 +54,12 @@ """ acl.enforce('actions:get', context.ctx()) + LOG.info("Fetch action [identifier=%s]", identifier) db_model = db_api.get_action_definition(identifier) - return resources.Action.from_dict(db_model.to_dict()) + return resources.Action.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") @@ -69,8 +70,11 @@ of multiple actions. In this case they all will be updated. """ acl.enforce('actions:update', context.ctx()) + definition = pecan.request.text + LOG.info("Update action(s) [definition=%s]", definition) + scope = pecan.request.GET.get('scope', 'private') if scope not in resources.SCOPE_TYPES.values: @@ -86,8 +90,9 @@ identifier=identifier ) - models_dicts = [db_act.to_dict() for db_act in db_acts] - action_list = [resources.Action.from_dict(act) for act in models_dicts] + action_list = [ + resources.Action.from_db_model(db_act) for db_act in db_acts + ] return resources.Actions(actions=action_list).to_json() @@ -100,6 +105,7 @@ of multiple actions. In this case they all will be created. 
""" acl.enforce('actions:create', context.ctx()) + definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') pecan.response.status = 201 @@ -115,8 +121,9 @@ with db_api.transaction(): db_acts = actions.create_actions(definition, scope=scope) - models_dicts = [db_act.to_dict() for db_act in db_acts] - action_list = [resources.Action.from_dict(act) for act in models_dicts] + action_list = [ + resources.Action.from_db_model(db_act) for db_act in db_acts + ] return resources.Actions(actions=action_list).to_json() @@ -125,6 +132,7 @@ def delete(self, identifier): """Delete the named action.""" acl.enforce('actions:delete', context.ctx()) + LOG.info("Delete action [identifier=%s]", identifier) with db_api.transaction(): diff -Nru mistral-4.0.0/mistral/api/controllers/v2/cron_trigger.py mistral-5.0.0~b2/mistral/api/controllers/v2/cron_trigger.py --- mistral-4.0.0/mistral/api/controllers/v2/cron_trigger.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/cron_trigger.py 2017-06-09 12:48:26.000000000 +0000 @@ -40,7 +40,7 @@ db_model = db_api.get_cron_trigger(name) - return resources.CronTrigger.from_dict(db_model.to_dict()) + return resources.CronTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( @@ -71,7 +71,7 @@ workflow_id=values.get('workflow_id') ) - return resources.CronTrigger.from_dict(db_model.to_dict()) + return resources.CronTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) @@ -81,7 +81,7 @@ LOG.info("Delete cron trigger [name=%s]" % name) - db_api.delete_cron_trigger(name) + triggers.delete_cron_trigger(name) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.CronTriggers, types.uuid, int, diff -Nru mistral-4.0.0/mistral/api/controllers/v2/environment.py mistral-5.0.0~b2/mistral/api/controllers/v2/environment.py --- 
mistral-4.0.0/mistral/api/controllers/v2/environment.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/environment.py 2017-06-09 12:48:26.000000000 +0000 @@ -109,7 +109,7 @@ db_model = db_api.get_environment(name) - return resources.Environment.from_dict(db_model.to_dict()) + return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( @@ -130,7 +130,7 @@ db_model = db_api.create_environment(env.to_dict()) - return resources.Environment.from_dict(db_model.to_dict()) + return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Environment, body=resources.Environment) @@ -155,7 +155,7 @@ db_model = db_api.update_environment(env.name, env.to_dict()) - return resources.Environment.from_dict(db_model.to_dict()) + return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) diff -Nru mistral-4.0.0/mistral/api/controllers/v2/event_trigger.py mistral-5.0.0~b2/mistral/api/controllers/v2/event_trigger.py --- mistral-4.0.0/mistral/api/controllers/v2/event_trigger.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/event_trigger.py 2017-06-09 12:48:26.000000000 +0000 @@ -43,7 +43,7 @@ db_model = db_api.get_event_trigger(id) - return resources.EventTrigger.from_dict(db_model.to_dict()) + return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTrigger, body=resources.EventTrigger, @@ -73,7 +73,7 @@ workflow_params=values.get('workflow_params'), ) - return resources.EventTrigger.from_dict(db_model.to_dict()) + return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTrigger, types.uuid, @@ -103,7 +103,7 @@ db_model = 
triggers.update_event_trigger(id, values) - return resources.EventTrigger.from_dict(db_model.to_dict()) + return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, types.uuid, status_code=204) diff -Nru mistral-4.0.0/mistral/api/controllers/v2/execution.py mistral-5.0.0~b2/mistral/api/controllers/v2/execution.py --- mistral-4.0.0/mistral/api/controllers/v2/execution.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/execution.py 2017-06-09 12:48:26.000000000 +0000 @@ -26,8 +26,8 @@ from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import clients as rpc from mistral.services import workflows as wf_service from mistral.utils import filter_utils from mistral.utils import rest_utils @@ -47,12 +47,12 @@ ) -def _get_execution_resource(ex): +def _get_execution_resource(wf_ex): # We need to refer to this lazy-load field explicitly in # order to make sure that it is correctly loaded. - hasattr(ex, 'output') + hasattr(wf_ex, 'output') - return resources.Execution.from_dict(ex.to_dict()) + return resources.Execution.from_db_model(wf_ex) # TODO(rakhmerov): Make sure to make all needed renaming on public API. @@ -77,7 +77,7 @@ # amount of DB queries and network traffic. 
hasattr(wf_ex, 'output') - return resources.Execution.from_dict(wf_ex.to_dict()) + return resources.Execution.from_db_model(wf_ex) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( @@ -227,13 +227,14 @@ types.uniquelist, types.list, types.uniquelist, wtypes.text, types.uuid, wtypes.text, types.jsontype, types.uuid, STATE_TYPES, wtypes.text, types.jsontype, - types.jsontype, wtypes.text, wtypes.text, bool) + types.jsontype, wtypes.text, wtypes.text, bool, + types.uuid, bool) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', workflow_name=None, workflow_id=None, description=None, params=None, task_execution_id=None, state=None, state_info=None, input=None, output=None, created_at=None, updated_at=None, - include_output=None): + include_output=None, project_id=None, all_projects=False): """Return all Executions. :param marker: Optional. Pagination marker for large data sets. @@ -269,10 +270,17 @@ :param updated_at: Optional. Keep only resources with specific latest update time and date. :param include_output: Optional. Include the output for all executions - in the list + in the list. + :param project_id: Optional. Only get exectuions belong to the project. + Admin required. + :param all_projects: Optional. Get resources of all projects. Admin + required. """ acl.enforce('executions:list', context.ctx()) + if all_projects or project_id: + acl.enforce('executions:list:all_projects', context.ctx()) + filters = filter_utils.create_filters_from_request_params( created_at=created_at, workflow_name=workflow_name, @@ -284,13 +292,14 @@ input=input, output=output, updated_at=updated_at, - description=description + description=description, + project_id=project_id ) LOG.info( "Fetch executions. 
marker=%s, limit=%s, sort_keys=%s, " - "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, - filters + "sort_dirs=%s, filters=%s, all_projects=%s", marker, limit, + sort_keys, sort_dirs, filters, all_projects ) if include_output: @@ -309,5 +318,6 @@ sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, + all_projects=all_projects, **filters ) diff -Nru mistral-4.0.0/mistral/api/controllers/v2/member.py mistral-5.0.0~b2/mistral/api/controllers/v2/member.py --- mistral-4.0.0/mistral/api/controllers/v2/member.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/member.py 2017-06-09 12:48:26.000000000 +0000 @@ -66,13 +66,13 @@ member_id ) - member_dict = db_api.get_resource_member( + member_db = db_api.get_resource_member( self.resource_id, self.type, member_id - ).to_dict() + ) - return resources.Member.from_dict(member_dict) + return resources.Member.from_db_model(member_db) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @@ -91,9 +91,10 @@ self.resource_id, self.type ) + members = [ - resources.Member.from_dict(member.to_dict()) - for member in db_members + resources.Member.from_db_model(db_member) + for db_member in db_members ] return resources.Members(members=members) @@ -118,15 +119,15 @@ ) if not member_info.member_id: - msg = "Member id must be provided." - raise exc.WorkflowException(msg) + raise exc.WorkflowException("Member id must be provided.") with db_api.transaction(): wf_db = db_api.get_workflow_definition(self.resource_id) if wf_db.scope != 'private': - msg = "Only private resource could be shared." - raise exc.WorkflowException(msg) + raise exc.WorkflowException( + "Only private resource could be shared." 
+ ) resource_member = { 'resource_id': self.resource_id, @@ -137,7 +138,7 @@ db_member = db_api.create_resource_member(resource_member) - return resources.Member.from_dict(db_member.to_dict()) + return resources.Member.from_db_model(db_member) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @@ -165,7 +166,7 @@ {'status': member_info.status} ) - return resources.Member.from_dict(db_member.to_dict()) + return resources.Member.from_db_model(db_member) @rest_utils.wrap_pecan_controller_exception @auth_enable_check diff -Nru mistral-4.0.0/mistral/api/controllers/v2/resources.py mistral-5.0.0~b2/mistral/api/controllers/v2/resources.py --- mistral-4.0.0/mistral/api/controllers/v2/resources.py 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/resources.py 2017-06-09 12:48:26.000000000 +0000 @@ -95,16 +95,12 @@ updated_at='1970-01-01T00:00:00.000000') @classmethod - def from_dict(cls, d): - e = cls() + def _set_input(cls, obj, wf_spec): input_list = [] - for key, val in d.items(): - if hasattr(e, key): - setattr(e, key, val) + if wf_spec: + input = wf_spec.get('input', []) - if 'spec' in d: - input = d.get('spec', {}).get('input', []) for param in input: if isinstance(param, dict): for k, v in param.items(): @@ -112,9 +108,21 @@ else: input_list.append(param) - setattr(e, 'input', ", ".join(input_list) if input_list else '') + setattr(obj, 'input', ", ".join(input_list) if input_list else '') + + return obj + + @classmethod + def from_dict(cls, d): + obj = super(Workflow, cls).from_dict(d) + + return cls._set_input(obj, d.get('spec')) + + @classmethod + def from_db_model(cls, db_model): + obj = super(Workflow, cls).from_db_model(db_model) - return e + return cls._set_input(obj, db_model.spec) class Workflows(resource.ResourceList): @@ -289,6 +297,8 @@ state_info = wtypes.text "an optional state information string" + runtime_context = types.jsontype + result = wtypes.text published = types.jsontype processed = bool @@ -310,6 
+320,14 @@ workflow_execution_id='123e4567-e89b-12d3-a456-426655440000', name='task', state=states.SUCCESS, + runtime_context={ + 'triggered_by': [ + { + 'task_id': '123-123-123', + 'event': 'on-success' + } + ] + }, result='task result', published={'key': 'value'}, processed=True, @@ -354,7 +372,7 @@ output = types.jsontype created_at = wtypes.text updated_at = wtypes.text - params = types.jsontype + params = types.jsontype # TODO(rakhmerov): What is this?? @classmethod def sample(cls): diff -Nru mistral-4.0.0/mistral/api/controllers/v2/service.py mistral-5.0.0~b2/mistral/api/controllers/v2/service.py --- mistral-4.0.0/mistral/api/controllers/v2/service.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/service.py 2017-06-09 12:48:26.000000000 +0000 @@ -58,10 +58,18 @@ try: for group in service_group: members = service_coordinator.get_members(group) - services_list.extend( - [resources.Service.from_dict( - {'type': group, 'name': member}) for member in members] - ) + + members_list = [ + resources.Service.from_dict( + { + 'type': group, + 'name': member + } + ) + for member in members + ] + + services_list.extend(members_list) except tooz.coordination.ToozError as e: # In the scenario of network interruption or manually shutdown # connection shutdown, ToozError will be raised. 
diff -Nru mistral-4.0.0/mistral/api/controllers/v2/task.py mistral-5.0.0~b2/mistral/api/controllers/v2/task.py --- mistral-4.0.0/mistral/api/controllers/v2/task.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/task.py 2017-06-09 12:48:26.000000000 +0000 @@ -26,11 +26,11 @@ from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.lang import parser as spec_parser +from mistral.rpc import clients as rpc from mistral.utils import filter_utils from mistral.utils import rest_utils -from mistral.workbook import parser as spec_parser from mistral.workflow import data_flow from mistral.workflow import states @@ -41,7 +41,8 @@ def _get_task_resource_with_result(task_ex): - task = resources.Task.from_dict(task_ex.to_dict()) + task = resources.Task.from_db_model(task_ex) + task.result = json.dumps(data_flow.get_task_execution_result(task_ex)) return task diff -Nru mistral-4.0.0/mistral/api/controllers/v2/workbook.py mistral-5.0.0~b2/mistral/api/controllers/v2/workbook.py --- mistral-4.0.0/mistral/api/controllers/v2/workbook.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/workbook.py 2017-06-09 12:48:26.000000000 +0000 @@ -27,10 +27,10 @@ from mistral.api.hooks import content_type as ct_hook from mistral import context from mistral.db.v2 import api as db_api +from mistral.lang import parser as spec_parser from mistral.services import workbooks from mistral.utils import filter_utils from mistral.utils import rest_utils -from mistral.workbook import parser as spec_parser LOG = logging.getLogger(__name__) @@ -52,7 +52,7 @@ db_model = db_api.get_workbook(name) - return resources.Workbook.from_dict(db_model.to_dict()) + return resources.Workbook.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") @@ 
-66,7 +66,7 @@ wb_db = workbooks.update_workbook_v2(definition) - return resources.Workbook.from_dict(wb_db.to_dict()).to_json() + return resources.Workbook.from_db_model(wb_db).to_json() @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") @@ -79,9 +79,10 @@ LOG.info("Create workbook [definition=%s]" % definition) wb_db = workbooks.create_workbook_v2(definition) + pecan.response.status = 201 - return resources.Workbook.from_dict(wb_db.to_dict()).to_json() + return resources.Workbook.from_db_model(wb_db).to_json() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) diff -Nru mistral-4.0.0/mistral/api/controllers/v2/workflow.py mistral-5.0.0~b2/mistral/api/controllers/v2/workflow.py --- mistral-4.0.0/mistral/api/controllers/v2/workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/controllers/v2/workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -31,10 +31,10 @@ from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workflows from mistral.utils import filter_utils from mistral.utils import rest_utils -from mistral.workbook import parser as spec_parser LOG = logging.getLogger(__name__) @@ -85,7 +85,7 @@ db_model = db_api.get_workflow_definition(identifier) - return resources.Workflow.from_dict(db_model.to_dict()) + return resources.Workflow.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") @@ -117,9 +117,8 @@ identifier=identifier ) - models_dicts = [db_wf.to_dict() for db_wf in db_wfs] workflow_list = [ - resources.Workflow.from_dict(wf) for wf in models_dicts + resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs ] return (workflow_list[0].to_json() if identifier @@ -148,10 +147,9 @@ LOG.info("Create workflow(s) [definition=%s]", definition) db_wfs = 
workflows.create_workflows(definition, scope=scope) - models_dicts = [db_wf.to_dict() for db_wf in db_wfs] workflow_list = [ - resources.Workflow.from_dict(wf) for wf in models_dicts + resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs ] return resources.Workflows(workflows=workflow_list).to_json() @@ -161,6 +159,7 @@ def delete(self, identifier): """Delete a workflow.""" acl.enforce('workflows:delete', context.ctx()) + LOG.info("Delete workflow [identifier=%s]", identifier) with db_api.transaction(): diff -Nru mistral-4.0.0/mistral/api/wsgi.py mistral-5.0.0~b2/mistral/api/wsgi.py --- mistral-4.0.0/mistral/api/wsgi.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/api/wsgi.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,11 +13,5 @@ # limitations under the License. from mistral.api import app -from mistral import config -# By default, oslo.config parses the CLI args if no args is provided. -# As a result, invoking this wsgi script from gunicorn leads to the error -# with argparse complaining that the CLI options have already been parsed. -config.parse_args(args=[]) - -application = app.setup_app() +application = app.init_wsgi() diff -Nru mistral-4.0.0/mistral/auth/keystone.py mistral-5.0.0~b2/mistral/auth/keystone.py --- mistral-4.0.0/mistral/auth/keystone.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/auth/keystone.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,14 +13,11 @@ # limitations under the License. 
from oslo_config import cfg -from oslo_log import log as logging from mistral import auth from mistral import exceptions as exc -LOG = logging.getLogger(__name__) - CONF = cfg.CONF diff -Nru mistral-4.0.0/mistral/cmd/launch.py mistral-5.0.0~b2/mistral/cmd/launch.py --- mistral-4.0.0/mistral/cmd/launch.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/cmd/launch.py 2017-06-09 12:48:26.000000000 +0000 @@ -44,9 +44,9 @@ from mistral.api import service as api_service from mistral import config from mistral.engine import engine_server -from mistral.engine import executor_server -from mistral.engine.rpc_backend import rpc from mistral.event_engine import event_engine_server +from mistral.executors import executor_server +from mistral.rpc import base as rpc from mistral import version diff -Nru mistral-4.0.0/mistral/config.py mistral-5.0.0~b2/mistral/config.py --- mistral-4.0.0/mistral/config.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/config.py 2017-06-09 12:48:26.000000000 +0000 @@ -106,14 +106,6 @@ help=_('Seconds to wait for a response from a call.') ) -os_endpoint_type = cfg.StrOpt( - 'os-actions-endpoint-type', - default=os.environ.get('OS_ACTIONS_ENDPOINT_TYPE', 'public'), - choices=['public', 'admin', 'internal'], - help=_('Type of endpoint in identity service catalog to use for' - ' communication with OpenStack services.') -) - expiration_token_duration = cfg.IntOpt( 'expiration_token_duration', default=30, @@ -170,6 +162,16 @@ executor_opts = [ cfg.StrOpt( + 'type', + choices=['local', 'remote'], + default='remote', + help=( + 'Type of executor. Use local to run the executor within the ' + 'engine server. Use remote if the executor is launched as ' + 'a separate server to run action executions.' + ) + ), + cfg.StrOpt( 'host', default='0.0.0.0', help=_('Name of the executor node. This can be an opaque ' @@ -213,16 +215,35 @@ 'evaluation_interval', help=_('How often will the executions be evaluated ' '(in minutes). 
For example for value 120 the interval ' - 'will be 2 hours (every 2 hours).') + 'will be 2 hours (every 2 hours).' + 'Note that only final state executions will be removed: ' + '( SUCCESS / ERROR / CANCELLED ).') ), cfg.IntOpt( 'older_than', help=_('Evaluate from which time remove executions in minutes. ' 'For example when older_than = 60, remove all executions ' 'that finished a 60 minutes ago or more. ' - 'Minimum value is 1. ' - 'Note that only final state execution will remove ' - '( SUCCESS / ERROR ).') + 'Minimum value is 1.') + ), + cfg.IntOpt( + 'max_finished_executions', + default=0, + help=_('The maximum number of finished workflow executions' + 'to be stored. For example when max_finished_executions = 100,' + 'only the 100 latest finished executions will be preserved.' + 'This means that even unexpired executions are eligible' + 'for deletion, to decrease the number of executions in the' + 'database. The default value is 0. If it is set to 0,' + 'this constraint won\'t be applied.') + ), + cfg.IntOpt( + 'batch_size', + default=0, + help=_('Size of batch of expired executions to be deleted.' + 'The default value is 0. 
If it is set to 0, ' + 'size of batch is total number of expired executions' + 'that is going to be deleted.') ) ] @@ -259,6 +280,40 @@ ) ] +openstack_actions_opts = [ + cfg.StrOpt( + 'os-actions-endpoint-type', + default=os.environ.get('OS_ACTIONS_ENDPOINT_TYPE', 'public'), + choices=['public', 'admin', 'internal'], + deprecated_group='DEFAULT', + help=_('Type of endpoint in identity service catalog to use for' + ' communication with OpenStack services.') + ), + cfg.ListOpt( + 'modules-support-region', + default=['nova', 'glance', 'ceilometer', 'heat', 'neutron', 'cinder', + 'trove', 'ironic', 'designate', 'murano', 'tacker', 'senlin', + 'aodh', 'gnocchi'], + help=_('List of module names that support region in actions.') + ), + cfg.StrOpt( + 'default_region', + help=_('Default region name for openstack actions supporting region.') + ), +] + +# note: this command line option is used only from sync_db and +# mistral-db-manage +os_actions_mapping_path = cfg.StrOpt( + 'openstack_actions_mapping_path', + short='m', + metavar='MAPPING_PATH', + default='actions/openstack/mapping.json', + help='Path to openstack action mapping json file.' + 'It could be relative to mistral package ' + 'directory or absolute.' 
+) + CONF = cfg.CONF API_GROUP = 'api' @@ -270,9 +325,14 @@ EXECUTION_EXPIRATION_POLICY_GROUP = 'execution_expiration_policy' PROFILER_GROUP = profiler.list_opts()[0][0] KEYCLOAK_OIDC_GROUP = "keycloak_oidc" +OPENSTACK_ACTIONS_GROUP = 'openstack_actions' CONF.register_opt(wf_trace_log_name_opt) CONF.register_opt(auth_type_opt) +CONF.register_opt(js_impl_opt) +CONF.register_opt(rpc_impl_opt) +CONF.register_opt(rpc_response_timeout_opt) +CONF.register_opt(expiration_token_duration) CONF.register_opts(api_opts, group=API_GROUP) CONF.register_opts(engine_opts, group=ENGINE_GROUP) @@ -285,12 +345,8 @@ CONF.register_opts(pecan_opts, group=PECAN_GROUP) CONF.register_opts(coordination_opts, group=COORDINATION_GROUP) CONF.register_opts(profiler_opts, group=PROFILER_GROUP) -CONF.register_opt(js_impl_opt) -CONF.register_opt(rpc_impl_opt) -CONF.register_opt(rpc_response_timeout_opt) CONF.register_opts(keycloak_oidc_opts, group=KEYCLOAK_OIDC_GROUP) -CONF.register_opt(os_endpoint_type) -CONF.register_opt(expiration_token_duration) +CONF.register_opts(openstack_actions_opts, group=OPENSTACK_ACTIONS_GROUP) CLI_OPTS = [ use_debugger_opt, @@ -300,22 +356,19 @@ default_group_opts = itertools.chain( CLI_OPTS, [wf_trace_log_name_opt, auth_type_opt, js_impl_opt, rpc_impl_opt, - os_endpoint_type, rpc_response_timeout_opt, expiration_token_duration] + rpc_response_timeout_opt, expiration_token_duration] ) CONF.register_cli_opts(CLI_OPTS) + _DEFAULT_LOG_LEVELS = [ - 'amqp=WARN', - 'sqlalchemy=WARN', - 'oslo_messaging=INFO', - 'iso8601=WARN', 'eventlet.wsgi.server=WARN', - 'stevedore=INFO', 'oslo_service.periodic_task=INFO', 'oslo_service.loopingcall=INFO', 'mistral.services.periodic=INFO', - 'kazoo.client=WARN' + 'kazoo.client=WARN', + 'oslo_db=WARN' ] @@ -330,12 +383,15 @@ (EXECUTION_EXPIRATION_POLICY_GROUP, execution_expiration_policy_opts), (PROFILER_GROUP, profiler_opts), (KEYCLOAK_OIDC_GROUP, keycloak_oidc_opts), + (OPENSTACK_ACTIONS_GROUP, openstack_actions_opts), (None, 
default_group_opts) ] def parse_args(args=None, usage=None, default_config_files=None): - log.set_defaults(default_log_levels=_DEFAULT_LOG_LEVELS) + default_log_levels = log.get_default_log_levels() + default_log_levels.extend(_DEFAULT_LOG_LEVELS) + log.set_defaults(default_log_levels=default_log_levels) log.register_options(CONF) diff -Nru mistral-4.0.0/mistral/context.py mistral-5.0.0~b2/mistral/context.py --- mistral-4.0.0/mistral/context.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/context.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,7 +16,6 @@ import base64 from keystoneclient.v3 import client as keystone_client -import logging from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils @@ -29,7 +28,6 @@ from mistral import serialization from mistral import utils -LOG = logging.getLogger(__name__) CONF = cfg.CONF _CTX_THREAD_LOCAL_NAME = "MISTRAL_APP_CTX_THREAD_LOCAL" ALLOWED_WITHOUT_AUTH = ['/', '/v2/'] diff -Nru mistral-4.0.0/mistral/db/sqlalchemy/migration/cli.py mistral-5.0.0~b2/mistral/db/sqlalchemy/migration/cli.py --- mistral-4.0.0/mistral/db/sqlalchemy/migration/cli.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/db/sqlalchemy/migration/cli.py 2017-06-09 12:48:26.000000000 +0000 @@ -19,10 +19,12 @@ from alembic import config as alembic_cfg from alembic import util as alembic_u from oslo_config import cfg +from oslo_log import log as logging from oslo_utils import importutils import six import sys +from mistral import config from mistral.services import action_manager from mistral.services import workflows @@ -33,6 +35,7 @@ CONF = cfg.CONF +LOG = logging.getLogger(__name__) def do_alembic_command(config, cmd, *args, **kwargs): @@ -68,6 +71,7 @@ def do_populate(config, cmd): + LOG.info("populating db") action_manager.sync_db() workflows.sync_db() @@ -113,6 +117,7 @@ handler=add_command_parsers) CONF.register_cli_opt(command_opt) 
+CONF.register_cli_opt(config.os_actions_mapping_path) def main(): @@ -125,8 +130,10 @@ ) # attach the Mistral conf to the Alembic conf config.mistral_config = CONF + logging.register_options(CONF) CONF(project='mistral') + logging.setup(CONF, 'Mistral') CONF.command.func(config, CONF.command.name) if __name__ == '__main__': diff -Nru mistral-4.0.0/mistral/db/sqlalchemy/model_base.py mistral-5.0.0~b2/mistral/db/sqlalchemy/model_base.py --- mistral-4.0.0/mistral/db/sqlalchemy/model_base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/db/sqlalchemy/model_base.py 2017-06-09 12:48:26.000000000 +0000 @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from oslo_db.sqlalchemy import models as oslo_models import sqlalchemy as sa from sqlalchemy import event @@ -63,7 +61,19 @@ def to_dict(self): """sqlalchemy based automatic to_dict method.""" - d = {} + + d = {col_name: col_val for col_name, col_val in self.iter_columns()} + + utils.datetime_to_str_in_dict(d, 'created_at') + utils.datetime_to_str_in_dict(d, 'updated_at') + + return d + + def iter_column_names(self): + """Returns an iterator for loaded column names. + + :return: A generator function for column names. + """ # If a column is unloaded at this point, it is # probably deferred. We do not want to access it @@ -72,12 +82,17 @@ for col in self.__table__.columns: if col.name not in unloaded and hasattr(self, col.name): - d[col.name] = getattr(self, col.name) + yield col.name - datetime_to_str(d, 'created_at') - datetime_to_str(d, 'updated_at') + def iter_columns(self): + """Returns an iterator for loaded columns. - return d + :return: A generator function that generates + tuples (column name, column value). 
+ """ + + for col_name in self.iter_column_names(): + yield col_name, getattr(self, col_name) def get_clone(self): """Clones current object, loads all fields and returns the result.""" @@ -87,13 +102,18 @@ if hasattr(self, col.name): setattr(m, col.name, getattr(self, col.name)) - setattr(m, 'created_at', getattr(self, 'created_at').isoformat(' ')) + setattr( + m, + 'created_at', + utils.datetime_to_str(getattr(self, 'created_at')) + ) updated_at = getattr(self, 'updated_at') + # NOTE(nmakhotkin): 'updated_at' field is empty for just created # object since it has not updated yet. if updated_at: - setattr(m, 'updated_at', updated_at.isoformat(' ')) + setattr(m, 'updated_at', utils.datetime_to_str(updated_at)) return m @@ -101,12 +121,6 @@ return '%s %s' % (type(self).__name__, self.to_dict().__repr__()) -def datetime_to_str(dct, attr_name): - if (dct.get(attr_name) is not None - and not isinstance(dct.get(attr_name), six.string_types)): - dct[attr_name] = dct[attr_name].isoformat(' ') - - MistralModelBase = declarative.declarative_base(cls=_MistralModelBase) diff -Nru mistral-4.0.0/mistral/db/utils.py mistral-5.0.0~b2/mistral/db/utils.py --- mistral-4.0.0/mistral/db/utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/db/utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -18,7 +18,9 @@ from oslo_log import log as logging from oslo_service import loopingcall -from mistral import context as ctx +from mistral import context +from mistral import exceptions as exc +from mistral.services import security LOG = logging.getLogger(__name__) @@ -35,9 +37,9 @@ :param kw: Function keywork arguments. :return: Function result. 
""" - old_auth_ctx = ctx.ctx() if ctx.has_ctx() else None + old_auth_ctx = context.ctx() if context.has_ctx() else None - ctx.set_ctx(auth_ctx) + context.set_ctx(auth_ctx) try: return func(*args, **kw) @@ -48,7 +50,7 @@ raise e finally: - ctx.set_ctx(old_auth_ctx) + context.set_ctx(old_auth_ctx) def retry_on_deadlock(func): @@ -67,8 +69,26 @@ # auth context in the new thread that RetryDecorator spawns. # In order to do that we need an additional helper function. - auth_ctx = ctx.ctx() if ctx.has_ctx() else None + auth_ctx = context.ctx() if context.has_ctx() else None return _with_auth_context(auth_ctx, func, *args, **kw) return decorate + + +def check_db_obj_access(db_obj): + """Check accessbility to db object.""" + ctx = context.ctx() + is_admin = ctx.is_admin + + if not is_admin and db_obj.project_id != security.get_project_id(): + raise exc.NotAllowedException( + "Can not access %s resource of other projects, ID: %s" % + (db_obj.__class__.__name__, db_obj.id) + ) + + if not is_admin and hasattr(db_obj, 'is_system') and db_obj.is_system: + raise exc.InvalidActionException( + "Can not modify a system %s resource, ID: %s" % + (db_obj.__class__.__name__, db_obj.id) + ) diff -Nru mistral-4.0.0/mistral/db/v2/api.py mistral-5.0.0~b2/mistral/db/v2/api.py --- mistral-4.0.0/mistral/db/v2/api.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/db/v2/api.py 2017-06-09 12:48:26.000000000 +0000 @@ -393,8 +393,14 @@ return IMPL.get_next_cron_triggers(time) -def get_expired_executions(time): - return IMPL.get_expired_executions(time) +def get_executions_to_clean(expiration_time, limit=None, + max_finished_executions=None, columns=()): + return IMPL.get_executions_to_clean( + expiration_time, + limit, + max_finished_executions, + columns + ) def create_cron_trigger(values): diff -Nru mistral-4.0.0/mistral/db/v2/sqlalchemy/api.py mistral-5.0.0~b2/mistral/db/v2/sqlalchemy/api.py --- mistral-4.0.0/mistral/db/v2/sqlalchemy/api.py 2017-02-22 13:40:59.000000000 +0000 
+++ mistral-5.0.0~b2/mistral/db/v2/sqlalchemy/api.py 2017-06-09 12:48:26.000000000 +0000 @@ -28,12 +28,14 @@ from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import Insert -from mistral import context as auth_ctx +from mistral import context from mistral.db.sqlalchemy import base as b from mistral.db.sqlalchemy import model_base as mb from mistral.db.sqlalchemy import sqlite_lock +from mistral.db import utils as m_dbutils from mistral.db.v2.sqlalchemy import filters as db_filters from mistral.db.v2.sqlalchemy import models +from mistral.db.v2.sqlalchemy.models import WorkflowExecution from mistral import exceptions as exc from mistral.services import security from mistral import utils @@ -255,12 +257,14 @@ return _secure_query(model).filter_by(name=name).first() -def _get_db_object_by_id(model, id): - return _secure_query(model).filter_by(id=id).first() +def _get_db_object_by_id(model, id, insecure=False): + query = b.model_query(model) if insecure else _secure_query(model) + return query.filter_by(id=id).first() -def _get_db_object_by_name_or_id(model, identifier): - query = _secure_query(model) + +def _get_db_object_by_name_or_id(model, identifier, insecure=False): + query = b.model_query(model) if insecure else _secure_query(model) query = query.filter( sa.or_( model.id == identifier, @@ -424,9 +428,12 @@ uuid. :return: Workflow definition. """ + ctx = context.ctx() + wf_def = _get_db_object_by_name_or_id( models.WorkflowDefinition, - identifier + identifier, + insecure=ctx.is_admin ) if not wf_def: @@ -488,18 +495,8 @@ @b.session_aware() def update_workflow_definition(identifier, values, session=None): wf_def = get_workflow_definition(identifier) - ctx = auth_ctx.ctx() - if not ctx.is_admin and wf_def.project_id != security.get_project_id(): - raise exc.NotAllowedException( - "Can not update workflow of other tenants. 
" - "[workflow_identifier=%s]" % identifier - ) - - if not ctx.is_admin and wf_def.is_system: - raise exc.InvalidActionException( - "Attempt to modify a system workflow: %s" % identifier - ) + m_dbutils.check_db_obj_access(wf_def) if wf_def.scope == 'public' and values['scope'] == 'private': # Check cron triggers. @@ -543,15 +540,7 @@ def delete_workflow_definition(identifier, session=None): wf_def = get_workflow_definition(identifier) - if wf_def.project_id != security.get_project_id(): - raise exc.NotAllowedException( - "Can not delete workflow of other users. [workflow_identifier=%s]" - % identifier - ) - - if wf_def.is_system: - msg = "Attempt to delete a system workflow: %s" % identifier - raise exc.DataAccessException(msg) + m_dbutils.check_db_obj_access(wf_def) cron_triggers = get_cron_triggers(insecure=True, workflow_id=wf_def.id) if cron_triggers: @@ -750,7 +739,13 @@ @b.session_aware() def get_workflow_execution(id, session=None): - wf_ex = _get_db_object_by_id(models.WorkflowExecution, id) + ctx = context.ctx() + + wf_ex = _get_db_object_by_id( + models.WorkflowExecution, + id, + insecure=ctx.is_admin + ) if not wf_ex: raise exc.DBEntityNotFoundError( @@ -798,6 +793,8 @@ def update_workflow_execution(id, values, session=None): wf_ex = get_workflow_execution(id) + m_dbutils.check_db_obj_access(wf_ex) + wf_ex.update(values.copy()) return wf_ex @@ -1038,13 +1035,37 @@ @b.session_aware() -def get_expired_executions(time, session=None): - query = b.model_query(models.WorkflowExecution) +def get_executions_to_clean(expiration_time, limit=None, + max_finished_executions=None, columns=(), + session=None): + # Get the ids of the executions that won't be deleted. + # These are the not expired executions, + # limited by the new max_finished_executions constraint. 
+ query = _get_completed_root_executions_query((WorkflowExecution.id,)) + query = query.filter( + models.WorkflowExecution.updated_at >= expiration_time + ) + query = query.order_by(models.WorkflowExecution.updated_at.desc()) + + if max_finished_executions: + query = query.limit(max_finished_executions) + + # And take the inverse of that set. + inverse = _get_completed_root_executions_query(columns) + inverse = inverse.filter(~WorkflowExecution.id.in_(query)) + inverse = inverse.order_by(models.WorkflowExecution.updated_at.asc()) + + if limit: + inverse.limit(limit) + + return inverse.all() + +def _get_completed_root_executions_query(columns): + query = b.model_query(models.WorkflowExecution, columns=columns) # Only WorkflowExecution that are not a child of other WorkflowExecution. query = query.filter(models.WorkflowExecution. task_execution_id == sa.null()) - query = query.filter(models.WorkflowExecution.updated_at < time) query = query.filter( sa.or_( models.WorkflowExecution.state == states.SUCCESS, @@ -1052,11 +1073,8 @@ models.WorkflowExecution.state == states.CANCELLED ) ) + return query - return query.all() - - -# Cron triggers. 
@b.session_aware() def get_cron_trigger(name, session=None): diff -Nru mistral-4.0.0/mistral/db/v2/sqlalchemy/models.py mistral-5.0.0~b2/mistral/db/v2/sqlalchemy/models.py --- mistral-4.0.0/mistral/db/v2/sqlalchemy/models.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/db/v2/sqlalchemy/models.py 2017-06-09 12:48:26.000000000 +0000 @@ -425,8 +425,8 @@ def to_dict(self): d = super(CronTrigger, self).to_dict() - mb.datetime_to_str(d, 'first_execution_time') - mb.datetime_to_str(d, 'next_execution_time') + utils.datetime_to_str_in_dict(d, 'first_execution_time') + utils.datetime_to_str_in_dict(d, 'next_execution_time') return d diff -Nru mistral-4.0.0/mistral/engine/action_handler.py mistral-5.0.0~b2/mistral/engine/action_handler.py --- mistral-4.0.0/mistral/engine/action_handler.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/action_handler.py 2017-06-09 12:48:26.000000000 +0000 @@ -21,7 +21,7 @@ from mistral.engine import actions from mistral.engine import task_handler from mistral import exceptions as exc -from mistral.workbook import parser as spec_parser +from mistral.lang import parser as spec_parser LOG = logging.getLogger(__name__) diff -Nru mistral-4.0.0/mistral/engine/action_queue.py mistral-5.0.0~b2/mistral/engine/action_queue.py --- mistral-4.0.0/mistral/engine/action_queue.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/action_queue.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,7 +15,9 @@ import functools -from mistral.engine.rpc_backend import rpc +from oslo_config import cfg + +from mistral.executors import base as exe from mistral import utils @@ -44,14 +46,16 @@ def _run_actions(): + executor = exe.get_executor(cfg.CONF.executor.type) + for action_ex, action_def, target in _get_queue(): - rpc.get_executor_client().run_action( + executor.run_action( action_ex.id, action_def.action_class, action_def.attributes or {}, action_ex.input, - target, - 
safe_rerun=action_ex.runtime_context.get('safe_rerun', False) + action_ex.runtime_context.get('safe_rerun', False), + target=target ) diff -Nru mistral-4.0.0/mistral/engine/actions.py mistral-5.0.0~b2/mistral/engine/actions.py --- mistral-4.0.0/mistral/engine/actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,25 +15,30 @@ import abc from oslo_config import cfg +from oslo_log import log as logging from osprofiler import profiler import six from mistral.db.v2 import api as db_api from mistral.engine import action_queue -from mistral.engine.rpc_backend import rpc -from mistral.engine import utils as e_utils +from mistral.engine import utils as engine_utils from mistral.engine import workflow_handler as wf_handler from mistral import exceptions as exc +from mistral.executors import base as exe from mistral import expressions as expr +from mistral.lang import parser as spec_parser from mistral.services import action_manager as a_m from mistral.services import security from mistral import utils from mistral.utils import wf_trace -from mistral.workbook import parser as spec_parser +from mistral.workflow import data_flow from mistral.workflow import states from mistral.workflow import utils as wf_utils +LOG = logging.getLogger(__name__) + + @six.add_metaclass(abc.ABCMeta) class Action(object): """Action. 
@@ -251,14 +256,16 @@ action_ex_id=action_ex_id ) - result = rpc.get_executor_client().run_action( + executor = exe.get_executor(cfg.CONF.executor.type) + + result = executor.run_action( self.action_ex.id if self.action_ex else None, self.action_def.action_class, self.action_def.attributes or {}, input_dict, - target, - async_=False, - safe_rerun=safe_rerun + safe_rerun=safe_rerun, + target=target, + async_=False ) return self._prepare_output(result) @@ -274,10 +281,19 @@ if self.action_def.action_class: self._inject_action_ctx_for_validating(input_dict) - # NOTE(xylan): Don't validate action input if action initialization + # NOTE(kong): Don't validate action input if action initialization # method contains ** argument. - if '**' not in self.action_def.input: - e_utils.validate_input(self.action_def, input_dict) + if '**' in self.action_def.input: + return + + expected_input = utils.get_dict_from_string(self.action_def.input) + + engine_utils.validate_input( + expected_input, + input_dict, + self.action_def.name, + self.action_def.action_class + ) def _prepare_input(self, input_dict): """Template method to do manipulations with input parameters. 
@@ -323,7 +339,8 @@ class AdHocAction(PythonAction): """Ad-hoc action.""" - def __init__(self, action_def, action_ex=None, task_ex=None): + def __init__(self, action_def, action_ex=None, task_ex=None, task_ctx=None, + wf_ctx=None): self.action_spec = spec_parser.get_action_spec(action_def.spec) base_action_def = db_api.get_action_definition( @@ -340,12 +357,17 @@ ) self.adhoc_action_def = action_def + self.task_ctx = task_ctx or {} + self.wf_ctx = wf_ctx or {} def validate_input(self, input_dict): - e_utils.validate_input( - self.adhoc_action_def, + expected_input = self.action_spec.get_input() + + engine_utils.validate_input( + expected_input, input_dict, - self.action_spec + self.adhoc_action_def.name, + self.action_spec.__class__.__name__ ) super(AdHocAction, self).validate_input( @@ -353,15 +375,25 @@ ) def _prepare_input(self, input_dict): + for k, v in self.action_spec.get_input().items(): + if k not in input_dict or input_dict[k] is utils.NotDefined: + input_dict[k] = v + base_input_dict = input_dict for action_def in self.adhoc_action_defs: action_spec = spec_parser.get_action_spec(action_def.spec) base_input_expr = action_spec.get_base_input() + if base_input_expr: + ctx_view = data_flow.ContextView( + base_input_dict, + self.task_ctx, + self.wf_ctx + ) base_input_dict = expr.evaluate_recursively( base_input_expr, - base_input_dict + ctx_view ) else: base_input_dict = {} @@ -404,6 +436,7 @@ An ad-hoc action may be based on another ad-hoc action (and this recursively). Using twice the same base action is not allowed to avoid infinite loops. It stores the list of ad-hoc actions. 
+ :param action_def: Action definition :type action_def: ActionDefinition :param base_action_def: Original base action definition @@ -411,11 +444,13 @@ :return; The definition of the base system action :rtype; ActionDefinition """ + self.adhoc_action_defs = [action_def] original_base_name = self.action_spec.get_name() action_names = set([original_base_name]) base = base_action_def + while not base.is_system and base.name not in action_names: action_names.add(base.name) self.adhoc_action_defs.append(base) @@ -454,7 +489,7 @@ wf_spec_name = task_spec.get_workflow_name() - wf_def = e_utils.resolve_workflow_definition( + wf_def = engine_utils.resolve_workflow_definition( parent_wf_ex.workflow_name, parent_wf_spec.get_name(), wf_spec_name @@ -472,6 +507,7 @@ if 'env' in parent_wf_ex.params: wf_params['env'] = parent_wf_ex.params['env'] + wf_params['evaluate_env'] = parent_wf_ex.params.get('evaluate_env') for k, v in list(input_dict.items()): if k not in wf_spec.get_input(): @@ -508,6 +544,7 @@ :param wf_spec_name: Workflow name according to a spec. :return: Action definition (python or ad-hoc). """ + action_db = None if wf_name and wf_name != wf_spec_name: diff -Nru mistral-4.0.0/mistral/engine/base.py mistral-5.0.0~b2/mistral/engine/base.py --- mistral-4.0.0/mistral/engine/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright 2014 - Mirantis, Inc. +# Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -129,39 +130,6 @@ @six.add_metaclass(abc.ABCMeta) -class Executor(object): - """Action executor interface.""" - - @abc.abstractmethod - def run_action(self, action_ex_id, action_class_str, attributes, - action_params, safe_rerun, redelivered=False): - """Runs action. - - :param action_ex_id: Corresponding action execution id. 
- :param action_class_str: Path to action class in dot notation. - :param attributes: Attributes of action class which will be set to. - :param action_params: Action parameters. - :param safe_rerun: Tells if given action can be safely rerun. - :param redelivered: Tells if given action was run before on another - executor. - """ - raise NotImplementedError() - - -@six.add_metaclass(abc.ABCMeta) -class EventEngine(object): - """Action event trigger interface.""" - - @abc.abstractmethod - def create_event_trigger(self, trigger, events): - raise NotImplementedError() - - @abc.abstractmethod - def delete_event_trigger(self, trigger, events): - raise NotImplementedError() - - -@six.add_metaclass(abc.ABCMeta) class TaskPolicy(object): """Task policy. diff -Nru mistral-4.0.0/mistral/engine/default_engine.py mistral-5.0.0~b2/mistral/engine/default_engine.py --- mistral-4.0.0/mistral/engine/default_engine.py 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/default_engine.py 2017-06-09 12:48:26.000000000 +0000 @@ -35,10 +35,9 @@ class DefaultEngine(base.Engine): @action_queue.process - @profiler.trace('engine-start-workflow') + @profiler.trace('engine-start-workflow', hide_args=True) def start_workflow(self, wf_identifier, wf_input, description='', **params): - with db_api.transaction(): wf_ex = wf_handler.start_workflow( wf_identifier, diff -Nru mistral-4.0.0/mistral/engine/default_executor.py mistral-5.0.0~b2/mistral/engine/default_executor.py --- mistral-4.0.0/mistral/engine/default_executor.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/default_executor.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -# Copyright 2013 - Mirantis, Inc. -# Copyright 2016 - Brocade Communications Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -from osprofiler import profiler - -from mistral.actions import action_factory as a_f -from mistral.engine import base -from mistral.engine.rpc_backend import rpc -from mistral import exceptions as exc -from mistral.utils import inspect_utils as i_u -from mistral.workflow import utils as wf_utils - - -LOG = logging.getLogger(__name__) - - -class DefaultExecutor(base.Executor): - def __init__(self): - self._engine_client = rpc.get_engine_client() - - @profiler.trace('executor-run-action', hide_args=True) - def run_action(self, action_ex_id, action_class_str, attributes, - action_params, safe_rerun, redelivered=False): - """Runs action. - - :param action_ex_id: Action execution id. - :param action_class_str: Path to action class in dot notation. - :param attributes: Attributes of action class which will be set to. - :param action_params: Action parameters. - :param safe_rerun: Tells if given action can be safely rerun. - :param redelivered: Tells if given action was run before on another - executor. - """ - - def send_error_back(error_msg): - error_result = wf_utils.Result(error=error_msg) - - if action_ex_id: - self._engine_client.on_action_complete( - action_ex_id, - error_result - ) - - return None - - return error_result - - if redelivered and not safe_rerun: - msg = ( - "Request to run action %s was redelivered, but action %s" - " cannot be re-run safely. The only safe thing to do is fail" - " action." 
- % (action_class_str, action_class_str) - ) - - return send_error_back(msg) - - action_cls = a_f.construct_action_class(action_class_str, attributes) - - # Instantiate action. - - try: - action = action_cls(**action_params) - except Exception as e: - msg = ("Failed to initialize action %s. Action init params = %s." - " Actual init params = %s. More info: %s" - % (action_class_str, i_u.get_arg_list(action_cls.__init__), - action_params.keys(), e)) - LOG.warning(msg) - - return send_error_back(msg) - - # Run action. - - try: - result = action.run() - - # Note: it's made for backwards compatibility with already - # existing Mistral actions which don't return result as - # instance of workflow.utils.Result. - if not isinstance(result, wf_utils.Result): - result = wf_utils.Result(data=result) - - except Exception as e: - msg = ("Failed to run action [action_ex_id=%s, action_cls='%s'," - " attributes='%s', params='%s']\n %s" - % (action_ex_id, action_cls, attributes, action_params, e)) - LOG.exception(msg) - - return send_error_back(msg) - - # Send action result. - - try: - if action_ex_id and (action.is_sync() or result.is_error()): - self._engine_client.on_action_complete( - action_ex_id, - result, - async_=True - ) - - except exc.MistralException as e: - # In case of a Mistral exception we can try to send error info to - # engine because most likely it's not related to the infrastructure - # such as message bus or network. One known case is when the action - # returns a bad result (e.g. invalid unicode) which can't be - # serialized. - msg = ("Failed to call engine's on_action_complete() method due" - " to a Mistral exception" - " [action_ex_id=%s, action_cls='%s'," - " attributes='%s', params='%s']\n %s" - % (action_ex_id, action_cls, attributes, action_params, e)) - LOG.exception(msg) - - return send_error_back(msg) - except Exception as e: - # If it's not a Mistral exception all we can do is only - # log the error. 
- msg = ("Failed to call engine's on_action_complete() method due" - " to an unexpected exception" - " [action_ex_id=%s, action_cls='%s'," - " attributes='%s', params='%s']\n %s" - % (action_ex_id, action_cls, attributes, action_params, e)) - LOG.exception(msg) - - return result diff -Nru mistral-4.0.0/mistral/engine/dispatcher.py mistral-5.0.0~b2/mistral/engine/dispatcher.py --- mistral-4.0.0/mistral/engine/dispatcher.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/dispatcher.py 2017-06-09 12:48:26.000000000 +0000 @@ -79,7 +79,7 @@ return res -@profiler.trace('dispatcher-dispatch-commands') +@profiler.trace('dispatcher-dispatch-commands', hide_args=True) def dispatch_workflow_commands(wf_ex, wf_cmds): # TODO(rakhmerov): I don't like these imports but otherwise we have # import cycles. diff -Nru mistral-4.0.0/mistral/engine/engine_server.py mistral-5.0.0~b2/mistral/engine/engine_server.py --- mistral-4.0.0/mistral/engine/engine_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/engine_server.py 2017-06-09 12:48:26.000000000 +0000 @@ -17,7 +17,7 @@ from mistral import config as cfg from mistral.db.v2 import api as db_api from mistral.engine import default_engine -from mistral.engine.rpc_backend import rpc +from mistral.rpc import base as rpc from mistral.service import base as service_base from mistral.services import expiration_policy from mistral.services import scheduler @@ -91,10 +91,9 @@ """ LOG.info( - "Received RPC request 'start_workflow'[rpc_ctx=%s," - " workflow_identifier=%s, workflow_input=%s, description=%s, " - "params=%s]" % - (rpc_ctx, workflow_identifier, utils.cut(workflow_input), + "Received RPC request 'start_workflow'[workflow_identifier=%s, " + "workflow_input=%s, description=%s, params=%s]" % + (workflow_identifier, utils.cut(workflow_input), description, params) ) @@ -117,10 +116,9 @@ :return: Action execution. 
""" LOG.info( - "Received RPC request 'start_action'[rpc_ctx=%s," - " name=%s, input=%s, description=%s, params=%s]" - % (rpc_ctx, action_name, utils.cut(action_input), - description, params) + "Received RPC request 'start_action'[name=%s, input=%s, " + "description=%s, params=%s]" + % (action_name, utils.cut(action_input), description, params) ) return self.engine.start_action( @@ -139,11 +137,9 @@ :param wf_action: True if given id points to a workflow execution. :return: Action execution. """ - LOG.info( - "Received RPC request 'on_action_complete'[rpc_ctx=%s," - " action_ex_id=%s, result=%s]" % - (rpc_ctx, action_ex_id, result.cut_repr()) + "Received RPC request 'on_action_complete'[action_ex_id=%s, " + "result=%s]" % (action_ex_id, result.cut_repr()) ) return self.engine.on_action_complete(action_ex_id, result, wf_action) @@ -155,10 +151,9 @@ :param execution_id: Workflow execution id. :return: Workflow execution. """ - LOG.info( - "Received RPC request 'pause_workflow'[rpc_ctx=%s," - " execution_id=%s]" % (rpc_ctx, execution_id) + "Received RPC request 'pause_workflow'[execution_id=%s]" % + execution_id ) return self.engine.pause_workflow(execution_id) @@ -172,10 +167,9 @@ :param env: Environment variables to update. :return: Workflow execution. """ - LOG.info( - "Received RPC request 'rerun_workflow'[rpc_ctx=%s, " - "task_ex_id=%s]" % (rpc_ctx, task_ex_id) + "Received RPC request 'rerun_workflow'[task_ex_id=%s]" % + task_ex_id ) return self.engine.rerun_workflow(task_ex_id, reset, env) @@ -188,10 +182,8 @@ :param env: Environment variables to update. :return: Workflow execution. """ - LOG.info( - "Received RPC request 'resume_workflow'[rpc_ctx=%s, " - "wf_ex_id=%s]" % (rpc_ctx, wf_ex_id) + "Received RPC request 'resume_workflow'[wf_ex_id=%s]" % wf_ex_id ) return self.engine.resume_workflow(wf_ex_id, env) @@ -211,11 +203,10 @@ :return: Workflow execution. 
""" - LOG.info( - "Received RPC request 'stop_workflow'[rpc_ctx=%s, execution_id=%s," + "Received RPC request 'stop_workflow'[execution_id=%s," " state=%s, message=%s]" % - (rpc_ctx, execution_id, state, message) + (execution_id, state, message) ) return self.engine.stop_workflow(execution_id, state, message) @@ -226,10 +217,9 @@ :param rpc_ctx: RPC request context. :return: Workflow execution. """ - LOG.info( - "Received RPC request 'rollback_workflow'[rpc_ctx=%s," - " execution_id=%s]" % (rpc_ctx, execution_id) + "Received RPC request 'rollback_workflow'[execution_id=%s]" % + execution_id ) return self.engine.rollback_workflow(execution_id) diff -Nru mistral-4.0.0/mistral/engine/executor_server.py mistral-5.0.0~b2/mistral/engine/executor_server.py --- mistral-4.0.0/mistral/engine/executor_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/executor_server.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -# Copyright 2016 - Nokia Networks -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from mistral import config as cfg -from mistral.engine import default_executor -from mistral.engine.rpc_backend import rpc -from mistral.service import base as service_base -from mistral import utils -from mistral.utils import profiler as profiler_utils - -LOG = logging.getLogger(__name__) - - -class ExecutorServer(service_base.MistralService): - """Executor server. 
- - This class manages executor life-cycle and gets registered as an RPC - endpoint to process executor specific calls. It also registers a - cluster member associated with this instance of executor. - """ - - def __init__(self, executor, setup_profiler=True): - super(ExecutorServer, self).__init__('executor_group', setup_profiler) - - self.executor = executor - self._rpc_server = None - - def start(self): - super(ExecutorServer, self).start() - - if self._setup_profiler: - profiler_utils.setup('mistral-executor', cfg.CONF.executor.host) - - # Initialize and start RPC server. - - self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.executor) - self._rpc_server.register_endpoint(self) - - self._rpc_server.run(executor='threading') - - self._notify_started('Executor server started.') - - def stop(self, graceful=False): - super(ExecutorServer, self).stop(graceful) - - if self._rpc_server: - self._rpc_server.stop(graceful) - - def run_action(self, rpc_ctx, action_ex_id, action_class_str, - attributes, params, safe_rerun): - """Receives calls over RPC to run action on executor. - - :param rpc_ctx: RPC request context dictionary. - :param action_ex_id: Action execution id. - :param action_class_str: Action class name. - :param attributes: Action class attributes. - :param params: Action input parameters. - :param safe_rerun: Tells if given action can be safely rerun. - :return: Action result. 
- """ - - LOG.info( - "Received RPC request 'run_action'[rpc_ctx=%s," - " action_ex_id=%s, action_class=%s, attributes=%s, params=%s]" - % (rpc_ctx, action_ex_id, action_class_str, attributes, - utils.cut(params)) - ) - - redelivered = rpc_ctx.redelivered or False - - return self.executor.run_action( - action_ex_id, - action_class_str, - attributes, - params, - safe_rerun, - redelivered - ) - - -def get_oslo_service(setup_profiler=True): - return ExecutorServer( - default_executor.DefaultExecutor(), - setup_profiler=setup_profiler - ) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/base.py mistral-5.0.0~b2/mistral/engine/rpc_backend/base.py --- mistral-4.0.0/mistral/engine/rpc_backend/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - - -class RPCClient(object): - def __init__(self, conf): - """Base class for RPCClient's drivers - - RPC Client is responsible for sending requests to RPC Server. - All RPC client drivers have to inherit from this class. - - :param conf: Additional config provided by upper layer. - """ - self.conf = conf - - @abc.abstractmethod - def sync_call(self, ctx, method, target=None, **kwargs): - """Synchronous call of RPC method. - - Blocks the thread and wait for method result. 
- """ - raise NotImplementedError - - @abc.abstractmethod - def async_call(self, ctx, method, target=None, **kwargs): - """Asynchronous call of RPC method. - - Does not block the thread, just send invoking data to - the RPC server and immediately returns nothing. - """ - raise NotImplementedError - - -class RPCServer(object): - def __init__(self, conf): - """Base class for RPCServer's drivers - - RPC Server should listen for request coming from RPC Clients and - respond to them respectively to the registered endpoints. - All RPC server drivers have to inherit from this class. - - :param conf: Additional config provided by upper layer. - """ - self.conf = conf - - @abc.abstractmethod - def register_endpoint(self, endpoint): - """Registers a new RPC endpoint. - - :param endpoint: an object containing methods which - will be used as RPC methods. - """ - raise NotImplementedError - - @abc.abstractmethod - def run(self, executor='blocking'): - """Runs the RPC server. - - :param executor: Executor used to process incoming requests. Different - implementations may support different options. - """ - raise NotImplementedError - - def stop(self, graceful=False): - """Stop the RPC server. - - :param graceful: True if this method call should wait till all - internal threads are finished. - :return: - """ - # No-op by default. - pass - - def wait(self): - """Wait till all internal threads are finished.""" - # No-op by default. - pass diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/base.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/base.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import kombu - -from mistral import exceptions as exc -from mistral import serialization as mistral_serialization -from mistral.utils import rpc_utils - -IS_RECEIVED = 'kombu_rpc_is_received' -RESULT = 'kombu_rpc_result' -CORR_ID = 'kombu_rpc_correlation_id' -TYPE = 'kombu_rpc_type' - - -class Base(object): - """Base class for Client and Server.""" - def __init__(self): - self._transport_url = None - self.serializer = None - - @staticmethod - def _make_connection(amqp_host, amqp_port, amqp_user, amqp_password, - amqp_vhost): - """Create connection. - - This method creates object representing the connection to RabbitMQ. - - :param amqp_host: Address of RabbitMQ server. - :param amqp_user: Username for connecting to RabbitMQ. - :param amqp_password: Password matching the given username. - :param amqp_vhost: Virtual host to connect to. - :param amqp_port: Port of RabbitMQ server. - :return: New connection to RabbitMQ. - """ - return kombu.BrokerConnection( - hostname=amqp_host, - userid=amqp_user, - password=amqp_password, - virtual_host=amqp_vhost, - port=amqp_port - ) - - @staticmethod - def _make_exchange(name, durable=False, auto_delete=True, - exchange_type='topic'): - """Make named exchange. - - This method creates object representing exchange on RabbitMQ. It would - create a new exchange if exchange with given name don't exists. - - :param name: Name of the exchange. - :param durable: If set to True, messages on this exchange would be - store on disk - therefore can be retrieve after - failure. 
- :param auto_delete: If set to True, exchange would be automatically - deleted when none is connected. - :param exchange_type: Type of the exchange. Can be one of 'direct', - 'topic', 'fanout', 'headers'. See Kombu docs for - further details. - :return: Kombu exchange object. - """ - return kombu.Exchange( - name=name, - type=exchange_type, - durable=durable, - auto_delete=auto_delete - ) - - @staticmethod - def _make_queue(name, exchange, routing_key='', - durable=False, auto_delete=True, **kwargs): - """Make named queue for a given exchange. - - This method creates object representing queue in RabbitMQ. It would - create a new queue if queue with given name don't exists. - - :param name: Name of the queue - :param exchange: Kombu Exchange object (can be created using - _make_exchange). - :param routing_key: Routing key for queue. It behaves differently - depending the exchange type. See Kombu docs for - further details. - :param durable: If set to True, messages on this queue would be - store on disk - therefore can be retrieve after - failure. - :param auto_delete: If set to True, queue would be automatically - deleted when none is connected. - :param kwargs: See kombu documentation for all parameters than may be - may be passed to Queue. - :return: Kombu Queue object. - """ - return kombu.Queue( - name=name, - routing_key=routing_key, - exchange=exchange, - durable=durable, - auto_delete=auto_delete, - **kwargs - ) - - def _register_mistral_serialization(self): - """Adds mistral serializer to available serializers in kombu. 
- - :return: None - """ - self.serializer = mistral_serialization.get_polymorphic_serializer() - - def _check_backend(self): - backend = rpc_utils.get_rpc_backend(self._transport_url) - - if backend not in ['rabbit', 'kombu']: - raise exc.MistralException("Unsupported backend: %s" % backend) - - def _serialize_message(self, kwargs): - result = {} - - for argname, arg in kwargs.items(): - result[argname] = self.serializer.serialize(arg) - - return result - - def _deserialize_message(self, kwargs): - result = {} - - for argname, arg in kwargs.items(): - result[argname] = self.serializer.deserialize(arg) - - return result diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/examples/client.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/examples/client.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/examples/client.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/examples/client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from mistral.engine.rpc_backend.kombu import kombu_client - - -# Example of using Kombu based RPC client. 
-def main(): - conf = { - 'user_id': 'guest', - 'password': 'secret', - 'exchange': 'my_exchange', - 'topic': 'my_topic', - 'server_id': 'host', - 'host': 'localhost', - 'port': 5672, - 'virtual_host': '/' - } - kombu_rpc = kombu_client.KombuRPCClient(conf) - - print(" [x] Requesting ...") - - ctx = type('context', (object,), {'to_dict': lambda self: {}})() - - response = kombu_rpc.sync_call(ctx, 'fib', n=44) - - print(" [.] Got %r" % (response,)) - - -if __name__ == '__main__': - sys.exit(main()) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/examples/server.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/examples/server.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/examples/server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/examples/server.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from mistral.engine.rpc_backend.kombu import kombu_server - - -# Simple example of endpoint of RPC server, which just -# calculates given fibonacci number. -class MyServer(object): - cache = {0: 0, 1: 1} - - def fib(self, rpc_ctx, n): - if self.cache.get(n) is None: - self.cache[n] = (self.fib(rpc_ctx, n - 1) - + self.fib(rpc_ctx, n - 2)) - return self.cache[n] - - def get_name(self, rpc_ctx): - return self.__class__.__name__ - - -# Example of using Kombu based RPC server. 
-def main(): - conf = { - 'user_id': 'guest', - 'password': 'secret', - 'exchange': 'my_exchange', - 'topic': 'my_topic', - 'server_id': 'host', - 'host': 'localhost', - 'port': 5672, - 'virtual_host': '/' - } - rpc_server = kombu_server.KombuRPCServer(conf) - rpc_server.register_endpoint(MyServer()) - rpc_server.run() - - -if __name__ == '__main__': - sys.exit(main()) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_client.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_client.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_client.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,172 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from six import moves - -import kombu -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging - -from mistral.engine.rpc_backend import base as rpc_base -from mistral.engine.rpc_backend.kombu import base as kombu_base -from mistral.engine.rpc_backend.kombu import kombu_hosts -from mistral.engine.rpc_backend.kombu import kombu_listener -from mistral import exceptions as exc -from mistral import utils - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('rpc_response_timeout', 'mistral.config') - - -class KombuRPCClient(rpc_base.RPCClient, kombu_base.Base): - def __init__(self, conf): - super(KombuRPCClient, self).__init__(conf) - - self._register_mistral_serialization() - - self._transport_url = messaging.TransportURL.parse( - CONF, - CONF.transport_url - ) - self._check_backend() - - self.topic = conf.topic - self.server_id = conf.host - - self._hosts = kombu_hosts.KombuHosts(CONF) - - self.exchange = CONF.control_exchange - self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host - self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues - self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete - self._timeout = CONF.rpc_response_timeout - self.routing_key = self.topic - - hosts = self._hosts.get_hosts() - - self._connections = [] - - for host in hosts: - conn = self._make_connection( - host.hostname, - host.port, - host.username, - host.password, - self.virtual_host - ) - self._connections.append(conn) - - self.conn = self._connections[0] - - # Create exchange. - exchange = self._make_exchange( - self.exchange, - durable=self.durable_queue, - auto_delete=self.auto_delete - ) - - # Create queue. 
- self.queue_name = utils.generate_unicode_uuid() - self.callback_queue = kombu.Queue( - self.queue_name, - exchange=exchange, - routing_key=self.queue_name, - durable=False, - exclusive=True, - auto_delete=True - ) - - self._listener = kombu_listener.KombuRPCListener( - connections=self._connections, - callback_queue=self.callback_queue - ) - - self._listener.start() - - def _wait_for_result(self, correlation_id): - """Waits for the result from the server. - - Waits for the result from the server, checks every second if - a timeout occurred. If a timeout occurred - the `RpcTimeout` exception - will be raised. - """ - try: - return self._listener.get_result(correlation_id, self._timeout) - except moves.queue.Empty: - raise exc.MistralException("RPC Request timeout") - - def _call(self, ctx, method, target, async_=False, **kwargs): - """Performs a remote call for the given method. - - :param ctx: authentication context associated with mistral - :param method: name of the method that should be executed - :param kwargs: keyword parameters for the remote-method - :param target: Server name - :param async: bool value means whether the request is - asynchronous or not. - :return: result of the method or None if async. - """ - correlation_id = utils.generate_unicode_uuid() - - body = { - 'rpc_ctx': ctx.to_dict(), - 'rpc_method': method, - 'arguments': self._serialize_message(kwargs), - 'async': async_ - } - - LOG.debug("Publish request: {0}".format(body)) - - try: - if not async_: - self._listener.add_listener(correlation_id) - - # Publish request. - with kombu.producers[self.conn].acquire(block=True) as producer: - producer.publish( - body=body, - exchange=self.exchange, - routing_key=self.topic, - reply_to=self.queue_name, - correlation_id=correlation_id, - delivery_mode=2 - ) - - # Start waiting for response. 
- if async_: - return - - result = self._wait_for_result(correlation_id) - res_type = result[kombu_base.TYPE] - res_object = result[kombu_base.RESULT] - - if res_type == 'error': - raise res_object - else: - res_object = self._deserialize_message(res_object)['body'] - - finally: - if not async_: - self._listener.remove_listener(correlation_id) - - return res_object - - def sync_call(self, ctx, method, target=None, **kwargs): - return self._call(ctx, method, async_=False, target=target, **kwargs) - - def async_call(self, ctx, method, target=None, **kwargs): - return self._call(ctx, method, async_=True, target=target, **kwargs) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_hosts.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_hosts.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_hosts.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_hosts.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import itertools -import random -import six - -import oslo_messaging as messaging - - -class KombuHosts(object): - - def __init__(self, conf): - self._conf = conf - - transport_url = messaging.TransportURL.parse( - self._conf, - self._conf.transport_url - ) - - if transport_url.hosts: - self._hosts = transport_url.hosts - else: - username = self._conf.oslo_messaging_rabbit.rabbit_userid - password = self._conf.oslo_messaging_rabbit.rabbit_password - self._hosts = [] - - for host in self._conf.oslo_messaging_rabbit.rabbit_hosts: - hostname, port = host.split(':') - self._hosts.append(messaging.TransportHost( - hostname, - port, - username, - password - )) - - if len(self._hosts) > 1: - random.shuffle(self._hosts) - - self._hosts_cycle = itertools.cycle(self._hosts) - - def get_host(self): - return six.next(self._hosts_cycle) - - def get_hosts(self): - return self._hosts diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_listener.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_listener.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_listener.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_listener.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,109 +0,0 @@ -# Copyright (c) 2016 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import itertools -from kombu.mixins import ConsumerMixin -import six -import threading - -from oslo_log import log as logging - -from mistral.engine.rpc_backend.kombu import base as kombu_base - -LOG = logging.getLogger(__name__) - - -class KombuRPCListener(ConsumerMixin): - - def __init__(self, connections, callback_queue): - self._results = {} - self._connections = itertools.cycle(connections) - self._callback_queue = callback_queue - self._thread = None - self.connection = six.next(self._connections) - - # TODO(ddeja): Those 2 options should be gathered from config. - self._sleep_time = 1 - self._max_sleep_time = 512 - - def add_listener(self, correlation_id): - self._results[correlation_id] = six.moves.queue.Queue() - - def remove_listener(self, correlation_id): - if correlation_id in self._results: - del self._results[correlation_id] - - def get_consumers(self, Consumer, channel): - return [Consumer( - self._callback_queue, - callbacks=[self.on_message], - accept=['pickle', 'json'] - )] - - def start(self): - if self._thread is None: - self._thread = threading.Thread(target=self.run) - self._thread.daemon = True - self._thread.start() - - def on_message(self, response, message): - """Callback on response. - - This method is automatically called when a response is incoming and - decides if it is the message we are waiting for - the message with the - result. 
- - :param response: the body of the amqp message already deserialized - by kombu - :param message: the plain amqp kombu.message with additional - information - """ - LOG.debug("Got response: {0}".format(response)) - - try: - message.ack() - except Exception as e: - LOG.exception("Failed to acknowledge AMQP message: %s" % e) - else: - LOG.debug("AMQP message acknowledged.") - - correlation_id = message.properties['correlation_id'] - - queue = self._results.get(correlation_id, None) - - if queue: - result = { - kombu_base.TYPE: 'error' - if message.properties.get('type') == 'error' - else None, - kombu_base.RESULT: response - } - queue.put(result) - else: - LOG.debug( - "Got a response, but seems like no process is waiting for" - "it [correlation_id={0}]".format(correlation_id) - ) - - def get_result(self, correlation_id, timeout): - return self._results[correlation_id].get(block=True, timeout=timeout) - - def on_connection_error(self, exc, interval): - self.connection = six.next(self._connections) - - LOG.debug("Broker connection failed: %s" % exc) - LOG.debug("Sleeping for %s seconds, then retrying connection" % - interval - ) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_server.py mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_server.py --- mistral-4.0.0/mistral/engine/rpc_backend/kombu/kombu_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/kombu/kombu_server.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,268 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import amqp -import socket -import threading -import time - -import kombu -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from stevedore import driver - -from mistral import context as auth_ctx -from mistral.engine.rpc_backend import base as rpc_base -from mistral.engine.rpc_backend.kombu import base as kombu_base -from mistral.engine.rpc_backend.kombu import kombu_hosts -from mistral import exceptions as exc - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -_pool_opts = [ - cfg.IntOpt('executor_thread_pool_size', - default=64, - deprecated_name="rpc_thread_pool_size", - help='Size of executor thread pool.'), -] - - -class KombuRPCServer(rpc_base.RPCServer, kombu_base.Base): - def __init__(self, conf): - super(KombuRPCServer, self).__init__(conf) - - self._register_mistral_serialization() - CONF.register_opts(_pool_opts) - - self._transport_url = messaging.TransportURL.parse( - CONF, - CONF.transport_url - ) - self._check_backend() - - self.topic = conf.topic - self.server_id = conf.host - - self._hosts = kombu_hosts.KombuHosts(CONF) - - self._executor_threads = CONF.executor_thread_pool_size - self.exchange = CONF.control_exchange - self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host - self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues - self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete - self.routing_key = self.topic - self.channel = None - self.conn = None - self._running = threading.Event() - self._stopped = threading.Event() - self.endpoints = [] - self._worker = None - - # TODO(ddeja): Those 2 options should be gathered from config. 
- self._sleep_time = 1 - self._max_sleep_time = 512 - - @property - def is_running(self): - """Return whether server is running.""" - return self._running.is_set() - - def run(self, executor='blocking'): - """Start the server.""" - self._prepare_worker(executor) - - while True: - try: - _retry_connection = False - host = self._hosts.get_host() - - self.conn = self._make_connection( - host.hostname, - host.port, - host.username, - host.password, - self.virtual_host, - ) - - conn = kombu.connections[self.conn].acquire(block=True) - - exchange = self._make_exchange( - self.exchange, - durable=self.durable_queue, - auto_delete=self.auto_delete - ) - - queue = self._make_queue( - self.topic, - exchange, - routing_key=self.routing_key, - durable=self.durable_queue, - auto_delete=self.auto_delete - ) - with conn.Consumer( - queues=queue, - callbacks=[self._process_message], - ) as consumer: - consumer.qos(prefetch_count=1) - - self._running.set() - self._stopped.clear() - - LOG.info("Connected to AMQP at %s:%s" % ( - host.hostname, - host.port - )) - - while self.is_running: - try: - conn.drain_events(timeout=1) - except socket.timeout: - pass - except KeyboardInterrupt: - self.stop() - - LOG.info("Server with id='{0}' stopped.".format( - self.server_id)) - - return - except (socket.error, amqp.exceptions.ConnectionForced) as e: - LOG.debug("Broker connection failed: %s" % e) - _retry_connection = True - finally: - self._stopped.set() - - if _retry_connection: - LOG.debug( - "Sleeping for %s seconds, than retrying connection" % - self._sleep_time - ) - - time.sleep(self._sleep_time) - - self._sleep_time = min( - self._sleep_time * 2, - self._max_sleep_time - ) - - def stop(self, graceful=False): - self._running.clear() - - if graceful: - self.wait() - - def wait(self): - self._stopped.wait() - try: - self._worker.shutdown(wait=True) - except AttributeError as e: - LOG.warning("Cannot stop worker in graceful way: %s" % e) - - def _get_rpc_method(self, method_name): - for 
endpoint in self.endpoints: - if hasattr(endpoint, method_name): - return getattr(endpoint, method_name) - - return None - - @staticmethod - def _set_auth_ctx(ctx): - if not isinstance(ctx, dict): - return - - context = auth_ctx.MistralContext(**ctx) - auth_ctx.set_ctx(context) - - return context - - def publish_message(self, body, reply_to, corr_id, res_type='response'): - if res_type != 'error': - body = self._serialize_message({'body': body}) - - with kombu.producers[self.conn].acquire(block=True) as producer: - producer.publish( - body=body, - exchange=self.exchange, - routing_key=reply_to, - correlation_id=corr_id, - serializer='pickle' if res_type == 'error' else 'json', - type=res_type - ) - - def _on_message_safe(self, request, message): - try: - return self._on_message(request, message) - except Exception as e: - LOG.warning( - "Got exception while consuming message. Exception would be " - "send back to the caller." - ) - LOG.debug("Exceptions: %s" % str(e)) - - # Wrap exception into another exception for compability with oslo. 
- self.publish_message( - exc.KombuException(e), - message.properties['reply_to'], - message.properties['correlation_id'], - res_type='error' - ) - finally: - message.ack() - - def _on_message(self, request, message): - LOG.debug('Received message %s', - request) - - is_async = request.get('async', False) - rpc_ctx = request.get('rpc_ctx') - redelivered = message.delivery_info.get('redelivered', None) - rpc_method_name = request.get('rpc_method') - arguments = self._deserialize_message(request.get('arguments')) - correlation_id = message.properties['correlation_id'] - reply_to = message.properties['reply_to'] - - if redelivered is not None: - rpc_ctx['redelivered'] = redelivered - - rpc_context = self._set_auth_ctx(rpc_ctx) - - rpc_method = self._get_rpc_method(rpc_method_name) - - if not rpc_method: - raise exc.MistralException("No such method: %s" % rpc_method_name) - - response = rpc_method(rpc_ctx=rpc_context, **arguments) - - if not is_async: - self.publish_message( - response, - reply_to, - correlation_id - ) - - def register_endpoint(self, endpoint): - self.endpoints.append(endpoint) - - def _process_message(self, request, message): - self._worker.submit(self._on_message_safe, request, message) - - def _prepare_worker(self, executor='blocking'): - mgr = driver.DriverManager('kombu_driver.executors', executor) - - executor_opts = {} - if executor == 'threading': - executor_opts['max_workers'] = self._executor_threads - - self._worker = mgr.driver(**executor_opts) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/oslo/oslo_client.py mistral-5.0.0~b2/mistral/engine/rpc_backend/oslo/oslo_client.py --- mistral-4.0.0/mistral/engine/rpc_backend/oslo/oslo_client.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/oslo/oslo_client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import oslo_messaging as messaging - -from mistral import context as auth_ctx -from mistral.engine.rpc_backend import base as rpc_base -from mistral.engine.rpc_backend import rpc - - -class OsloRPCClient(rpc_base.RPCClient): - def __init__(self, conf): - super(OsloRPCClient, self).__init__(conf) - self.topic = conf.topic - - serializer = auth_ctx.RpcContextSerializer() - - self._client = messaging.RPCClient( - rpc.get_transport(), - messaging.Target(topic=self.topic), - serializer=serializer - ) - - def sync_call(self, ctx, method, target=None, **kwargs): - return self._client.prepare(topic=self.topic, server=target).call( - ctx, - method, - **kwargs - ) - - def async_call(self, ctx, method, target=None, **kwargs): - return self._client.prepare(topic=self.topic, server=target).cast( - ctx, - method, - **kwargs - ) diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/oslo/oslo_server.py mistral-5.0.0~b2/mistral/engine/rpc_backend/oslo/oslo_server.py --- mistral-4.0.0/mistral/engine/rpc_backend/oslo/oslo_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/oslo/oslo_server.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging -import oslo_messaging as messaging - -from mistral import context as ctx -from mistral.engine.rpc_backend import base as rpc_base -from mistral.engine.rpc_backend import rpc - - -LOG = logging.getLogger(__name__) - - -class OsloRPCServer(rpc_base.RPCServer): - def __init__(self, conf): - super(OsloRPCServer, self).__init__(conf) - - self.topic = conf.topic - self.server_id = conf.host - self.queue = self.topic - self.routing_key = self.topic - self.channel = None - self.connection = None - self.endpoints = [] - self.oslo_server = None - - def register_endpoint(self, endpoint): - self.endpoints.append(endpoint) - - def run(self, executor='blocking'): - target = messaging.Target( - topic=self.topic, - server=self.server_id - ) - - # TODO(rakhmerov): rpc.get_transport() should be in oslo.messaging - # related module. - self.oslo_server = messaging.get_rpc_server( - rpc.get_transport(), - target, - self.endpoints, - executor=executor, - serializer=ctx.RpcContextSerializer() - ) - - self.oslo_server.start() - - def stop(self, graceful=False): - self.oslo_server.stop() - - if graceful: - self.oslo_server.wait() - - def wait(self): - self.oslo_server.wait() diff -Nru mistral-4.0.0/mistral/engine/rpc_backend/rpc.py mistral-5.0.0~b2/mistral/engine/rpc_backend/rpc.py --- mistral-4.0.0/mistral/engine/rpc_backend/rpc.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/rpc_backend/rpc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,391 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. 
-# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_messaging.rpc import client -from osprofiler import profiler -from stevedore import driver - -from mistral import context as auth_ctx -from mistral.engine import base -from mistral import exceptions as exc - - -LOG = logging.getLogger(__name__) - - -_IMPL_CLIENT = None -_IMPL_SERVER = None -_TRANSPORT = None - -_ENGINE_CLIENT = None -_EXECUTOR_CLIENT = None -_EVENT_ENGINE_CLIENT = None - - -def cleanup(): - """Intended to be used by tests to recreate all RPC related objects.""" - - global _TRANSPORT - global _ENGINE_CLIENT - global _EXECUTOR_CLIENT - global _EVENT_ENGINE_CLIENT - - _TRANSPORT = None - _ENGINE_CLIENT = None - _EXECUTOR_CLIENT = None - _EVENT_ENGINE_CLIENT = None - - -# TODO(rakhmerov): This method seems misplaced. Now we have different kind -# of transports (oslo, kombu) and this module should not have any oslo -# specific things anymore. 
-def get_transport(): - global _TRANSPORT - - if not _TRANSPORT: - _TRANSPORT = messaging.get_transport(cfg.CONF) - - return _TRANSPORT - - -def get_engine_client(): - global _ENGINE_CLIENT - - if not _ENGINE_CLIENT: - _ENGINE_CLIENT = EngineClient(cfg.CONF.engine) - - return _ENGINE_CLIENT - - -def get_executor_client(): - global _EXECUTOR_CLIENT - - if not _EXECUTOR_CLIENT: - _EXECUTOR_CLIENT = ExecutorClient(cfg.CONF.executor) - - return _EXECUTOR_CLIENT - - -def get_event_engine_client(): - global _EVENT_ENGINE_CLIENT - - if not _EVENT_ENGINE_CLIENT: - _EVENT_ENGINE_CLIENT = EventEngineClient(cfg.CONF.event_engine) - - return _EVENT_ENGINE_CLIENT - - -def get_rpc_server_driver(): - rpc_impl = cfg.CONF.rpc_implementation - - global _IMPL_SERVER - if not _IMPL_SERVER: - _IMPL_SERVER = driver.DriverManager( - 'mistral.engine.rpc_backend', - '%s_server' % rpc_impl - ).driver - - return _IMPL_SERVER - - -def get_rpc_client_driver(): - rpc_impl = cfg.CONF.rpc_implementation - - global _IMPL_CLIENT - if not _IMPL_CLIENT: - _IMPL_CLIENT = driver.DriverManager( - 'mistral.engine.rpc_backend', - '%s_client' % rpc_impl - ).driver - - return _IMPL_CLIENT - - -def _wrap_exception_and_reraise(exception): - message = "%s: %s" % (exception.__class__.__name__, exception.args[0]) - - raise exc.MistralException(message) - - -def wrap_messaging_exception(method): - """This decorator unwrap remote error in one of MistralException. - - oslo.messaging has different behavior on raising exceptions - when fake or rabbit transports are used. In case of rabbit - transport it raises wrapped RemoteError which forwards directly - to API. Wrapped RemoteError contains one of MistralException raised - remotely on Engine and for correct exception interpretation we - need to unwrap and raise given exception and manually send it to - API layer. 
- """ - def decorator(*args, **kwargs): - try: - return method(*args, **kwargs) - - except exc.MistralException: - raise - except (client.RemoteError, exc.KombuException, Exception) as e: - if hasattr(e, 'exc_type') and hasattr(exc, e.exc_type): - exc_cls = getattr(exc, e.exc_type) - raise exc_cls(e.value) - - _wrap_exception_and_reraise(e) - - return decorator - - -class EngineClient(base.Engine): - """RPC Engine client.""" - - def __init__(self, rpc_conf_dict): - """Constructs an RPC client for engine. - - :param rpc_conf_dict: Dict containing RPC configuration. - """ - self._client = get_rpc_client_driver()(rpc_conf_dict) - - @wrap_messaging_exception - def start_workflow(self, wf_identifier, wf_input, description='', - **params): - """Starts workflow sending a request to engine over RPC. - - :return: Workflow execution. - """ - return self._client.sync_call( - auth_ctx.ctx(), - 'start_workflow', - workflow_identifier=wf_identifier, - workflow_input=wf_input or {}, - description=description, - params=params - ) - - @wrap_messaging_exception - def start_action(self, action_name, action_input, - description=None, **params): - """Starts action sending a request to engine over RPC. - - :return: Action execution. - """ - return self._client.sync_call( - auth_ctx.ctx(), - 'start_action', - action_name=action_name, - action_input=action_input or {}, - description=description, - params=params - ) - - @wrap_messaging_exception - @profiler.trace('engine-client-on-action-complete', hide_args=True) - def on_action_complete(self, action_ex_id, result, wf_action=False, - async_=False): - """Conveys action result to Mistral Engine. - - This method should be used by clients of Mistral Engine to update - state of a action execution once action has executed. One of the - clients of this method is Mistral REST API server that receives - action result from the outside action handlers. 
- - Note: calling this method serves an event notifying Mistral that - it possibly needs to move the workflow on, i.e. run other workflow - tasks for which all dependencies are satisfied. - - :param action_ex_id: Action execution id. - :param result: Action execution result. - :param wf_action: If True it means that the given id points to - a workflow execution rather than action execution. It happens - when a nested workflow execution sends its result to a parent - workflow. - :param async: If True, run action in asynchronous mode (w/o waiting - for completion). - :return: Action(or workflow if wf_action=True) execution object. - """ - - call = self._client.async_call if async_ else self._client.sync_call - - return call( - auth_ctx.ctx(), - 'on_action_complete', - action_ex_id=action_ex_id, - result=result, - wf_action=wf_action - ) - - @wrap_messaging_exception - def pause_workflow(self, wf_ex_id): - """Stops the workflow with the given execution id. - - :param wf_ex_id: Workflow execution id. - :return: Workflow execution. - """ - - return self._client.sync_call( - auth_ctx.ctx(), - 'pause_workflow', - execution_id=wf_ex_id - ) - - @wrap_messaging_exception - def rerun_workflow(self, task_ex_id, reset=True, env=None): - """Rerun the workflow. - - This method reruns workflow with the given execution id - at the specific task execution id. - - :param task_ex_id: Task execution id. - :param reset: If true, then reset task execution state and purge - action execution for the task. - :param env: Environment variables to update. - :return: Workflow execution. - """ - - return self._client.sync_call( - auth_ctx.ctx(), - 'rerun_workflow', - task_ex_id=task_ex_id, - reset=reset, - env=env - ) - - @wrap_messaging_exception - def resume_workflow(self, wf_ex_id, env=None): - """Resumes the workflow with the given execution id. - - :param wf_ex_id: Workflow execution id. - :param env: Environment variables to update. - :return: Workflow execution. 
- """ - - return self._client.sync_call( - auth_ctx.ctx(), - 'resume_workflow', - wf_ex_id=wf_ex_id, - env=env - ) - - @wrap_messaging_exception - def stop_workflow(self, wf_ex_id, state, message=None): - """Stops workflow execution with given status. - - Once stopped, the workflow is complete with SUCCESS or ERROR, - and can not be resumed. - - :param wf_ex_id: Workflow execution id - :param state: State assigned to the workflow: SUCCESS or ERROR - :param message: Optional information string - - :return: Workflow execution, model.Execution - """ - - return self._client.sync_call( - auth_ctx.ctx(), - 'stop_workflow', - execution_id=wf_ex_id, - state=state, - message=message - ) - - @wrap_messaging_exception - def rollback_workflow(self, wf_ex_id): - """Rolls back the workflow with the given execution id. - - :param wf_ex_id: Workflow execution id. - - :return: Workflow execution. - """ - - return self._client.sync_call( - auth_ctx.ctx(), - 'rollback_workflow', - execution_id=wf_ex_id - ) - - -class ExecutorClient(base.Executor): - """RPC Executor client.""" - - def __init__(self, rpc_conf_dict): - """Constructs an RPC client for the Executor. - - :param rpc_conf_dict: Dict containing RPC configuration. - """ - - self.topic = cfg.CONF.executor.topic - self._client = get_rpc_client_driver()(rpc_conf_dict) - - @profiler.trace('executor-client-run-action') - def run_action(self, action_ex_id, action_class_str, attributes, - action_params, target=None, async_=True, safe_rerun=False): - """Sends a request to run action to executor. - - :param action_ex_id: Action execution id. - :param action_class_str: Action class name. - :param attributes: Action class attributes. - :param action_params: Action input parameters. - :param target: Target (group of action executors). - :param async: If True, run action in asynchronous mode (w/o waiting - for completion). - :param safe_rerun: If true, action would be re-run if executor dies - during execution. - :return: Action result. 
- """ - - kwargs = { - 'action_ex_id': action_ex_id, - 'action_class_str': action_class_str, - 'attributes': attributes, - 'params': action_params, - 'safe_rerun': safe_rerun - } - - rpc_client_method = (self._client.async_call - if async_ else self._client.sync_call) - - return rpc_client_method(auth_ctx.ctx(), 'run_action', **kwargs) - - -class EventEngineClient(base.EventEngine): - """RPC EventEngine client.""" - - def __init__(self, rpc_conf_dict): - """Constructs an RPC client for the EventEngine service.""" - self._client = get_rpc_client_driver()(rpc_conf_dict) - - def create_event_trigger(self, trigger, events): - return self._client.sync_call( - auth_ctx.ctx(), - 'create_event_trigger', - trigger=trigger, - events=events - ) - - def delete_event_trigger(self, trigger, events): - return self._client.sync_call( - auth_ctx.ctx(), - 'delete_event_trigger', - trigger=trigger, - events=events - ) - - def update_event_trigger(self, trigger): - return self._client.sync_call( - auth_ctx.ctx(), - 'update_event_trigger', - trigger=trigger, - ) diff -Nru mistral-4.0.0/mistral/engine/task_handler.py mistral-5.0.0~b2/mistral/engine/task_handler.py --- mistral-4.0.0/mistral/engine/task_handler.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/task_handler.py 2017-06-09 12:48:26.000000000 +0000 @@ -25,8 +25,8 @@ from mistral.engine import tasks from mistral.engine import workflow_handler as wf_handler from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import scheduler -from mistral.workbook import parser as spec_parser from mistral.workflow import base as wf_base from mistral.workflow import commands as wf_cmds from mistral.workflow import states @@ -62,7 +62,7 @@ msg = ( "Failed to run task [error=%s, wf=%s, task=%s]:\n%s" % - (e, wf_ex, task_spec.get_name(), tb.format_exc()) + (e, wf_ex.name, task_spec.get_name(), tb.format_exc()) ) LOG.error(msg) @@ -159,7 +159,7 @@ msg = ( "Failed to run 
task [error=%s, wf=%s, task=%s]:\n%s" % - (e, wf_ex, task_ex.name, tb.format_exc()) + (e, wf_ex.name, task_ex.name, tb.format_exc()) ) LOG.error(msg) @@ -185,7 +185,7 @@ msg = ( "Failed to complete task [error=%s, wf=%s, task=%s]:\n%s" % - (e, wf_ex, task_ex.name, tb.format_exc()) + (e, wf_ex.name, task_ex.name, tb.format_exc()) ) LOG.error(msg) @@ -217,7 +217,8 @@ cmd.ctx, task_ex=cmd.task_ex, unique_key=cmd.task_ex.unique_key, - waiting=cmd.task_ex.state == states.WAITING + waiting=cmd.task_ex.state == states.WAITING, + triggered_by=cmd.triggered_by ) if cmd.reset: @@ -232,7 +233,8 @@ cmd.task_spec, cmd.ctx, unique_key=cmd.unique_key, - waiting=cmd.is_waiting() + waiting=cmd.is_waiting(), + triggered_by=cmd.triggered_by ) return task @@ -241,13 +243,22 @@ def _create_task(wf_ex, wf_spec, task_spec, ctx, task_ex=None, - unique_key=None, waiting=False): + unique_key=None, waiting=False, triggered_by=None): if task_spec.get_with_items(): cls = tasks.WithItemsTask else: cls = tasks.RegularTask - return cls(wf_ex, wf_spec, task_spec, ctx, task_ex, unique_key, waiting) + return cls( + wf_ex, + wf_spec, + task_spec, + ctx, + task_ex=task_ex, + unique_key=unique_key, + waiting=waiting, + triggered_by=triggered_by + ) @action_queue.process @@ -270,16 +281,20 @@ wf_ctrl = wf_base.get_controller(wf_ex, wf_spec) - state, state_info, cardinality = wf_ctrl.get_logical_task_state( + log_state = wf_ctrl.get_logical_task_state( task_ex ) + state = log_state.state + state_info = log_state.state_info + + # Update 'triggered_by' because it could have changed. 
+ task_ex.runtime_context['triggered_by'] = log_state.triggered_by + if state == states.RUNNING: continue_task(task_ex) elif state == states.ERROR: - task = _build_task_from_execution(wf_spec, task_ex) - - task.complete(state, state_info) + complete_task(task_ex, state, state_info) elif state == states.WAITING: # Let's assume that a task takes 0.01 sec in average to complete # and based on this assumption calculate a time of the next check. @@ -290,14 +305,14 @@ # then the next 'refresh_task_state' call will happen in 10 # seconds. For 500 tasks it will be 50 seconds. The larger the # workflow is, the more beneficial this mechanism will be. - delay = int(cardinality * 0.01) + delay = int(log_state.cardinality * 0.01) _schedule_refresh_task_state(task_ex, max(1, delay)) else: # Must never get here. raise RuntimeError( - 'Unexpected logical task state [task_ex=%s, state=%s]' % - (task_ex, state) + 'Unexpected logical task state [task_ex_id=%s, task_name=%s, ' + 'state=%s]' % (task_ex_id, task_ex.name, state) ) diff -Nru mistral-4.0.0/mistral/engine/tasks.py mistral-5.0.0~b2/mistral/engine/tasks.py --- mistral-4.0.0/mistral/engine/tasks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/tasks.py 2017-06-09 12:48:26.000000000 +0000 @@ -44,7 +44,7 @@ """ def __init__(self, wf_ex, wf_spec, task_spec, ctx, task_ex=None, - unique_key=None, waiting=False): + unique_key=None, waiting=False, triggered_by=None): self.wf_ex = wf_ex self.task_spec = task_spec self.ctx = ctx @@ -52,6 +52,7 @@ self.wf_spec = wf_spec self.unique_key = unique_key self.waiting = waiting + self.triggered_by = triggered_by self.reset_flag = False self.created = False self.state_changed = False @@ -227,6 +228,9 @@ 'type': task_type } + if self.triggered_by: + values['runtime_context']['triggered_by'] = self.triggered_by + self.task_ex = db_api.create_task_execution(values) # Add to collection explicitly so that it's in a proper @@ -280,8 +284,8 @@ self._create_task_execution() 
LOG.debug( - 'Starting task [workflow=%s, task_spec=%s, init_state=%s]' % - (self.wf_ex.name, self.task_spec, self.task_ex.state) + 'Starting task [workflow=%s, task=%s, init_state=%s]' % + (self.wf_ex.name, self.task_spec.get_name(), self.task_ex.state) ) self._before_task_start() @@ -307,6 +311,7 @@ self.set_state(states.RUNNING, None, processed=False) self._update_inbound_context() + self._update_triggered_by() self._reset_actions() self._schedule_actions() @@ -319,6 +324,14 @@ utils.update_dict(self.task_ex.in_context, self.ctx) + def _update_triggered_by(self): + assert self.task_ex + + if not self.triggered_by: + return + + self.task_ex.runtime_context['triggered_by'] = self.triggered_by + def _reset_actions(self): """Resets task state. @@ -404,7 +417,9 @@ ) if action_def.spec: - return actions.AdHocAction(action_def, task_ex=self.task_ex) + return actions.AdHocAction(action_def, task_ex=self.task_ex, + task_ctx=self.ctx, + wf_ctx=self.wf_ex.context) return actions.PythonAction(action_def, task_ex=self.task_ex) diff -Nru mistral-4.0.0/mistral/engine/utils.py mistral-5.0.0~b2/mistral/engine/utils.py --- mistral-4.0.0/mistral/engine/utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -20,44 +20,50 @@ from mistral import utils -# TODO(rakhmerov): This method is too abstract, validation rules may vary -# depending on object type (action, wf), it's not clear what it can be -# applied to. -# TODO(rakhmerov): It must not do any manipulations with parameters -# (input_dict)! 
-def validate_input(definition, input_dict, spec=None): - input_param_names = copy.deepcopy(list((input_dict or {}).keys())) - missing_param_names = [] - - spec_input = (spec.get_input() if spec else - utils.get_dict_from_string(definition.input)) - - for p_name, p_value in spec_input.items(): - if p_value is utils.NotDefined and p_name not in input_param_names: - missing_param_names.append(str(p_name)) +def _compare_parameters(expected_input, actual_input): + """Compares the expected parameters with the actual parameters. - if p_name in input_param_names: - input_param_names.remove(p_name) + :param expected_input: Expected dict of parameters. + :param actual_input: Actual dict of parameters. + :return: Tuple {missing parameter names, unexpected parameter names} + """ - if missing_param_names or input_param_names: + missing_params = [] + unexpected_params = copy.deepcopy(list((actual_input or {}).keys())) + + for p_name, p_value in expected_input.items(): + if p_value is utils.NotDefined and p_name not in unexpected_params: + missing_params.append(str(p_name)) + + if p_name in unexpected_params: + unexpected_params.remove(p_name) + + return missing_params, unexpected_params + + +def validate_input(expected_input, actual_input, obj_name, obj_class): + actual_input = actual_input or {} + + missing, unexpected = _compare_parameters( + expected_input, + actual_input + ) + + if missing or unexpected: msg = 'Invalid input [name=%s, class=%s' - msg_props = [definition.name, spec.__class__.__name__] + msg_props = [obj_name, obj_class] - if missing_param_names: + if missing: msg += ', missing=%s' - msg_props.append(missing_param_names) + msg_props.append(missing) - if input_param_names: + if unexpected: msg += ', unexpected=%s' - msg_props.append(input_param_names) + msg_props.append(unexpected) msg += ']' - raise exc.InputException( - msg % tuple(msg_props) - ) - else: - utils.merge_dicts(input_dict, spec_input, overwrite=False) + raise exc.InputException(msg % 
tuple(msg_props)) def resolve_workflow_definition(parent_wf_name, parent_wf_spec_name, diff -Nru mistral-4.0.0/mistral/engine/workflow_handler.py mistral-5.0.0~b2/mistral/engine/workflow_handler.py --- mistral-4.0.0/mistral/engine/workflow_handler.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/workflow_handler.py 2017-06-09 12:48:26.000000000 +0000 @@ -32,13 +32,16 @@ ) -@profiler.trace('workflow-handler-start-workflow') +@profiler.trace('workflow-handler-start-workflow', hide_args=True) def start_workflow(wf_identifier, wf_input, desc, params): - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_identifier) - ) + wf = workflows.Workflow() - wf.start(wf_input, desc=desc, params=params) + wf.start( + wf_def=db_api.get_workflow_definition(wf_identifier), + input_dict=wf_input, + desc=desc, + params=params + ) _schedule_check_and_complete(wf.wf_ex) @@ -46,10 +49,7 @@ def stop_workflow(wf_ex, state, msg=None): - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_ex.workflow_id), - wf_ex=wf_ex - ) + wf = workflows.Workflow(wf_ex=wf_ex) # In this case we should not try to handle possible errors. Instead, # we need to let them pop up since the typical way of failing objects @@ -77,7 +77,7 @@ stop_workflow(wf_ex, states.CANCELLED, msg) -@profiler.trace('workflow-handler-check-and-complete') +@profiler.trace('workflow-handler-check-and-complete', hide_args=True) def _check_and_complete(wf_ex_id): # Note: This method can only be called via scheduler. 
with db_api.transaction(): @@ -86,17 +86,14 @@ if not wf_ex or states.is_completed(wf_ex.state): return - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_ex.workflow_id), - wf_ex=wf_ex - ) + wf = workflows.Workflow(wf_ex=wf_ex) try: incomplete_tasks_count = wf.check_and_complete() except exc.MistralException as e: msg = ( - "Failed to check and complete [wf_ex=%s]:" - " %s\n%s" % (wf_ex, e, tb.format_exc()) + "Failed to check and complete [wf_ex_id=%s, wf_name=%s]:" + " %s\n%s" % (wf_ex_id, wf_ex.name, e, tb.format_exc()) ) LOG.error(msg) @@ -121,10 +118,7 @@ def pause_workflow(wf_ex, msg=None): - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_ex.workflow_id), - wf_ex=wf_ex - ) + wf = workflows.Workflow(wf_ex=wf_ex) wf.set_state(states.PAUSED, msg) @@ -133,10 +127,7 @@ if wf_ex.state == states.PAUSED: return wf_ex.get_clone() - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_ex.workflow_id), - wf_ex=wf_ex - ) + wf = workflows.Workflow(wf_ex=wf_ex) wf.rerun(task_ex, reset=reset, env=env) @@ -150,15 +141,12 @@ if not states.is_paused_or_idle(wf_ex.state): return wf_ex.get_clone() - wf = workflows.Workflow( - db_api.get_workflow_definition(wf_ex.workflow_id), - wf_ex=wf_ex - ) + wf = workflows.Workflow(wf_ex=wf_ex) wf.resume(env=env) -@profiler.trace('workflow-handler-set-state') +@profiler.trace('workflow-handler-set-state', hide_args=True) def set_workflow_state(wf_ex, state, msg=None): if states.is_completed(state): stop_workflow(wf_ex, state, msg) @@ -166,7 +154,8 @@ pause_workflow(wf_ex, msg) else: raise exc.MistralError( - 'Invalid workflow state [wf_ex=%s, state=%s]' % (wf_ex, state) + 'Invalid workflow execution state [wf_ex_id=%s, wf_name=%s, ' + 'state=%s]' % (wf_ex.id, wf_ex.name, state) ) @@ -174,7 +163,7 @@ return 'wfh_on_c_a_c-%s' % wf_ex.id -@profiler.trace('workflow-handler-schedule-check-and-complete') +@profiler.trace('workflow-handler-schedule-check-and-complete', hide_args=True) def 
_schedule_check_and_complete(wf_ex, delay=0): """Schedules workflow completion check. diff -Nru mistral-4.0.0/mistral/engine/workflows.py mistral-5.0.0~b2/mistral/engine/workflows.py --- mistral-4.0.0/mistral/engine/workflows.py 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/mistral/engine/workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,15 +22,15 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models as db_models from mistral.engine import dispatcher -from mistral.engine.rpc_backend import rpc -from mistral.engine import utils as eng_utils +from mistral.engine import utils as engine_utils from mistral import exceptions as exc +from mistral.lang import parser as spec_parser +from mistral.rpc import clients as rpc from mistral.services import scheduler from mistral.services import workflows as wf_service from mistral import utils from mistral.utils import merge_dicts from mistral.utils import wf_trace -from mistral.workbook import parser as spec_parser from mistral.workflow import base as wf_base from mistral.workflow import commands from mistral.workflow import data_flow @@ -54,8 +54,7 @@ Mistral engine or its components in order to manipulate with workflows. """ - def __init__(self, wf_def, wf_ex=None): - self.wf_def = wf_def + def __init__(self, wf_ex=None): self.wf_ex = wf_ex if wf_ex: @@ -64,16 +63,13 @@ wf_ex.id ) else: - # New workflow execution. - self.wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wf_def.id, - wf_def.updated_at - ) + self.wf_spec = None @profiler.trace('workflow-start') - def start(self, input_dict, desc='', params=None): + def start(self, wf_def, input_dict, desc='', params=None): """Start workflow. + :param wf_def: Workflow definition. :param input_dict: Workflow input. :param desc: Workflow execution description. :param params: Workflow type specific parameters. 
@@ -81,21 +77,35 @@ assert not self.wf_ex - wf_trace.info(self.wf_ex, "Starting workflow: %s" % self.wf_def) + # New workflow execution. + self.wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wf_def.id, + wf_def.updated_at + ) - # TODO(rakhmerov): This call implicitly changes input_dict! Fix it! - # After fix we need to move validation after adding risky fields. - eng_utils.validate_input(self.wf_def, input_dict, self.wf_spec) + wf_trace.info( + self.wf_ex, + 'Starting workflow [name=%s, input=%s]' % + (wf_def.name, utils.cut(input_dict)) + ) - self._create_execution(input_dict, desc, params) + self.validate_input(input_dict) + + self._create_execution( + wf_def, + self.prepare_input(input_dict), + desc, + params + ) self.set_state(states.RUNNING) wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) - cmds = wf_ctrl.continue_workflow() - - dispatcher.dispatch_workflow_commands(self.wf_ex, cmds) + dispatcher.dispatch_workflow_commands( + self.wf_ex, + wf_ctrl.continue_workflow() + ) def stop(self, state, msg=None): """Stop workflow. @@ -133,6 +143,21 @@ self._continue_workflow(cmds) + def prepare_input(self, input_dict): + for k, v in self.wf_spec.get_input().items(): + if k not in input_dict or input_dict[k] is utils.NotDefined: + input_dict[k] = v + + return input_dict + + def validate_input(self, input_dict): + engine_utils.validate_input( + self.wf_spec.get_input(), + input_dict, + self.wf_spec.get_name(), + self.wf_spec.__class__.__name__ + ) + def rerun(self, task_ex, reset=True, env=None): """Rerun workflow from the given task. @@ -147,7 +172,7 @@ # Since some lookup utils functions may use cache for completed tasks # we need to clean caches to make sure that stale objects can't be # retrieved. 
- lookup_utils.clean_caches() + lookup_utils.clear_caches() wf_service.update_workflow_execution_env(self.wf_ex, env) @@ -194,16 +219,19 @@ final_context = wf_ctrl.evaluate_workflow_final_context() except Exception as e: LOG.warning( - 'Failed to get final context for %s: %s' % (self.wf_ex, e) + 'Failed to get final context for workflow execution. ' + '[wf_ex_id: %s, wf_name: %s, error: %s]' % + (self.wf_ex.id, self.wf_ex.name, str(e)) ) + return final_context - def _create_execution(self, input_dict, desc, params): + def _create_execution(self, wf_def, input_dict, desc, params): self.wf_ex = db_api.create_workflow_execution({ - 'name': self.wf_def.name, + 'name': wf_def.name, 'description': desc, - 'workflow_name': self.wf_def.name, - 'workflow_id': self.wf_def.id, + 'workflow_name': wf_def.name, + 'workflow_id': wf_def.id, 'spec': self.wf_spec.to_dict(), 'state': states.IDLE, 'output': {}, @@ -258,15 +286,17 @@ # only if it completed successfully or failed. self.wf_ex.accepted = states.is_completed(state) + if states.is_completed(state): + # No need to keep task executions of this workflow in the + # lookup cache anymore. + lookup_utils.invalidate_cached_task_executions(self.wf_ex.id) + if recursive and self.wf_ex.task_execution_id: parent_task_ex = db_api.get_task_execution( self.wf_ex.task_execution_id ) - parent_wf = Workflow( - db_api.get_workflow_definition(parent_task_ex.workflow_id), - parent_task_ex.workflow_execution - ) + parent_wf = Workflow(wf_ex=parent_task_ex.workflow_execution) parent_wf.lock() diff -Nru mistral-4.0.0/mistral/event_engine/base.py mistral-5.0.0~b2/mistral/event_engine/base.py --- mistral-4.0.0/mistral/event_engine/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/event_engine/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,34 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2017 - Brocade Communications Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class EventEngine(object): + """Action event trigger interface.""" + + @abc.abstractmethod + def create_event_trigger(self, trigger, events): + raise NotImplementedError() + + @abc.abstractmethod + def update_event_trigger(self, trigger): + raise NotImplementedError() + + @abc.abstractmethod + def delete_event_trigger(self, trigger, events): + raise NotImplementedError() diff -Nru mistral-4.0.0/mistral/event_engine/default_event_engine.py mistral-5.0.0~b2/mistral/event_engine/default_event_engine.py --- mistral-4.0.0/mistral/event_engine/default_event_engine.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/event_engine/default_event_engine.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,379 @@ +# Copyright 2016 Catalyst IT Ltd +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from collections import defaultdict +import os +import threading + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_service import threadgroup +from oslo_utils import fnmatch +import six +import yaml + +from mistral import context as auth_ctx +from mistral.db.v2 import api as db_api +from mistral.event_engine import base +from mistral import exceptions +from mistral import expressions +from mistral import messaging as mistral_messaging +from mistral.rpc import clients as rpc +from mistral.services import security + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +DEFAULT_PROPERTIES = { + 'service': '<% $.publisher %>', + 'project_id': '<% $.context.project_id %>', + 'user_id': '<% $.context.user_id %>', + 'timestamp': '<% $.timestamp %>' +} + + +class EventDefinition(object): + def __init__(self, definition_cfg): + self.cfg = definition_cfg + + try: + self.event_types = self.cfg['event_types'] + self.properties = self.cfg['properties'] + except KeyError as err: + raise exceptions.MistralException( + "Required field %s not specified" % err.args[0] + ) + + if isinstance(self.event_types, six.string_types): + self.event_types = [self.event_types] + + def match_type(self, event_type): + for t in self.event_types: + if fnmatch.fnmatch(event_type, t): + return True + + return False + + def convert(self, event): + return expressions.evaluate_recursively(self.properties, event) + + +class NotificationsConverter(object): + def __init__(self): + config_file = CONF.event_engine.event_definitions_cfg_file + definition_cfg = [] + + if os.path.exists(config_file): + with open(config_file) as cf: + config = cf.read() + + try: + definition_cfg = yaml.safe_load(config) + except yaml.YAMLError as err: + if hasattr(err, 'problem_mark'): + mark = err.problem_mark + errmsg = ( + "Invalid YAML syntax in Definitions file " + "%(file)s at line: %(line)s, column: %(column)s." 
+ % dict(file=config_file, + line=mark.line + 1, + column=mark.column + 1) + ) + else: + errmsg = ( + "YAML error reading Definitions file %s" % + CONF.event_engine.event_definitions_cfg_file + ) + + LOG.error(errmsg) + + raise exceptions.MistralError( + 'Invalid event definition configuration file. %s' % + config_file + ) + + self.definitions = [EventDefinition(event_def) + for event_def in reversed(definition_cfg)] + + def get_event_definition(self, event_type): + for d in self.definitions: + if d.match_type(event_type): + return d + + return None + + def convert(self, event_type, event): + edef = self.get_event_definition(event_type) + + if edef is None: + LOG.debug('No event definition found for type: %s, use default ' + 'settings instead.', event_type) + + return expressions.evaluate_recursively(DEFAULT_PROPERTIES, event) + + return edef.convert(event) + + +class DefaultEventEngine(base.EventEngine): + """Event engine server. + + A separate service that is responsible for listening event notification + and triggering workflows defined by end user. + """ + def __init__(self): + self.engine_client = rpc.get_engine_client() + self.event_queue = six.moves.queue.Queue() + self.handler_tg = threadgroup.ThreadGroup() + + self.event_triggers_map = defaultdict(list) + self.exchange_topic_events_map = defaultdict(set) + self.exchange_topic_listener_map = {} + + self.lock = threading.Lock() + + LOG.debug('Loading notification definitions.') + + self.notification_converter = NotificationsConverter() + + self._start_handler() + self._start_listeners() + + def _get_endpoint_cls(self, events): + """Create a messaging endpoint class. + + The endpoint implements the method named like the priority, and only + handle the notification match the NotificationFilter rule set into the + filter_rule attribute of the endpoint. + """ + # Handle each priority of notification messages. 
+ event_priorities = ['audit', 'critical', 'debug', 'error', 'info'] + attrs = dict.fromkeys( + event_priorities, + mistral_messaging.handle_event + ) + attrs['event_types'] = events + + endpoint_cls = type( + 'MistralNotificationEndpoint', + (mistral_messaging.NotificationEndpoint,), + attrs, + ) + + return endpoint_cls + + def _add_event_listener(self, exchange, topic, events): + """Add or update event listener for specified exchange, topic. + + Create a new event listener for the event trigger if no existing + listener relates to (exchange, topic). + + Or, restart existing event listener with updated events. + """ + key = (exchange, topic) + + if key in self.exchange_topic_listener_map: + listener = self.exchange_topic_listener_map[key] + listener.stop() + listener.wait() + + endpoint = self._get_endpoint_cls(events)(self) + + LOG.debug("Starting to listen to AMQP. exchange: %s, topic: %s", + exchange, topic) + + listener = mistral_messaging.start_listener( + CONF, + exchange, + topic, + [endpoint] + ) + + self.exchange_topic_listener_map[key] = listener + + def stop_all_listeners(self): + for listener in six.itervalues(self.exchange_topic_listener_map): + listener.stop() + listener.wait() + + def _start_listeners(self): + triggers = db_api.get_event_triggers(insecure=True) + + LOG.info('Find %s event triggers.', len(triggers)) + + for trigger in triggers: + exchange_topic = (trigger.exchange, trigger.topic) + self.exchange_topic_events_map[exchange_topic].add(trigger.event) + + trigger_info = trigger.to_dict() + self.event_triggers_map[trigger.event].append(trigger_info) + + for (ex_t, events) in self.exchange_topic_events_map.items(): + exchange, topic = ex_t + self._add_event_listener(exchange, topic, events) + + def _start_workflow(self, triggers, event_params): + """Start workflows defined in event triggers.""" + for t in triggers: + LOG.info('Start to process event trigger: %s', t['id']) + + workflow_params = t.get('workflow_params', {}) + 
workflow_params.update({'event_params': event_params}) + + # Setup context before schedule triggers. + ctx = security.create_context(t['trust_id'], t['project_id']) + auth_ctx.set_ctx(ctx) + + try: + self.engine_client.start_workflow( + t['workflow_id'], + t['workflow_input'], + description="Workflow execution created by event " + "trigger %s." % t['id'], + **workflow_params + ) + except Exception as e: + LOG.exception("Failed to process event trigger %s, " + "error: %s", t['id'], str(e)) + finally: + auth_ctx.set_ctx(None) + + def _process_event_queue(self, *args, **kwargs): + """Process notification events. + + This function is called in a thread. + """ + while True: + event = self.event_queue.get() + + context = event.get('context') + event_type = event.get('event_type') + + # NOTE(kong): Use lock here to protect event_triggers_map variable + # from being updated outside the thread. + with self.lock: + if event_type in self.event_triggers_map: + triggers = self.event_triggers_map[event_type] + + # There may be more projects registered the same event. + project_ids = [t['project_id'] for t in triggers] + + # Skip the event if it doesn't belong to any event trigger owner. + if (CONF.pecan.auth_enable and + context.get('project_id', '') not in project_ids): + self.event_queue.task_done() + continue + + LOG.debug('Start to handle event: %s, %d trigger(s) ' + 'registered.', event_type, len(triggers)) + + event_params = self.notification_converter.convert( + event_type, + event + ) + + self._start_workflow(triggers, event_params) + + self.event_queue.task_done() + + def _start_handler(self): + """Starts event queue handler in a thread group.""" + LOG.info('Starting event notification task...') + + self.handler_tg.add_thread(self._process_event_queue) + + def process_notification_event(self, notification): + """Callback function by event handler. + + Just put notification into a queue.
+ """ + LOG.debug("Putting notification event to event queue.") + + self.event_queue.put(notification) + + def create_event_trigger(self, trigger, events): + """An endpoint method for creating event trigger. + + When creating an event trigger in API layer, we need to create a new + listener or update an existing listener. + + :param trigger: a dict containing event trigger information. + :param events: a list of events binding to the (exchange, topic) of + the event trigger. + """ + with self.lock: + ids = [t['id'] for t in self.event_triggers_map[trigger['event']]] + + if trigger['id'] not in ids: + self.event_triggers_map[trigger['event']].append(trigger) + + self._add_event_listener(trigger['exchange'], trigger['topic'], events) + + def update_event_trigger(self, trigger): + """An endpoint method for updating event trigger. + + Because only workflow related information is allowed to be updated, we + only need to update event_triggers_map(in a synchronous way). + + :param trigger: a dict containing event trigger information. + """ + assert trigger['event'] in self.event_triggers_map + + with self.lock: + for t in self.event_triggers_map[trigger['event']]: + if trigger['id'] == t['id']: + t.update(trigger) + + def delete_event_trigger(self, trigger, events): + """An endpoint method for deleting event trigger. + + If there is no event binding to (exchange, topic) after deletion, we + need to delete the related listener. Otherwise, we need to restart + that listener. + + :param trigger: a dict containing event trigger information. + :param events: a list of events binding to the (exchange, topic) of + the event trigger. 
+ """ + assert trigger['event'] in self.event_triggers_map + + with self.lock: + for t in self.event_triggers_map[trigger['event']]: + if t['id'] == trigger['id']: + self.event_triggers_map[trigger['event']].remove(t) + break + + if not self.event_triggers_map[trigger['event']]: + del self.event_triggers_map[trigger['event']] + + if not events: + key = (trigger['exchange'], trigger['topic']) + + listener = self.exchange_topic_listener_map[key] + listener.stop() + listener.wait() + + del self.exchange_topic_listener_map[key] + + LOG.info( + 'Deleted listener for exchange: %s, topic: %s', + trigger['exchange'], + trigger['topic'] + ) + + return + + self._add_event_listener(trigger['exchange'], trigger['topic'], events) diff -Nru mistral-4.0.0/mistral/event_engine/event_engine.py mistral-5.0.0~b2/mistral/event_engine/event_engine.py --- mistral-4.0.0/mistral/event_engine/event_engine.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/event_engine/event_engine.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,377 +0,0 @@ -# Copyright 2016 Catalyst IT Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from collections import defaultdict -import os -import threading - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_service import threadgroup -from oslo_utils import fnmatch -import six -import yaml - -from mistral import context as auth_ctx -from mistral.db.v2 import api as db_api -from mistral.engine.rpc_backend import rpc -from mistral import exceptions -from mistral import expressions -from mistral import messaging as mistral_messaging -from mistral.services import security - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -DEFAULT_PROPERTIES = { - 'service': '<% $.publisher %>', - 'project_id': '<% $.context.project_id %>', - 'user_id': '<% $.context.user_id %>', - 'timestamp': '<% $.timestamp %>' -} - - -class EventDefinition(object): - def __init__(self, definition_cfg): - self.cfg = definition_cfg - - try: - self.event_types = self.cfg['event_types'] - self.properties = self.cfg['properties'] - except KeyError as err: - raise exceptions.MistralException( - "Required field %s not specified" % err.args[0] - ) - - if isinstance(self.event_types, six.string_types): - self.event_types = [self.event_types] - - def match_type(self, event_type): - for t in self.event_types: - if fnmatch.fnmatch(event_type, t): - return True - - return False - - def convert(self, event): - return expressions.evaluate_recursively(self.properties, event) - - -class NotificationsConverter(object): - def __init__(self): - config_file = CONF.event_engine.event_definitions_cfg_file - definition_cfg = [] - - if os.path.exists(config_file): - with open(config_file) as cf: - config = cf.read() - - try: - definition_cfg = yaml.safe_load(config) - except yaml.YAMLError as err: - if hasattr(err, 'problem_mark'): - mark = err.problem_mark - errmsg = ( - "Invalid YAML syntax in Definitions file " - "%(file)s at line: %(line)s, column: %(column)s." 
- % dict(file=config_file, - line=mark.line + 1, - column=mark.column + 1) - ) - else: - errmsg = ( - "YAML error reading Definitions file %s" % - CONF.event_engine.event_definitions_cfg_file - ) - - LOG.error(errmsg) - - raise exceptions.MistralError( - 'Invalid event definition configuration file. %s' % - config_file - ) - - self.definitions = [EventDefinition(event_def) - for event_def in reversed(definition_cfg)] - - def get_event_definition(self, event_type): - for d in self.definitions: - if d.match_type(event_type): - return d - - return None - - def convert(self, event_type, event): - edef = self.get_event_definition(event_type) - - if edef is None: - LOG.debug('No event definition found for type: %s, use default ' - 'settings instead.', event_type) - - return expressions.evaluate_recursively(DEFAULT_PROPERTIES, event) - - return edef.convert(event) - - -class EventEngine(object): - """Event engine server. - - A separate service that is responsible for listening event notification - and triggering workflows defined by end user. - """ - def __init__(self): - self.engine_client = rpc.get_engine_client() - self.event_queue = six.moves.queue.Queue() - self.handler_tg = threadgroup.ThreadGroup() - - self.event_triggers_map = defaultdict(list) - self.exchange_topic_events_map = defaultdict(set) - self.exchange_topic_listener_map = {} - - self.lock = threading.Lock() - - LOG.debug('Loading notification definitions.') - - self.notification_converter = NotificationsConverter() - - self._start_handler() - self._start_listeners() - - def _get_endpoint_cls(self, events): - """Create a messaging endpoint class. - - The endpoint implements the method named like the priority, and only - handle the notification match the NotificationFilter rule set into the - filter_rule attribute of the endpoint. - """ - # Handle each priority of notification messages. 
- event_priorities = ['audit', 'critical', 'debug', 'error', 'info'] - attrs = dict.fromkeys( - event_priorities, - mistral_messaging.handle_event - ) - attrs['event_types'] = events - - endpoint_cls = type( - 'MistralNotificationEndpoint', - (mistral_messaging.NotificationEndpoint,), - attrs, - ) - - return endpoint_cls - - def _add_event_listener(self, exchange, topic, events): - """Add or update event listener for specified exchange, topic. - - Create a new event listener for the event trigger if no existing - listener relates to (exchange, topic). - - Or, restart existing event listener with updated events. - """ - key = (exchange, topic) - - if key in self.exchange_topic_listener_map: - listener = self.exchange_topic_listener_map[key] - listener.stop() - listener.wait() - - endpoint = self._get_endpoint_cls(events)(self) - - LOG.debug("Starting to listen to AMQP. exchange: %s, topic: %s", - exchange, topic) - - listener = mistral_messaging.start_listener( - CONF, - exchange, - topic, - [endpoint] - ) - - self.exchange_topic_listener_map[key] = listener - - def stop_all_listeners(self): - for listener in six.itervalues(self.exchange_topic_listener_map): - listener.stop() - listener.wait() - - def _start_listeners(self): - triggers = db_api.get_event_triggers(insecure=True) - - LOG.info('Find %s event triggers.', len(triggers)) - - for trigger in triggers: - exchange_topic = (trigger.exchange, trigger.topic) - self.exchange_topic_events_map[exchange_topic].add(trigger.event) - - trigger_info = trigger.to_dict() - self.event_triggers_map[trigger.event].append(trigger_info) - - for (ex_t, events) in self.exchange_topic_events_map.items(): - exchange, topic = ex_t - self._add_event_listener(exchange, topic, events) - - def _start_workflow(self, triggers, event_params): - """Start workflows defined in event triggers.""" - for t in triggers: - LOG.info('Start to process event trigger: %s', t['id']) - - workflow_params = t.get('workflow_params', {}) - 
workflow_params.update({'event_params': event_params}) - - # Setup context before schedule triggers. - ctx = security.create_context(t['trust_id'], t['project_id']) - auth_ctx.set_ctx(ctx) - - try: - self.engine_client.start_workflow( - t['workflow_id'], - t['workflow_input'], - description="Workflow execution created by event " - "trigger %s." % t['id'], - **workflow_params - ) - except Exception as e: - LOG.exception("Failed to process event trigger %s, " - "error: %s", t['id'], str(e)) - finally: - auth_ctx.set_ctx(None) - - def _process_event_queue(self, *args, **kwargs): - """Process notification events. - - This function is called in a thread. - """ - while True: - event = self.event_queue.get() - - context = event.get('context') - event_type = event.get('event_type') - - # NOTE(kong): Use lock here to protect event_triggers_map variable - # from being updated outside the thread. - with self.lock: - if event_type in self.event_triggers_map: - triggers = self.event_triggers_map[event_type] - - # There may be more projects registered the same event. - project_ids = [t['project_id'] for t in triggers] - - # Skip the event doesn't belong to any event trigger owner. - if (CONF.pecan.auth_enable and - context.get('project_id', '') not in project_ids): - self.event_queue.task_done() - continue - - LOG.debug('Start to handle event: %s, %d trigger(s) ' - 'registered.', event_type, len(triggers)) - - event_params = self.notification_converter.convert( - event_type, - event - ) - - self._start_workflow(triggers, event_params) - - self.event_queue.task_done() - - def _start_handler(self): - """Starts event queue handler in a thread group.""" - LOG.info('Starting event notification task...') - - self.handler_tg.add_thread(self._process_event_queue) - - def process_notification_event(self, notification): - """Callback funtion by event handler. - - Just put notification into a queue. 
- """ - LOG.debug("Putting notification event to event queue.") - - self.event_queue.put(notification) - - def create_event_trigger(self, trigger, events): - """An endpoint method for creating event trigger. - - When creating an event trigger in API layer, we need to create a new - listener or update an existing listener. - - :param trigger: a dict containing event trigger information. - :param events: a list of events binding to the (exchange, topic) of - the event trigger. - """ - with self.lock: - ids = [t['id'] for t in self.event_triggers_map[trigger['event']]] - - if trigger['id'] not in ids: - self.event_triggers_map[trigger['event']].append(trigger) - - self._add_event_listener(trigger['exchange'], trigger['topic'], events) - - def update_event_trigger(self, trigger): - """An endpoint method for updating event trigger. - - Because only workflow related information is allowed to be updated, we - only need to update event_triggers_map(in a synchronous way). - - :param trigger: a dict containing event trigger information. - """ - assert trigger['event'] in self.event_triggers_map - - with self.lock: - for t in self.event_triggers_map[trigger['event']]: - if trigger['id'] == t['id']: - t.update(trigger) - - def delete_event_trigger(self, trigger, events): - """An endpoint method for deleting event trigger. - - If there is no event binding to (exchange, topic) after deletion, we - need to delete the related listener. Otherwise, we need to restart - that listener. - - :param trigger: a dict containing event trigger information. - :param events: a list of events binding to the (exchange, topic) of - the event trigger. 
- """ - assert trigger['event'] in self.event_triggers_map - - with self.lock: - for t in self.event_triggers_map[trigger['event']]: - if t['id'] == trigger['id']: - self.event_triggers_map[trigger['event']].remove(t) - break - - if not self.event_triggers_map[trigger['event']]: - del self.event_triggers_map[trigger['event']] - - if not events: - key = (trigger['exchange'], trigger['topic']) - - listener = self.exchange_topic_listener_map[key] - listener.stop() - listener.wait() - - del self.exchange_topic_listener_map[key] - - LOG.info( - 'Deleted listener for exchange: %s, topic: %s', - trigger['exchange'], - trigger['topic'] - ) - - return - - self._add_event_listener(trigger['exchange'], trigger['topic'], events) diff -Nru mistral-4.0.0/mistral/event_engine/event_engine_server.py mistral-5.0.0~b2/mistral/event_engine/event_engine_server.py --- mistral-4.0.0/mistral/event_engine/event_engine_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/event_engine/event_engine_server.py 2017-06-09 12:48:26.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright 2016 - Nokia Networks +# Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,8 +16,8 @@ from oslo_log import log as logging from mistral import config as cfg -from mistral.engine.rpc_backend import rpc -from mistral.event_engine import event_engine +from mistral.event_engine import default_event_engine as evt_eng +from mistral.rpc import base as rpc from mistral.service import base as service_base from mistral.utils import profiler as profiler_utils @@ -88,4 +89,4 @@ def get_oslo_service(): - return EventEngineServer(event_engine.EventEngine()) + return EventEngineServer(evt_eng.DefaultEventEngine()) diff -Nru mistral-4.0.0/mistral/executors/base.py mistral-5.0.0~b2/mistral/executors/base.py --- mistral-4.0.0/mistral/executors/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/executors/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,67 @@ +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc +import six + +from stevedore import driver + + +_EXECUTORS = {} + + +def cleanup(): + global _EXECUTORS + _EXECUTORS = {} + + +def get_executor(exec_type): + global _EXECUTORS + + if not _EXECUTORS.get(exec_type): + mgr = driver.DriverManager( + 'mistral.executors', + exec_type, + invoke_on_load=True + ) + + _EXECUTORS[exec_type] = mgr.driver + + return _EXECUTORS[exec_type] + + +@six.add_metaclass(abc.ABCMeta) +class Executor(object): + """Action executor interface.""" + + @abc.abstractmethod + def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, + params, safe_rerun, redelivered=False, + target=None, async_=True): + """Runs action. + + :param action_ex_id: Corresponding action execution id. + :param action_cls_str: Path to action class in dot notation. + :param action_cls_attrs: Attributes of action class which + will be set to. + :param params: Action parameters. + :param safe_rerun: Tells if given action can be safely rerun. + :param redelivered: Tells if given action was run before on another + executor. + :param target: Target (group of action executors). + :param async_: If True, run action in asynchronous mode (w/o waiting + for completion). + :return: Action result. + """ + raise NotImplementedError() diff -Nru mistral-4.0.0/mistral/executors/default_executor.py mistral-5.0.0~b2/mistral/executors/default_executor.py --- mistral-4.0.0/mistral/executors/default_executor.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/executors/default_executor.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,183 @@ +# Copyright 2013 - Mirantis, Inc. +# Copyright 2016 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_log import log as logging +from osprofiler import profiler + +from mistral_lib import actions as mistral_lib + +from mistral.actions import action_factory as a_f +from mistral import context +from mistral import exceptions as exc +from mistral.executors import base +from mistral.rpc import clients as rpc +from mistral.utils import inspect_utils as i_u +from mistral.workflow import utils as wf_utils + + +LOG = logging.getLogger(__name__) + + +class DefaultExecutor(base.Executor): + def __init__(self): + self._engine_client = rpc.get_engine_client() + + @profiler.trace('default-executor-run-action', hide_args=True) + def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, + params, safe_rerun, redelivered=False, + target=None, async_=True): + """Runs action. + + :param action_ex_id: Action execution id. + :param action_cls_str: Path to action class in dot notation. + :param action_cls_attrs: Attributes of action class which + will be set to. + :param params: Action parameters. + :param safe_rerun: Tells if given action can be safely rerun. + :param redelivered: Tells if given action was run before on another + executor. + :param target: Target (group of action executors). + :param async_: If True, run action in asynchronous mode (w/o waiting + for completion). + :return: Action result. 
+ """ + + def send_error_back(error_msg): + error_result = wf_utils.Result(error=error_msg) + + if action_ex_id: + self._engine_client.on_action_complete( + action_ex_id, + error_result + ) + + return None + + return error_result + + if redelivered and not safe_rerun: + msg = ( + "Request to run action %s was redelivered, but action %s " + "cannot be re-run safely. The only safe thing to do is fail " + "action." % (action_cls_str, action_cls_str) + ) + + return send_error_back(msg) + + # Load action module. + action_cls = a_f.construct_action_class( + action_cls_str, + action_cls_attrs + ) + + # Instantiate action. + try: + action = action_cls(**params) + except Exception as e: + msg = ( + "Failed to initialize action %s. Action init params = %s. " + "Actual init params = %s. More info: %s" % ( + action_cls_str, + i_u.get_arg_list(action_cls.__init__), + params.keys(), + e + ) + ) + + LOG.warning(msg) + + return send_error_back(msg) + + # Run action. + try: + + # NOTE(d0ugal): If the action is a subclass of mistral-lib we know + # that it expects to be passed the context. We should deprecate + # the builtin action class in Mistral. + if isinstance(action, mistral_lib.Action): + result = action.run(context.ctx()) + else: + result = action.run() + + # Note: it's made for backwards compatibility with already + # existing Mistral actions which don't return result as + # instance of workflow.utils.Result. + if not isinstance(result, wf_utils.Result): + result = wf_utils.Result(data=result) + + except Exception as e: + msg = ( + "Failed to run action [action_ex_id=%s, action_cls='%s', " + "attributes='%s', params='%s']\n %s" % ( + action_ex_id, + action_cls, + action_cls_attrs, + params, + e + ) + ) + + LOG.exception(msg) + + return send_error_back(msg) + + # Send action result. 
+ try: + if action_ex_id and (action.is_sync() or result.is_error()): + self._engine_client.on_action_complete( + action_ex_id, + result, + async_=True + ) + + except exc.MistralException as e: + # In case of a Mistral exception we can try to send error info to + # engine because most likely it's not related to the infrastructure + # such as message bus or network. One known case is when the action + # returns a bad result (e.g. invalid unicode) which can't be + # serialized. + msg = ( + "Failed to complete action due to a Mistral exception " + "[action_ex_id=%s, action_cls='%s', " + "attributes='%s', params='%s']\n %s" % ( + action_ex_id, + action_cls, + action_cls_attrs, + params, + e + ) + ) + + LOG.exception(msg) + + return send_error_back(msg) + except Exception as e: + # If it's not a Mistral exception all we can do is only + # log the error. + msg = ( + "Failed to complete action due to an unexpected exception " + "[action_ex_id=%s, action_cls='%s', " + "attributes='%s', params='%s']\n %s" % ( + action_ex_id, + action_cls, + action_cls_attrs, + params, + e + ) + ) + + LOG.exception(msg) + + return result diff -Nru mistral-4.0.0/mistral/executors/executor_server.py mistral-5.0.0~b2/mistral/executors/executor_server.py --- mistral-4.0.0/mistral/executors/executor_server.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/executors/executor_server.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,101 @@ +# Copyright 2016 - Nokia Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_log import log as logging + +from mistral import config as cfg +from mistral.executors import default_executor as exe +from mistral.rpc import base as rpc +from mistral.service import base as service_base +from mistral import utils +from mistral.utils import profiler as profiler_utils + +LOG = logging.getLogger(__name__) + + +class ExecutorServer(service_base.MistralService): + """Executor server. + + This class manages executor life-cycle and gets registered as an RPC + endpoint to process executor specific calls. It also registers a + cluster member associated with this instance of executor. + """ + + def __init__(self, executor, setup_profiler=True): + super(ExecutorServer, self).__init__('executor_group', setup_profiler) + + self.executor = executor + self._rpc_server = None + + def start(self): + super(ExecutorServer, self).start() + + if self._setup_profiler: + profiler_utils.setup('mistral-executor', cfg.CONF.executor.host) + + # Initialize and start RPC server. + + self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.executor) + self._rpc_server.register_endpoint(self) + + self._rpc_server.run(executor='threading') + + self._notify_started('Executor server started.') + + def stop(self, graceful=False): + super(ExecutorServer, self).stop(graceful) + + if self._rpc_server: + self._rpc_server.stop(graceful) + + def run_action(self, rpc_ctx, action_ex_id, action_cls_str, + action_cls_attrs, params, safe_rerun): + """Receives calls over RPC to run action on executor. + + :param rpc_ctx: RPC request context dictionary. + :param action_ex_id: Action execution id. + :param action_cls_str: Action class name. + :param action_cls_attrs: Action class attributes. + :param params: Action input parameters. + :param safe_rerun: Tells if given action can be safely rerun. + :return: Action result. 
+ """ + + LOG.info( + "Received RPC request 'run_action'[action_ex_id=%s, " + "action_cls_str=%s, action_cls_attrs=%s, params=%s]" % ( + action_ex_id, + action_cls_str, + action_cls_attrs, + utils.cut(params) + ) + ) + + redelivered = rpc_ctx.redelivered or False + + return self.executor.run_action( + action_ex_id, + action_cls_str, + action_cls_attrs, + params, + safe_rerun, + redelivered + ) + + +def get_oslo_service(setup_profiler=True): + return ExecutorServer( + exe.DefaultExecutor(), + setup_profiler=setup_profiler + ) diff -Nru mistral-4.0.0/mistral/executors/remote_executor.py mistral-5.0.0~b2/mistral/executors/remote_executor.py --- mistral-4.0.0/mistral/executors/remote_executor.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/executors/remote_executor.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,30 @@ +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo_config import cfg +from oslo_log import log as logging + +from mistral.rpc import base as rpc_base +from mistral.rpc import clients as rpc_clients + + +LOG = logging.getLogger(__name__) + + +class RemoteExecutor(rpc_clients.ExecutorClient): + """Executor that passes execution request to a remote executor.""" + + def __init__(self): + self.topic = cfg.CONF.executor.topic + self._client = rpc_base.get_rpc_client_driver()(cfg.CONF.executor) diff -Nru mistral-4.0.0/mistral/expressions/jinja_expression.py mistral-5.0.0~b2/mistral/expressions/jinja_expression.py --- mistral-4.0.0/mistral/expressions/jinja_expression.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/expressions/jinja_expression.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,6 +16,7 @@ import jinja2 from jinja2 import parser as jinja_parse +from jinja2.sandbox import SandboxedEnvironment from oslo_log import log as logging import six @@ -29,7 +30,7 @@ JINJA_REGEXP = '({{(.*)}})' JINJA_BLOCK_REGEXP = '({%(.*)%})' -_environment = jinja2.Environment( +_environment = SandboxedEnvironment( undefined=jinja2.StrictUndefined, trim_blocks=True, lstrip_blocks=True @@ -46,11 +47,6 @@ @classmethod def validate(cls, expression): - LOG.debug( - "Validating Jinja expression [expression='%s']", - expression - ) - if not isinstance(expression, six.string_types): raise exc.JinjaEvaluationException( "Unsupported type '%s'." 
% type(expression) @@ -67,11 +63,6 @@ @classmethod def evaluate(cls, expression, data_context): - LOG.debug( - "Evaluating Jinja expression [expression='%s', context=%s]" - % (expression, data_context) - ) - opts = {'undefined_to_none': False} ctx = expression_utils.get_jinja_context(data_context) @@ -89,8 +80,6 @@ ", data=%s]" % (expression, str(e), data_context) ) - LOG.debug("Jinja expression result: %s" % result) - return result @classmethod @@ -110,11 +99,6 @@ @classmethod def validate(cls, expression): - LOG.debug( - "Validating Jinja expression [expression='%s']", - expression - ) - if not isinstance(expression, six.string_types): raise exc.JinjaEvaluationException( "Unsupported type '%s'." % type(expression) @@ -130,7 +114,7 @@ @classmethod def evaluate(cls, expression, data_context): LOG.debug( - "Evaluating Jinja expression [expression='%s', context=%s]" + "Start to evaluate Jinja expression. [expression='%s', context=%s]" % (expression, data_context) ) @@ -142,7 +126,10 @@ ctx = expression_utils.get_jinja_context(data_context) result = cls._env.from_string(expression).render(**ctx) - LOG.debug("Jinja expression result: %s" % result) + LOG.debug( + "Finished evaluation. 
[expression='%s', result: %s]" % + (expression, result) + ) return result diff -Nru mistral-4.0.0/mistral/expressions/yaql_expression.py mistral-5.0.0~b2/mistral/expressions/yaql_expression.py --- mistral-4.0.0/mistral/expressions/yaql_expression.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/expressions/yaql_expression.py 2017-06-09 12:48:26.000000000 +0000 @@ -35,8 +35,6 @@ class YAQLEvaluator(Evaluator): @classmethod def validate(cls, expression): - LOG.debug("Validating YAQL expression [expression='%s']", expression) - try: YAQL_ENGINE(expression) except (yaql_exc.YaqlException, KeyError, ValueError, TypeError) as e: @@ -46,11 +44,6 @@ def evaluate(cls, expression, data_context): expression = expression.strip() if expression else expression - LOG.debug( - "Evaluating YAQL expression [expression='%s', context=%s]" - % (expression, data_context) - ) - try: result = YAQL_ENGINE(expression).evaluate( context=expression_utils.get_yaql_context(data_context) @@ -61,8 +54,6 @@ ", data=%s]" % (expression, str(e), data_context) ) - LOG.debug("YAQL expression result: %s" % result) - return result if not inspect.isgenerator(result) else list(result) @classmethod @@ -79,9 +70,6 @@ @classmethod def validate(cls, expression): - LOG.debug( - "Validating inline YAQL expression [expression='%s']", expression) - if not isinstance(expression, six.string_types): raise exc.YaqlEvaluationException( "Unsupported type '%s'." % type(expression) @@ -96,7 +84,7 @@ @classmethod def evaluate(cls, expression, data_context): LOG.debug( - "Evaluating inline YAQL expression [expression='%s', context=%s]" + "Start to evaluate YAQL expression. [expression='%s', context=%s]" % (expression, data_context) ) @@ -113,7 +101,10 @@ else: result = result.replace(expr, str(evaluated)) - LOG.debug("Inline YAQL expression result: %s" % result) + LOG.debug( + "Finished evaluation. 
[expression='%s', result: %s]" % + (expression, result) + ) return result diff -Nru mistral-4.0.0/mistral/ext/pygmentplugin.py mistral-5.0.0~b2/mistral/ext/pygmentplugin.py --- mistral-4.0.0/mistral/ext/pygmentplugin.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/ext/pygmentplugin.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,64 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +from pygments import lexer +from pygments import token + + +class MistralLexer(lexer.RegexLexer): + + name = 'Mistral' + aliases = ['mistral'] + + flags = re.MULTILINE | re.UNICODE + + tokens = { + "root": [ + (r'^(\s)*(workflows|tasks|input|output|type)(\s)*:', + token.Keyword), + (r'^(\s)*(version|name|description)(\s)*:', token.Keyword), + (r'^(\s)*(publish|timeout|retry|with\-items)(\s)*:', + token.Keyword), + (r'^(\s)*(on\-success|on\-error|on\-complete)(\s)*:', + token.Keyword), + (r'^(\s)*(action|workflow)(\s)*:', token.Keyword, 'call'), + (r'(\-|\:)(\s)*(fail|succeed|pause)(\s)+', token.Operator.Word), + (r'<%', token.Name.Entity, 'expression'), + (r'\{\{', token.Name.Entity, 'expression'), + (r'#.*$', token.Comment), + (r'(^|\s|\-)+\d+', token.Number), + lexer.include("generic"), + ], + "expression": [ + (r'\$', token.Operator), + (r'\s(json_pp|task|tasks|execution|env|uuid)(?!\w)', + token.Name.Builtin), + lexer.include("generic"), + (r'%>', token.Name.Entity, '#pop'), + (r'}\\}', token.Name.Entity, '#pop'), + ], + "call": [ + 
(r'(\s)*[\w\.]+($|\s)', token.Name.Function), + lexer.default('#pop'), + ], + "generic": [ + (r'%>', token.Name.Entity, '#pop'), + (r'}\\}', token.Name.Entity, '#pop'), + (r'(\-|:|=|!|\[|\]|<|>|\/|\*)', token.Operator), + (r'(null|None|True|False)', token.Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', token.String.Double), + (r"'(\\\\|\\'|[^'])*'", token.String.Single), + (r'\W|\w|\s|\(|\)|,|\.', token.Text), + ] + } diff -Nru mistral-4.0.0/mistral/hacking/checks.py mistral-5.0.0~b2/mistral/hacking/checks.py --- mistral-4.0.0/mistral/hacking/checks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/hacking/checks.py 2017-06-09 12:48:26.000000000 +0000 @@ -49,6 +49,34 @@ yield (0, msg) +def no_assert_equal_true_false(logical_line): + """Check for assertTrue/assertFalse sentences + + M319 + """ + _start_re = re.compile(r'assert(Not)?Equal\((True|False),') + _end_re = re.compile(r'assert(Not)?Equal\(.*,\s+(True|False)\)$') + + if _start_re.search(logical_line) or _end_re.search(logical_line): + yield (0, "M319: assertEqual(A, True|False), " + "assertEqual(True|False, A), assertNotEqual(A, True|False), " + "or assertEqual(True|False, A) sentences must not be used. " + "Use assertTrue(A) or assertFalse(A) instead") + + +def no_assert_true_false_is_not(logical_line): + """Check for assertIs/assertIsNot sentences + + M320 + """ + _re = re.compile(r'assert(True|False)\(.+\s+is\s+(not\s+)?.+\)$') + + if _re.search(logical_line): + yield (0, "M320: assertTrue(A is|is not B) or " + "assertFalse(A is|is not B) sentences must not be used. " + "Use assertIs(A, B) or assertIsNot(A, B) instead") + + def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports_from_dot, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( @@ -73,6 +101,27 @@ "with Python 3. 
Use range() or six.moves.range() instead.") +def check_python3_no_iteritems(logical_line): + msg = ("M328: Use six.iteritems() instead of dict.iteritems().") + + if re.search(r".*\.iteritems\(\)", logical_line): + yield(0, msg) + + +def check_python3_no_iterkeys(logical_line): + msg = ("M329: Use six.iterkeys() instead of dict.iterkeys().") + + if re.search(r".*\.iterkeys\(\)", logical_line): + yield(0, msg) + + +def check_python3_no_itervalues(logical_line): + msg = ("M330: Use six.itervalues() instead of dict.itervalues().") + + if re.search(r".*\.itervalues\(\)", logical_line): + yield(0, msg) + + class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. @@ -234,5 +283,11 @@ def factory(register): register(assert_equal_none) + register(no_assert_equal_true_false) + register(no_assert_true_false_is_not) register(check_oslo_namespace_imports) register(CheckForLoggingIssues) + register(check_python3_no_iteritems) + register(check_python3_no_iterkeys) + register(check_python3_no_itervalues) + register(check_python3_xrange) diff -Nru mistral-4.0.0/mistral/_i18n.py mistral-5.0.0~b2/mistral/_i18n.py --- mistral-4.0.0/mistral/_i18n.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/_i18n.py 2017-06-09 12:48:26.000000000 +0000 @@ -24,13 +24,3 @@ # The primary translation function using the well-known name "_" _ = _translators.primary - -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. 
-_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical diff -Nru mistral-4.0.0/mistral/lang/base.py mistral-5.0.0~b2/mistral/lang/base.py --- mistral-4.0.0/mistral/lang/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,379 @@ +# Copyright 2015 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import json +import jsonschema +import re +import six + +from mistral import exceptions as exc +from mistral import expressions as expr +from mistral.lang import types +from mistral import utils + + +CMD_PTRN = re.compile("^[\w\.]+[^=\(\s\"]*") + +EXPRESSION = '|'.join([expr.patterns[name] for name in expr.patterns]) +_ALL_IN_BRACKETS = "\[.*\]\s*" +_ALL_IN_QUOTES = "\"[^\"]*\"\s*" +_ALL_IN_APOSTROPHES = "'[^']*'\s*" +_DIGITS = "\d+" +_TRUE = "true" +_FALSE = "false" +_NULL = "null" + +ALL = ( + _ALL_IN_QUOTES, _ALL_IN_APOSTROPHES, EXPRESSION, + _ALL_IN_BRACKETS, _TRUE, _FALSE, _NULL, _DIGITS +) + +PARAMS_PTRN = re.compile("([-_\w]+)=(%s)" % "|".join(ALL)) + + +def instantiate_spec(spec_cls, data): + """Instantiates specification accounting for specification hierarchies. + + :param spec_cls: Specification concrete or base class. 
In case if base + class or the hierarchy is provided this method relies on attributes + _polymorphic_key and _polymorphic_value in order to find a concrete + class that needs to be instantiated. + :param data: Raw specification data as a dictionary. + """ + + if issubclass(spec_cls, BaseSpecList): + # Ignore polymorphic search for specification lists because + # it doesn't make sense for them. + return spec_cls(data) + + if not hasattr(spec_cls, '_polymorphic_key'): + spec = spec_cls(data) + + spec.validate_semantics() + + return spec + + # In order to do polymorphic search we need to make sure that + # a spec is backed by a dictionary. Otherwise we can't extract + # a polymorphic key. + if not isinstance(data, dict): + raise exc.InvalidModelException( + "A specification with polymorphic key must be backed by" + " a dictionary [spec_cls=%s, data=%s]" % (spec_cls, data) + ) + + key = spec_cls._polymorphic_key + + if not isinstance(key, tuple): + key_name = key + key_default = None + else: + key_name = key[0] + key_default = key[1] + + for cls in utils.iter_subclasses(spec_cls): + if not hasattr(cls, '_polymorphic_value'): + raise exc.DSLParsingException( + "Class '%s' is expected to have attribute '_polymorphic_value'" + " because it's a part of specification hierarchy inherited " + "from class '%s'." % (cls, spec_cls) + ) + + if cls._polymorphic_value == data.get(key_name, key_default): + spec = cls(data) + + spec.validate_semantics() + + return spec + + raise exc.DSLParsingException( + 'Failed to find a specification class to instantiate ' + '[spec_cls=%s, data=%s]' % (spec_cls, data) + ) + + +class BaseSpec(object): + """Base class for all DSL specifications. + + It represents a DSL entity such as workflow or task as a python object + providing more convenient API to analyse DSL than just working with raw + data in form of a dictionary. 
Specification classes also implement + all required validation logic by overriding instance methods + 'validate_schema()' and 'validate_semantics()'. + + Note that the specification mechanism allows to have polymorphic entities + in DSL. For example, if we find it more convenient to have separate + specification classes for different types of workflow (i.e. 'direct' and + 'reverse') we can do so. In this case, in order to instantiate them + correctly method 'instantiate_spec' must always be used where argument + 'spec_cls' must be a root class of the specification hierarchy containing + class attribute '_polymorhpic_key' pointing to a key in raw data relying + on which we can find a concrete class. Concrete classes then must all have + attribute '_polymorhpic_value' corresponding to a value in a raw data. + Attribute '_polymorhpic_key' can be either a string or a tuple of size two + where the first value is a key name itself and the second value is a + default polymorphic value that must be used if raw data doesn't contain + a configured key at all. An example of this situation is when we don't + specify a workflow type in DSL. In this case, we assume it's 'direct'. 
+ """ + + # See http://json-schema.org + _schema = { + 'type': 'object' + } + + _meta_schema = { + 'type': 'object' + } + + _definitions = {} + + _version = '2.0' + + @classmethod + def get_schema(cls, includes=['meta', 'definitions']): + schema = copy.deepcopy(cls._schema) + + schema['properties'] = utils.merge_dicts( + schema.get('properties', {}), + cls._meta_schema.get('properties', {}), + overwrite=False + ) + + if includes and 'meta' in includes: + schema['required'] = list( + set(schema.get('required', []) + + cls._meta_schema.get('required', [])) + ) + + if includes and 'definitions' in includes: + schema['definitions'] = utils.merge_dicts( + schema.get('definitions', {}), + cls._definitions, + overwrite=False + ) + + return schema + + def __init__(self, data): + self._data = data + + self.validate_schema() + + def validate_schema(self): + """Validates DSL entity schema that this specification represents. + + By default, this method just validate schema of DSL entity that this + specification represents using "_schema" class attribute. + Additionally, child classes may implement additional logic to validate + more specific things like YAQL expressions in their fields. + + Note that this method is called before construction of specification + fields and validation logic should only rely on raw data provided as + a dictionary accessible through '_data' instance field. + """ + + try: + jsonschema.validate(self._data, self.get_schema()) + except jsonschema.ValidationError as e: + raise exc.InvalidModelException("Invalid DSL: %s" % e) + + def validate_semantics(self): + """Validates semantics of specification object. + + Child classes may implement validation logic to check things like + integrity of corresponding data structure (e.g. task graph) or + other things that can't be expressed in JSON schema. + + This method is called after specification has been built (i.e. 
+ its initializer has finished it's work) so that validation logic + can rely on initialized specification fields. + """ + pass + + def validate_expr(self, dsl_part): + if isinstance(dsl_part, six.string_types): + expr.validate(dsl_part) + elif isinstance(dsl_part, (list, tuple)): + for expression in dsl_part: + if isinstance(expression, six.string_types): + expr.validate(expression) + elif isinstance(dsl_part, dict): + for expression in dsl_part.values(): + if isinstance(expression, six.string_types): + expr.validate(expression) + + def _spec_property(self, prop_name, spec_cls): + prop_val = self._data.get(prop_name) + + return ( + instantiate_spec(spec_cls, prop_val) if prop_val is not None + else None + ) + + def _group_spec(self, spec_cls, *prop_names): + if not prop_names: + return None + + data = {} + + for prop_name in prop_names: + prop_val = self._data.get(prop_name) + + if prop_val: + data[prop_name] = prop_val + + return instantiate_spec(spec_cls, data) + + def _inject_version(self, prop_names): + for prop_name in prop_names: + prop_data = self._data.get(prop_name) + + if isinstance(prop_data, dict): + prop_data['version'] = self._version + + def _as_dict(self, prop_name): + prop_val = self._data.get(prop_name) + + if not prop_val: + return {} + + if isinstance(prop_val, dict): + return prop_val + elif isinstance(prop_val, list): + result = {} + + for t in prop_val: + result.update(t if isinstance(t, dict) else {t: ''}) + + return result + elif isinstance(prop_val, six.string_types): + return {prop_val: ''} + + @staticmethod + def _parse_cmd_and_input(cmd_str): + # TODO(rakhmerov): Try to find a way with one expression. + cmd_matcher = CMD_PTRN.search(cmd_str) + + if not cmd_matcher: + msg = "Invalid action/workflow task property: %s" % cmd_str + raise exc.InvalidModelException(msg) + + cmd = cmd_matcher.group() + + params = {} + + for match in re.findall(PARAMS_PTRN, cmd_str): + k = match[0] + # Remove embracing quotes. 
+ v = match[1].strip() + if v[0] == '"' or v[0] == "'": + v = v[1:-1] + else: + try: + v = json.loads(v) + except Exception: + pass + + params[k] = v + + return cmd, params + + def to_dict(self): + return self._data + + def get_version(self): + return self._version + + def __repr__(self): + return "%s %s" % (self.__class__.__name__, self.to_dict()) + + +class BaseListSpec(BaseSpec): + item_class = None + + _schema = { + "type": "object", + "properties": { + "version": types.VERSION + }, + "additionalProperties": types.NONEMPTY_DICT, + "required": ["version"], + } + + def __init__(self, data): + super(BaseListSpec, self).__init__(data) + + self.items = [] + + for k, v in data.items(): + if k != 'version': + v['name'] = k + self._inject_version([k]) + self.items.append(instantiate_spec(self.item_class, v)) + + def validate_schema(self): + super(BaseListSpec, self).validate_schema() + + if len(self._data.keys()) < 2: + raise exc.InvalidModelException( + 'At least one item must be in the list [data=%s].' % + self._data + ) + + def get_items(self): + return self.items + + def __getitem__(self, idx): + return self.items[idx] + + def __len__(self): + return len(self.items) + + +class BaseSpecList(object): + item_class = None + + _version = '2.0' + + def __init__(self, data): + self.items = {} + + for k, v in data.items(): + if k != 'version': + # At this point, we don't know if item schema is valid, + # it may not be even a dictionary. So we should check the + # type first before manipulating with it. 
+ if isinstance(v, dict): + v['name'] = k + v['version'] = self._version + + self.items[k] = instantiate_spec(self.item_class, v) + + def item_keys(self): + return self.items.keys() + + def __iter__(self): + return six.itervalues(self.items) + + def __getitem__(self, name): + return self.items.get(name) + + def __len__(self): + return len(self.items) + + def get(self, name): + return self.__getitem__(name) diff -Nru mistral-4.0.0/mistral/lang/parser.py mistral-5.0.0~b2/mistral/lang/parser.py --- mistral-4.0.0/mistral/lang/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/parser.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,263 @@ +# Copyright 2013 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import cachetools +import threading +import yaml +from yaml import error + +import six + +from mistral.db.v2 import api as db_api +from mistral import exceptions as exc +from mistral.lang import base +from mistral.lang.v2 import actions as actions_v2 +from mistral.lang.v2 import tasks as tasks_v2 +from mistral.lang.v2 import workbook as wb_v2 +from mistral.lang.v2 import workflows as wf_v2 + +V2_0 = '2.0' + +ALL_VERSIONS = [V2_0] + + +_WF_EX_CACHE = cachetools.LRUCache(maxsize=100) +_WF_EX_CACHE_LOCK = threading.RLock() + +_WF_DEF_CACHE = cachetools.LRUCache(maxsize=100) +_WF_DEF_CACHE_LOCK = threading.RLock() + + +def parse_yaml(text): + """Loads a text in YAML format as dictionary object. + + :param text: YAML text. + :return: Parsed YAML document as dictionary. + """ + + try: + return yaml.safe_load(text) or {} + except error.YAMLError as e: + raise exc.DSLParsingException( + "Definition could not be parsed: %s\n" % e + ) + + +def _get_spec_version(spec_dict): + # If version is not specified it will '2.0' by default. + ver = V2_0 + + if 'version' in spec_dict: + ver = spec_dict['version'] + + def _raise(ver): + raise exc.DSLParsingException('Unsupported DSL version: %s' % ver) + try: + str_ver = str(float(ver)) + except (ValueError, TypeError): + _raise(ver) + + if not ver or str_ver not in ALL_VERSIONS: + _raise(ver) + + return ver + + +# Factory methods to get specifications either from raw YAML formatted text or +# from dictionaries parsed from YAML formatted text. 
+ +def get_workbook_spec(spec_dict): + if _get_spec_version(spec_dict) == V2_0: + return base.instantiate_spec(wb_v2.WorkbookSpec, spec_dict) + + return None + + +def get_workbook_spec_from_yaml(text): + return get_workbook_spec(parse_yaml(text)) + + +def get_action_spec(spec_dict): + if _get_spec_version(spec_dict) == V2_0: + return base.instantiate_spec(actions_v2.ActionSpec, spec_dict) + + return None + + +def get_action_spec_from_yaml(text, action_name): + spec_dict = parse_yaml(text) + + spec_dict['name'] = action_name + + return get_action_spec(spec_dict) + + +def get_action_list_spec(spec_dict): + return base.instantiate_spec(actions_v2.ActionListSpec, spec_dict) + + +def get_action_list_spec_from_yaml(text): + return get_action_list_spec(parse_yaml(text)) + + +def get_workflow_spec(spec_dict): + """Get workflow specification object from dictionary. + + NOTE: For large workflows this method can work very long (seconds). + For this reason, method 'get_workflow_spec_by_definition_id' or + 'get_workflow_spec_by_execution_id' should be used whenever possible + because they cache specification objects. + + :param spec_dict: Raw specification dictionary. 
+ """ + if _get_spec_version(spec_dict) == V2_0: + return base.instantiate_spec(wf_v2.WorkflowSpec, spec_dict) + + return None + + +def get_workflow_list_spec(spec_dict): + return base.instantiate_spec(wf_v2.WorkflowListSpec, spec_dict) + + +def get_workflow_spec_from_yaml(text): + return get_workflow_spec(parse_yaml(text)) + + +def get_workflow_list_spec_from_yaml(text): + return get_workflow_list_spec(parse_yaml(text)) + + +def get_task_spec(spec_dict): + if _get_spec_version(spec_dict) == V2_0: + return base.instantiate_spec(tasks_v2.TaskSpec, spec_dict) + + return None + + +def get_workflow_definition(wb_def, wf_name): + wf_name = wf_name + ":" + + return _parse_def_from_wb(wb_def, "workflows:", wf_name) + + +def get_action_definition(wb_def, action_name): + action_name += ":" + + return _parse_def_from_wb(wb_def, "actions:", action_name) + + +def _parse_def_from_wb(wb_def, section_name, item_name): + io = six.StringIO(wb_def[wb_def.index(section_name):]) + io.readline() + definition = [] + ident = 0 + # Get the indentation of the action/workflow name tag. + for line in io: + if item_name == line.strip(): + ident = line.index(item_name) + definition.append(line.lstrip()) + break + + # Add strings to list unless same/less indentation is found. + for line in io: + new_line = line.strip() + + if not new_line: + definition.append(line) + elif new_line.startswith("#"): + new_line = line if ident > line.index("#") else line[ident:] + definition.append(new_line) + else: + temp = line.index(line.lstrip()) + if ident < temp: + definition.append(line[ident:]) + else: + break + + io.close() + definition = ''.join(definition).rstrip() + '\n' + + return definition + + +# Methods for obtaining specifications in a more efficient way using +# caching techniques. + +@cachetools.cached(_WF_EX_CACHE, lock=_WF_EX_CACHE_LOCK) +def get_workflow_spec_by_execution_id(wf_ex_id): + """Gets workflow specification by workflow execution id. 
+ + The idea is that when a workflow execution is running we + must be getting the same workflow specification even if + + :param wf_ex_id: Workflow execution id. + :return: Workflow specification. + """ + if not wf_ex_id: + return None + + wf_ex = db_api.get_workflow_execution(wf_ex_id) + + return get_workflow_spec(wf_ex.spec) + + +@cachetools.cached(_WF_DEF_CACHE, lock=_WF_DEF_CACHE_LOCK) +def get_workflow_spec_by_definition_id(wf_def_id, wf_def_updated_at): + """Gets specification by workflow definition id and its 'updated_at'. + + The idea of this method is to return a cached specification for the + given workflow id and workflow definition 'updated_at'. As long as the + given workflow definition remains the same in DB users of this method + will be getting a cached value. Once the workflow definition has + changed clients will be providing a different 'updated_at' value and + hence this method will be called and spec is updated for this combination + of parameters. Old cached values will be kicked out by LRU algorithm + if the cache runs out of space. + + :param wf_def_id: Workflow definition id. + :param wf_def_updated_at: Workflow definition 'updated_at' value. It + serves only as part of cache key and is not explicitly used in the + method. + :return: Workflow specification. 
+ """ + if not wf_def_id: + return None + + wf_def = db_api.get_workflow_definition(wf_def_id) + + return get_workflow_spec(wf_def.spec) + + +def cache_workflow_spec_by_execution_id(wf_ex_id, wf_spec): + with _WF_EX_CACHE_LOCK: + _WF_EX_CACHE[cachetools.keys.hashkey(wf_ex_id)] = wf_spec + + +def get_wf_execution_spec_cache_size(): + return len(_WF_EX_CACHE) + + +def get_wf_definition_spec_cache_size(): + return len(_WF_DEF_CACHE) + + +def clear_caches(): + """Clears all specification caches.""" + with _WF_EX_CACHE_LOCK: + _WF_EX_CACHE.clear() + + with _WF_DEF_CACHE_LOCK: + _WF_DEF_CACHE.clear() diff -Nru mistral-4.0.0/mistral/lang/types.py mistral-5.0.0~b2/mistral/lang/types.py --- mistral-4.0.0/mistral/lang/types.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/types.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,148 @@ +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from mistral import expressions + + +NONEMPTY_STRING = { + "type": "string", + "minLength": 1 +} + +UNIQUE_STRING_LIST = { + "type": "array", + "items": NONEMPTY_STRING, + "uniqueItems": True, + "minItems": 1 +} + +POSITIVE_INTEGER = { + "type": "integer", + "minimum": 0 +} + +POSITIVE_NUMBER = { + "type": "number", + "minimum": 0.0 +} + +EXPRESSION = { + "oneOf": [{ + "type": "string", + "pattern": "^%s\\s*$" % expressions.patterns[name] + } for name in expressions.patterns] +} + +EXPRESSION_CONDITION = { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^\w+$": EXPRESSION + } +} + +ANY = { + "anyOf": [ + {"type": "array"}, + {"type": "boolean"}, + {"type": "integer"}, + {"type": "number"}, + {"type": "object"}, + {"type": "string"} + ] +} + +ANY_NULLABLE = { + "anyOf": [ + {"type": "null"}, + {"type": "array"}, + {"type": "boolean"}, + {"type": "integer"}, + {"type": "number"}, + {"type": "object"}, + {"type": "string"} + ] +} + +NONEMPTY_DICT = { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^\w+$": ANY_NULLABLE + } +} + +ONE_KEY_DICT = { + "type": "object", + "minProperties": 1, + "maxProperties": 1, + "patternProperties": { + "^\w+$": ANY_NULLABLE + } +} + +STRING_OR_EXPRESSION_CONDITION = { + "oneOf": [ + NONEMPTY_STRING, + EXPRESSION_CONDITION + ] +} + +EXPRESSION_OR_POSITIVE_INTEGER = { + "oneOf": [ + EXPRESSION, + POSITIVE_INTEGER + ] +} + +EXPRESSION_OR_BOOLEAN = { + "oneOf": [ + EXPRESSION, + {"type": "boolean"} + ] +} + + +UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST = { + "type": "array", + "items": STRING_OR_EXPRESSION_CONDITION, + "uniqueItems": True, + "minItems": 1 +} + +VERSION = { + "anyOf": [ + NONEMPTY_STRING, + POSITIVE_INTEGER, + POSITIVE_NUMBER + ] +} + +WORKFLOW_TYPE = { + "enum": ["reverse", "direct"] +} + +STRING_OR_ONE_KEY_DICT = { + "oneOf": [ + NONEMPTY_STRING, + ONE_KEY_DICT + ] +} + +UNIQUE_STRING_OR_ONE_KEY_DICT_LIST = { + "type": "array", + "items": STRING_OR_ONE_KEY_DICT, + 
"uniqueItems": True, + "minItems": 1 +} diff -Nru mistral-4.0.0/mistral/lang/v2/actions.py mistral-5.0.0~b2/mistral/lang/v2/actions.py --- mistral-4.0.0/mistral/lang/v2/actions.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,94 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from mistral.lang import types +from mistral.lang.v2 import base +from mistral import utils + + +class ActionSpec(base.BaseSpec): + # See http://json-schema.org + _schema = { + "type": "object", + "properties": { + "base": types.NONEMPTY_STRING, + "base-input": types.NONEMPTY_DICT, + "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST, + "output": types.ANY_NULLABLE, + }, + "required": ["base"], + "additionalProperties": False + } + + def __init__(self, data): + super(ActionSpec, self).__init__(data) + + self._name = data['name'] + self._description = data.get('description') + self._tags = data.get('tags', []) + self._base = data['base'] + self._base_input = data.get('base-input', {}) + self._input = utils.get_dict_from_entries(data.get('input', [])) + self._output = data.get('output') + + self._base, _input = self._parse_cmd_and_input(self._base) + + utils.merge_dicts(self._base_input, _input) + + def validate_schema(self): + super(ActionSpec, self).validate_schema() + + # Validate YAQL expressions. 
+ inline_params = self._parse_cmd_and_input(self._data.get('base'))[1] + self.validate_expr(inline_params) + + self.validate_expr(self._data.get('base-input', {})) + + if isinstance(self._data.get('output'), six.string_types): + self.validate_expr(self._data.get('output')) + + def get_name(self): + return self._name + + def get_description(self): + return self._description + + def get_tags(self): + return self._tags + + def get_base(self): + return self._base + + def get_base_input(self): + return self._base_input + + def get_input(self): + return self._input + + def get_output(self): + return self._output + + +class ActionSpecList(base.BaseSpecList): + item_class = ActionSpec + + +class ActionListSpec(base.BaseListSpec): + item_class = ActionSpec + + def get_actions(self): + return self.get_items() diff -Nru mistral-4.0.0/mistral/lang/v2/base.py mistral-5.0.0~b2/mistral/lang/v2/base.py --- mistral-4.0.0/mistral/lang/v2/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from mistral.lang import base +from mistral.lang import types + + +class BaseSpec(base.BaseSpec): + _version = "2.0" + + _meta_schema = { + "type": "object", + "properties": { + "name": types.NONEMPTY_STRING, + "version": types.VERSION, + "description": types.NONEMPTY_STRING, + "tags": types.UNIQUE_STRING_LIST + }, + "required": ["name", "version"] + } + + +class BaseSpecList(base.BaseSpecList): + _version = "2.0" + + +class BaseListSpec(base.BaseListSpec): + _version = "2.0" diff -Nru mistral-4.0.0/mistral/lang/v2/on_clause.py mistral-5.0.0~b2/mistral/lang/v2/on_clause.py --- mistral-4.0.0/mistral/lang/v2/on_clause.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/on_clause.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import six + +from mistral.lang import types +from mistral.lang.v2 import base +from mistral.lang.v2 import publish + + +class OnClauseSpec(base.BaseSpec): + _simple_schema = { + "oneOf": [ + types.NONEMPTY_STRING, + types.UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST + ] + } + + _advanced_schema = { + "type": "object", + "properties": { + "publish": types.NONEMPTY_DICT, + "next": _simple_schema, + }, + "additionalProperties": False + } + + _schema = {"oneOf": [_simple_schema, _advanced_schema]} + + def __init__(self, data): + super(OnClauseSpec, self).__init__(data) + + if not isinstance(data, dict): + # Old simple schema. + self._publish = None + self._next = prepare_next_clause(data) + else: + # New advanced schema. + self._publish = self._spec_property('publish', publish.PublishSpec) + self._next = prepare_next_clause(data.get('next')) + + @classmethod + def get_schema(cls, includes=['definitions']): + return super(OnClauseSpec, cls).get_schema(includes) + + def get_publish(self): + return self._publish + + def get_next(self): + return self._next + + +def _as_list_of_tuples(data): + if not data: + return [] + + if isinstance(data, six.string_types): + return [_as_tuple(data)] + + return [_as_tuple(item) for item in data] + + +def _as_tuple(val): + return list(val.items())[0] if isinstance(val, dict) else (val, '') + + +def prepare_next_clause(next_clause): + list_of_tuples = _as_list_of_tuples(next_clause) + + for i, task in enumerate(list_of_tuples): + task_name, params = OnClauseSpec._parse_cmd_and_input(task[0]) + + list_of_tuples[i] = (task_name, task[1], params) + + return list_of_tuples diff -Nru mistral-4.0.0/mistral/lang/v2/policies.py mistral-5.0.0~b2/mistral/lang/v2/policies.py --- mistral-4.0.0/mistral/lang/v2/policies.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/policies.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,76 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mistral.lang import types +from mistral.lang.v2 import base +from mistral.lang.v2 import retry_policy + + +class PoliciesSpec(base.BaseSpec): + # See http://json-schema.org + _schema = { + "type": "object", + "properties": { + "retry": types.ANY, + "wait-before": types.EXPRESSION_OR_POSITIVE_INTEGER, + "wait-after": types.EXPRESSION_OR_POSITIVE_INTEGER, + "timeout": types.EXPRESSION_OR_POSITIVE_INTEGER, + "pause-before": types.EXPRESSION_OR_BOOLEAN, + "concurrency": types.EXPRESSION_OR_POSITIVE_INTEGER, + }, + "additionalProperties": False + } + + @classmethod + def get_schema(cls, includes=['definitions']): + return super(PoliciesSpec, cls).get_schema(includes) + + def __init__(self, data): + super(PoliciesSpec, self).__init__(data) + + self._retry = self._spec_property('retry', retry_policy.RetrySpec) + self._wait_before = data.get('wait-before', 0) + self._wait_after = data.get('wait-after', 0) + self._timeout = data.get('timeout', 0) + self._pause_before = data.get('pause-before', False) + self._concurrency = data.get('concurrency', 0) + + def validate_schema(self): + super(PoliciesSpec, self).validate_schema() + + # Validate YAQL expressions. 
+ self.validate_expr(self._data.get('wait-before', 0)) + self.validate_expr(self._data.get('wait-after', 0)) + self.validate_expr(self._data.get('timeout', 0)) + self.validate_expr(self._data.get('pause-before', False)) + self.validate_expr(self._data.get('concurrency', 0)) + + def get_retry(self): + return self._retry + + def get_wait_before(self): + return self._wait_before + + def get_wait_after(self): + return self._wait_after + + def get_timeout(self): + return self._timeout + + def get_pause_before(self): + return self._pause_before + + def get_concurrency(self): + return self._concurrency diff -Nru mistral-4.0.0/mistral/lang/v2/publish.py mistral-5.0.0~b2/mistral/lang/v2/publish.py --- mistral-4.0.0/mistral/lang/v2/publish.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/publish.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,61 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from mistral import exceptions as exc +from mistral.lang import types +from mistral.lang.v2 import base + + +class PublishSpec(base.BaseSpec): + _schema = { + "type": "object", + "properties": { + "branch": types.NONEMPTY_DICT, + "global": types.NONEMPTY_DICT, + "atomic": types.NONEMPTY_DICT + }, + "additionalProperties": False + } + + def __init__(self, data): + super(PublishSpec, self).__init__(data) + + self._branch = self._data.get('branch') + self._global = self._data.get('global') + self._atomic = self._data.get('atomic') + + @classmethod + def get_schema(cls, includes=['definitions']): + return super(PublishSpec, cls).get_schema(includes) + + def validate_semantics(self): + if not self._branch and not self._global and not self._atomic: + raise exc.InvalidModelException( + "Either 'branch', 'global' or 'atomic' must be specified: " + % self._data + ) + + self.validate_expr(self._branch) + self.validate_expr(self._global) + self.validate_expr(self._atomic) + + def get_branch(self): + return self._branch + + def get_global(self): + return self._global + + def get_atomic(self): + return self._atomic diff -Nru mistral-4.0.0/mistral/lang/v2/retry_policy.py mistral-5.0.0~b2/mistral/lang/v2/retry_policy.py --- mistral-4.0.0/mistral/lang/v2/retry_policy.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/retry_policy.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from mistral.lang import types +from mistral.lang.v2 import base + + +class RetrySpec(base.BaseSpec): + # See http://json-schema.org + _retry_dict_schema = { + "type": "object", + "properties": { + "count": { + "oneOf": [ + types.EXPRESSION, + types.POSITIVE_INTEGER + ] + }, + "break-on": types.EXPRESSION, + "continue-on": types.EXPRESSION, + "delay": { + "oneOf": [ + types.EXPRESSION, + types.POSITIVE_INTEGER + ] + }, + }, + "required": ["delay", "count"], + "additionalProperties": False + } + + _schema = { + "oneOf": [ + _retry_dict_schema, + types.NONEMPTY_STRING + ] + } + + @classmethod + def get_schema(cls, includes=['definitions']): + return super(RetrySpec, cls).get_schema(includes) + + def __init__(self, data): + data = self._transform_retry_one_line(data) + + super(RetrySpec, self).__init__(data) + + self._break_on = data.get('break-on') + self._count = data.get('count') + self._continue_on = data.get('continue-on') + self._delay = data['delay'] + + def _transform_retry_one_line(self, retry): + if isinstance(retry, six.string_types): + _, params = self._parse_cmd_and_input(retry) + return params + + return retry + + def validate_schema(self): + super(RetrySpec, self).validate_schema() + + # Validate YAQL expressions. 
+ self.validate_expr(self._data.get('count')) + self.validate_expr(self._data.get('delay')) + self.validate_expr(self._data.get('break-on')) + self.validate_expr(self._data.get('continue-on')) + + def get_count(self): + return self._count + + def get_break_on(self): + return self._break_on + + def get_continue_on(self): + return self._continue_on + + def get_delay(self): + return self._delay diff -Nru mistral-4.0.0/mistral/lang/v2/task_defaults.py mistral-5.0.0~b2/mistral/lang/v2/task_defaults.py --- mistral-4.0.0/mistral/lang/v2/task_defaults.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/task_defaults.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,108 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from mistral.lang import types +from mistral.lang.v2 import base +from mistral.lang.v2 import on_clause +from mistral.lang.v2 import policies + + +# TODO(rakhmerov): This specification should be broken into two separate +# specs for direct and reverse workflows. It's weird to combine them into +# one because they address different use cases. 
+ + +class TaskDefaultsSpec(base.BaseSpec): + # See http://json-schema.org + _schema = { + "type": "object", + "properties": { + "retry": types.ANY, + "wait-before": types.ANY, + "wait-after": types.ANY, + "timeout": types.ANY, + "pause-before": types.ANY, + "concurrency": types.ANY, + "on-complete": types.ANY, + "on-success": types.ANY, + "on-error": types.ANY, + "requires": { + "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST] + } + }, + "additionalProperties": False + } + + @classmethod + def get_schema(cls, includes=['definitions']): + return super(TaskDefaultsSpec, cls).get_schema(includes) + + def __init__(self, data): + super(TaskDefaultsSpec, self).__init__(data) + + self._policies = self._group_spec( + policies.PoliciesSpec, + 'retry', + 'wait-before', + 'wait-after', + 'timeout', + 'pause-before', + 'concurrency' + ) + + on_spec_cls = on_clause.OnClauseSpec + + self._on_complete = self._spec_property('on-complete', on_spec_cls) + self._on_success = self._spec_property('on-success', on_spec_cls) + self._on_error = self._spec_property('on-error', on_spec_cls) + + # TODO(rakhmerov): 'requires' should reside in a different spec for + # reverse workflows. + self._requires = data.get('requires', []) + + def validate_semantics(self): + # Validate YAQL expressions. 
+ self._validate_transitions(self._on_complete) + self._validate_transitions(self._on_success) + self._validate_transitions(self._on_error) + + def _validate_transitions(self, on_clause_spec): + val = on_clause_spec.get_next() if on_clause_spec else [] + + if not val: + return + + [self.validate_expr(t) + for t in ([val] if isinstance(val, six.string_types) else val)] + + def get_policies(self): + return self._policies + + def get_on_complete(self): + return self._on_complete + + def get_on_success(self): + return self._on_success + + def get_on_error(self): + return self._on_error + + def get_requires(self): + if isinstance(self._requires, six.string_types): + return [self._requires] + + return self._requires diff -Nru mistral-4.0.0/mistral/lang/v2/tasks.py mistral-5.0.0~b2/mistral/lang/v2/tasks.py --- mistral-4.0.0/mistral/lang/v2/tasks.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/tasks.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,353 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import json +import re +import six + +from mistral import exceptions as exc +from mistral import expressions +from mistral.lang import types +from mistral.lang.v2 import base +from mistral.lang.v2 import on_clause +from mistral.lang.v2 import policies +from mistral.lang.v2 import publish +from mistral import utils +from mistral.workflow import states + +_expr_ptrns = [expressions.patterns[name] for name in expressions.patterns] +WITH_ITEMS_PTRN = re.compile( + "\s*([\w\d_\-]+)\s*in\s*(\[.+\]|%s)" % '|'.join(_expr_ptrns) +) +RESERVED_TASK_NAMES = [ + 'noop', + 'fail', + 'succeed', + 'pause' +] + + +class TaskSpec(base.BaseSpec): + # See http://json-schema.org + _polymorphic_key = ('type', 'direct') + + _schema = { + "type": "object", + "properties": { + "type": types.WORKFLOW_TYPE, + "action": types.NONEMPTY_STRING, + "workflow": types.NONEMPTY_STRING, + "input": types.NONEMPTY_DICT, + "with-items": { + "oneOf": [ + types.NONEMPTY_STRING, + types.UNIQUE_STRING_LIST + ] + }, + "publish": types.NONEMPTY_DICT, + "publish-on-error": types.NONEMPTY_DICT, + "retry": types.ANY, + "wait-before": types.ANY, + "wait-after": types.ANY, + "timeout": types.ANY, + "pause-before": types.ANY, + "concurrency": types.ANY, + "target": types.NONEMPTY_STRING, + "keep-result": types.EXPRESSION_OR_BOOLEAN, + "safe-rerun": types.EXPRESSION_OR_BOOLEAN + }, + "additionalProperties": False, + "anyOf": [ + { + "not": { + "type": "object", + "required": ["action", "workflow"] + }, + }, + { + "oneOf": [ + { + "type": "object", + "required": ["action"] + }, + { + "type": "object", + "required": ["workflow"] + } + ] + } + ] + } + + def __init__(self, data): + super(TaskSpec, self).__init__(data) + + self._name = data['name'] + self._description = data.get('description') + self._action = data.get('action') + self._workflow = data.get('workflow') + self._input = data.get('input', {}) + self._with_items = self._transform_with_items() + self._publish = data.get('publish', {}) + 
self._publish_on_error = data.get('publish-on-error', {}) + self._policies = self._group_spec( + policies.PoliciesSpec, + 'retry', + 'wait-before', + 'wait-after', + 'timeout', + 'pause-before', + 'concurrency' + ) + self._target = data.get('target') + self._keep_result = data.get('keep-result', True) + self._safe_rerun = data.get('safe-rerun', False) + + self._process_action_and_workflow() + + def validate_schema(self): + super(TaskSpec, self).validate_schema() + + action = self._data.get('action') + workflow = self._data.get('workflow') + + # Validate YAQL expressions. + if action or workflow: + inline_params = self._parse_cmd_and_input(action or workflow)[1] + self.validate_expr(inline_params) + + self.validate_expr(self._data.get('input', {})) + self.validate_expr(self._data.get('publish', {})) + self.validate_expr(self._data.get('publish-on-error', {})) + self.validate_expr(self._data.get('keep-result', {})) + self.validate_expr(self._data.get('safe-rerun', {})) + + def _transform_with_items(self): + raw = self._data.get('with-items', []) + with_items = {} + + if isinstance(raw, six.string_types): + raw = [raw] + + for item in raw: + if not isinstance(item, six.string_types): + raise exc.InvalidModelException( + "'with-items' elements should be strings: %s" % self._data + ) + + match = re.match(WITH_ITEMS_PTRN, item) + + if not match: + msg = ("Wrong format of 'with-items' property. Please use " + "format 'var in {[some, list] | <%% $.array %%> }: " + "%s" % self._data) + raise exc.InvalidModelException(msg) + + match_groups = match.groups() + var_name = match_groups[0] + array = match_groups[1] + + # Validate YAQL expression that may follow after "in" for the + # with-items syntax "var in {[some, list] | <% $.array %> }". 
+ self.validate_expr(array) + + if array.startswith('['): + try: + array = json.loads(array) + except Exception as e: + msg = ("Invalid array in 'with-items' clause: " + "%s, error: %s" % (array, str(e))) + raise exc.InvalidModelException(msg) + + with_items[var_name] = array + + return with_items + + def _process_action_and_workflow(self): + params = {} + + if self._action: + self._action, params = self._parse_cmd_and_input(self._action) + elif self._workflow: + self._workflow, params = self._parse_cmd_and_input( + self._workflow) + else: + self._action = 'std.noop' + + utils.merge_dicts(self._input, params) + + def get_name(self): + return self._name + + def get_description(self): + return self._description + + def get_action_name(self): + return self._action if self._action else None + + def get_workflow_name(self): + return self._workflow + + def get_input(self): + return self._input + + def get_with_items(self): + return self._with_items + + def get_policies(self): + return self._policies + + def get_target(self): + return self._target + + def get_publish(self, state): + spec = None + + if state == states.SUCCESS and self._publish: + spec = publish.PublishSpec({'branch': self._publish}) + elif state == states.ERROR and self._publish_on_error: + spec = publish.PublishSpec( + {'branch': self._publish_on_error} + ) + + return spec + + def get_keep_result(self): + return self._keep_result + + def get_safe_rerun(self): + return self._safe_rerun + + def get_type(self): + return (utils.WORKFLOW_TASK_TYPE if self._workflow + else utils.ACTION_TASK_TYPE) + + +class DirectWorkflowTaskSpec(TaskSpec): + _polymorphic_value = 'direct' + + _direct_workflow_schema = { + "type": "object", + "properties": { + "type": {"enum": [_polymorphic_value]}, + "join": { + "oneOf": [ + {"enum": ["all", "one"]}, + types.POSITIVE_INTEGER + ] + }, + "on-complete": types.ANY, + "on-success": types.ANY, + "on-error": types.ANY + } + } + + _schema = utils.merge_dicts( + 
copy.deepcopy(TaskSpec._schema), + _direct_workflow_schema + ) + + def __init__(self, data): + super(DirectWorkflowTaskSpec, self).__init__(data) + + self._join = data.get('join') + + on_spec_cls = on_clause.OnClauseSpec + + self._on_complete = self._spec_property('on-complete', on_spec_cls) + self._on_success = self._spec_property('on-success', on_spec_cls) + self._on_error = self._spec_property('on-error', on_spec_cls) + + def validate_semantics(self): + # Validate YAQL expressions. + self._validate_transitions(self._on_complete) + self._validate_transitions(self._on_success) + self._validate_transitions(self._on_error) + + def _validate_transitions(self, on_clause_spec): + val = on_clause_spec.get_next() if on_clause_spec else [] + + if not val: + return + + [self.validate_expr(t) + for t in ([val] if isinstance(val, six.string_types) else val)] + + def get_publish(self, state): + spec = super(DirectWorkflowTaskSpec, self).get_publish(state) + + # TODO(rakhmerov): How do we need to resolve a possible conflict + # between 'on-complete' and 'on-success/on-error' and + # 'publish/publish-on-error'? For now we assume that 'on-error' + # and 'on-success' take precedence over on-complete. 
+ + on_clause = self._on_complete + + if state == states.SUCCESS: + on_clause = self._on_success + elif state == states.ERROR: + on_clause = self._on_error + + if not on_clause: + return spec + + return on_clause.get_publish() or spec + + def get_join(self): + return self._join + + def get_on_complete(self): + return self._on_complete + + def get_on_success(self): + return self._on_success + + def get_on_error(self): + return self._on_error + + +class ReverseWorkflowTaskSpec(TaskSpec): + _polymorphic_value = 'reverse' + + _reverse_workflow_schema = { + "type": "object", + "properties": { + "type": {"enum": [_polymorphic_value]}, + "requires": { + "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST] + } + } + } + + _schema = utils.merge_dicts( + copy.deepcopy(TaskSpec._schema), + _reverse_workflow_schema + ) + + def __init__(self, data): + super(ReverseWorkflowTaskSpec, self).__init__(data) + + self._requires = data.get('requires', []) + + def get_requires(self): + if isinstance(self._requires, six.string_types): + return [self._requires] + + return self._requires + + +class TaskSpecList(base.BaseSpecList): + item_class = TaskSpec diff -Nru mistral-4.0.0/mistral/lang/v2/workbook.py mistral-5.0.0~b2/mistral/lang/v2/workbook.py --- mistral-4.0.0/mistral/lang/v2/workbook.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/workbook.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,78 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from mistral.lang import types +from mistral.lang.v2 import actions as act +from mistral.lang.v2 import base +from mistral.lang.v2 import workflows as wf + +# We want to match any single word that isn't exactly "version" +NON_VERSION_WORD_REGEX = "^(?!version$)[\w-]+$" + + +class WorkbookSpec(base.BaseSpec): + # See http://json-schema.org + + _schema = { + "type": "object", + "properties": { + "version": {"enum": ["2.0", 2.0]}, + "actions": { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^version$": {"enum": ["2.0", 2.0]}, + NON_VERSION_WORD_REGEX: types.ANY + }, + "additionalProperties": False + }, + "workflows": { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^version$": {"enum": ["2.0", 2.0]}, + NON_VERSION_WORD_REGEX: types.ANY + }, + "additionalProperties": False + } + }, + "additionalProperties": False + } + + def __init__(self, data): + super(WorkbookSpec, self).__init__(data) + + self._inject_version(['actions', 'workflows']) + + self._name = data['name'] + self._description = data.get('description') + self._tags = data.get('tags', []) + self._actions = self._spec_property('actions', act.ActionSpecList) + self._workflows = self._spec_property('workflows', wf.WorkflowSpecList) + + def get_name(self): + return self._name + + def get_description(self): + return self._description + + def get_tags(self): + return self._tags + + def get_actions(self): + return self._actions + + def get_workflows(self): + return self._workflows diff -Nru mistral-4.0.0/mistral/lang/v2/workflows.py mistral-5.0.0~b2/mistral/lang/v2/workflows.py --- mistral-4.0.0/mistral/lang/v2/workflows.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/lang/v2/workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,398 @@ +# Copyright 2015 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_utils import uuidutils +import six +import threading + +from mistral import exceptions as exc +from mistral.lang import types +from mistral.lang.v2 import base +from mistral.lang.v2 import task_defaults +from mistral.lang.v2 import tasks +from mistral import utils + + +class WorkflowSpec(base.BaseSpec): + # See http://json-schema.org + + _polymorphic_key = ('type', 'direct') + + _meta_schema = { + "type": "object", + "properties": { + "type": types.WORKFLOW_TYPE, + "task-defaults": types.NONEMPTY_DICT, + "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST, + "output": types.NONEMPTY_DICT, + "output-on-error": types.NONEMPTY_DICT, + "vars": types.NONEMPTY_DICT + }, + "required": ["tasks"], + "additionalProperties": False + } + + def __init__(self, data): + super(WorkflowSpec, self).__init__(data) + + self._name = data['name'] + self._description = data.get('description') + self._tags = data.get('tags', []) + self._type = data['type'] if 'type' in data else 'direct' + self._input = utils.get_dict_from_entries(data.get('input', [])) + self._output = data.get('output', {}) + self._output_on_error = data.get('output-on-error', {}) + self._vars = data.get('vars', {}) + + self._task_defaults = self._spec_property( + 'task-defaults', + task_defaults.TaskDefaultsSpec + ) + + # Inject 'type' here, so instantiate_spec function can recognize the + # specific subclass of TaskSpec. 
+ for task in six.itervalues(self._data.get('tasks')): + task['type'] = self._type + + self._tasks = self._spec_property('tasks', tasks.TaskSpecList) + + def validate_schema(self): + super(WorkflowSpec, self).validate_schema() + + if not self._data.get('tasks'): + raise exc.InvalidModelException( + "Workflow doesn't have any tasks [data=%s]" % self._data + ) + + # Validate expressions. + self.validate_expr(self._data.get('output', {})) + self.validate_expr(self._data.get('vars', {})) + + def validate_semantics(self): + super(WorkflowSpec, self).validate_semantics() + + # Distinguish workflow name from workflow UUID. + if uuidutils.is_uuid_like(self._name): + raise exc.InvalidModelException( + "Workflow name cannot be in the format of UUID." + ) + + def _validate_task_link(self, task_name, allow_engine_cmds=True): + valid_task = self._task_exists(task_name) + + if allow_engine_cmds: + valid_task |= task_name in tasks.RESERVED_TASK_NAMES + + if not valid_task: + raise exc.InvalidModelException( + "Task '%s' not found." 
% task_name + ) + + def _task_exists(self, task_name): + return self.get_tasks()[task_name] is not None + + def get_name(self): + return self._name + + def get_description(self): + return self._description + + def get_tags(self): + return self._tags + + def get_type(self): + return self._type + + def get_input(self): + return self._input + + def get_output(self): + return self._output + + def get_output_on_error(self): + return self._output_on_error + + def get_vars(self): + return self._vars + + def get_task_defaults(self): + return self._task_defaults + + def get_tasks(self): + return self._tasks + + def get_task(self, name): + return self._tasks[name] + + +class DirectWorkflowSpec(WorkflowSpec): + _polymorphic_value = 'direct' + + _schema = { + "properties": { + "tasks": { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^\w+$": types.NONEMPTY_DICT + } + }, + } + } + + def __init__(self, data): + super(DirectWorkflowSpec, self).__init__(data) + + # Init simple dictionary based caches for inbound and + # outbound task specifications. In fact, we don't need + # any special cache implementations here because these + # structures can't grow indefinitely. + self.inbound_tasks_cache_lock = threading.RLock() + self.inbound_tasks_cache = {} + self.outbound_tasks_cache_lock = threading.RLock() + self.outbound_tasks_cache = {} + + def validate_semantics(self): + super(DirectWorkflowSpec, self).validate_semantics() + + # Check if there are start tasks. + if not self.find_start_tasks(): + raise exc.DSLParsingException( + 'Failed to find start tasks in direct workflow. ' + 'There must be at least one task without inbound transition.' 
+ '[workflow_name=%s]' % self._name + ) + + self._check_workflow_integrity() + self._check_join_tasks() + + def _check_workflow_integrity(self): + for t_s in self.get_tasks(): + out_task_names = self.find_outbound_task_names(t_s.get_name()) + + for out_t_name in out_task_names: + self._validate_task_link(out_t_name) + + def _check_join_tasks(self): + join_tasks = [t for t in self.get_tasks() if t.get_join()] + + err_msgs = [] + + for join_t in join_tasks: + t_name = join_t.get_name() + join_val = join_t.get_join() + + in_tasks = self.find_inbound_task_specs(join_t) + + if join_val == 'all': + if len(in_tasks) == 0: + err_msgs.append( + "No inbound tasks for task with 'join: all'" + " [task_name=%s]" % t_name + ) + + continue + + if join_val == 'one': + join_val = 1 + + if len(in_tasks) < join_val: + err_msgs.append( + "Not enough inbound tasks for task with 'join'" + " [task_name=%s, join=%s, inbound_tasks=%s]" % + (t_name, join_val, len(in_tasks)) + ) + + if len(err_msgs) > 0: + raise exc.InvalidModelException('\n'.join(err_msgs)) + + def find_start_tasks(self): + return [ + t_s for t_s in self.get_tasks() + if not self.has_inbound_transitions(t_s) + ] + + def find_inbound_task_specs(self, task_spec): + task_name = task_spec.get_name() + + with self.inbound_tasks_cache_lock: + specs = self.inbound_tasks_cache.get(task_name) + + if specs is not None: + return specs + + specs = [ + t_s for t_s in self.get_tasks() + if self.transition_exists(t_s.get_name(), task_name) + ] + + with self.inbound_tasks_cache_lock: + self.inbound_tasks_cache[task_name] = specs + + return specs + + def find_outbound_task_specs(self, task_spec): + task_name = task_spec.get_name() + + with self.outbound_tasks_cache_lock: + specs = self.outbound_tasks_cache.get(task_name) + + if specs is not None: + return specs + + specs = [ + t_s for t_s in self.get_tasks() + if self.transition_exists(task_name, t_s.get_name()) + ] + + with self.outbound_tasks_cache_lock: + 
self.outbound_tasks_cache[task_name] = specs + + return specs + + def has_inbound_transitions(self, task_spec): + return len(self.find_inbound_task_specs(task_spec)) > 0 + + def has_outbound_transitions(self, task_spec): + return len(self.find_outbound_task_specs(task_spec)) > 0 + + def find_outbound_task_names(self, task_name): + t_names = set() + + for tup in self.get_on_error_clause(task_name): + t_names.add(tup[0]) + + for tup in self.get_on_success_clause(task_name): + t_names.add(tup[0]) + + for tup in self.get_on_complete_clause(task_name): + t_names.add(tup[0]) + + return t_names + + def transition_exists(self, from_task_name, to_task_name): + t_names = self.find_outbound_task_names(from_task_name) + + return to_task_name in t_names + + def get_on_error_clause(self, t_name): + result = [] + + on_clause = self.get_tasks()[t_name].get_on_error() + + if on_clause: + result = on_clause.get_next() + + if not result: + t_defaults = self.get_task_defaults() + + if t_defaults and t_defaults.get_on_error(): + result = self._remove_task_from_clause( + t_defaults.get_on_error().get_next(), + t_name + ) + + return result + + def get_on_success_clause(self, t_name): + result = [] + + on_clause = self.get_tasks()[t_name].get_on_success() + + if on_clause: + result = on_clause.get_next() + + if not result: + t_defaults = self.get_task_defaults() + + if t_defaults and t_defaults.get_on_success(): + result = self._remove_task_from_clause( + t_defaults.get_on_success().get_next(), + t_name + ) + + return result + + def get_on_complete_clause(self, t_name): + result = [] + + on_clause = self.get_tasks()[t_name].get_on_complete() + + if on_clause: + result = on_clause.get_next() + + if not result: + t_defaults = self.get_task_defaults() + + if t_defaults and t_defaults.get_on_complete(): + result = self._remove_task_from_clause( + t_defaults.get_on_complete().get_next(), + t_name + ) + + return result + + @staticmethod + def _remove_task_from_clause(on_clause, t_name): + 
return list([tup for tup in on_clause if tup[0] != t_name]) + + +class ReverseWorkflowSpec(WorkflowSpec): + _polymorphic_value = 'reverse' + + _schema = { + "properties": { + "tasks": { + "type": "object", + "minProperties": 1, + "patternProperties": { + "^\w+$": types.NONEMPTY_DICT + } + }, + } + } + + def validate_semantics(self): + super(ReverseWorkflowSpec, self).validate_semantics() + + self._check_workflow_integrity() + + def _check_workflow_integrity(self): + for t_s in self.get_tasks(): + for req in self.get_task_requires(t_s): + self._validate_task_link(req, allow_engine_cmds=False) + + def get_task_requires(self, task_spec): + requires = set(task_spec.get_requires()) + + defaults = self.get_task_defaults() + + if defaults: + requires |= set(defaults.get_requires()) + + requires.discard(task_spec.get_name()) + + return list(requires) + + +class WorkflowSpecList(base.BaseSpecList): + item_class = WorkflowSpec + + +class WorkflowListSpec(base.BaseListSpec): + item_class = WorkflowSpec + + def get_workflows(self): + return self.get_items() diff -Nru mistral-4.0.0/mistral/rpc/base.py mistral-5.0.0~b2/mistral/rpc/base.py --- mistral-4.0.0/mistral/rpc/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,186 @@ +# Copyright 2015 - Mirantis, Inc. +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging as messaging +from oslo_messaging.rpc import client +from stevedore import driver + +from mistral import exceptions as exc + + +LOG = logging.getLogger(__name__) + + +_IMPL_CLIENT = None +_IMPL_SERVER = None +_TRANSPORT = None + + +def cleanup(): + """Intended to be used by tests to recreate all RPC related objects.""" + + global _TRANSPORT + + _TRANSPORT = None + + +# TODO(rakhmerov): This method seems misplaced. Now we have different kind +# of transports (oslo, kombu) and this module should not have any oslo +# specific things anymore. +def get_transport(): + global _TRANSPORT + + if not _TRANSPORT: + _TRANSPORT = messaging.get_transport(cfg.CONF) + + return _TRANSPORT + + +def get_rpc_server_driver(): + rpc_impl = cfg.CONF.rpc_implementation + + global _IMPL_SERVER + if not _IMPL_SERVER: + _IMPL_SERVER = driver.DriverManager( + 'mistral.rpc.backends', + '%s_server' % rpc_impl + ).driver + + return _IMPL_SERVER + + +def get_rpc_client_driver(): + rpc_impl = cfg.CONF.rpc_implementation + + global _IMPL_CLIENT + if not _IMPL_CLIENT: + _IMPL_CLIENT = driver.DriverManager( + 'mistral.rpc.backends', + '%s_client' % rpc_impl + ).driver + + return _IMPL_CLIENT + + +def _wrap_exception_and_reraise(exception): + message = "%s: %s" % (exception.__class__.__name__, exception.args[0]) + + raise exc.MistralException(message) + + +def wrap_messaging_exception(method): + """This decorator unwrap remote error in one of MistralException. + + oslo.messaging has different behavior on raising exceptions + when fake or rabbit transports are used. In case of rabbit + transport it raises wrapped RemoteError which forwards directly + to API. Wrapped RemoteError contains one of MistralException raised + remotely on Engine and for correct exception interpretation we + need to unwrap and raise given exception and manually send it to + API layer. 
+ """ + def decorator(*args, **kwargs): + try: + return method(*args, **kwargs) + + except exc.MistralException: + raise + except (client.RemoteError, exc.KombuException, Exception) as e: + if hasattr(e, 'exc_type') and hasattr(exc, e.exc_type): + exc_cls = getattr(exc, e.exc_type) + raise exc_cls(e.value) + + _wrap_exception_and_reraise(e) + + return decorator + + +class RPCClient(object): + def __init__(self, conf): + """Base class for RPCClient's drivers + + RPC Client is responsible for sending requests to RPC Server. + All RPC client drivers have to inherit from this class. + + :param conf: Additional config provided by upper layer. + """ + self.conf = conf + + @abc.abstractmethod + def sync_call(self, ctx, method, target=None, **kwargs): + """Synchronous call of RPC method. + + Blocks the thread and wait for method result. + """ + raise NotImplementedError + + @abc.abstractmethod + def async_call(self, ctx, method, target=None, **kwargs): + """Asynchronous call of RPC method. + + Does not block the thread, just send invoking data to + the RPC server and immediately returns nothing. + """ + raise NotImplementedError + + +class RPCServer(object): + def __init__(self, conf): + """Base class for RPCServer's drivers + + RPC Server should listen for request coming from RPC Clients and + respond to them respectively to the registered endpoints. + All RPC server drivers have to inherit from this class. + + :param conf: Additional config provided by upper layer. + """ + self.conf = conf + + @abc.abstractmethod + def register_endpoint(self, endpoint): + """Registers a new RPC endpoint. + + :param endpoint: an object containing methods which + will be used as RPC methods. + """ + raise NotImplementedError + + @abc.abstractmethod + def run(self, executor='blocking'): + """Runs the RPC server. + + :param executor: Executor used to process incoming requests. Different + implementations may support different options. 
+ """ + raise NotImplementedError + + def stop(self, graceful=False): + """Stop the RPC server. + + :param graceful: True if this method call should wait till all + internal threads are finished. + :return: + """ + # No-op by default. + pass + + def wait(self): + """Wait till all internal threads are finished.""" + # No-op by default. + pass diff -Nru mistral-4.0.0/mistral/rpc/clients.py mistral-5.0.0~b2/mistral/rpc/clients.py --- mistral-4.0.0/mistral/rpc/clients.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/clients.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,310 @@ +# Copyright 2014 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo_config import cfg +from osprofiler import profiler + +from mistral import context as auth_ctx +from mistral.engine import base as eng +from mistral.event_engine import base as evt_eng +from mistral.executors import base as exe +from mistral.rpc import base + + +_ENGINE_CLIENT = None +_EXECUTOR_CLIENT = None +_EVENT_ENGINE_CLIENT = None + + +def cleanup(): + """Intended to be used by tests to recreate all RPC related objects.""" + + global _ENGINE_CLIENT + global _EXECUTOR_CLIENT + global _EVENT_ENGINE_CLIENT + + _ENGINE_CLIENT = None + _EXECUTOR_CLIENT = None + _EVENT_ENGINE_CLIENT = None + + +def get_engine_client(): + global _ENGINE_CLIENT + + if not _ENGINE_CLIENT: + _ENGINE_CLIENT = EngineClient(cfg.CONF.engine) + + return _ENGINE_CLIENT + + +def get_executor_client(): + global _EXECUTOR_CLIENT + + if not _EXECUTOR_CLIENT: + _EXECUTOR_CLIENT = ExecutorClient(cfg.CONF.executor) + + return _EXECUTOR_CLIENT + + +def get_event_engine_client(): + global _EVENT_ENGINE_CLIENT + + if not _EVENT_ENGINE_CLIENT: + _EVENT_ENGINE_CLIENT = EventEngineClient(cfg.CONF.event_engine) + + return _EVENT_ENGINE_CLIENT + + +class EngineClient(eng.Engine): + """RPC Engine client.""" + + def __init__(self, rpc_conf_dict): + """Constructs an RPC client for engine. + + :param rpc_conf_dict: Dict containing RPC configuration. + """ + self._client = base.get_rpc_client_driver()(rpc_conf_dict) + + @base.wrap_messaging_exception + def start_workflow(self, wf_identifier, wf_input, description='', + **params): + """Starts workflow sending a request to engine over RPC. + + :return: Workflow execution. + """ + return self._client.sync_call( + auth_ctx.ctx(), + 'start_workflow', + workflow_identifier=wf_identifier, + workflow_input=wf_input or {}, + description=description, + params=params + ) + + @base.wrap_messaging_exception + def start_action(self, action_name, action_input, + description=None, **params): + """Starts action sending a request to engine over RPC. 
+ + :return: Action execution. + """ + return self._client.sync_call( + auth_ctx.ctx(), + 'start_action', + action_name=action_name, + action_input=action_input or {}, + description=description, + params=params + ) + + @base.wrap_messaging_exception + @profiler.trace('engine-client-on-action-complete', hide_args=True) + def on_action_complete(self, action_ex_id, result, wf_action=False, + async_=False): + """Conveys action result to Mistral Engine. + + This method should be used by clients of Mistral Engine to update + state of a action execution once action has executed. One of the + clients of this method is Mistral REST API server that receives + action result from the outside action handlers. + + Note: calling this method serves an event notifying Mistral that + it possibly needs to move the workflow on, i.e. run other workflow + tasks for which all dependencies are satisfied. + + :param action_ex_id: Action execution id. + :param result: Action execution result. + :param wf_action: If True it means that the given id points to + a workflow execution rather than action execution. It happens + when a nested workflow execution sends its result to a parent + workflow. + :param async: If True, run action in asynchronous mode (w/o waiting + for completion). + :return: Action(or workflow if wf_action=True) execution object. + """ + + call = self._client.async_call if async_ else self._client.sync_call + + return call( + auth_ctx.ctx(), + 'on_action_complete', + action_ex_id=action_ex_id, + result=result, + wf_action=wf_action + ) + + @base.wrap_messaging_exception + def pause_workflow(self, wf_ex_id): + """Stops the workflow with the given execution id. + + :param wf_ex_id: Workflow execution id. + :return: Workflow execution. + """ + + return self._client.sync_call( + auth_ctx.ctx(), + 'pause_workflow', + execution_id=wf_ex_id + ) + + @base.wrap_messaging_exception + def rerun_workflow(self, task_ex_id, reset=True, env=None): + """Rerun the workflow. 
+ + This method reruns workflow with the given execution id + at the specific task execution id. + + :param task_ex_id: Task execution id. + :param reset: If true, then reset task execution state and purge + action execution for the task. + :param env: Environment variables to update. + :return: Workflow execution. + """ + + return self._client.sync_call( + auth_ctx.ctx(), + 'rerun_workflow', + task_ex_id=task_ex_id, + reset=reset, + env=env + ) + + @base.wrap_messaging_exception + def resume_workflow(self, wf_ex_id, env=None): + """Resumes the workflow with the given execution id. + + :param wf_ex_id: Workflow execution id. + :param env: Environment variables to update. + :return: Workflow execution. + """ + + return self._client.sync_call( + auth_ctx.ctx(), + 'resume_workflow', + wf_ex_id=wf_ex_id, + env=env + ) + + @base.wrap_messaging_exception + def stop_workflow(self, wf_ex_id, state, message=None): + """Stops workflow execution with given status. + + Once stopped, the workflow is complete with SUCCESS or ERROR, + and can not be resumed. + + :param wf_ex_id: Workflow execution id + :param state: State assigned to the workflow: SUCCESS or ERROR + :param message: Optional information string + + :return: Workflow execution, model.Execution + """ + + return self._client.sync_call( + auth_ctx.ctx(), + 'stop_workflow', + execution_id=wf_ex_id, + state=state, + message=message + ) + + @base.wrap_messaging_exception + def rollback_workflow(self, wf_ex_id): + """Rolls back the workflow with the given execution id. + + :param wf_ex_id: Workflow execution id. + + :return: Workflow execution. 
+ """ + + return self._client.sync_call( + auth_ctx.ctx(), + 'rollback_workflow', + execution_id=wf_ex_id + ) + + +class ExecutorClient(exe.Executor): + """RPC Executor client.""" + + def __init__(self, rpc_conf_dict): + """Constructs an RPC client for the Executor.""" + + self.topic = cfg.CONF.executor.topic + self._client = base.get_rpc_client_driver()(rpc_conf_dict) + + @profiler.trace('executor-client-run-action') + def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, + params, safe_rerun, redelivered=False, + target=None, async_=True): + """Sends a request to run action to executor. + + :param action_ex_id: Action execution id. + :param action_cls_str: Action class name. + :param action_cls_attrs: Action class attributes. + :param params: Action input parameters. + :param safe_rerun: If true, action would be re-run if executor dies + during execution. + :param redelivered: Tells if given action was run before on another + executor. + :param target: Target (group of action executors). + :param async_: If True, run action in asynchronous mode (w/o waiting + for completion). + :return: Action result. 
+ """ + + rpc_kwargs = { + 'action_ex_id': action_ex_id, + 'action_cls_str': action_cls_str, + 'action_cls_attrs': action_cls_attrs, + 'params': params, + 'safe_rerun': safe_rerun + } + + rpc_client_method = (self._client.async_call + if async_ else self._client.sync_call) + + return rpc_client_method(auth_ctx.ctx(), 'run_action', **rpc_kwargs) + + +class EventEngineClient(evt_eng.EventEngine): + """RPC EventEngine client.""" + + def __init__(self, rpc_conf_dict): + """Constructs an RPC client for the EventEngine service.""" + self._client = base.get_rpc_client_driver()(rpc_conf_dict) + + def create_event_trigger(self, trigger, events): + return self._client.sync_call( + auth_ctx.ctx(), + 'create_event_trigger', + trigger=trigger, + events=events + ) + + def delete_event_trigger(self, trigger, events): + return self._client.sync_call( + auth_ctx.ctx(), + 'delete_event_trigger', + trigger=trigger, + events=events + ) + + def update_event_trigger(self, trigger): + return self._client.sync_call( + auth_ctx.ctx(), + 'update_event_trigger', + trigger=trigger, + ) diff -Nru mistral-4.0.0/mistral/rpc/kombu/base.py mistral-5.0.0~b2/mistral/rpc/kombu/base.py --- mistral-4.0.0/mistral/rpc/kombu/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,151 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import kombu + +import oslo_messaging as messaging + +from mistral import config as cfg +from mistral import exceptions as exc +from mistral import serialization as mistral_serialization +from mistral.utils import rpc_utils + +IS_RECEIVED = 'kombu_rpc_is_received' +RESULT = 'kombu_rpc_result' +CORR_ID = 'kombu_rpc_correlation_id' +TYPE = 'kombu_rpc_type' + + +CONF = cfg.CONF + + +def set_transport_options(check_backend=True): + # We can be sure that all needed transport options are registered + # only if we at least once called method get_transport(). Because + # this is the method that registers them. + messaging.get_transport(CONF) + + backend = rpc_utils.get_rpc_backend( + messaging.TransportURL.parse(CONF, CONF.transport_url) + ) + + if check_backend and backend not in ['rabbit', 'kombu']: + raise exc.MistralException("Unsupported backend: %s" % backend) + + +class Base(object): + """Base class for Client and Server.""" + def __init__(self): + self.serializer = None + + @staticmethod + def _make_connection(amqp_host, amqp_port, amqp_user, amqp_password, + amqp_vhost): + """Create connection. + + This method creates object representing the connection to RabbitMQ. + + :param amqp_host: Address of RabbitMQ server. + :param amqp_user: Username for connecting to RabbitMQ. + :param amqp_password: Password matching the given username. + :param amqp_vhost: Virtual host to connect to. + :param amqp_port: Port of RabbitMQ server. + :return: New connection to RabbitMQ. + """ + return kombu.BrokerConnection( + hostname=amqp_host, + userid=amqp_user, + password=amqp_password, + virtual_host=amqp_vhost, + port=amqp_port + ) + + @staticmethod + def _make_exchange(name, durable=False, auto_delete=True, + exchange_type='topic'): + """Make named exchange. + + This method creates object representing exchange on RabbitMQ. It would + create a new exchange if exchange with given name don't exists. + + :param name: Name of the exchange. 
+ :param durable: If set to True, messages on this exchange would be + store on disk - therefore can be retrieve after + failure. + :param auto_delete: If set to True, exchange would be automatically + deleted when none is connected. + :param exchange_type: Type of the exchange. Can be one of 'direct', + 'topic', 'fanout', 'headers'. See Kombu docs for + further details. + :return: Kombu exchange object. + """ + return kombu.Exchange( + name=name, + type=exchange_type, + durable=durable, + auto_delete=auto_delete + ) + + @staticmethod + def _make_queue(name, exchange, routing_key='', + durable=False, auto_delete=True, **kwargs): + """Make named queue for a given exchange. + + This method creates object representing queue in RabbitMQ. It would + create a new queue if queue with given name don't exists. + + :param name: Name of the queue + :param exchange: Kombu Exchange object (can be created using + _make_exchange). + :param routing_key: Routing key for queue. It behaves differently + depending the exchange type. See Kombu docs for + further details. + :param durable: If set to True, messages on this queue would be + store on disk - therefore can be retrieve after + failure. + :param auto_delete: If set to True, queue would be automatically + deleted when none is connected. + :param kwargs: See kombu documentation for all parameters than may be + may be passed to Queue. + :return: Kombu Queue object. 
+ """ + return kombu.Queue( + name=name, + routing_key=routing_key, + exchange=exchange, + durable=durable, + auto_delete=auto_delete, + **kwargs + ) + + def _register_mistral_serialization(self): + """Adds mistral serializer to available serializers in kombu.""" + + self.serializer = mistral_serialization.get_polymorphic_serializer() + + def _serialize_message(self, kwargs): + result = {} + + for argname, arg in kwargs.items(): + result[argname] = self.serializer.serialize(arg) + + return result + + def _deserialize_message(self, kwargs): + result = {} + + for argname, arg in kwargs.items(): + result[argname] = self.serializer.deserialize(arg) + + return result diff -Nru mistral-4.0.0/mistral/rpc/kombu/examples/client.py mistral-5.0.0~b2/mistral/rpc/kombu/examples/client.py --- mistral-4.0.0/mistral/rpc/kombu/examples/client.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/examples/client.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from mistral.rpc.kombu import kombu_client + + +# Example of using Kombu based RPC client. 
+def main(): + conf = { + 'user_id': 'guest', + 'password': 'secret', + 'exchange': 'my_exchange', + 'topic': 'my_topic', + 'server_id': 'host', + 'host': 'localhost', + 'port': 5672, + 'virtual_host': '/' + } + kombu_rpc = kombu_client.KombuRPCClient(conf) + + print(" [x] Requesting ...") + + ctx = type('context', (object,), {'to_dict': lambda self: {}})() + + response = kombu_rpc.sync_call(ctx, 'fib', n=44) + + print(" [.] Got %r" % (response,)) + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru mistral-4.0.0/mistral/rpc/kombu/examples/server.py mistral-5.0.0~b2/mistral/rpc/kombu/examples/server.py --- mistral-4.0.0/mistral/rpc/kombu/examples/server.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/examples/server.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from mistral.rpc.kombu import kombu_server + + +# Simple example of endpoint of RPC server, which just +# calculates given fibonacci number. +class MyServer(object): + cache = {0: 0, 1: 1} + + def fib(self, rpc_ctx, n): + if self.cache.get(n) is None: + self.cache[n] = (self.fib(rpc_ctx, n - 1) + + self.fib(rpc_ctx, n - 2)) + return self.cache[n] + + def get_name(self, rpc_ctx): + return self.__class__.__name__ + + +# Example of using Kombu based RPC server. 
+def main(): + conf = { + 'user_id': 'guest', + 'password': 'secret', + 'exchange': 'my_exchange', + 'topic': 'my_topic', + 'server_id': 'host', + 'host': 'localhost', + 'port': 5672, + 'virtual_host': '/' + } + rpc_server = kombu_server.KombuRPCServer(conf) + rpc_server.register_endpoint(MyServer()) + rpc_server.run() + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru mistral-4.0.0/mistral/rpc/kombu/kombu_client.py mistral-5.0.0~b2/mistral/rpc/kombu/kombu_client.py --- mistral-4.0.0/mistral/rpc/kombu/kombu_client.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/kombu_client.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,169 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from six import moves + +import kombu +from oslo_log import log as logging + +from mistral import config as cfg +from mistral import exceptions as exc +from mistral.rpc import base as rpc_base +from mistral.rpc.kombu import base as kombu_base +from mistral.rpc.kombu import kombu_hosts +from mistral.rpc.kombu import kombu_listener +from mistral import utils + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + +CONF.import_opt('rpc_response_timeout', 'mistral.config') + + +class KombuRPCClient(rpc_base.RPCClient, kombu_base.Base): + def __init__(self, conf): + super(KombuRPCClient, self).__init__(conf) + + kombu_base.set_transport_options() + + self._register_mistral_serialization() + + self.topic = conf.topic + self.server_id = conf.host + + self._hosts = kombu_hosts.KombuHosts(CONF) + + self.exchange = CONF.control_exchange + self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host + self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues + self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete + self._timeout = CONF.rpc_response_timeout + self.routing_key = self.topic + + hosts = self._hosts.get_hosts() + + self._connections = [] + + for host in hosts: + conn = self._make_connection( + host.hostname, + host.port, + host.username, + host.password, + self.virtual_host + ) + self._connections.append(conn) + + self.conn = self._connections[0] + + # Create exchange. + exchange = self._make_exchange( + self.exchange, + durable=self.durable_queue, + auto_delete=self.auto_delete + ) + + # Create queue. 
+ self.queue_name = utils.generate_unicode_uuid() + self.callback_queue = kombu.Queue( + self.queue_name, + exchange=exchange, + routing_key=self.queue_name, + durable=False, + exclusive=True, + auto_delete=True + ) + + self._listener = kombu_listener.KombuRPCListener( + connections=self._connections, + callback_queue=self.callback_queue + ) + + self._listener.start() + + def _wait_for_result(self, correlation_id): + """Waits for the result from the server. + + Waits for the result from the server, checks every second if + a timeout occurred. If a timeout occurred - the `RpcTimeout` exception + will be raised. + """ + try: + return self._listener.get_result(correlation_id, self._timeout) + except moves.queue.Empty: + raise exc.MistralException("RPC Request timeout") + + def _call(self, ctx, method, target, async_=False, **kwargs): + """Performs a remote call for the given method. + + :param ctx: authentication context associated with mistral + :param method: name of the method that should be executed + :param kwargs: keyword parameters for the remote-method + :param target: Server name + :param async: bool value means whether the request is + asynchronous or not. + :return: result of the method or None if async. + """ + correlation_id = utils.generate_unicode_uuid() + + body = { + 'rpc_ctx': ctx.to_dict(), + 'rpc_method': method, + 'arguments': self._serialize_message(kwargs), + 'async': async_ + } + + LOG.debug("Publish request: {0}".format(body)) + + try: + if not async_: + self._listener.add_listener(correlation_id) + + # Publish request. + with kombu.producers[self.conn].acquire(block=True) as producer: + producer.publish( + body=body, + exchange=self.exchange, + routing_key=self.topic, + reply_to=self.queue_name, + correlation_id=correlation_id, + delivery_mode=2 + ) + + # Start waiting for response. 
+ if async_: + return + + result = self._wait_for_result(correlation_id) + res_type = result[kombu_base.TYPE] + res_object = result[kombu_base.RESULT] + + if res_type == 'error': + raise res_object + else: + res_object = self._deserialize_message(res_object)['body'] + + finally: + if not async_: + self._listener.remove_listener(correlation_id) + + return res_object + + def sync_call(self, ctx, method, target=None, **kwargs): + return self._call(ctx, method, async_=False, target=target, **kwargs) + + def async_call(self, ctx, method, target=None, **kwargs): + return self._call(ctx, method, async_=True, target=target, **kwargs) diff -Nru mistral-4.0.0/mistral/rpc/kombu/kombu_hosts.py mistral-5.0.0~b2/mistral/rpc/kombu/kombu_hosts.py --- mistral-4.0.0/mistral/rpc/kombu/kombu_hosts.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/kombu_hosts.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,59 @@ +# Copyright (c) 2017 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import itertools +import random +import six + +import oslo_messaging as messaging + + +class KombuHosts(object): + def __init__(self, conf): + self._conf = conf + + transport_url = messaging.TransportURL.parse( + self._conf, + self._conf.transport_url + ) + + if transport_url.hosts: + self._hosts = transport_url.hosts + else: + username = self._conf.oslo_messaging_rabbit.rabbit_userid + password = self._conf.oslo_messaging_rabbit.rabbit_password + + self._hosts = [] + + for host in self._conf.oslo_messaging_rabbit.rabbit_hosts: + hostname, port = host.split(':') + + self._hosts.append(messaging.TransportHost( + hostname, + port, + username, + password + )) + + if len(self._hosts) > 1: + random.shuffle(self._hosts) + + self._hosts_cycle = itertools.cycle(self._hosts) + + def get_host(self): + return six.next(self._hosts_cycle) + + def get_hosts(self): + return self._hosts diff -Nru mistral-4.0.0/mistral/rpc/kombu/kombu_listener.py mistral-5.0.0~b2/mistral/rpc/kombu/kombu_listener.py --- mistral-4.0.0/mistral/rpc/kombu/kombu_listener.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/kombu_listener.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,109 @@ +# Copyright (c) 2016 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import itertools +from kombu.mixins import ConsumerMixin +import six +import threading + +from oslo_log import log as logging + +from mistral.rpc.kombu import base as kombu_base + +LOG = logging.getLogger(__name__) + + +class KombuRPCListener(ConsumerMixin): + + def __init__(self, connections, callback_queue): + self._results = {} + self._connections = itertools.cycle(connections) + self._callback_queue = callback_queue + self._thread = None + self.connection = six.next(self._connections) + + # TODO(ddeja): Those 2 options should be gathered from config. + self._sleep_time = 1 + self._max_sleep_time = 512 + + def add_listener(self, correlation_id): + self._results[correlation_id] = six.moves.queue.Queue() + + def remove_listener(self, correlation_id): + if correlation_id in self._results: + del self._results[correlation_id] + + def get_consumers(self, Consumer, channel): + return [Consumer( + self._callback_queue, + callbacks=[self.on_message], + accept=['pickle', 'json'] + )] + + def start(self): + if self._thread is None: + self._thread = threading.Thread(target=self.run) + self._thread.daemon = True + self._thread.start() + + def on_message(self, response, message): + """Callback on response. + + This method is automatically called when a response is incoming and + decides if it is the message we are waiting for - the message with the + result. 
+ + :param response: the body of the amqp message already deserialized + by kombu + :param message: the plain amqp kombu.message with additional + information + """ + LOG.debug("Got response: {0}".format(response)) + + try: + message.ack() + except Exception as e: + LOG.exception("Failed to acknowledge AMQP message: %s" % e) + else: + LOG.debug("AMQP message acknowledged.") + + correlation_id = message.properties['correlation_id'] + + queue = self._results.get(correlation_id, None) + + if queue: + result = { + kombu_base.TYPE: 'error' + if message.properties.get('type') == 'error' + else None, + kombu_base.RESULT: response + } + queue.put(result) + else: + LOG.debug( + "Got a response, but seems like no process is waiting for" + "it [correlation_id={0}]".format(correlation_id) + ) + + def get_result(self, correlation_id, timeout): + return self._results[correlation_id].get(block=True, timeout=timeout) + + def on_connection_error(self, exc, interval): + self.connection = six.next(self._connections) + + LOG.debug("Broker connection failed: %s" % exc) + LOG.debug("Sleeping for %s seconds, then retrying connection" % + interval + ) diff -Nru mistral-4.0.0/mistral/rpc/kombu/kombu_server.py mistral-5.0.0~b2/mistral/rpc/kombu/kombu_server.py --- mistral-4.0.0/mistral/rpc/kombu/kombu_server.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/kombu/kombu_server.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,269 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import amqp +import socket +import threading +import time + +import kombu +from oslo_config import cfg +from oslo_log import log as logging +from stevedore import driver + +from mistral import context as auth_ctx +from mistral import exceptions as exc +from mistral.rpc import base as rpc_base +from mistral.rpc.kombu import base as kombu_base +from mistral.rpc.kombu import kombu_hosts + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + +_pool_opts = [ + cfg.IntOpt('executor_thread_pool_size', + default=64, + deprecated_name="rpc_thread_pool_size", + help='Size of executor thread pool when' + ' executor is threading or eventlet.'), +] + + +class KombuRPCServer(rpc_base.RPCServer, kombu_base.Base): + def __init__(self, conf): + super(KombuRPCServer, self).__init__(conf) + + CONF.register_opts(_pool_opts) + + kombu_base.set_transport_options() + + self._register_mistral_serialization() + + self.topic = conf.topic + self.server_id = conf.host + + self._hosts = kombu_hosts.KombuHosts(CONF) + + self._executor_threads = CONF.executor_thread_pool_size + self.exchange = CONF.control_exchange + self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host + self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues + self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete + self.routing_key = self.topic + self.channel = None + self.conn = None + self._running = threading.Event() + self._stopped = threading.Event() + self.endpoints = [] + self._worker = None + + # TODO(ddeja): Those 2 options should be gathered from config. 
+ self._sleep_time = 1 + self._max_sleep_time = 512 + + @property + def is_running(self): + """Return whether server is running.""" + return self._running.is_set() + + def run(self, executor='blocking'): + """Start the server.""" + self._prepare_worker(executor) + + while True: + try: + _retry_connection = False + host = self._hosts.get_host() + + self.conn = self._make_connection( + host.hostname, + host.port, + host.username, + host.password, + self.virtual_host, + ) + + conn = kombu.connections[self.conn].acquire(block=True) + + exchange = self._make_exchange( + self.exchange, + durable=self.durable_queue, + auto_delete=self.auto_delete + ) + + queue = self._make_queue( + self.topic, + exchange, + routing_key=self.routing_key, + durable=self.durable_queue, + auto_delete=self.auto_delete + ) + with conn.Consumer( + queues=queue, + callbacks=[self._process_message], + ) as consumer: + consumer.qos(prefetch_count=1) + + self._running.set() + self._stopped.clear() + + LOG.info("Connected to AMQP at %s:%s" % ( + host.hostname, + host.port + )) + + while self.is_running: + try: + conn.drain_events(timeout=1) + except socket.timeout: + pass + except KeyboardInterrupt: + self.stop() + + LOG.info("Server with id='{0}' stopped.".format( + self.server_id)) + + return + except (socket.error, amqp.exceptions.ConnectionForced) as e: + LOG.debug("Broker connection failed: %s" % e) + _retry_connection = True + finally: + self._stopped.set() + + if _retry_connection: + LOG.debug( + "Sleeping for %s seconds, than retrying connection" % + self._sleep_time + ) + + time.sleep(self._sleep_time) + + self._sleep_time = min( + self._sleep_time * 2, + self._max_sleep_time + ) + + def stop(self, graceful=False): + self._running.clear() + + if graceful: + self.wait() + + def wait(self): + self._stopped.wait() + + try: + self._worker.shutdown(wait=True) + except AttributeError as e: + LOG.warning("Cannot stop worker in graceful way: %s" % e) + + def _get_rpc_method(self, method_name): + for 
endpoint in self.endpoints: + if hasattr(endpoint, method_name): + return getattr(endpoint, method_name) + + return None + + @staticmethod + def _set_auth_ctx(ctx): + if not isinstance(ctx, dict): + return + + context = auth_ctx.MistralContext(**ctx) + auth_ctx.set_ctx(context) + + return context + + def publish_message(self, body, reply_to, corr_id, res_type='response'): + if res_type != 'error': + body = self._serialize_message({'body': body}) + + with kombu.producers[self.conn].acquire(block=True) as producer: + producer.publish( + body=body, + exchange=self.exchange, + routing_key=reply_to, + correlation_id=corr_id, + serializer='pickle' if res_type == 'error' else 'json', + type=res_type + ) + + def _on_message_safe(self, request, message): + try: + return self._on_message(request, message) + except Exception as e: + LOG.warning( + "Got exception while consuming message. Exception would be " + "send back to the caller." + ) + LOG.debug("Exceptions: %s" % str(e)) + + # Wrap exception into another exception for compability with oslo. 
+ self.publish_message( + exc.KombuException(e), + message.properties['reply_to'], + message.properties['correlation_id'], + res_type='error' + ) + finally: + message.ack() + + def _on_message(self, request, message): + LOG.debug('Received message %s', + request) + + is_async = request.get('async', False) + rpc_ctx = request.get('rpc_ctx') + redelivered = message.delivery_info.get('redelivered', None) + rpc_method_name = request.get('rpc_method') + arguments = self._deserialize_message(request.get('arguments')) + correlation_id = message.properties['correlation_id'] + reply_to = message.properties['reply_to'] + + if redelivered is not None: + rpc_ctx['redelivered'] = redelivered + + rpc_context = self._set_auth_ctx(rpc_ctx) + + rpc_method = self._get_rpc_method(rpc_method_name) + + if not rpc_method: + raise exc.MistralException("No such method: %s" % rpc_method_name) + + response = rpc_method(rpc_ctx=rpc_context, **arguments) + + if not is_async: + self.publish_message( + response, + reply_to, + correlation_id + ) + + def register_endpoint(self, endpoint): + self.endpoints.append(endpoint) + + def _process_message(self, request, message): + self._worker.submit(self._on_message_safe, request, message) + + def _prepare_worker(self, executor='blocking'): + mgr = driver.DriverManager('kombu_driver.executors', executor) + + executor_opts = {} + + if executor == 'threading': + executor_opts['max_workers'] = self._executor_threads + + self._worker = mgr.driver(**executor_opts) diff -Nru mistral-4.0.0/mistral/rpc/oslo/oslo_client.py mistral-5.0.0~b2/mistral/rpc/oslo/oslo_client.py --- mistral-4.0.0/mistral/rpc/oslo/oslo_client.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/oslo/oslo_client.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,46 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import oslo_messaging as messaging + +from mistral import context as auth_ctx +from mistral.rpc import base as rpc + + +class OsloRPCClient(rpc.RPCClient): + def __init__(self, conf): + super(OsloRPCClient, self).__init__(conf) + self.topic = conf.topic + + serializer = auth_ctx.RpcContextSerializer() + + self._client = messaging.RPCClient( + rpc.get_transport(), + messaging.Target(topic=self.topic), + serializer=serializer + ) + + def sync_call(self, ctx, method, target=None, **kwargs): + return self._client.prepare(topic=self.topic, server=target).call( + ctx, + method, + **kwargs + ) + + def async_call(self, ctx, method, target=None, **kwargs): + return self._client.prepare(topic=self.topic, server=target).cast( + ctx, + method, + **kwargs + ) diff -Nru mistral-4.0.0/mistral/rpc/oslo/oslo_server.py mistral-5.0.0~b2/mistral/rpc/oslo/oslo_server.py --- mistral-4.0.0/mistral/rpc/oslo/oslo_server.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/rpc/oslo/oslo_server.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,62 @@ +# Copyright 2015 - Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import oslo_messaging as messaging + +from mistral import context as ctx +from mistral.rpc import base as rpc + + +class OsloRPCServer(rpc.RPCServer): + def __init__(self, conf): + super(OsloRPCServer, self).__init__(conf) + + self.topic = conf.topic + self.server_id = conf.host + self.queue = self.topic + self.routing_key = self.topic + self.channel = None + self.connection = None + self.endpoints = [] + self.oslo_server = None + + def register_endpoint(self, endpoint): + self.endpoints.append(endpoint) + + def run(self, executor='blocking'): + target = messaging.Target( + topic=self.topic, + server=self.server_id + ) + + # TODO(rakhmerov): rpc.get_transport() should be in oslo.messaging + # related module. + self.oslo_server = messaging.get_rpc_server( + rpc.get_transport(), + target, + self.endpoints, + executor=executor, + serializer=ctx.RpcContextSerializer() + ) + + self.oslo_server.start() + + def stop(self, graceful=False): + self.oslo_server.stop() + + if graceful: + self.oslo_server.wait() + + def wait(self): + self.oslo_server.wait() diff -Nru mistral-4.0.0/mistral/serialization.py mistral-5.0.0~b2/mistral/serialization.py --- mistral-4.0.0/mistral/serialization.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/serialization.py 2017-06-09 12:48:26.000000000 +0000 @@ -137,6 +137,15 @@ self.serializers[key] = serializer + def unregister(self, entity_cls): + key = self._get_serialization_key(entity_cls) + + if not key: + return + + if key in self.serializers: + del self.serializers[key] + def cleanup(self): self.serializers.clear() @@ -154,6 +163,11 @@ serializer = self.serializers.get(key) + if not serializer: + raise RuntimeError( + "Failed to find a serializer for the key: %s" % key + ) + result = { '__serial_key': key, '__serial_data': serializer.serialize(entity) @@ -188,5 +202,9 @@ 
get_polymorphic_serializer().register(entity_cls, serializer) +def unregister_serializer(entity_cls): + get_polymorphic_serializer().unregister(entity_cls) + + def cleanup(): get_polymorphic_serializer().cleanup() diff -Nru mistral-4.0.0/mistral/service/base.py mistral-5.0.0~b2/mistral/service/base.py --- mistral-4.0.0/mistral/service/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/service/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,15 +13,11 @@ # limitations under the License. from eventlet import event -from oslo_log import log from oslo_service import service from mistral.service import coordination -LOG = log.getLogger(__name__) - - class MistralService(service.Service): """Base class for Mistral services. diff -Nru mistral-4.0.0/mistral/service/coordination.py mistral-5.0.0~b2/mistral/service/coordination.py --- mistral-4.0.0/mistral/service/coordination.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/service/coordination.py 2017-06-09 12:48:26.000000000 +0000 @@ -144,6 +144,7 @@ return [] get_members_req = self._coordinator.get_members(group_id) + try: members = get_members_req.get() diff -Nru mistral-4.0.0/mistral/services/action_manager.py mistral-5.0.0~b2/mistral/services/action_manager.py --- mistral-4.0.0/mistral/services/action_manager.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/action_manager.py 2017-06-09 12:48:26.000000000 +0000 @@ -187,9 +187,7 @@ def get_empty_action_context(): - return { - _ACTION_CTX_PARAM: {} - } + return {_ACTION_CTX_PARAM: {}} def _has_argument(action, attributes, argument_name): diff -Nru mistral-4.0.0/mistral/services/actions.py mistral-5.0.0~b2/mistral/services/actions.py --- mistral-4.0.0/mistral/services/actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,7 +16,7 @@ from mistral.db.v2 import api as db_api from mistral import exceptions as exc -from 
mistral.workbook import parser as spec_parser +from mistral.lang import parser as spec_parser def create_actions(definition, scope='private'): diff -Nru mistral-4.0.0/mistral/services/expiration_policy.py mistral-5.0.0~b2/mistral/services/expiration_policy.py --- mistral-4.0.0/mistral/services/expiration_policy.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/expiration_policy.py 2017-06-09 12:48:26.000000000 +0000 @@ -41,9 +41,10 @@ super(ExecutionExpirationPolicy, self).__init__(conf) interval = CONF.execution_expiration_policy.evaluation_interval - older_than = CONF.execution_expiration_policy.older_than + ot = CONF.execution_expiration_policy.older_than + mfe = CONF.execution_expiration_policy.max_finished_executions - if (interval and older_than and older_than >= 1): + if interval and ((ot and ot >= 1) or (mfe and mfe >= 1)): _periodic_task = periodic_task.periodic_task( spacing=interval * 60, run_immediately=True @@ -54,43 +55,70 @@ ) else: LOG.debug("Expiration policy disabled. Evaluation_interval " - "is not configured or older_than < '1'.") + "is not configured or both older_than and " + "max_finished_executions < '1'.") + + +def _delete_executions(batch_size, expiration_time, + max_finished_executions): + while True: + with db_api.transaction(): + # TODO(gpaz): In the future should use generic method with + # filters params and not specific method that filter by time. + execs = db_api.get_executions_to_clean( + expiration_time, + limit=batch_size, + max_finished_executions=max_finished_executions + ) + + if not execs: + break + _delete(execs) + + +def _delete(executions): + for execution in executions: + try: + # Setup project_id for _secure_query delete execution. + # TODO(tuan_luong): Manipulation with auth_ctx should be + # out of db transaction scope. 
+ ctx = auth_ctx.MistralContext( + user_id=None, + project_id=execution.project_id, + auth_token=None, + is_admin=True + ) + auth_ctx.set_ctx(ctx) + + LOG.debug( + 'DELETE execution id : %s from date : %s ' + 'according to expiration policy', + execution.id, + execution.updated_at + ) + db_api.delete_workflow_execution(execution.id) + except Exception as e: + msg = ("Failed to delete [execution_id=%s]\n %s" + % (execution.id, traceback.format_exc(e))) + LOG.warning(msg) + finally: + auth_ctx.set_ctx(None) def run_execution_expiration_policy(self, ctx): - LOG.debug("Starting expiration policy task.") + LOG.debug("Starting expiration policy.") older_than = CONF.execution_expiration_policy.older_than exp_time = (datetime.datetime.utcnow() - datetime.timedelta(minutes=older_than)) - with db_api.transaction(): - # TODO(gpaz): In the future should use generic method with - # filters params and not specific method that filter by time. - for execution in db_api.get_expired_executions(exp_time): - try: - # Setup project_id for _secure_query delete execution. - ctx = auth_ctx.MistralContext( - user_id=None, - project_id=execution.project_id, - auth_token=None, - is_admin=True - ) - auth_ctx.set_ctx(ctx) - - LOG.debug( - 'DELETE execution id : %s from date : %s ' - 'according to expiration policy', - execution.id, - execution.updated_at - ) - db_api.delete_workflow_execution(execution.id) - except Exception as e: - msg = ("Failed to delete [execution_id=%s]\n %s" - % (execution.id, traceback.format_exc(e))) - LOG.warning(msg) - finally: - auth_ctx.set_ctx(None) + batch_size = CONF.execution_expiration_policy.batch_size + max_executions = CONF.execution_expiration_policy.max_finished_executions + + # The default value of batch size is 0 + # If it is not set, size of batch will be the size + # of total number of expired executions. 
+ _delete_executions(batch_size, exp_time, max_executions) def setup(): diff -Nru mistral-4.0.0/mistral/services/periodic.py mistral-5.0.0~b2/mistral/services/periodic.py --- mistral-4.0.0/mistral/services/periodic.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/periodic.py 2017-06-09 12:48:26.000000000 +0000 @@ -19,8 +19,8 @@ from mistral import context as auth_ctx from mistral.db.v2 import api as db_api_v2 -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import clients as rpc from mistral.services import security from mistral.services import triggers @@ -81,7 +81,10 @@ # If this is the last execution. if t.remaining_executions == 0: - modified_count = db_api_v2.delete_cron_trigger(t.name) + modified_count = triggers.delete_cron_trigger( + t.name, + trust_id=t.trust_id + ) else: # if remaining execution = None or > 0. next_time = triggers.get_next_execution_time( t.pattern, diff -Nru mistral-4.0.0/mistral/services/scheduler.py mistral-5.0.0~b2/mistral/services/scheduler.py --- mistral-4.0.0/mistral/services/scheduler.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/scheduler.py 2017-06-09 12:48:26.000000000 +0000 @@ -133,7 +133,12 @@ calls_to_make.append(result) for call in calls_to_make: - LOG.debug('Processing next delayed call: %s', call) + LOG.debug( + 'Processing next delayed call. ' + '[ID=%s, factory_method_path=%s, target_method_name=%s, ' + 'method_arguments=%s]', call.id, call.factory_method_path, + call.target_method_name, call.method_arguments + ) target_auth_context = copy.deepcopy(call.auth_context) diff -Nru mistral-4.0.0/mistral/services/security.py mistral-5.0.0~b2/mistral/services/security.py --- mistral-4.0.0/mistral/services/security.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/security.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,11 +13,13 @@ # limitations under the License. 
from oslo_config import cfg +from oslo_log import log as logging from mistral import context as auth_ctx from mistral.utils.openstack import keystone +LOG = logging.getLogger(__name__) CONF = cfg.CONF # Make sure to import 'auth_enable' option before using it. @@ -79,12 +81,16 @@ ) -def delete_trust(workbook): - if not workbook.trust_id: +def delete_trust(trust_id): + if not trust_id: return - keystone_client = keystone.client_for_trusts(workbook.trust_id) - keystone_client.trusts.delete(workbook.trust_id) + keystone_client = keystone.client_for_trusts(trust_id) + + try: + keystone_client.trusts.delete(trust_id) + except Exception as e: + LOG.warning("Failed to delete trust [id=%s]: %s" % (trust_id, e)) def add_trust_id(secure_object_values): diff -Nru mistral-4.0.0/mistral/services/triggers.py mistral-5.0.0~b2/mistral/services/triggers.py --- mistral-4.0.0/mistral/services/triggers.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/triggers.py 2017-06-09 12:48:26.000000000 +0000 @@ -18,20 +18,17 @@ import time from mistral.db.v2 import api as db_api -from mistral.engine.rpc_backend import rpc from mistral.engine import utils as eng_utils from mistral import exceptions as exc +from mistral.lang import parser +from mistral.rpc import clients as rpc from mistral.services import security -from mistral.workbook import parser def get_next_execution_time(pattern, start_time): - local_time = croniter.croniter(pattern, start_time).get_next( + return croniter.croniter(pattern, start_time).get_next( datetime.datetime ) - epoch_second = time.mktime(local_time.timetuple()) - utc_time = datetime.datetime.utcfromtimestamp(epoch_second) - return utc_time # Triggers v2. 
@@ -72,7 +69,7 @@ workflow_params=None, pattern=None, first_time=None, count=None, start_time=None, workflow_id=None): if not start_time: - start_time = datetime.datetime.now() + start_time = datetime.datetime.utcnow() if isinstance(first_time, six.string_types): try: @@ -85,12 +82,8 @@ validate_cron_trigger_input(pattern, first_time, count) - first_utc_time = first_time - if first_time: - first_second = time.mktime(first_time.timetuple()) - first_utc_time = datetime.datetime.utcfromtimestamp(first_second) - next_time = first_utc_time + next_time = first_time if not (pattern or count): count = 1 @@ -102,16 +95,23 @@ workflow_id if workflow_id else workflow_name ) + wf_spec = parser.get_workflow_spec_by_definition_id( + wf_def.id, + wf_def.updated_at + ) + + # TODO(rakhmerov): Use Workflow object here instead of utils. eng_utils.validate_input( - wf_def, - workflow_input or {}, - parser.get_workflow_spec(wf_def.spec) + wf_spec.get_input(), + workflow_input, + wf_spec.get_name(), + wf_spec.__class__.__name__ ) values = { 'name': name, 'pattern': pattern, - 'first_execution_time': first_utc_time, + 'first_execution_time': first_time, 'next_execution_time': next_time, 'remaining_executions': count, 'workflow_name': wf_def.name, @@ -123,23 +123,41 @@ security.add_trust_id(values) - trig = db_api.create_cron_trigger(values) + try: + trig = db_api.create_cron_trigger(values) + except Exception: + # Delete trust before raising exception. 
+ security.delete_trust(values.get('trust_id')) + raise return trig +def delete_cron_trigger(name, trust_id=None): + if not trust_id: + trigger = db_api.get_cron_trigger(name) + trust_id = trigger.trust_id + + security.delete_trust(trust_id) + return db_api.delete_cron_trigger(name) + + def create_event_trigger(name, exchange, topic, event, workflow_id, workflow_input=None, workflow_params=None): with db_api.transaction(): wf_def = db_api.get_workflow_definition_by_id(workflow_id) + wf_spec = parser.get_workflow_spec_by_definition_id( + wf_def.id, + wf_def.updated_at + ) + + # TODO(rakhmerov): Use Workflow object here instead of utils. eng_utils.validate_input( - wf_def, - workflow_input or {}, - parser.get_workflow_spec_by_definition_id( - wf_def.id, - wf_def.updated_at - ) + wf_spec.get_input(), + workflow_input, + wf_spec.get_name(), + wf_spec.__class__.__name__ ) values = { diff -Nru mistral-4.0.0/mistral/services/workbooks.py mistral-5.0.0~b2/mistral/services/workbooks.py --- mistral-4.0.0/mistral/services/workbooks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/workbooks.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,8 +13,8 @@ # limitations under the License. 
from mistral.db.v2 import api as db_api_v2 +from mistral.lang import parser as spec_parser from mistral.services import actions -from mistral.workbook import parser as spec_parser def create_workbook_v2(definition, scope='private'): diff -Nru mistral-4.0.0/mistral/services/workflows.py mistral-5.0.0~b2/mistral/services/workflows.py --- mistral-4.0.0/mistral/services/workflows.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/services/workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -14,16 +14,19 @@ from mistral.db.v2 import api as db_api from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral import utils -from mistral.workbook import parser as spec_parser from mistral.workflow import data_flow from mistral.workflow import states +from oslo_log import log as logging STD_WF_PATH = 'resources/workflows' +LOG = logging.getLogger(__name__) def register_standard_workflows(run_in_tx=True): + LOG.debug("registering standard workflows") workflow_paths = utils.get_file_list(STD_WF_PATH) for wf_path in workflow_paths: @@ -42,6 +45,7 @@ def sync_db(): + LOG.debug("Syncing db") with db_api.transaction(): _clear_system_workflow_db() register_standard_workflows(run_in_tx=False) @@ -49,6 +53,7 @@ def create_workflows(definition, scope='private', is_system=False, run_in_tx=True): + LOG.debug("creating workflows") wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml(definition) db_wfs = [] @@ -81,6 +86,7 @@ def update_workflows(definition, scope='private', identifier=None): + LOG.debug("updating workflows") wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml(definition) wfs = wf_list_spec.get_workflows() diff -Nru mistral-4.0.0/mistral/tests/resources/openstack/action_collection_wb.yaml mistral-5.0.0~b2/mistral/tests/resources/openstack/action_collection_wb.yaml --- mistral-4.0.0/mistral/tests/resources/openstack/action_collection_wb.yaml 2017-02-22 13:40:59.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/tests/resources/openstack/action_collection_wb.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -6,18 +6,18 @@ keystone: type: direct tasks: - catalog: - action: keystone.service_catalog_get_data + projects_list: + action: keystone.projects_list publish: - result: <% task(catalog).result %> + result: <% task().result %> nova: type: direct tasks: - networks_list: - action: nova.networks_list + flavors_list: + action: nova.flavors_list publish: - result: <% task(networks_list).result %> + result: <% task().result %> glance: type: direct @@ -25,7 +25,7 @@ images_list: action: glance.images_list publish: - result: <% task(images_list).result %> + result: <% task().result %> heat: type: direct @@ -33,7 +33,7 @@ stacks_list: action: heat.stacks_list publish: - result: <% task(stacks_list).result %> + result: <% task().result %> neutron: type: direct @@ -41,7 +41,7 @@ list_subnets: action: neutron.list_subnets publish: - result: <% task(list_subnets).result %> + result: <% task().result %> cinder: type: direct @@ -49,5 +49,5 @@ volumes_list: action: cinder.volumes_list publish: - result: <% task(volumes_list).result %> + result: <% task().result %> diff -Nru mistral-4.0.0/mistral/tests/resources/openstack/test_mapping.json mistral-5.0.0~b2/mistral/tests/resources/openstack/test_mapping.json --- mistral-4.0.0/mistral/tests/resources/openstack/test_mapping.json 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/resources/openstack/test_mapping.json 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,16 @@ +{ + "_comment": "Mapping OpenStack action namespaces to all its actions. 
Each action name is mapped to python-client method name in this namespace.", + "nova": { + "servers_get": "servers.get", + "servers_find": "servers.find", + "volumes_delete_server_volume": "volumes.delete_server_volume" + }, + "keystone": { + "users_list": "users.list", + "trusts_create": "trusts.create" + }, + "glance": { + "images_list": "images.list", + "images_delete": "images.delete" + } +} diff -Nru mistral-4.0.0/mistral/tests/resources/single_wf.yaml mistral-5.0.0~b2/mistral/tests/resources/single_wf.yaml --- mistral-4.0.0/mistral/tests/resources/single_wf.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/resources/single_wf.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,11 @@ +--- +version: '2.0' + +single_wf: + type: direct + + tasks: + hello: + action: std.echo output="Hello" + publish: + result: <% task(hello).result %> diff -Nru mistral-4.0.0/mistral/tests/unit/actions/openstack/test_generator.py mistral-5.0.0~b2/mistral/tests/unit/actions/openstack/test_generator.py --- mistral-4.0.0/mistral/tests/unit/actions/openstack/test_generator.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/openstack/test_generator.py 2017-06-09 12:48:26.000000000 +0000 @@ -10,11 +10,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import contextlib +import os + +from oslo_config import cfg + +import mock from mistral.actions import generator_factory +from mistral.actions.openstack.action_generator import base as generator_base from mistral.actions.openstack import actions +from mistral import config + from mistral.tests.unit import base +ABSOLUTE_TEST_MAPPING_PATH = os.path.realpath( + os.path.join(os.path.dirname(__file__), + "../../../resources/openstack/test_mapping.json") +) + +RELATIVE_TEST_MAPPING_PATH = "tests/resources/openstack/test_mapping.json" MODULE_MAPPING = { 'nova': ['nova.servers_get', actions.NovaAction], @@ -44,13 +59,33 @@ EXTRA_MODULES = ['neutron', 'swift', 'zaqar', 'tacker'] +CONF = cfg.CONF +CONF.register_opt(config.os_actions_mapping_path) + + class GeneratorTest(base.BaseTest): + + def setUp(self): + super(GeneratorTest, self).setUp() + + # The baremetal inspector client expects the service to be running + # when it is initialised and attempts to connect. This mocks out this + # service only and returns a simple function that can be used by the + # inspection utils. 
+ self.baremetal_patch = mock.patch.object( + actions.BaremetalIntrospectionAction, + "get_fake_client_method", + return_value=lambda x: None) + + self.baremetal_patch.start() + self.addCleanup(self.baremetal_patch.stop) + def test_generator(self): for generator_cls in generator_factory.all_generators(): action_classes = generator_cls.create_actions() - action_name = MODULE_MAPPING.get(generator_cls.action_namespace)[0] - action_cls = MODULE_MAPPING.get(generator_cls.action_namespace)[1] + action_name = MODULE_MAPPING[generator_cls.action_namespace][0] + action_cls = MODULE_MAPPING[generator_cls.action_namespace][1] method_name_pre = action_name.split('.')[1] method_name = ( method_name_pre @@ -65,3 +100,80 @@ self.assertTrue(issubclass(action['class'], action_cls)) self.assertEqual(method_name, action['class'].client_method_name) + + modules = CONF.openstack_actions.modules_support_region + if generator_cls.action_namespace in modules: + self.assertIn('action_region', action['arg_list']) + + def test_missing_module_from_mapping(self): + with _patch_openstack_action_mapping_path(RELATIVE_TEST_MAPPING_PATH): + for generator_cls in generator_factory.all_generators(): + action_classes = generator_cls.create_actions() + action_names = [action['name'] for action in action_classes] + + cls = MODULE_MAPPING.get(generator_cls.action_namespace)[1] + if cls == actions.NovaAction: + self.assertIn('nova.servers_get', action_names) + self.assertEqual(3, len(action_names)) + elif cls not in (actions.GlanceAction, actions.KeystoneAction): + self.assertEqual([], action_names) + + def test_absolute_mapping_path(self): + with _patch_openstack_action_mapping_path(ABSOLUTE_TEST_MAPPING_PATH): + self.assertTrue(os.path.isabs(ABSOLUTE_TEST_MAPPING_PATH), + "Mapping path is relative: %s" % + ABSOLUTE_TEST_MAPPING_PATH) + for generator_cls in generator_factory.all_generators(): + action_classes = generator_cls.create_actions() + action_names = [action['name'] for action in action_classes] 
+ + cls = MODULE_MAPPING.get(generator_cls.action_namespace)[1] + if cls == actions.NovaAction: + self.assertIn('nova.servers_get', action_names) + self.assertEqual(3, len(action_names)) + elif cls not in (actions.GlanceAction, actions.KeystoneAction): + self.assertEqual([], action_names) + + def test_prepare_action_inputs(self): + inputs = generator_base.OpenStackActionGenerator.prepare_action_inputs( + 'a,b,c', + added=['region=RegionOne'] + ) + + self.assertEqual('a, b, c, region=RegionOne', inputs) + + inputs = generator_base.OpenStackActionGenerator.prepare_action_inputs( + 'a,b,c=1', + added=['region=RegionOne'] + ) + + self.assertEqual('a, b, region=RegionOne, c=1', inputs) + + inputs = generator_base.OpenStackActionGenerator.prepare_action_inputs( + 'a,b,c=1,**kwargs', + added=['region=RegionOne'] + ) + + self.assertEqual('a, b, region=RegionOne, c=1, **kwargs', inputs) + + inputs = generator_base.OpenStackActionGenerator.prepare_action_inputs( + '**kwargs', + added=['region=RegionOne'] + ) + + self.assertEqual('region=RegionOne, **kwargs', inputs) + + inputs = generator_base.OpenStackActionGenerator.prepare_action_inputs( + '', + added=['region=RegionOne'] + ) + + self.assertEqual('region=RegionOne', inputs) + + +@contextlib.contextmanager +def _patch_openstack_action_mapping_path(path): + original_path = CONF.openstack_actions_mapping_path + CONF.set_default("openstack_actions_mapping_path", path) + yield + CONF.set_default("openstack_actions_mapping_path", original_path) diff -Nru mistral-4.0.0/mistral/tests/unit/actions/openstack/test_openstack_actions.py mistral-5.0.0~b2/mistral/tests/unit/actions/openstack/test_openstack_actions.py --- mistral-4.0.0/mistral/tests/unit/actions/openstack/test_openstack_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/openstack/test_openstack_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,7 +15,6 @@ import mock from mistral.actions.openstack import actions -from 
mistral import config from mistral import context as ctx from oslotest import base @@ -28,12 +27,13 @@ class OpenStackActionTest(base.BaseTestCase): @mock.patch.object(actions.NovaAction, '_get_client') def test_nova_action(self, mocked): + mock_ctx = mock.Mock() method_name = "servers.get" action_class = actions.NovaAction action_class.client_method_name = method_name params = {'server': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().servers.get.called) mocked().servers.get.assert_called_once_with(server="1234-abcd") @@ -47,9 +47,6 @@ mock_nova_endpoint, mock_ks_endpoint_v2): - # this is the default, but be explicit - config.CONF.set_default('os_actions_endpoint_type', 'public') - test_ctx = ctx.MistralContext( user_id=None, project_id='1234', @@ -60,7 +57,6 @@ expires_at='3016-07-13T18:34:22.000000Z', insecure=False ) - ctx.set_ctx(test_ctx) # attributes mirror keystone Endpoint object exactly # (with endpoint type public) @@ -102,7 +98,7 @@ action_class.client_method_name = method_name params = {'server': '1234-abcd'} action = action_class(**params) - action.run() + action.run(test_ctx) mock_novaclient.Client.assert_called_once_with( 2, @@ -112,7 +108,7 @@ service_type='compute', auth_token=test_ctx.auth_token, tenant_id=test_ctx.project_id, - region_name=mock_ks_endpoint_v2().region, + region_name=mock_nova_endpoint().region, auth_url=mock_ks_endpoint_v2().url, insecure=test_ctx.insecure ) @@ -123,7 +119,7 @@ # Repeat test in order to validate cache. mock_novaclient.reset_mock() - action.run() + action.run(test_ctx) # TODO(d0ugal): Uncomment the following line when caching is fixed. # mock_novaclient.Client.assert_not_called() @@ -133,10 +129,9 @@ # Repeat again with different context for cache testing. 
test_ctx.project_name = 'service' test_ctx.project_id = '1235' - ctx.set_ctx(test_ctx) mock_novaclient.reset_mock() - action.run() + action.run(test_ctx) mock_novaclient.Client.assert_called_once_with( 2, username=None, @@ -145,7 +140,7 @@ service_type='compute', auth_token=test_ctx.auth_token, tenant_id=test_ctx.project_id, - region_name=mock_ks_endpoint_v2().region, + region_name=mock_nova_endpoint().region, auth_url=mock_ks_endpoint_v2().url, insecure=test_ctx.insecure ) @@ -156,132 +151,143 @@ @mock.patch.object(actions.GlanceAction, '_get_client') def test_glance_action(self, mocked): + mock_ctx = mock.Mock() method_name = "images.delete" action_class = actions.GlanceAction action_class.client_method_name = method_name params = {'image': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().images.delete.called) mocked().images.delete.assert_called_once_with(image="1234-abcd") @mock.patch.object(actions.KeystoneAction, '_get_client') def test_keystone_action(self, mocked): + mock_ctx = mock.Mock() method_name = "users.get" action_class = actions.KeystoneAction action_class.client_method_name = method_name params = {'user': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().users.get.called) mocked().users.get.assert_called_once_with(user="1234-abcd") @mock.patch.object(actions.HeatAction, '_get_client') def test_heat_action(self, mocked): + mock_ctx = mock.Mock() method_name = "stacks.get" action_class = actions.HeatAction action_class.client_method_name = method_name params = {'id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().stacks.get.called) mocked().stacks.get.assert_called_once_with(id="1234-abcd") @mock.patch.object(actions.NeutronAction, '_get_client') def test_neutron_action(self, mocked): + mock_ctx = mock.Mock() method_name = "show_network" action_class = actions.NeutronAction 
action_class.client_method_name = method_name params = {'id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().show_network.called) mocked().show_network.assert_called_once_with(id="1234-abcd") @mock.patch.object(actions.CinderAction, '_get_client') def test_cinder_action(self, mocked): + mock_ctx = mock.Mock() method_name = "volumes.get" action_class = actions.CinderAction action_class.client_method_name = method_name params = {'volume': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().volumes.get.called) mocked().volumes.get.assert_called_once_with(volume="1234-abcd") @mock.patch.object(actions.CeilometerAction, '_get_client') def test_ceilometer_action(self, mocked): + mock_ctx = mock.Mock() method_name = "alarms.get" action_class = actions.CeilometerAction action_class.client_method_name = method_name params = {'alarm_id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().alarms.get.called) mocked().alarms.get.assert_called_once_with(alarm_id="1234-abcd") @mock.patch.object(actions.TroveAction, '_get_client') def test_trove_action(self, mocked): + mock_ctx = mock.Mock() method_name = "instances.get" action_class = actions.TroveAction action_class.client_method_name = method_name params = {'instance': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().instances.get.called) mocked().instances.get.assert_called_once_with(instance="1234-abcd") @mock.patch.object(actions.IronicAction, '_get_client') def test_ironic_action(self, mocked): + mock_ctx = mock.Mock() method_name = "node.get" action_class = actions.IronicAction action_class.client_method_name = method_name params = {'node': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().node.get.called) 
mocked().node.get.assert_called_once_with(node="1234-abcd") @mock.patch.object(actions.BaremetalIntrospectionAction, '_get_client') def test_baremetal_introspector_action(self, mocked): + mock_ctx = mock.Mock() method_name = "get_status" action_class = actions.BaremetalIntrospectionAction action_class.client_method_name = method_name params = {'uuid': '1234'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().get_status.called) mocked().get_status.assert_called_once_with(uuid="1234") @mock.patch.object(actions.MistralAction, '_get_client') def test_mistral_action(self, mocked): + mock_ctx = mock.Mock() method_name = "workflows.get" action_class = actions.MistralAction action_class.client_method_name = method_name params = {'name': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().workflows.get.called) mocked().workflows.get.assert_called_once_with(name="1234-abcd") @mock.patch.object(actions.SwiftAction, '_get_client') def test_swift_action(self, mocked): + mock_ctx = mock.Mock() method_name = "get_object" action_class = actions.SwiftAction action_class.client_method_name = method_name params = {'container': 'foo', 'object': 'bar'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().get_object.called) mocked().get_object.assert_called_once_with(container='foo', @@ -289,60 +295,65 @@ @mock.patch.object(actions.ZaqarAction, '_get_client') def test_zaqar_action(self, mocked): + mock_ctx = mock.Mock() method_name = "queue_messages" action_class = actions.ZaqarAction action_class.client_method_name = method_name params = {'queue_name': 'foo'} action = action_class(**params) - action.run() + action.run(mock_ctx) mocked().queue.assert_called_once_with('foo') mocked().queue().messages.assert_called_once_with() @mock.patch.object(actions.BarbicanAction, '_get_client') def test_barbican_action(self, mocked): + mock_ctx = mock.Mock() 
method_name = "orders_list" action_class = actions.BarbicanAction action_class.client_method_name = method_name params = {'limit': 5} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().orders_list.called) mocked().orders_list.assert_called_once_with(limit=5) @mock.patch.object(actions.DesignateAction, '_get_client') def test_designate_action(self, mocked): + mock_ctx = mock.Mock() method_name = "domain.get" action_class = actions.DesignateAction action_class.client_method_name = method_name params = {'domain': 'example.com'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().domain.get.called) mocked().domain.get.assert_called_once_with(domain="example.com") @mock.patch.object(actions.MagnumAction, '_get_client') def test_magnum_action(self, mocked): + mock_ctx = mock.Mock() method_name = "baymodels.get" action_class = actions.MagnumAction action_class.client_method_name = method_name params = {'id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().baymodels.get.called) mocked().baymodels.get.assert_called_once_with(id="1234-abcd") @mock.patch.object(actions.MuranoAction, '_get_client') def test_murano_action(self, mocked): + mock_ctx = mock.Mock() method_name = "categories.get" action_class = actions.MuranoAction action_class.client_method_name = method_name params = {'category_id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().categories.get.called) mocked().categories.get.assert_called_once_with( @@ -351,12 +362,13 @@ @mock.patch.object(actions.TackerAction, '_get_client') def test_tacker_action(self, mocked): + mock_ctx = mock.Mock() method_name = "show_vim" action_class = actions.TackerAction action_class.client_method_name = method_name params = {'vim_id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) 
self.assertTrue(mocked().show_vim.called) mocked().show_vim.assert_called_once_with( @@ -365,11 +377,12 @@ @mock.patch.object(actions.SenlinAction, '_get_client') def test_senlin_action(self, mocked): + mock_ctx = mock.Mock() action_class = actions.SenlinAction action_class.client_method_name = "get_cluster" action = action_class(cluster_id='1234-abcd') - action.run() + action.run(mock_ctx) self.assertTrue(mocked().get_cluster.called) @@ -379,24 +392,26 @@ @mock.patch.object(actions.AodhAction, '_get_client') def test_aodh_action(self, mocked): + mock_ctx = mock.Mock() method_name = "alarm.get" action_class = actions.AodhAction action_class.client_method_name = method_name params = {'alarm_id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().alarm.get.called) mocked().alarm.get.assert_called_once_with(alarm_id="1234-abcd") @mock.patch.object(actions.GnocchiAction, '_get_client') def test_gnocchi_action(self, mocked): + mock_ctx = mock.Mock() method_name = "metric.get" action_class = actions.GnocchiAction action_class.client_method_name = method_name params = {'metric_id': '1234-abcd'} action = action_class(**params) - action.run() + action.run(mock_ctx) self.assertTrue(mocked().metric.get.called) mocked().metric.get.assert_called_once_with(metric_id="1234-abcd") diff -Nru mistral-4.0.0/mistral/tests/unit/actions/test_javascript_action.py mistral-5.0.0~b2/mistral/tests/unit/actions/test_javascript_action.py --- mistral-4.0.0/mistral/tests/unit/actions/test_javascript_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/test_javascript_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -24,7 +24,8 @@ javascript, 'evaluate', mock.Mock(return_value="3") ) def test_js_action(self): + mock_ctx = mock.Mock() script = "return 1 + 2" action = std.JavaScriptAction(script) - self.assertEqual("3", action.run()) + self.assertEqual("3", action.run(mock_ctx)) diff -Nru 
mistral-4.0.0/mistral/tests/unit/actions/test_std_echo_action.py mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_echo_action.py --- mistral-4.0.0/mistral/tests/unit/actions/test_std_echo_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_echo_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -14,11 +14,13 @@ from mistral.actions import std_actions as std from mistral.tests.unit import base +import mock class EchoActionTest(base.BaseTest): def test_fake_action(self): expected = "my output" + mock_ctx = mock.Mock() action = std.EchoAction(expected) - self.assertEqual(expected, action.run()) + self.assertEqual(expected, action.run(mock_ctx)) diff -Nru mistral-4.0.0/mistral/tests/unit/actions/test_std_email_action.py mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_email_action.py --- mistral-4.0.0/mistral/tests/unit/actions/test_std_email_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_email_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -61,6 +61,7 @@ self.from_addr = "bot@example.com" self.to_addrs_str = ", ".join(self.to_addrs) + self.ctx = mock.Mock() @testtools.skipIf(not LOCAL_SMTPD, "Setup local smtpd to run it") def test_send_email_real(self): @@ -68,7 +69,7 @@ self.from_addr, self.to_addrs, self.smtp_server, None, self.subject, self.body ) - action.run() + action.run(self.ctx) @testtools.skipIf(not REMOTE_SMTP, "Configure Remote SMTP to run it") def test_with_password_real(self): @@ -82,7 +83,7 @@ self.smtp_server, self.smtp_password, self.subject, self.body ) - action.run() + action.run(self.ctx) @mock.patch('smtplib.SMTP') def test_with_mutli_to_addrs(self, smtp): @@ -91,7 +92,7 @@ self.from_addr, self.to_addrs, self.smtp_server, smtp_password, self.subject, self.body ) - action.run() + action.run(self.ctx) @mock.patch('smtplib.SMTP') def test_with_one_to_addr(self, smtp): @@ -102,7 +103,7 @@ self.from_addr, to_addr, self.smtp_server, 
smtp_password, self.subject, self.body ) - action.run() + action.run(self.ctx) @mock.patch('smtplib.SMTP') def test_send_email(self, smtp): @@ -111,7 +112,7 @@ self.smtp_server, None, self.subject, self.body ) - action.run() + action.run(self.ctx) smtp.assert_called_once_with(self.smtp_server) @@ -157,7 +158,7 @@ self.smtp_server, self.smtp_password, self.subject, self.body ) - action.run() + action.run(self.ctx) smtpmock = smtp.return_value calls = [mock.call.ehlo(), mock.call.starttls(), mock.call.ehlo(), @@ -177,7 +178,7 @@ ) try: - action.run() + action.run(self.ctx) except exc.ActionException: pass else: diff -Nru mistral-4.0.0/mistral/tests/unit/actions/test_std_fail_action.py mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_fail_action.py --- mistral-4.0.0/mistral/tests/unit/actions/test_std_fail_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_fail_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import mock from mistral.actions import std_actions as std from mistral import exceptions as exc @@ -21,4 +22,4 @@ def test_fail_action(self): action = std.FailAction() - self.assertRaises(exc.ActionException, action.run) + self.assertRaises(exc.ActionException, action.run, mock.Mock) diff -Nru mistral-4.0.0/mistral/tests/unit/actions/test_std_http_action.py mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_http_action.py --- mistral-4.0.0/mistral/tests/unit/actions/test_std_http_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/actions/test_std_http_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -53,6 +53,7 @@ @mock.patch.object(requests, 'request') def test_http_action(self, mocked_method): mocked_method.return_value = get_success_fake_response() + mock_ctx = mock.Mock() action = std.HTTPAction( url=URL, @@ -67,7 +68,7 @@ self.assertEqual(DATA_STR, action.body) self.assertEqual(URL, action.url) - result = action.run() + result = action.run(mock_ctx) self.assertIsInstance(result, dict) self.assertEqual(DATA, result['content']) @@ -91,6 +92,7 @@ @mock.patch.object(requests, 'request') def test_http_action_error_result(self, mocked_method): mocked_method.return_value = get_error_fake_response() + mock_ctx = mock.Mock() action = std.HTTPAction( url=URL, @@ -105,7 +107,7 @@ self.assertEqual(DATA_STR, action.body) self.assertEqual(URL, action.url) - result = action.run() + result = action.run(mock_ctx) self.assertIsInstance(result, wf_utils.Result) self.assertEqual(401, result.error['status']) @@ -127,6 +129,7 @@ @mock.patch.object(requests, 'request') def test_http_action_with_auth(self, mocked_method): mocked_method.return_value = get_success_fake_response() + mock_ctx = mock.Mock() action = std.HTTPAction( url=URL, @@ -140,7 +143,7 @@ self.assertEqual(data_str, action.body) self.assertEqual(URL, action.url) - result = action.run() + result = action.run(mock_ctx) self.assertIsInstance(result, dict) self.assertEqual(DATA, 
result['content']) @@ -164,6 +167,7 @@ @mock.patch.object(requests, 'request') def test_http_action_with_headers(self, mocked_method): mocked_method.return_value = get_success_fake_response() + mock_ctx = mock.Mock() headers = {'int_header': 33, 'bool_header': True, 'float_header': 3.0, 'regular_header': 'teststring'} @@ -183,7 +187,7 @@ self.assertEqual(data_str, action.body) self.assertEqual(URL, action.url) - result = action.run() + result = action.run(mock_ctx) self.assertIsInstance(result, dict) self.assertEqual(DATA, result['content']) diff -Nru mistral-4.0.0/mistral/tests/unit/api/test_auth.py mistral-5.0.0~b2/mistral/tests/unit/api/test_auth.py --- mistral-4.0.0/mistral/tests/unit/api/test_auth.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/test_auth.py 2017-06-09 12:48:26.000000000 +0000 @@ -11,10 +11,9 @@ # limitations under the License. import datetime -import uuid - from oslo_config import cfg from oslo_utils import timeutils +from oslo_utils import uuidutils import pecan import pecan.testing @@ -34,21 +33,21 @@ PKI_TOKEN_VERIFIED = { 'token': { 'methods': ['password'], - 'roles': [{'id': uuid.uuid4().hex, + 'roles': [{'id': uuidutils.generate_uuid(dashed=False), 'name': 'admin'}], 'expires_at': datetime.datetime.isoformat( datetime.datetime.utcnow() + datetime.timedelta(seconds=60) ), 'project': { 'domain': {'id': 'default', 'name': 'Default'}, - 'id': uuid.uuid4().hex, + 'id': uuidutils.generate_uuid(dashed=False), 'name': 'Mistral' }, 'catalog': [], 'extras': {}, 'user': { 'domain': {'id': 'default', 'name': 'Default'}, - 'id': uuid.uuid4().hex, + 'id': uuidutils.generate_uuid(dashed=False), 'name': 'admin' }, 'issued_at': datetime.datetime.isoformat(timeutils.utcnow()) diff -Nru mistral-4.0.0/mistral/tests/unit/api/test_resource_base.py mistral-5.0.0~b2/mistral/tests/unit/api/test_resource_base.py --- mistral-4.0.0/mistral/tests/unit/api/test_resource_base.py 1970-01-01 00:00:00.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/tests/unit/api/test_resource_base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,74 @@ +# Copyright 2016 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import datetime + +from mistral.api.controllers.v2 import resources +from mistral.db.v2 import api as db_api +from mistral.tests.unit import base +from mistral import utils + + +WF_EXEC = { + 'id': 'c0f3be41-88b9-4c86-a669-83e77cd0a1b8', + 'spec': {}, + 'params': {'task': 'my_task1'}, + 'project_id': '', + 'scope': 'PUBLIC', + 'state': 'IDLE', + 'state_info': "Running...", + 'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0), + 'updated_at': None, + 'context': None, + 'task_execution_id': None, + 'description': None, + 'output': None, + 'accepted': False, + 'some_invalid_field': "foobar" +} + + +class TestRestResource(base.DbTestCase): + def test_from_db_model(self): + wf_ex = db_api.create_workflow_execution(WF_EXEC) + + self.assertIsNotNone(wf_ex) + + wf_ex_resource = resources.Execution.from_db_model(wf_ex) + + self.assertIsNotNone(wf_ex_resource) + + expected = copy.copy(WF_EXEC) + + del expected['some_invalid_field'] + utils.datetime_to_str_in_dict(expected, 'created_at') + + self.assertDictEqual(expected, wf_ex.to_dict()) + + def test_from_dict(self): + wf_ex = db_api.create_workflow_execution(WF_EXEC) + + self.assertIsNotNone(wf_ex) + + wf_ex_resource = resources.Execution.from_dict(wf_ex.to_dict()) + + 
self.assertIsNotNone(wf_ex_resource) + + expected = copy.copy(WF_EXEC) + + del expected['some_invalid_field'] + utils.datetime_to_str_in_dict(expected, 'created_at') + + self.assertDictEqual(expected, wf_ex.to_dict()) diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_action_executions.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_action_executions.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_action_executions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_action_executions.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,11 +22,14 @@ from oslo_config import cfg import oslo_messaging +from mistral.api.controllers.v2 import action_execution from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import base as rpc_base +from mistral.rpc import clients as rpc_clients from mistral.tests.unit.api import base +from mistral.utils import rest_utils from mistral.workflow import states from mistral.workflow import utils as wf_utils @@ -192,7 +195,7 @@ MOCK_DELETE = mock.MagicMock(return_value=None) -@mock.patch.object(rpc, '_IMPL_CLIENT', mock.Mock()) +@mock.patch.object(rpc_base, '_IMPL_CLIENT', mock.Mock()) class TestActionExecutionsController(base.APITest): def setUp(self): super(TestActionExecutionsController, self).setUp() @@ -221,7 +224,7 @@ self.assertEqual(404, resp.status_int) - @mock.patch.object(rpc.EngineClient, 'start_action') + @mock.patch.object(rpc_clients.EngineClient, 'start_action') def test_post(self, f): f.return_value = ACTION_EX_DB.to_dict() @@ -249,7 +252,7 @@ run_sync=True ) - @mock.patch.object(rpc.EngineClient, 'start_action') + @mock.patch.object(rpc_clients.EngineClient, 'start_action') def test_post_json(self, f): f.return_value = ACTION_EX_DB.to_dict() @@ -276,7 +279,7 @@ save_result=True ) - @mock.patch.object(rpc.EngineClient, 'start_action') + 
@mock.patch.object(rpc_clients.EngineClient, 'start_action') def test_post_without_input(self, f): f.return_value = ACTION_EX_DB.to_dict() f.return_value['output'] = {'result': '123'} @@ -318,7 +321,7 @@ self.assertEqual(400, resp.status_int) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put(self, f): f.return_value = UPDATED_ACTION_EX_DB @@ -332,7 +335,7 @@ wf_utils.Result(data=ACTION_EX_DB.output) ) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put_error_with_output(self, f): f.return_value = ERROR_ACTION_EX_WITH_OUTPUT @@ -349,7 +352,7 @@ wf_utils.Result(error=ERROR_ACTION_RES_WITH_OUTPUT) ) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put_error_with_unknown_reason(self, f): f.return_value = ERROR_ACTION_EX_FOR_EMPTY_OUTPUT resp = self.app.put_json('/v2/action_executions/123', ERROR_ACTION) @@ -362,7 +365,7 @@ wf_utils.Result(error=DEFAULT_ERROR_OUTPUT) ) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put_error_with_unknown_reason_output_none(self, f): f.return_value = ERROR_ACTION_EX_FOR_EMPTY_OUTPUT resp = self.app.put_json( @@ -378,7 +381,7 @@ wf_utils.Result(error=DEFAULT_ERROR_OUTPUT) ) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put_cancelled(self, on_action_complete_mock_func): on_action_complete_mock_func.return_value = CANCELLED_ACTION_EX_DB @@ -393,7 +396,7 @@ ) @mock.patch.object( - rpc.EngineClient, + rpc_clients.EngineClient, 'on_action_complete', MOCK_NOT_FOUND ) @@ -428,7 +431,7 @@ self.assertEqual(400, resp.status_int) - @mock.patch.object(rpc.EngineClient, 'on_action_complete') + 
@mock.patch.object(rpc_clients.EngineClient, 'on_action_complete') def test_put_without_result(self, f): action_ex = copy.deepcopy(UPDATED_ACTION) del action_ex['output'] @@ -448,6 +451,29 @@ self.assertEqual(1, len(resp.json['action_executions'])) self.assertDictEqual(ACTION_EX, resp.json['action_executions'][0]) + @mock.patch.object(db_api, 'get_action_executions', MOCK_ACTIONS) + @mock.patch.object(rest_utils, 'get_all') + def test_get_all_with_and_without_output(self, mock_get_all): + resp = self.app.get('/v2/action_executions') + args, kwargs = mock_get_all.call_args + resource_function = kwargs['resource_function'] + + self.assertEqual(200, resp.status_int) + self.assertEqual( + action_execution._get_action_execution_resource_for_list, + resource_function + ) + + resp = self.app.get('/v2/action_executions?include_output=true') + args, kwargs = mock_get_all.call_args + resource_function = kwargs['resource_function'] + + self.assertEqual(200, resp.status_int) + self.assertEqual( + action_execution._get_action_execution_resource, + resource_function + ) + @mock.patch.object(db_api, 'get_action_executions', MOCK_EMPTY) def test_get_all_empty(self): resp = self.app.get('/v2/action_executions') @@ -483,7 +509,7 @@ ) @mock.patch.object(db_api, 'get_action_execution', MOCK_ACTION) - def test_delete_action_exeuction_with_task(self): + def test_delete_action_execution_with_task(self): cfg.CONF.set_default('allow_action_execution_deletion', True, 'api') resp = self.app.delete('/v2/action_executions/123', expect_errors=True) @@ -499,7 +525,7 @@ 'get_action_execution', MOCK_ACTION_NOT_COMPLETE ) - def test_delete_action_exeuction_not_complete(self): + def test_delete_action_execution_not_complete(self): cfg.CONF.set_default('allow_action_execution_deletion', True, 'api') resp = self.app.delete('/v2/action_executions/123', expect_errors=True) @@ -516,7 +542,7 @@ MOCK_ACTION_COMPLETE_ERROR ) @mock.patch.object(db_api, 'delete_action_execution', MOCK_DELETE) - def 
test_delete_action_exeuction_complete_error(self): + def test_delete_action_execution_complete_error(self): cfg.CONF.set_default('allow_action_execution_deletion', True, 'api') resp = self.app.delete('/v2/action_executions/123', expect_errors=True) @@ -529,7 +555,7 @@ MOCK_ACTION_COMPLETE_CANCELLED ) @mock.patch.object(db_api, 'delete_action_execution', MOCK_DELETE) - def test_delete_action_exeuction_complete_cancelled(self): + def test_delete_action_execution_complete_cancelled(self): cfg.CONF.set_default('allow_action_execution_deletion', True, 'api') resp = self.app.delete('/v2/action_executions/123', expect_errors=True) diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_cron_triggers.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_cron_triggers.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_cron_triggers.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_cron_triggers.py 2017-06-09 12:48:26.000000000 +0000 @@ -19,6 +19,7 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc +from mistral.services import security from mistral.tests.unit.api import base WF = models.WorkflowDefinition( @@ -101,11 +102,13 @@ @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) @mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE) - def test_post_dup(self): + @mock.patch.object(security, "delete_trust") + def test_post_dup(self, delete_trust): resp = self.app.post_json( '/v2/cron_triggers', TRIGGER, expect_errors=True ) + self.assertEqual(1, delete_trust.call_count) self.assertEqual(409, resp.status_int) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) @@ -122,9 +125,11 @@ @mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER) @mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE) - def test_delete(self): + @mock.patch.object(security, "delete_trust") + def test_delete(self, delete_trust): resp = 
self.app.delete('/v2/cron_triggers/my_cron_trigger') + self.assertEqual(1, delete_trust.call_count) self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, "delete_cron_trigger", MOCK_NOT_FOUND) diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_event_trigger.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_event_trigger.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_event_trigger.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_event_trigger.py 2017-06-09 12:48:26.000000000 +0000 @@ -85,7 +85,7 @@ @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) @mock.patch.object(db_api, "create_event_trigger", MOCK_TRIGGER) @mock.patch.object(db_api, "get_event_triggers", MOCK_TRIGGERS) - @mock.patch('mistral.engine.rpc_backend.rpc.get_event_engine_client') + @mock.patch('mistral.rpc.clients.get_event_engine_client') def test_post(self, mock_rpc_client): client = mock.Mock() mock_rpc_client.return_value = client @@ -134,7 +134,7 @@ self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, 'ensure_event_trigger_exists', MOCK_NONE) - @mock.patch('mistral.engine.rpc_backend.rpc.get_event_engine_client') + @mock.patch('mistral.rpc.clients.get_event_engine_client') @mock.patch('mistral.db.v2.api.update_event_trigger') def test_put(self, mock_update, mock_rpc_client): client = mock.Mock() @@ -167,7 +167,7 @@ self.assertEqual(400, resp.status_int) - @mock.patch('mistral.engine.rpc_backend.rpc.get_event_engine_client') + @mock.patch('mistral.rpc.clients.get_event_engine_client') @mock.patch.object(db_api, "get_event_trigger", MOCK_TRIGGER) @mock.patch.object(db_api, "get_event_triggers", mock.MagicMock(return_value=[])) diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_executions.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_executions.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_executions.py 2017-02-22 13:40:59.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_executions.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,19 +22,21 @@ import mock from oslo_config import cfg import oslo_messaging +from oslo_utils import uuidutils from webtest import app as webtest_app from mistral.api.controllers.v2 import execution from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import api as sql_db_api from mistral.db.v2.sqlalchemy import models -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import base as rpc_base +from mistral.rpc import clients as rpc_clients from mistral.tests.unit.api import base +from mistral.tests.unit import base as unit_base from mistral import utils from mistral.utils import rest_utils from mistral.workflow import states -from oslo_utils import uuidutils # This line is needed for correct initialization of messaging config. oslo_messaging.get_transport(cfg.CONF) @@ -130,7 +132,7 @@ MOCK_ACTION_EXC = mock.MagicMock(side_effect=exc.ActionException()) -@mock.patch.object(rpc, '_IMPL_CLIENT', mock.Mock()) +@mock.patch.object(rpc_base, '_IMPL_CLIENT', mock.Mock()) class TestExecutionsController(base.APITest): @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) def test_get(self): @@ -158,7 +160,7 @@ mock.MagicMock(return_value=None) ) @mock.patch.object( - rpc.EngineClient, + rpc_clients.EngineClient, 'pause_workflow', MOCK_UPDATED_WF_EX ) @@ -181,7 +183,7 @@ 'ensure_workflow_execution_exists', mock.MagicMock(return_value=None) ) - @mock.patch.object(rpc.EngineClient, 'stop_workflow') + @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_error(self, mock_stop_wf): update_exec = { 'id': WF_EX['id'], @@ -209,7 +211,7 @@ 'ensure_workflow_execution_exists', mock.MagicMock(return_value=None) ) - @mock.patch.object(rpc.EngineClient, 'stop_workflow') + @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_cancelled(self, mock_stop_wf): 
update_exec = { 'id': WF_EX['id'], @@ -242,7 +244,7 @@ 'ensure_workflow_execution_exists', mock.MagicMock(return_value=None) ) - @mock.patch.object(rpc.EngineClient, 'resume_workflow') + @mock.patch.object(rpc_clients.EngineClient, 'resume_workflow') def test_put_state_resume(self, mock_resume_wf): update_exec = { 'id': WF_EX['id'], @@ -296,7 +298,7 @@ 'ensure_workflow_execution_exists', mock.MagicMock(return_value=None) ) - @mock.patch.object(rpc.EngineClient, 'stop_workflow') + @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_info_unset(self, mock_stop_wf): update_exec = { 'id': WF_EX['id'], @@ -453,7 +455,7 @@ self.assertIn(expected_fault, resp.json['faultstring']) - @mock.patch.object(rpc.EngineClient, 'start_workflow') + @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post(self, f): f.return_value = WF_EX.to_dict() @@ -471,7 +473,11 @@ **json.loads(exec_dict['params']) ) - @mock.patch.object(rpc.EngineClient, 'start_workflow', MOCK_ACTION_EXC) + @mock.patch.object( + rpc_clients.EngineClient, + 'start_workflow', + MOCK_ACTION_EXC + ) def test_post_throws_exception(self): context = self.assertRaises( webtest_app.AppError, @@ -630,3 +636,40 @@ resource_function = kwargs['resource_function'] self.assertIsNone(resource_function) + + @mock.patch('mistral.db.v2.api.get_workflow_executions') + @mock.patch('mistral.context.context_from_headers_and_env') + def test_get_all_projects_admin(self, mock_context, mock_get_execs): + admin_ctx = unit_base.get_context(admin=True) + mock_context.return_value = admin_ctx + + resp = self.app.get('/v2/executions?all_projects=true') + + self.assertEqual(200, resp.status_int) + + self.assertTrue(mock_get_execs.call_args[1].get('insecure', False)) + + def test_get_all_projects_normal_user(self): + resp = self.app.get( + '/v2/executions?all_projects=true', + expect_errors=True + ) + + self.assertEqual(403, resp.status_int) + + 
@mock.patch('mistral.db.v2.api.get_workflow_executions') + @mock.patch('mistral.context.context_from_headers_and_env') + def test_get_all_filter_by_project_id(self, mock_context, mock_get_execs): + admin_ctx = unit_base.get_context(admin=True) + mock_context.return_value = admin_ctx + + fake_project_id = uuidutils.generate_uuid() + + resp = self.app.get('/v2/executions?project_id=%s' % fake_project_id) + + self.assertEqual(200, resp.status_int) + + self.assertTrue(mock_get_execs.call_args[1].get('insecure', False)) + self.assertTrue( + mock_get_execs.call_args[1].get('project_id', fake_project_id) + ) diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_tasks.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_tasks.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_tasks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_tasks.py 2017-06-09 12:48:26.000000000 +0000 @@ -20,8 +20,8 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import clients as rpc from mistral.tests.unit.api import base from mistral.workflow import data_flow from mistral.workflow import states @@ -30,6 +30,14 @@ RESULT = {"some": "result"} PUBLISHED = {"var": "val"} +RUNTIME_CONTEXT = { + 'triggered_by': [ + { + 'task_id': '123-123-123', + 'event': 'on-success' + } + ] +} WF_EX = models.WorkflowExecution( id='abc', @@ -59,7 +67,7 @@ state=states.RUNNING, tags=['a', 'b'], in_context={}, - runtime_context={}, + runtime_context=RUNTIME_CONTEXT, workflow_execution_id=WF_EX.id, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), @@ -82,7 +90,7 @@ state=states.RUNNING, tags=['a', 'b'], in_context={}, - runtime_context={}, + runtime_context=RUNTIME_CONTEXT, workflow_execution_id=WF_EX.id, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), @@ -101,6 +109,7 @@ 
'updated_at': '1970-01-01 00:00:00', 'result': json.dumps(RESULT), 'published': json.dumps(PUBLISHED), + 'runtime_context': json.dumps(RUNTIME_CONTEXT), 'processed': True } diff -Nru mistral-4.0.0/mistral/tests/unit/api/v2/test_workflows.py mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_workflows.py --- mistral-4.0.0/mistral/tests/unit/api/v2/test_workflows.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/api/v2/test_workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -236,7 +236,7 @@ self.assertEqual(400, resp.status_int) self.assertIn( - "Attempt to modify a system workflow", + "Can not modify a system", resp.body.decode() ) @@ -395,7 +395,7 @@ self.assertEqual(400, resp.status_int) self.assertIn( - "Attempt to delete a system workflow", + "Can not modify a system", resp.body.decode() ) @@ -412,6 +412,9 @@ self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['workflows'])) + + print(resp.json['workflows'][0]) + self.assertDictEqual(WF, resp.json['workflows'][0]) @mock.patch.object(db_api, "get_workflow_definitions", MOCK_EMPTY) diff -Nru mistral-4.0.0/mistral/tests/unit/base.py mistral-5.0.0~b2/mistral/tests/unit/base.py --- mistral-4.0.0/mistral/tests/unit/base.py 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -25,16 +25,17 @@ from oslotest import base import testtools.matchers as ttm +from mistral import config from mistral import context as auth_context from mistral.db.sqlalchemy import base as db_sa_base from mistral.db.sqlalchemy import sqlite_lock from mistral.db.v2 import api as db_api +from mistral.lang import parser as spec_parser from mistral.services import action_manager from mistral.services import security from mistral.tests.unit import config as test_config from mistral.utils import inspect_utils as i_utils from mistral import version -from mistral.workbook import parser as spec_parser from mistral.workflow import 
lookup_utils RESOURCES_PATH = 'tests/resources/' @@ -80,7 +81,7 @@ class FakeHTTPResponse(object): def __init__(self, text, status_code, reason=None, headers=None, - history=None, encoding='utf8', url='', cookies=None, + history=None, encoding='utf-8', url='', cookies=None, elapsed=None): self.text = text self.content = text @@ -93,8 +94,8 @@ self.cookies = cookies or {} self.elapsed = elapsed or datetime.timedelta(milliseconds=123) - def json(self): - return json.loads(self.text) + def json(self, **kwargs): + return json.loads(self.text, **kwargs) class BaseTest(base.BaseTestCase): @@ -240,6 +241,14 @@ if cfg.CONF.database.connection.startswith('sqlite'): cfg.CONF.set_default('connection', 'sqlite://', group='database') + # This option is normally registered in sync_db.py so we have to + # register it here specifically for tests. + cfg.CONF.register_opt(config.os_actions_mapping_path) + + cfg.CONF.set_default( + 'openstack_actions_mapping_path', + 'tests/resources/openstack/test_mapping.json' + ) cfg.CONF.set_default('max_overflow', -1, group='database') cfg.CONF.set_default('max_pool_size', 1000, group='database') @@ -248,7 +257,7 @@ action_manager.sync_db() def _clean_db(self): - lookup_utils.clean_caches() + lookup_utils.clear_caches() contexts = [ get_context(default=False), diff -Nru mistral-4.0.0/mistral/tests/unit/db/v2/test_db_model.py mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_db_model.py --- mistral-4.0.0/mistral/tests/unit/db/v2/test_db_model.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_db_model.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,94 @@ +# Copyright 2017 - Nokia Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import datetime + +from mistral.db.v2.sqlalchemy import api as db_api +from mistral.tests.unit import base as test_base +from mistral import utils + +WF_EXEC = { + 'id': 'c0f3be41-88b9-4c86-a669-83e77cd0a1b8', + 'spec': {}, + 'params': {'task': 'my_task1'}, + 'project_id': '', + 'scope': 'PUBLIC', + 'state': 'IDLE', + 'state_info': "Running...", + 'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0), + 'updated_at': None, + 'context': None, + 'task_execution_id': None, + 'description': None, + 'output': None, + 'accepted': False, + 'some_invalid_field': "foobar" +} + + +class DBModelTest(test_base.DbTestCase): + def setUp(self): + super(DBModelTest, self).setUp() + + def test_iterate_column_names(self): + wf_ex = db_api.create_workflow_execution(WF_EXEC) + + self.assertIsNotNone(wf_ex) + + c_names = [c_name for c_name in wf_ex.iter_column_names()] + + expected = set(WF_EXEC.keys()) + + expected.remove('some_invalid_field') + + self.assertEqual(expected, set(c_names)) + + def test_iterate_columns(self): + wf_ex = db_api.create_workflow_execution(WF_EXEC) + + self.assertIsNotNone(wf_ex) + + values = {c_name: c_val for c_name, c_val in wf_ex.iter_columns()} + + expected = copy.copy(WF_EXEC) + + del expected['some_invalid_field'] + + self.assertDictEqual(expected, values) + + def test_to_dict(self): + wf_ex = db_api.create_workflow_execution(WF_EXEC) + + self.assertIsNotNone(wf_ex) + + expected = copy.copy(WF_EXEC) + + del expected['some_invalid_field'] + + actual = wf_ex.to_dict() + + # The method to_dict() returns date as strings. 
So, we have to + # check them separately. + + self.assertEqual( + utils.datetime_to_str(expected['created_at']), + actual['created_at'] + ) + + # Now check the rest of the columns. + del expected['created_at'] + del actual['created_at'] + + self.assertDictEqual(expected, actual) diff -Nru mistral-4.0.0/mistral/tests/unit/db/v2/test_locking.py mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_locking.py --- mistral-4.0.0/mistral/tests/unit/db/v2/test_locking.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_locking.py 2017-06-09 12:48:26.000000000 +0000 @@ -19,6 +19,7 @@ import random import testtools +from mistral import context as auth_context from mistral.db.sqlalchemy import sqlite_lock from mistral.db.v2.sqlalchemy import api as db_api from mistral.db.v2.sqlalchemy import models as db_models @@ -89,6 +90,9 @@ self.assertEqual(0, len(sqlite_lock.get_locks())) def _run_correct_locking(self, wf_ex): + # Set context info for the thread. + auth_context.set_ctx(test_base.get_context()) + self._random_sleep() with db_api.transaction(): diff -Nru mistral-4.0.0/mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py --- mistral-4.0.0/mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py 2017-06-09 12:48:26.000000000 +0000 @@ -29,7 +29,9 @@ from mistral.utils import filter_utils -user_context = test_base.get_context(default=False) +DEFAULT_CTX = test_base.get_context() +USER_CTX = test_base.get_context(default=False) +ADM_CTX = test_base.get_context(default=False, admin=True) WORKBOOKS = [ { @@ -305,7 +307,7 @@ self.assertEqual(created, fetched[0]) # Create a new user. 
- auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) created = db_api.create_workbook(WORKBOOKS[1]) fetched = db_api.get_workbooks() @@ -324,7 +326,7 @@ self.assertEqual(created1, fetched[0]) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_workbooks() @@ -347,7 +349,7 @@ auth_context.ctx().project_id) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_workbooks() @@ -420,6 +422,16 @@ self.assertEqual(created, fetched) + def test_get_workflow_definition_by_admin(self): + created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) + + # Switch to admin project. + auth_context.set_ctx(test_base.get_context(default=False, admin=True)) + + fetched = db_api.get_workflow_definition(created.id) + + self.assertEqual(created, fetched) + def test_filter_workflow_definitions_by_equal_value(self): db_api.create_workbook(WF_DEFINITIONS[0]) @@ -606,7 +618,7 @@ created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) # Switch to another project. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.NotAllowedException, @@ -616,10 +628,10 @@ ) def test_update_other_project_workflow_by_admin(self): - created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) + created = db_api.create_workflow_definition(WF_DEFINITIONS[1]) # Switch to admin. - auth_context.set_ctx(test_base.get_context(default=False, admin=True)) + auth_context.set_ctx(ADM_CTX) updated = db_api.update_workflow_definition( created['id'], @@ -632,7 +644,7 @@ self.assertEqual('my new definition', updated.definition) # Switch back. 
- auth_context.set_ctx(test_base.get_context()) + auth_context.set_ctx(DEFAULT_CTX) fetched = db_api.get_workflow_definition(created['id']) @@ -645,7 +657,7 @@ created = db_api.create_workflow_definition(system_workflow) # Switch to admin. - auth_context.set_ctx(test_base.get_context(default=False, admin=True)) + auth_context.set_ctx(ADM_CTX) updated = db_api.update_workflow_definition( created['id'], @@ -689,13 +701,13 @@ created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) cron_trigger = copy.copy(CRON_TRIGGER) cron_trigger['workflow_id'] = created.id db_api.create_cron_trigger(cron_trigger) - auth_context.set_ctx(test_base.get_context(default=True)) + auth_context.set_ctx(DEFAULT_CTX) self.assertRaises( exc.NotAllowedException, @@ -708,7 +720,7 @@ created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) # Switch to another user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) event_trigger = copy.copy(EVENT_TRIGGERS[0]) event_trigger.update({'workflow_id': created.id}) @@ -716,7 +728,7 @@ db_api.create_event_trigger(event_trigger) # Switch back. - auth_context.set_ctx(test_base.get_context(default=True)) + auth_context.set_ctx(DEFAULT_CTX) self.assertRaises( exc.NotAllowedException, @@ -810,7 +822,7 @@ created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) # Switch to another project. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.NotAllowedException, @@ -818,6 +830,23 @@ created.name ) + def test_delete_other_project_workflow_definition_by_admin(self): + created = db_api.create_workflow_definition(WF_DEFINITIONS[0]) + + # Switch to admin. + auth_context.set_ctx(ADM_CTX) + + db_api.delete_workflow_definition(created['id']) + + # Switch back. 
+ auth_context.set_ctx(DEFAULT_CTX) + + self.assertRaises( + exc.DBEntityNotFoundError, + db_api.get_workflow_definition, + created['id'] + ) + def test_workflow_definition_private(self): # Create a workflow(scope=private) as under one project # then make sure it's NOT visible for other projects. @@ -829,7 +858,7 @@ self.assertEqual(created1, fetched[0]) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_workflow_definitions() @@ -854,7 +883,7 @@ ) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_workflow_definitions() @@ -1335,7 +1364,7 @@ created = db_api.create_action_execution(ACTION_EXECS[0]) # Create a new user. - auth_context.set_ctx(test_base.get_context(default=False)) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.DBEntityNotFoundError, @@ -1438,6 +1467,43 @@ self.assertEqual(updated, fetched) self.assertIsNotNone(fetched.updated_at) + def test_update_workflow_execution_by_admin(self): + with db_api.transaction(): + created = db_api.create_workflow_execution(WF_EXECS[0]) + + auth_context.set_ctx(ADM_CTX) + + updated = db_api.update_workflow_execution( + created.id, + {'state': 'RUNNING', 'state_info': "Running..."} + ) + + auth_context.set_ctx(DEFAULT_CTX) + + self.assertEqual('RUNNING', updated.state) + self.assertEqual( + 'RUNNING', + db_api.load_workflow_execution(updated.id).state + ) + + fetched = db_api.get_workflow_execution(created.id) + + self.assertEqual(updated, fetched) + self.assertIsNotNone(fetched.updated_at) + + def test_update_workflow_execution_by_others_fail(self): + with db_api.transaction(): + created = db_api.create_workflow_execution(WF_EXECS[0]) + + auth_context.set_ctx(USER_CTX) + + self.assertRaises( + exc.DBEntityNotFoundError, + db_api.update_workflow_execution, + created.id, + {'state': 'RUNNING', 'state_info': "Running..."} + ) + def 
test_create_or_update_workflow_execution(self): id = 'not-existing-id' @@ -1639,6 +1705,33 @@ created.id ) + def test_delete_workflow_execution_by_admin(self): + with db_api.transaction(): + created = db_api.create_workflow_execution(WF_EXECS[0]) + fetched = db_api.get_workflow_execution(created.id) + + self.assertEqual(created, fetched) + + auth_context.set_ctx(ADM_CTX) + db_api.delete_workflow_execution(created.id) + auth_context.set_ctx(DEFAULT_CTX) + + self.assertRaises( + exc.DBEntityNotFoundError, + db_api.get_workflow_execution, + created.id + ) + + def test_delete_workflow_execution_by_other_fail(self): + created = db_api.create_workflow_execution(WF_EXECS[0]) + auth_context.set_ctx(USER_CTX) + + self.assertRaises( + exc.DBEntityNotFoundError, + db_api.delete_workflow_execution, + created.id + ) + def test_trim_status_info(self): created = db_api.create_workflow_execution(WF_EXECS[0]) @@ -2226,7 +2319,7 @@ created0 = db_api.create_cron_trigger(CRON_TRIGGERS[0]) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_cron_triggers( insecure=True, @@ -2542,7 +2635,7 @@ 'resource_id': '123e4567-e89b-12d3-a456-426655440000', 'resource_type': 'workflow', 'project_id': security.get_project_id(), - 'member_id': user_context.project_id, + 'member_id': USER_CTX.project_id, 'status': 'pending', }, { @@ -2563,18 +2656,18 @@ fetched = db_api.get_resource_member( '123e4567-e89b-12d3-a456-426655440000', 'workflow', - user_context.project_id + USER_CTX.project_id ) self.assertEqual(created_1, fetched) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_resource_member( '123e4567-e89b-12d3-a456-426655440000', 'workflow', - user_context.project_id + USER_CTX.project_id ) self.assertEqual(created_1, fetched) @@ -2613,7 +2706,7 @@ db_api.create_resource_member(RESOURCE_MEMBERS[1]) # Switch to another tenant. 
- auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) fetched = db_api.get_resource_members( created.resource_id, @@ -2627,12 +2720,12 @@ created = db_api.create_resource_member(RESOURCE_MEMBERS[0]) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) updated = db_api.update_resource_member( created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, {'status': 'accepted'} ) @@ -2647,7 +2740,7 @@ db_api.update_resource_member, created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, {'status': 'accepted'} ) @@ -2657,7 +2750,7 @@ db_api.delete_resource_member( created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, ) fetched = db_api.get_resource_members( @@ -2671,14 +2764,14 @@ created = db_api.create_resource_member(RESOURCE_MEMBERS[0]) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.DBEntityNotFoundError, db_api.delete_resource_member, created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, ) def test_delete_resource_member_already_deleted(self): @@ -2687,7 +2780,7 @@ db_api.delete_resource_member( created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, ) self.assertRaises( @@ -2695,7 +2788,7 @@ db_api.delete_resource_member, created.resource_id, 'workflow', - user_context.project_id, + USER_CTX.project_id, ) def test_delete_nonexistent_resource_member(self): @@ -2713,7 +2806,7 @@ wf = db_api.create_workflow_definition(WF_DEFINITIONS[1]) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.DBEntityNotFoundError, @@ -2722,25 +2815,25 @@ ) # Switch to original tenant, share workflow to another tenant. 
- auth_context.set_ctx(test_base.get_context()) + auth_context.set_ctx(DEFAULT_CTX) workflow_sharing = { 'resource_id': wf.id, 'resource_type': 'workflow', 'project_id': security.get_project_id(), - 'member_id': user_context.project_id, + 'member_id': USER_CTX.project_id, 'status': 'pending', } db_api.create_resource_member(workflow_sharing) # Switch to another tenant, accept the sharing, get workflows. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) db_api.update_resource_member( wf.id, 'workflow', - user_context.project_id, + USER_CTX.project_id, {'status': 'accepted'} ) @@ -2755,19 +2848,19 @@ 'resource_id': wf.id, 'resource_type': 'workflow', 'project_id': security.get_project_id(), - 'member_id': user_context.project_id, + 'member_id': USER_CTX.project_id, 'status': 'pending', } db_api.create_resource_member(workflow_sharing) # Switch to another tenant, accept the sharing. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) db_api.update_resource_member( wf.id, 'workflow', - user_context.project_id, + USER_CTX.project_id, {'status': 'accepted'} ) @@ -2776,12 +2869,12 @@ self.assertEqual(wf, fetched) # Switch to original tenant, delete the workflow. - auth_context.set_ctx(test_base.get_context()) + auth_context.set_ctx(DEFAULT_CTX) db_api.delete_workflow_definition(wf.id) # Switch to another tenant, can not see that workflow. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) self.assertRaises( exc.DBEntityNotFoundError, @@ -2796,19 +2889,19 @@ 'resource_id': wf.id, 'resource_type': 'workflow', 'project_id': security.get_project_id(), - 'member_id': user_context.project_id, + 'member_id': USER_CTX.project_id, 'status': 'pending', } db_api.create_resource_member(workflow_sharing) # Switch to another tenant, accept the sharing. 
- auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) db_api.update_resource_member( wf.id, 'workflow', - user_context.project_id, + USER_CTX.project_id, {'status': 'accepted'} ) @@ -2817,7 +2910,7 @@ db_api.create_cron_trigger(CRON_TRIGGERS[0]) # Switch to original tenant, try to delete the workflow. - auth_context.set_ctx(test_base.get_context()) + auth_context.set_ctx(DEFAULT_CTX) self.assertRaises( exc.DBError, @@ -2876,7 +2969,7 @@ db_api.create_event_trigger(EVENT_TRIGGERS[0]) # Switch to another tenant. - auth_context.set_ctx(user_context) + auth_context.set_ctx(USER_CTX) db_api.create_event_trigger(EVENT_TRIGGERS[1]) fetched = db_api.get_event_triggers() diff -Nru mistral-4.0.0/mistral/tests/unit/engine/base.py mistral-5.0.0~b2/mistral/tests/unit/engine/base.py --- mistral-4.0.0/mistral/tests/unit/engine/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,8 +22,10 @@ from mistral.db.v2 import api as db_api from mistral.engine import engine_server -from mistral.engine import executor_server -from mistral.engine.rpc_backend import rpc +from mistral.executors import base as exe +from mistral.executors import executor_server +from mistral.rpc import base as rpc_base +from mistral.rpc import clients as rpc_clients from mistral.tests.unit import base from mistral.workflow import states @@ -56,41 +58,44 @@ cfg.CONF.set_default('rpc_backend', 'fake') # Drop all RPC objects (transport, clients). 
- rpc.cleanup() - - self.engine_client = rpc.get_engine_client() - self.executor_client = rpc.get_executor_client() - - LOG.info("Starting engine and executor threads...") - - engine_service = engine_server.get_oslo_service(setup_profiler=False) - executor_service = executor_server.get_oslo_service( - setup_profiler=False - ) - - self.engine = engine_service.engine - self.executor = executor_service.executor - - self.threads = [ - eventlet.spawn(launch_service, executor_service), - eventlet.spawn(launch_service, engine_service) - ] + rpc_base.cleanup() + rpc_clients.cleanup() + exe.cleanup() + + self.threads = [] + + # Start remote executor. + if cfg.CONF.executor.type == 'remote': + LOG.info("Starting remote executor threads...") + self.executor_client = rpc_clients.get_executor_client() + exe_svc = executor_server.get_oslo_service(setup_profiler=False) + self.executor = exe_svc.executor + self.threads.append(eventlet.spawn(launch_service, exe_svc)) + self.addCleanup(exe_svc.stop, True) + + # Start engine. + LOG.info("Starting engine threads...") + self.engine_client = rpc_clients.get_engine_client() + eng_svc = engine_server.get_oslo_service(setup_profiler=False) + self.engine = eng_svc.engine + self.threads.append(eventlet.spawn(launch_service, eng_svc)) + self.addCleanup(eng_svc.stop, True) self.addOnException(self.print_executions) - - self.addCleanup(executor_service.stop, True) - self.addCleanup(engine_service.stop, True) self.addCleanup(self.kill_threads) # Make sure that both services fully started, otherwise # the test may run too early. 
- executor_service.wait_started() - engine_service.wait_started() + if cfg.CONF.executor.type == 'remote': + exe_svc.wait_started() + + eng_svc.wait_started() def kill_threads(self): LOG.info("Finishing engine and executor threads...") - [thread.kill() for thread in self.threads] + for thread in self.threads: + thread.kill() @staticmethod def print_executions(exc_info=None): diff -Nru mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/base.py mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/base.py --- mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -# Copyright (c) 2016 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mistral.tests.unit import base - - -class KombuTestCase(base.BaseTest): - pass diff -Nru mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/fake_kombu.py mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/fake_kombu.py --- mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/fake_kombu.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/fake_kombu.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -# Copyright (c) 2016 Intel Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from kombu import mixins as mx -import mock - - -# Hack for making tests works with kombu listener -mixins = mx - -producer = mock.MagicMock() - -producers = mock.MagicMock() -producers.__getitem__ = lambda *args, **kwargs: producer - -connection = mock.MagicMock() - -connections = mock.MagicMock() -connections.__getitem__ = lambda *args, **kwargs: connection - -serialization = mock.MagicMock() - - -def BrokerConnection(*args, **kwargs): - return mock.MagicMock() - - -def Exchange(*args, **kwargs): - return mock.MagicMock() - - -def Queue(*args, **kwargs): - return mock.MagicMock() - - -def Consumer(*args, **kwargs): - return mock.MagicMock() diff -Nru mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_client.py mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_client.py --- mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_client.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_client.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -# Copyright (c) 2016 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mistral import exceptions as exc -from mistral.tests.unit.engine.rpc_backend.kombu import base -from mistral.tests.unit.engine.rpc_backend.kombu import fake_kombu - -import mock -from six import moves - -with mock.patch.dict('sys.modules', kombu=fake_kombu): - from mistral.engine.rpc_backend.kombu import base as kombu_base - from mistral.engine.rpc_backend.kombu import kombu_client - - -class TestException(exc.MistralException): - pass - - -class KombuClientTestCase(base.KombuTestCase): - - _RESPONSE = "response" - - def setUp(self): - super(KombuClientTestCase, self).setUp() - conf = mock.MagicMock() - - kombu_client.kombu_listener.KombuRPCListener = mock.MagicMock() - self.client = kombu_client.KombuRPCClient(conf) - self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() - - def test_sync_call_result_get(self): - self.client._listener.get_result = mock.MagicMock( - return_value={ - kombu_base.TYPE: None, - kombu_base.RESULT: self.client._serialize_message({ - 'body': self._RESPONSE - }) - } - ) - response = self.client.sync_call(self.ctx, 'method') - - self.assertEqual(response, self._RESPONSE) - - def test_sync_call_result_not_get(self): - self.client._listener.get_result = mock.MagicMock( - side_effect=moves.queue.Empty - ) - - self.assertRaises( - exc.MistralException, - self.client.sync_call, - self.ctx, - 'method_not_found' - ) - - def test_sync_call_result_type_error(self): - - def side_effect(*args, **kwargs): - result = {} - result[kombu_base.TYPE] = 'error' - result[kombu_base.RESULT] = TestException() - return result - - 
self.client._wait_for_result = mock.MagicMock(side_effect=side_effect) - - self.assertRaises( - TestException, - self.client.sync_call, - self.ctx, - 'method' - ) - - def test_async_call(self): - response = self.client.async_call(self.ctx, 'method') - self.assertIsNone(response) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_listener.py mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_listener.py --- mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_listener.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_listener.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from mistral import exceptions as exc -from mistral.tests.unit.engine.rpc_backend.kombu import base -from mistral.tests.unit.engine.rpc_backend.kombu import fake_kombu -from mistral import utils - -import mock -from six import moves - -with mock.patch.dict('sys.modules', kombu=fake_kombu): - from mistral.engine.rpc_backend.kombu import base as kombu_base - from mistral.engine.rpc_backend.kombu import kombu_listener - - -class TestException(exc.MistralException): - pass - - -class KombuListenerTestCase(base.KombuTestCase): - - def setUp(self): - super(KombuListenerTestCase, self).setUp() - - self.listener = kombu_listener.KombuRPCListener( - [mock.MagicMock()], - mock.MagicMock() - ) - self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() - - def test_add_listener(self): - correlation_id = utils.generate_unicode_uuid() - - self.listener.add_listener(correlation_id) - - self.assertEqual( - type(self.listener._results.get(correlation_id)), - moves.queue.Queue - ) - - self.assertEqual( - self.listener._results[correlation_id].qsize(), - 0 - ) - - def test_remove_listener_correlation_id_in_results(self): - correlation_id = utils.generate_unicode_uuid() - - self.listener.add_listener(correlation_id) - - self.assertEqual( - type(self.listener._results.get(correlation_id)), - moves.queue.Queue - ) - - self.listener.remove_listener(correlation_id) - - self.assertEqual( - self.listener._results.get(correlation_id), - None - ) - - def test_remove_listener_correlation_id_not_in_results(self): - correlation_id = utils.generate_unicode_uuid() - - self.listener.add_listener(correlation_id) - - self.assertEqual( - type(self.listener._results.get(correlation_id)), - moves.queue.Queue - ) - - self.listener.remove_listener(utils.generate_unicode_uuid()) - - self.assertEqual( - type(self.listener._results.get(correlation_id)), - moves.queue.Queue - ) - - @mock.patch('threading.Thread') - def test_start_thread_not_set(self, thread_class_mock): - thread_mock = 
mock.MagicMock() - thread_class_mock.return_value = thread_mock - - self.listener.start() - - self.assertTrue(thread_mock.daemon) - self.assertEqual(thread_mock.start.call_count, 1) - - @mock.patch('threading.Thread') - def test_start_thread_set(self, thread_class_mock): - thread_mock = mock.MagicMock() - thread_class_mock.return_value = thread_mock - - self.listener._thread = mock.MagicMock() - self.listener.start() - - self.assertEqual(thread_mock.start.call_count, 0) - - def test_get_result_results_in_queue(self): - expected_result = 'abcd' - correlation_id = utils.generate_unicode_uuid() - - self.listener.add_listener(correlation_id) - self.listener._results.get(correlation_id).put(expected_result) - - result = self.listener.get_result(correlation_id, 5) - - self.assertEqual(result, expected_result) - - def test_get_result_not_in_queue(self): - correlation_id = utils.generate_unicode_uuid() - self.listener.add_listener(correlation_id) - - self.assertRaises( - moves.queue.Empty, - self.listener.get_result, - correlation_id, - 1 # timeout - ) - - def test_get_result_lack_of_queue(self): - correlation_id = utils.generate_unicode_uuid() - - self.assertRaises( - KeyError, - self.listener.get_result, - correlation_id, - 1 # timeout - ) - - def test__on_response_message_ack_fail(self): - message = mock.MagicMock() - message.ack.side_effect = Exception('Test Exception') - response = 'response' - - kombu_listener.LOG = mock.MagicMock() - - self.listener.on_message(response, message) - self.assertEqual(kombu_listener.LOG.debug.call_count, 1) - self.assertEqual(kombu_listener.LOG.exception.call_count, 1) - - def test__on_response_message_ack_ok_corr_id_not_match(self): - message = mock.MagicMock() - message.properties = mock.MagicMock() - message.properties.__getitem__ = lambda *args, **kwargs: True - response = 'response' - - kombu_listener.LOG = mock.MagicMock() - - self.listener.on_message(response, message) - self.assertEqual(kombu_listener.LOG.debug.call_count, 3) - 
self.assertEqual(kombu_listener.LOG.exception.call_count, 0) - - def test__on_response_message_ack_ok_messsage_type_error(self): - correlation_id = utils.generate_unicode_uuid() - - message = mock.MagicMock() - message.properties = dict() - message.properties['type'] = 'error' - message.properties['correlation_id'] = correlation_id - - response = TestException('response') - - kombu_listener.LOG = mock.MagicMock() - - self.listener.add_listener(correlation_id) - self.listener.on_message(response, message) - - self.assertEqual(kombu_listener.LOG.debug.call_count, 2) - self.assertEqual(kombu_listener.LOG.exception.call_count, 0) - - result = self.listener.get_result(correlation_id, 5) - - self.assertDictEqual( - result, - { - kombu_base.TYPE: 'error', - kombu_base.RESULT: response - } - ) - - def test__on_response_message_ack_ok(self): - correlation_id = utils.generate_unicode_uuid() - - message = mock.MagicMock() - message.properties = dict() - message.properties['type'] = None - message.properties['correlation_id'] = correlation_id - - response = 'response' - - kombu_listener.LOG = mock.MagicMock() - - self.listener.add_listener(correlation_id) - self.listener.on_message(response, message) - - self.assertEqual(kombu_listener.LOG.debug.call_count, 2) - self.assertEqual(kombu_listener.LOG.exception.call_count, 0) - - result = self.listener.get_result(correlation_id, 5) - - self.assertDictEqual( - result, - { - kombu_base.TYPE: None, - kombu_base.RESULT: response - } - ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_server.py mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_server.py --- mistral-4.0.0/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_server.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_server.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,296 +0,0 @@ -# Copyright (c) 2016 Intel Corporation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mistral import exceptions as exc -from mistral.tests.unit.engine.rpc_backend.kombu import base -from mistral.tests.unit.engine.rpc_backend.kombu import fake_kombu - -import mock -import socket -from stevedore import driver - -with mock.patch.dict('sys.modules', kombu=fake_kombu): - from mistral.engine.rpc_backend.kombu import kombu_server - - -class TestException(exc.MistralError): - pass - - -class KombuServerTestCase(base.KombuTestCase): - - def setUp(self): - super(KombuServerTestCase, self).setUp() - - self.conf = mock.MagicMock() - self.server = kombu_server.KombuRPCServer(self.conf) - self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() - - def test_is_running_is_running(self): - self.server._running.set() - self.assertTrue(self.server.is_running) - - def test_is_running_is_not_running(self): - self.server._running.clear() - self.assertFalse(self.server.is_running) - - def test_stop(self): - self.server.stop() - self.assertFalse(self.server.is_running) - - def test_publish_message(self): - body = 'body' - reply_to = 'reply_to' - corr_id = 'corr_id' - type = 'type' - - acquire_mock = mock.MagicMock() - fake_kombu.producer.acquire.return_value = acquire_mock - - enter_mock = mock.MagicMock() - acquire_mock.__enter__.return_value = enter_mock - - self.server.publish_message(body, reply_to, corr_id, type) - enter_mock.publish.assert_called_once_with( - body={'body': '"body"'}, - 
exchange='openstack', - routing_key=reply_to, - correlation_id=corr_id, - type=type, - serializer='json' - ) - - def test_run_launch_successfully(self): - acquire_mock = mock.MagicMock() - acquire_mock.drain_events.side_effect = TestException() - fake_kombu.connection.acquire.return_value = acquire_mock - - self.assertRaises(TestException, self.server.run) - self.assertTrue(self.server.is_running) - - def test_run_launch_successfully_than_stop(self): - - def side_effect(*args, **kwargs): - self.assertTrue(self.server.is_running) - raise KeyboardInterrupt - - acquire_mock = mock.MagicMock() - acquire_mock.drain_events.side_effect = side_effect - fake_kombu.connection.acquire.return_value = acquire_mock - - self.server.run() - self.assertFalse(self.server.is_running) - self.assertEqual(self.server._sleep_time, 1) - - def test_run_socket_error_reconnect(self): - - def side_effect(*args, **kwargs): - if acquire_mock.drain_events.call_count == 1: - raise socket.error() - raise TestException() - - acquire_mock = mock.MagicMock() - acquire_mock.drain_events.side_effect = side_effect - fake_kombu.connection.acquire.return_value = acquire_mock - - self.assertRaises(TestException, self.server.run) - self.assertEqual(self.server._sleep_time, 2) - - def test_run_socket_timeout_still_running(self): - - def side_effect(*args, **kwargs): - if acquire_mock.drain_events.call_count == 0: - raise socket.timeout() - raise TestException() - - acquire_mock = mock.MagicMock() - acquire_mock.drain_events.side_effect = side_effect - fake_kombu.connection.acquire.return_value = acquire_mock - - self.assertRaises( - TestException, - self.server.run - ) - self.assertTrue(self.server.is_running) - - def test_run_keyboard_interrupt_not_running(self): - acquire_mock = mock.MagicMock() - acquire_mock.drain_events.side_effect = KeyboardInterrupt() - fake_kombu.connection.acquire.return_value = acquire_mock - - self.assertIsNone(self.server.run()) - self.assertFalse(self.server.is_running) - - 
@mock.patch.object( - kombu_server.KombuRPCServer, - '_on_message', - mock.MagicMock() - ) - @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') - def test__on_message_safe_message_processing_ok(self, publish_message): - message = mock.MagicMock() - - self.server._on_message_safe(None, message) - - self.assertEqual(message.ack.call_count, 1) - self.assertEqual(publish_message.call_count, 0) - - @mock.patch.object(kombu_server.KombuRPCServer, '_on_message') - @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') - def test__on_message_safe_message_processing_raise( - self, - publish_message, - _on_message - ): - reply_to = 'reply_to' - correlation_id = 'corr_id' - message = mock.MagicMock() - message.properties = { - 'reply_to': reply_to, - 'correlation_id': correlation_id - } - - test_exception = TestException() - _on_message.side_effect = test_exception - - self.server._on_message_safe(None, message) - - self.assertEqual(message.ack.call_count, 1) - self.assertEqual(publish_message.call_count, 1) - - @mock.patch.object( - kombu_server.KombuRPCServer, - '_get_rpc_method', - mock.MagicMock(return_value=None) - ) - def test__on_message_rpc_method_not_found(self): - request = { - 'rpc_ctx': {}, - 'rpc_method': 'not_found_method', - 'arguments': {} - } - - message = mock.MagicMock() - message.properties = { - 'reply_to': None, - 'correlation_id': None - } - - self.assertRaises( - exc.MistralException, - self.server._on_message, - request, - message - ) - - @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') - @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') - @mock.patch('mistral.context.MistralContext') - def test__on_message_is_async(self, mistral_context, get_rpc_method, - publish_message): - result = 'result' - request = { - 'async': True, - 'rpc_ctx': {}, - 'rpc_method': 'found_method', - 'arguments': self.server._serialize_message({ - 'a': 1, - 'b': 2 - }) - } - - message = mock.MagicMock() - 
message.properties = { - 'reply_to': None, - 'correlation_id': None - } - message.delivery_info.get.return_value = False - - rpc_method = mock.MagicMock(return_value=result) - get_rpc_method.return_value = rpc_method - - self.server._on_message(request, message) - rpc_method.assert_called_once_with( - rpc_ctx=mistral_context(), - a=1, - b=2 - ) - self.assertEqual(publish_message.call_count, 0) - - @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') - @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') - @mock.patch('mistral.context.MistralContext') - def test__on_message_is_sync(self, mistral_context, get_rpc_method, - publish_message): - result = 'result' - request = { - 'async': False, - 'rpc_ctx': {}, - 'rpc_method': 'found_method', - 'arguments': self.server._serialize_message({ - 'a': 1, - 'b': 2 - }) - } - - reply_to = 'reply_to' - correlation_id = 'corr_id' - message = mock.MagicMock() - message.properties = { - 'reply_to': reply_to, - 'correlation_id': correlation_id - } - message.delivery_info.get.return_value = False - - rpc_method = mock.MagicMock(return_value=result) - get_rpc_method.return_value = rpc_method - - self.server._on_message(request, message) - rpc_method.assert_called_once_with( - rpc_ctx=mistral_context(), - a=1, - b=2 - ) - publish_message.assert_called_once_with( - result, - reply_to, - correlation_id - ) - - @mock.patch('stevedore.driver.DriverManager') - def test__prepare_worker(self, driver_manager_mock): - worker_mock = mock.MagicMock() - mgr_mock = mock.MagicMock() - mgr_mock.driver.return_value = worker_mock - - def side_effect(*args, **kwargs): - return mgr_mock - - driver_manager_mock.side_effect = side_effect - - self.server._prepare_worker('blocking') - - self.assertEqual(self.server._worker, worker_mock) - - @mock.patch('stevedore.driver.DriverManager') - def test__prepare_worker_no_valid_executor(self, driver_manager_mock): - - driver_manager_mock.side_effect = driver.NoMatches() - - 
self.assertRaises( - driver.NoMatches, - self.server._prepare_worker, - 'non_valid_executor' - ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_adhoc_actions.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_adhoc_actions.py --- mistral-4.0.0/mistral/tests/unit/engine/test_adhoc_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_adhoc_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -41,6 +41,11 @@ - s2 output: "<% $ %> and <% $ %>" + test_env: + base: std.echo + base-input: + output: '{{ env().foo }}' + workflows: wf1: type: direct @@ -81,6 +86,20 @@ tasks: concat: action: concat_twice + + wf4: + type: direct + input: + - str1 + output: + workflow_result: '{{ _.printenv_result }}' + + tasks: + printenv: + action: test_env + publish: + printenv_result: '{{ task().result }}' + """ @@ -135,3 +154,18 @@ self.assertIn("Invalid input", wf_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) + + def test_run_adhoc_action_with_env(self): + wf_ex = self.engine.start_workflow( + 'my_wb.wf4', {'str1': 'a'}, env={'foo': 'bar'}) + + self.await_workflow_success(wf_ex.id) + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + self.assertDictEqual( + { + 'workflow_result': 'bar' + }, + wf_ex.output + ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_cron_trigger.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_cron_trigger.py --- mistral-4.0.0/mistral/tests/unit/engine/test_cron_trigger.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_cron_trigger.py 2017-06-09 12:48:26.000000000 +0000 @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import datetime -import time import mock from oslo_config import cfg @@ -128,7 +127,7 @@ # Make the first_time 1 sec later than current time, in order to make # it executed by next cron-trigger task. 
- first_time = datetime.datetime.now() + datetime.timedelta(0, 1) + first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1) # Creates a cron-trigger with pattern and first time, ensure the # cron-trigger can be executed more than once, and cron-trigger will @@ -146,11 +145,8 @@ None ) - first_second = time.mktime(first_time.timetuple()) - first_utc_time = datetime.datetime.utcfromtimestamp(first_second) - self.assertEqual( - first_utc_time, + first_time, cron_trigger.next_execution_time ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_dataflow.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_dataflow.py --- mistral-4.0.0/mistral/tests/unit/engine/test_dataflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_dataflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -674,6 +674,258 @@ self.assertIn('task(task1).result.message', task1.state_info) + def test_override_json_input(self): + wf_text = """--- + version: 2.0 + + wf: + input: + - a: + aa: aa + bb: bb + + tasks: + task1: + action: std.noop + publish: + published_a: <% $.a %> + """ + + wf_service.create_workflows(wf_text) + + wf_input = { + 'a': { + 'cc': 'cc', + 'dd': 'dd' + } + } + + # Start workflow. + wf_ex = self.engine.start_workflow('wf', wf_input) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + task1 = wf_ex.task_executions[0] + + self.assertDictEqual(wf_input['a'], task1.published['published_a']) + + def test_branch_publishing_success(self): + wf_text = """--- + version: 2.0 + + wf: + tasks: + task1: + action: std.noop + on-success: + publish: + branch: + my_var: my branch value + next: task2 + + task2: + action: std.echo output=<% $.my_var %> + """ + + wf_service.create_workflows(wf_text) + + # Start workflow. 
+ wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + task1 = self._assert_single_item(tasks, name='task1') + + self._assert_single_item(tasks, name='task2') + + self.assertDictEqual({"my_var": "my branch value"}, task1.published) + + def test_global_publishing_success_access_via_root_context_(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output="Hi" + on-success: + publish: + global: + my_var: <% task().result %> + next: + - task2 + + task2: + action: std.echo output=<% $.my_var %> + publish: + result: <% task().result %> + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + self._assert_single_item(tasks, name='task1') + task2 = self._assert_single_item(tasks, name='task2') + + self.assertDictEqual({'result': 'Hi'}, task2.published) + + def test_global_publishing_error_access_via_root_context(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.fail + on-success: + publish: + global: + my_var: "We got success" + next: + - task2 + on-error: + publish: + global: + my_var: "We got an error" + next: + - task2 + + task2: + action: std.echo output=<% $.my_var %> + publish: + result: <% task().result %> + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. 
+ wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + self._assert_single_item(tasks, name='task1') + task2 = self._assert_single_item(tasks, name='task2') + + self.assertDictEqual({'result': 'We got an error'}, task2.published) + + def test_global_publishing_success_access_via_function(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.noop + on-success: + publish: + branch: + my_var: Branch local value + global: + my_var: Global value + next: + - task2 + + task2: + action: std.noop + publish: + local: <% $.my_var %> + global: <% global(my_var) %> + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + self._assert_single_item(tasks, name='task1') + task2 = self._assert_single_item(tasks, name='task2') + + self.assertDictEqual( + { + 'local': 'Branch local value', + 'global': 'Global value' + }, + task2.published + ) + + def test_global_publishing_error_access_via_function(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.fail + on-error: + publish: + branch: + my_var: Branch local value + global: + my_var: Global value + next: + - task2 + + task2: + action: std.noop + publish: + local: <% $.my_var %> + global: <% global(my_var) %> + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. 
+ wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + self._assert_single_item(tasks, name='task1') + task2 = self._assert_single_item(tasks, name='task2') + + self.assertDictEqual( + { + 'local': 'Branch local value', + 'global': 'Global value' + }, + task2.published + ) + class DataFlowTest(test_base.BaseTest): def test_get_task_execution_result(self): diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_default_engine.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_default_engine.py --- mistral-4.0.0/mistral/tests/unit/engine/test_default_engine.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_default_engine.py 2017-06-09 12:48:26.000000000 +0000 @@ -23,8 +23,8 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral.engine import default_engine as d_eng -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.executors import base as exe from mistral.services import workbooks as wb_service from mistral.tests.unit import base from mistral.tests.unit.engine import base as eng_test_base @@ -93,7 +93,7 @@ MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) -@mock.patch.object(rpc, 'get_executor_client', mock.Mock()) +@mock.patch.object(exe, 'get_executor', mock.Mock()) class DefaultEngineTest(base.DbTestCase): def setUp(self): super(DefaultEngineTest, self).setUp() diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_direct_workflow.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_direct_workflow.py --- mistral-4.0.0/mistral/tests/unit/engine/test_direct_workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_direct_workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -38,7 +38,7 @@ return db_api.get_workflow_execution(wf_ex.id) - def test_direct_workflow_on_closures(self): + def test_on_closures(self): wf_text = """ version: '2.0' @@ 
-48,7 +48,7 @@ tasks: task1: description: | - Explicit 'fail' command should lead to workflow failure. + Explicit 'succeed' command should lead to workflow success. action: std.echo output="Echo" on-success: - task2 @@ -72,7 +72,7 @@ action: std.noop """ - wf_ex = self._run_workflow(wf_text) + wf_ex = self._run_workflow(wf_text, expected_state=states.SUCCESS) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -80,18 +80,16 @@ tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') - task3 = self._assert_single_item(tasks, name='task3') - task4 = self._assert_single_item(tasks, name='task4') + task2 = self._assert_single_item(tasks, name='task2') - self.assertEqual(3, len(tasks)) + self.assertEqual(2, len(tasks)) self.await_task_success(task1.id) - self.await_task_success(task3.id) - self.await_task_success(task4.id) + self.await_task_success(task2.id) self.assertTrue(wf_ex.state, states.ERROR) - def test_direct_workflow_condition_transition_not_triggering(self): + def test_condition_transition_not_triggering(self): wf_text = """--- version: '2.0' @@ -132,7 +130,7 @@ self.assertTrue(wf_ex.state, states.ERROR) - def test_direct_workflow_change_state_after_success(self): + def test_change_state_after_success(self): wf_text = """ version: '2.0' @@ -656,7 +654,7 @@ len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0 ) - def test_direct_workfow_output(self): + def test_output(self): wf_text = """--- version: '2.0' @@ -680,3 +678,77 @@ wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual({}, wf_ex.output) + + def test_triggered_by(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.noop + on-success: task2 + + task2: + action: std.fail + on-error: task3 + + task3: + action: std.fail + on-error: noop + on-success: task4 + on-complete: task4 + + task4: + action: std.noop + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', 
{}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + task_execs = wf_ex.task_executions + + task1 = self._assert_single_item(task_execs, name='task1') + task2 = self._assert_single_item(task_execs, name='task2') + task3 = self._assert_single_item(task_execs, name='task3') + task4 = self._assert_single_item(task_execs, name='task4') + + key = 'triggered_by' + + self.assertIsNone(task1.runtime_context.get(key)) + + self.assertListEqual( + [ + { + "task_id": task1.id, + "event": "on-success" + } + ], + task2.runtime_context.get(key) + ) + + self.assertListEqual( + [ + { + "task_id": task2.id, + "event": "on-error" + } + ], + task3.runtime_context.get(key) + ) + + self.assertListEqual( + [ + { + "task_id": task3.id, + "event": "on-complete" + } + ], + task4.runtime_context.get(key) + ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_direct_workflow_rerun.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_direct_workflow_rerun.py --- mistral-4.0.0/mistral/tests/unit/engine/test_direct_workflow_rerun.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_direct_workflow_rerun.py 2017-06-09 12:48:26.000000000 +0000 @@ -559,7 +559,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_success(wf_ex.id, delay=10) + self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -646,7 +646,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_success(wf_ex.id, delay=10) + self.await_workflow_success(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -739,7 +739,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_success(wf_ex.id, delay=10) + self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = 
db_api.get_workflow_execution(wf_ex.id) @@ -1088,7 +1088,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_error(wf_ex.id, delay=10) + self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -1109,7 +1109,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_error(wf_ex.id, delay=10) + self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -1130,7 +1130,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_error(wf_ex.id, delay=10) + self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) @@ -1151,7 +1151,7 @@ self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) - self.await_workflow_success(wf_ex.id, delay=10) + self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_environment.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_environment.py --- mistral-4.0.0/mistral/tests/unit/engine/test_environment.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_environment.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,9 +16,10 @@ from oslo_config import cfg from mistral.db.v2 import api as db_api -from mistral.engine import default_executor -from mistral.engine.rpc_backend import rpc +from mistral.executors import default_executor as d_exe +from mistral.executors import remote_executor as r_exe from mistral.services import workbooks as wb_service +from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base @@ -77,16 +78,17 @@ """ -def _run_at_target(action_ex_id, action_class_str, attributes, - action_params, target=None, async_=True, safe_rerun=False): +def _run_at_target(action_ex_id, action_cls_str, 
action_cls_attrs, + params, safe_rerun, target=None, async_=True): + # We'll just call executor directly for testing purposes. - executor = default_executor.DefaultExecutor() + executor = d_exe.DefaultExecutor() executor.run_action( action_ex_id, - action_class_str, - attributes, - action_params, + action_cls_str, + action_cls_attrs, + params, safe_rerun ) @@ -100,7 +102,7 @@ wb_service.create_workbook_v2(WORKBOOK) - @mock.patch.object(rpc.ExecutorClient, 'run_action', MOCK_RUN_AT_TARGET) + @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def _test_subworkflow(self, env): wf2_ex = self.engine.start_workflow('my_wb.wf2', {}, env=env) @@ -169,13 +171,13 @@ for t_ex in wf1_task_execs: a_ex = t_ex.action_executions[0] - rpc.ExecutorClient.run_action.assert_any_call( + r_exe.RemoteExecutor.run_action.assert_any_call( a_ex.id, 'mistral.actions.std_actions.EchoAction', {}, a_ex.input, - TARGET, - safe_rerun=False + False, + target=TARGET ) def test_subworkflow_env_task_input(self): @@ -198,3 +200,157 @@ } self._test_subworkflow(env) + + def test_evaluate_env_parameter(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.noop + publish: + var1: <% env().var1 %> + var2: <% env().var2 %> + """ + + wf_service.create_workflows(wf_text) + + env = { + "var1": "val1", + "var2": "<% env().var1 %>" + } + + # Run with 'evaluate_env' set to True. + + wf_ex = self.engine.start_workflow( + 'wf', + {}, + env=env, + evaluate_env=True + ) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t = self._assert_single_item(wf_ex.task_executions, name='task1') + + self.assertDictEqual( + { + "var1": "val1", + "var2": "val1" + }, + t.published + ) + + # Run with 'evaluate_env' set to False. 
+ + wf_ex = self.engine.start_workflow( + 'wf', + {}, + env=env, + evaluate_env=False + ) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t = self._assert_single_item(wf_ex.task_executions, name='task1') + + self.assertDictEqual( + { + "var1": "val1", + "var2": "<% env().var1 %>" + }, + t.published + ) + + def test_evaluate_env_parameter_subworkflow(self): + wf_text = """--- + version: '2.0' + + parent_wf: + tasks: + task1: + workflow: sub_wf + + sub_wf: + output: + result: <% $.result %> + + tasks: + task1: + action: std.noop + publish: + result: <% env().dummy %> + """ + + wf_service.create_workflows(wf_text) + + # Run with 'evaluate_env' set to False. + + env = {"dummy": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"} + + parent_wf_ex = self.engine.start_workflow( + 'parent_wf', + {}, + env=env, + evaluate_env=False + ) + + self.await_workflow_success(parent_wf_ex.id) + + with db_api.transaction(): + parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id) + + t = self._assert_single_item( + parent_wf_ex.task_executions, + name='task1' + ) + + sub_wf_ex = db_api.get_workflow_executions( + task_execution_id=t.id + )[0] + + self.assertDictEqual( + { + "result": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>" + }, + sub_wf_ex.output + ) + + # Run with 'evaluate_env' set to True. 
+ + env = {"dummy": "<% 1 + 1 %>"} + + parent_wf_ex = self.engine.start_workflow( + 'parent_wf', + {}, + env=env, + evaluate_env=True + ) + + self.await_workflow_success(parent_wf_ex.id) + + with db_api.transaction(): + parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id) + + t = self._assert_single_item( + parent_wf_ex.task_executions, + name='task1' + ) + + sub_wf_ex = db_api.get_workflow_executions( + task_execution_id=t.id + )[0] + + self.assertDictEqual( + { + "result": 2 + }, + sub_wf_ex.output + ) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_javascript_action.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_javascript_action.py --- mistral-4.0.0/mistral/tests/unit/engine/test_javascript_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_javascript_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -64,9 +64,6 @@ class JavaScriptEngineTest(base.EngineTestCase): - def setUp(self): - super(JavaScriptEngineTest, self).setUp() - @testtools.skip('It requires installed JS engine.') def test_javascript_action(self): wb_service.create_workbook_v2(WORKBOOK) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_join.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_join.py --- mistral-4.0.0/mistral/tests/unit/engine/test_join.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_join.py 2017-06-09 12:48:26.000000000 +0000 @@ -51,6 +51,15 @@ self.await_workflow_success(wf_ex.id) + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t_execs = wf_ex.task_executions + + self._assert_single_item(t_execs, name='task1') + self._assert_single_item(t_execs, name='task2') + self._assert_single_item(t_execs, name='join_task') + def test_full_join_without_errors(self): wf_text = """--- version: '2.0' @@ -1031,3 +1040,194 @@ wf_ex = self.engine.start_workflow('wf', {}) self.await_workflow_success(wf_ex.id) + + def test_triggered_by_success(self): + 
wf_text = """--- + version: '2.0' + + wf: + type: direct + + tasks: + join_task: + join: all + + task1: + on-success: join_task + + task2: + on-success: join_task + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t_execs = wf_ex.task_executions + + task1 = self._assert_single_item(t_execs, name='task1') + task2 = self._assert_single_item(t_execs, name='task2') + join_task = self._assert_single_item(t_execs, name='join_task') + + key = 'triggered_by' + + self.assertIsNone(task1.runtime_context.get(key)) + self.assertIsNone(task2.runtime_context.get(key)) + + self.assertIn( + { + "task_id": task1.id, + "event": "on-success" + }, + join_task.runtime_context.get(key) + ) + self.assertIn( + { + "task_id": task2.id, + "event": "on-success" + }, + join_task.runtime_context.get(key) + ) + + def test_triggered_by_error(self): + wf_text = """--- + version: '2.0' + + wf: + type: direct + + tasks: + task1: + on-success: join_task + + task2: + action: std.fail + on-success: join_task + + task3: + action: std.noop + on-error: join_task + + join_task: + join: all + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_error(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t_execs = wf_ex.task_executions + + task1 = self._assert_single_item( + t_execs, + name='task1', + state=states.SUCCESS + ) + task2 = self._assert_single_item( + t_execs, + name='task2', + state=states.ERROR + ) + task3 = self._assert_single_item( + t_execs, + name='task3', + state=states.SUCCESS + ) + join_task = self._assert_single_item( + t_execs, + name='join_task', + state=states.ERROR + ) + + key = 'triggered_by' + + self.assertIsNone(task1.runtime_context.get(key)) + self.assertIsNone(task2.runtime_context.get(key)) + 
self.assertIsNone(task3.runtime_context.get(key)) + + self.assertIn( + { + "task_id": task2.id, + "event": "not triggered" + }, + join_task.runtime_context.get(key) + ) + self.assertIn( + { + "task_id": task3.id, + "event": "not triggered" + }, + join_task.runtime_context.get(key) + ) + + def test_triggered_by_impossible_route(self): + wf_text = """--- + version: '2.0' + + wf: + type: direct + + tasks: + task1: + on-success: join_task + + task2: + action: std.fail + on-success: task3 + + task3: + action: std.noop + on-success: join_task + + join_task: + join: all + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_error(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + t_execs = wf_ex.task_executions + + task1 = self._assert_single_item( + t_execs, + name='task1', + state=states.SUCCESS + ) + task2 = self._assert_single_item( + t_execs, + name='task2', + state=states.ERROR + ) + join_task = self._assert_single_item( + t_execs, + name='join_task', + state=states.ERROR + ) + + self.assertEqual(3, len(t_execs)) + + key = 'triggered_by' + + self.assertIsNone(task1.runtime_context.get(key)) + self.assertIsNone(task2.runtime_context.get(key)) + + # Note: in case if execution does not exist for a previous + # task we can't track it in "triggered_by" because we need + # to know its ID so we leave it blank. + self.assertFalse(join_task.runtime_context.get(key)) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_lookup_utils.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_lookup_utils.py --- mistral-4.0.0/mistral/tests/unit/engine/test_lookup_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_lookup_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,82 @@ +# Copyright 2017 - Nokia Networks. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from mistral.db.v2 import api as db_api +from mistral.services import workflows as wf_service +from mistral.tests.unit.engine import base +from mistral.workflow import lookup_utils +from mistral.workflow import states + +# Use the set_default method to set value otherwise in certain test cases +# the change in value is not permanent. +cfg.CONF.set_default('auth_enable', False, group='pecan') + + +class LookupUtilsTest(base.EngineTestCase): + def test_task_execution_cache_invalidation(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.noop + on-success: join_task + + task2: + action: std.noop + on-success: join_task + + join_task: + join: all + on-success: task4 + + task4: + action: std.noop + pause-before: true + """ + + wf_service.create_workflows(wf_text) + + # Start workflow. + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_paused(wf_ex.id) + + with db_api.transaction(): + # Note: We need to reread execution to access related tasks. 
+ wf_ex = db_api.get_workflow_execution(wf_ex.id) + + tasks = wf_ex.task_executions + + self.assertEqual(4, len(tasks)) + + self._assert_single_item(tasks, name='task1', state=states.SUCCESS) + self._assert_single_item(tasks, name='task2', state=states.SUCCESS) + self._assert_single_item(tasks, name='join_task', state=states.SUCCESS) + self._assert_single_item(tasks, name='task4', state=states.IDLE) + + # Expecting one cache entry because we know that 'join' operation + # uses cached lookups and the workflow is not finished yet. + self.assertEqual(1, lookup_utils.get_task_execution_cache_size()) + + self.engine.resume_workflow(wf_ex.id) + + self.await_workflow_success(wf_ex.id) + + # Expecting that the cache size is 0 because the workflow has + # finished and invalidated corresponding cache entry. + self.assertEqual(0, lookup_utils.get_task_execution_cache_size()) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_policies.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_policies.py --- mistral-4.0.0/mistral/tests/unit/engine/test_policies.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_policies.py 2017-06-09 12:48:26.000000000 +0000 @@ -21,10 +21,10 @@ from mistral.db.v2.sqlalchemy import models from mistral.engine import policies from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base -from mistral.workbook import parser as spec_parser from mistral.workflow import states @@ -572,11 +572,20 @@ self.assertEqual(states.RUNNING, task_ex.state) self.assertDictEqual({}, task_ex.runtime_context) - # TODO(rakhmerov): This check doesn't make sense anymore because - # we don't store evaluated values anywhere. - # Need to create a better test. 
- # self.assertEqual(3, task_ex.in_context["count"]) - # self.assertEqual(1, task_ex.in_context["delay"]) + self.await_task_delayed(task_ex.id, delay=0.5) + self.await_task_error(task_ex.id) + + self.await_workflow_error(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + task_ex = wf_ex.task_executions[0] + + self.assertEqual( + 3, + task_ex.runtime_context["retry_task_policy"]["retry_no"] + ) def test_retry_policy_never_happen(self): retry_wb = """--- diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_run_action.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_run_action.py --- mistral-4.0.0/mistral/tests/unit/engine/test_run_action.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_run_action.py 2017-06-09 12:48:26.000000000 +0000 @@ -88,9 +88,6 @@ """ actions.create_actions(action) - def tearDown(self): - super(RunActionEngineTest, self).tearDown() - def test_run_action_sync(self): # Start action and see the result. 
action_ex = self.engine.start_action('std.echo', {'output': 'Hello!'}) diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_safe_rerun.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_safe_rerun.py --- mistral-4.0.0/mistral/tests/unit/engine/test_safe_rerun.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_safe_rerun.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,8 +16,8 @@ import mock from mistral.db.v2 import api as db_api -from mistral.engine import default_executor -from mistral.engine.rpc_backend import rpc +from mistral.executors import default_executor as d_exe +from mistral.executors import remote_executor as r_exe from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import data_flow @@ -25,9 +25,9 @@ def _run_at_target(action_ex_id, action_class_str, attributes, - action_params, target=None, async_=True, safe_rerun=False): + action_params, safe_rerun, target=None, async_=True): # We'll just call executor directly for testing purposes. 
- executor = default_executor.DefaultExecutor() + executor = d_exe.DefaultExecutor() executor.run_action( action_ex_id, @@ -43,7 +43,8 @@ class TestSafeRerun(base.EngineTestCase): - @mock.patch.object(rpc.ExecutorClient, 'run_action', MOCK_RUN_AT_TARGET) + + @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_true(self): wf_text = """--- version: '2.0' @@ -89,7 +90,7 @@ self.assertEqual(task1.state, states.SUCCESS) self.assertEqual(task2.state, states.SUCCESS) - @mock.patch.object(rpc.ExecutorClient, 'run_action', MOCK_RUN_AT_TARGET) + @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_false(self): wf_text = """--- version: '2.0' @@ -135,7 +136,7 @@ self.assertEqual(task1.state, states.ERROR) self.assertEqual(task3.state, states.SUCCESS) - @mock.patch.object(rpc.ExecutorClient, 'run_action', MOCK_RUN_AT_TARGET) + @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_with_items(self): wf_text = """--- version: '2.0' diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_task_cancel.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_task_cancel.py --- mistral-4.0.0/mistral/tests/unit/engine/test_task_cancel.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_task_cancel.py 2017-06-09 12:48:26.000000000 +0000 @@ -32,7 +32,6 @@ version: '2.0' wf: - type: direct tasks: task1: action: std.async_noop @@ -119,13 +118,11 @@ workflows: wf: - type: direct tasks: taskx: workflow: subwf subwf: - type: direct tasks: task1: action: std.async_noop @@ -207,7 +204,6 @@ version: '2.0' wf: - type: direct tasks: task1: action: std.async_noop @@ -294,10 +290,11 @@ def test_cancel_with_items_concurrency(self): wb_def = """ version: '2.0' + name: wb1 + workflows: wf1: - type: direct tasks: t1: with-items: i in <% list(range(0, 4)) %> diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_with_items.py 
mistral-5.0.0~b2/mistral/tests/unit/engine/test_with_items.py --- mistral-4.0.0/mistral/tests/unit/engine/test_with_items.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_with_items.py 2017-06-09 12:48:26.000000000 +0000 @@ -1060,10 +1060,10 @@ with_items_retry: tasks: task1: - with-items: i in [1, 2, 3] + with-items: i in [1, 2] action: std.fail retry: - count: 3 + count: 1 delay: 1 on-error: task2 @@ -1091,11 +1091,11 @@ task1_executions = task1_ex.executions self.assertEqual( - 3, + 1, task1_ex.runtime_context['retry_task_policy']['retry_no'] ) - self.assertEqual(12, len(task1_executions)) - self._assert_multiple_items(task1_executions, 3, accepted=True) + self.assertEqual(4, len(task1_executions)) + self._assert_multiple_items(task1_executions, 2, accepted=True) def test_with_items_concurrency_retry_policy(self): wf_text = """--- @@ -1104,10 +1104,10 @@ wf: tasks: task1: - with-items: i in [1, 2, 3, 4] + with-items: i in [1, 2] action: std.fail retry: - count: 3 + count: 2 delay: 1 concurrency: 2 on-error: task2 @@ -1137,8 +1137,8 @@ task1_execs = task1_ex.executions - self.assertEqual(16, len(task1_execs)) - self._assert_multiple_items(task1_execs, 4, accepted=True) + self.assertEqual(6, len(task1_execs)) + self._assert_multiple_items(task1_execs, 2, accepted=True) def test_with_items_env(self): wf_text = """--- diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_workflow_cancel.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_workflow_cancel.py --- mistral-4.0.0/mistral/tests/unit/engine/test_workflow_cancel.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_workflow_cancel.py 2017-06-09 12:48:26.000000000 +0000 @@ -71,6 +71,33 @@ self.assertEqual(1, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) + def test_cancel_workflow_if_definition_deleted(self): + workflow = """ + version: '2.0' + + wf: + type: direct + tasks: + task1: + action: std.echo 
output="foo" + wait-before: 5 + """ + + wf = wf_service.create_workflows(workflow)[0] + + wf_ex = self.engine.start_workflow('wf', {}) + + with db_api.transaction(): + db_api.delete_workflow_definition(wf.id) + + self.engine.stop_workflow( + wf_ex.id, + states.CANCELLED, + "Cancelled by user." + ) + + self.await_workflow_cancelled(wf_ex.id) + def test_cancel_paused_workflow(self): workflow = """ version: '2.0' diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_workflow_resume.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_workflow_resume.py --- mistral-4.0.0/mistral/tests/unit/engine/test_workflow_resume.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_workflow_resume.py 2017-06-09 12:48:26.000000000 +0000 @@ -17,9 +17,9 @@ from mistral.db.v2 import api as db_api from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base -from mistral.workbook import parser as spec_parser from mistral.workflow import data_flow from mistral.workflow import states from mistral.workflow import utils diff -Nru mistral-4.0.0/mistral/tests/unit/engine/test_yaql_functions.py mistral-5.0.0~b2/mistral/tests/unit/engine/test_yaql_functions.py --- mistral-4.0.0/mistral/tests/unit/engine/test_yaql_functions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/engine/test_yaql_functions.py 2017-06-09 12:48:26.000000000 +0000 @@ -229,6 +229,63 @@ task2_ex.published ) + def test_task_function_no_name_on_complete_case(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output=1 + on-complete: + - fail(msg=<% task() %>) + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_error(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + self.assertEqual(states.ERROR, 
wf_ex.state) + self.assertIsNotNone(wf_ex.state_info) + self.assertIn(wf_ex.id, wf_ex.state_info) + + def test_task_function_no_name_on_success_case(self): + wf_text = """--- + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output=1 + on-success: + - task2: <% task().result = 1 %> + - task3: <% task().result = 100 %> + + task2: + action: std.echo output=2 + + task3: + action: std.echo output=3 + """ + + wf_service.create_workflows(wf_text) + + wf_ex = self.engine.start_workflow('wf', {}) + + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + + self.assertEqual(2, len(wf_ex.task_executions)) + self._assert_single_item(wf_ex.task_executions, name='task1') + self._assert_single_item(wf_ex.task_executions, name='task2') + def test_uuid_function(self): wf_text = """--- version: '2.0' @@ -313,3 +370,8 @@ ) self.assertDictEqual({'param1': 'blablabla'}, execution['params']) + + self.assertEqual( + wf_ex.created_at.isoformat(' '), + execution['created_at'] + ) diff -Nru mistral-4.0.0/mistral/tests/unit/executors/base.py mistral-5.0.0~b2/mistral/tests/unit/executors/base.py --- mistral-4.0.0/mistral/tests/unit/executors/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/executors/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,24 @@ +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo_log import log as logging + +from mistral.tests.unit.engine import base as engine_test_base + + +LOG = logging.getLogger(__name__) + + +class ExecutorTestCase(engine_test_base.EngineTestCase): + pass diff -Nru mistral-4.0.0/mistral/tests/unit/executors/test_local_executor.py mistral-5.0.0~b2/mistral/tests/unit/executors/test_local_executor.py --- mistral-4.0.0/mistral/tests/unit/executors/test_local_executor.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/executors/test_local_executor.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,171 @@ +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from oslo_config import cfg +from oslo_log import log as logging + +from mistral.actions import std_actions +from mistral.db.v2 import api as db_api +from mistral.executors import base as exe +from mistral.executors import remote_executor as r_exe +from mistral.services import workbooks as wb_svc +from mistral.tests.unit.executors import base +from mistral.workflow import states + + +LOG = logging.getLogger(__name__) + + +# Use the set_default method to set value otherwise in certain test cases +# the change in value is not permanent. 
+cfg.CONF.set_default('auth_enable', False, group='pecan') + + +@mock.patch.object( + r_exe.RemoteExecutor, + 'run_action', + mock.MagicMock(return_value=None) +) +class LocalExecutorTestCase(base.ExecutorTestCase): + + @classmethod + def setUpClass(cls): + super(LocalExecutorTestCase, cls).setUpClass() + cfg.CONF.set_default('type', 'local', group='executor') + + @classmethod + def tearDownClass(cls): + exe.cleanup() + cfg.CONF.set_default('type', 'remote', group='executor') + super(LocalExecutorTestCase, cls).tearDownClass() + + @mock.patch.object( + std_actions.EchoAction, + 'run', + mock.MagicMock( + side_effect=[ + 'Task 1', # Mock task1 success. + 'Task 2', # Mock task2 success. + 'Task 3' # Mock task3 success. + ] + ) + ) + def test_run(self): + wb_def = """ + version: '2.0' + + name: wb1 + + workflows: + wf1: + type: direct + + tasks: + t1: + action: std.echo output="Task 1" + on-success: + - t2 + t2: + action: std.echo output="Task 2" + on-success: + - t3 + t3: + action: std.echo output="Task 3" + """ + + wb_svc.create_workbook_v2(wb_def) + wf_ex = self.engine.start_workflow('wb1.wf1', {}) + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + task_execs = wf_ex.task_executions + + self.assertEqual(states.SUCCESS, wf_ex.state) + self.assertIsNone(wf_ex.state_info) + self.assertEqual(3, len(task_execs)) + + task_1_ex = self._assert_single_item(task_execs, name='t1') + task_2_ex = self._assert_single_item(task_execs, name='t2') + task_3_ex = self._assert_single_item(task_execs, name='t3') + + self.assertEqual(states.SUCCESS, task_1_ex.state) + self.assertEqual(states.SUCCESS, task_2_ex.state) + self.assertEqual(states.SUCCESS, task_3_ex.state) + + # Make sure the remote executor is not called. + self.assertFalse(r_exe.RemoteExecutor.run_action.called) + + @mock.patch.object( + std_actions.EchoAction, + 'run', + mock.MagicMock( + side_effect=[ + 'Task 1.0', # Mock task1 success. 
+ 'Task 1.1', # Mock task1 success. + 'Task 1.2', # Mock task1 success. + 'Task 2' # Mock task2 success. + ] + ) + ) + def test_run_with_items(self): + wb_def = """ + version: '2.0' + + name: wb1 + + workflows: + wf1: + type: direct + + tasks: + t1: + with-items: i in <% list(range(0, 3)) %> + action: std.echo output="Task 1.<% $.i %>" + publish: + v1: <% task(t1).result %> + on-success: + - t2 + t2: + action: std.echo output="Task 2" + """ + + wb_svc.create_workbook_v2(wb_def) + wf_ex = self.engine.start_workflow('wb1.wf1', {}) + self.await_workflow_success(wf_ex.id) + + with db_api.transaction(): + wf_ex = db_api.get_workflow_execution(wf_ex.id) + task_execs = wf_ex.task_executions + + self.assertEqual(states.SUCCESS, wf_ex.state) + self.assertEqual(2, len(wf_ex.task_executions)) + + task_1_ex = self._assert_single_item(task_execs, name='t1') + task_2_ex = self._assert_single_item(task_execs, name='t2') + + self.assertEqual(states.SUCCESS, task_1_ex.state) + self.assertEqual(states.SUCCESS, task_2_ex.state) + + with db_api.transaction(): + task_1_action_exs = db_api.get_action_executions( + task_execution_id=task_1_ex.id + ) + + self.assertEqual(3, len(task_1_action_exs)) + + # Make sure the remote executor is not called. + self.assertFalse(r_exe.RemoteExecutor.run_action.called) diff -Nru mistral-4.0.0/mistral/tests/unit/executors/test_plugins.py mistral-5.0.0~b2/mistral/tests/unit/executors/test_plugins.py --- mistral-4.0.0/mistral/tests/unit/executors/test_plugins.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/executors/test_plugins.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,40 @@ +# Copyright 2017 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_log import log as logging + +from mistral.executors import base as exe +from mistral.executors import default_executor as d_exe +from mistral.executors import remote_executor as r_exe +from mistral.tests.unit.executors import base + + +LOG = logging.getLogger(__name__) + + +class PluginTestCase(base.ExecutorTestCase): + + def tearDown(self): + exe.cleanup() + super(PluginTestCase, self).tearDown() + + def test_get_local_executor(self): + executor = exe.get_executor('local') + + self.assertIsInstance(executor, d_exe.DefaultExecutor) + + def test_get_remote_executor(self): + executor = exe.get_executor('remote') + + self.assertIsInstance(executor, r_exe.RemoteExecutor) diff -Nru mistral-4.0.0/mistral/tests/unit/fake_policy.py mistral-5.0.0~b2/mistral/tests/unit/fake_policy.py --- mistral-4.0.0/mistral/tests/unit/fake_policy.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/fake_policy.py 2017-06-09 12:48:26.000000000 +0000 @@ -44,6 +44,7 @@ "executions:delete": "rule:admin_or_owner", "executions:get": "rule:admin_or_owner", "executions:list": "rule:admin_or_owner", + "executions:list:all_projects": "rule:admin_only", "executions:update": "rule:admin_or_owner", "members:create": "rule:admin_or_owner", diff -Nru mistral-4.0.0/mistral/tests/unit/hacking/test_checks.py mistral-5.0.0~b2/mistral/tests/unit/hacking/test_checks.py --- mistral-4.0.0/mistral/tests/unit/hacking/test_checks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/hacking/test_checks.py 2017-06-09 12:48:26.000000000 +0000 @@ 
-38,27 +38,27 @@ # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) - def run_check(self, code): - pep8.register_check(self.get_checker()) + def run_check(self, code, checker, filename=None): + pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) - checker = pep8.Checker(lines=lines) - checker.check_all() + checker = pep8.Checker(filename=filename, lines=lines) + with mock.patch('pep8.StandardReport.get_file_results'): + checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print - def assert_has_errors(self, code, expected_errors=None): + def _assert_has_errors(self, code, checker, expected_errors=None, + filename=None): # Pull out the parts of the error that we'll match against. - actual_errors = (e[:3] for e in self.run_check(code)) - - # Adjust line numbers to make the fixture data more readable. - import_lines = len(self.code_ex.shared_imports.split('\n')) - 1 - actual_errors = [(e[0] - import_lines, e[1], e[2]) - for e in actual_errors] - + actual_errors = [e[:3] for e in + self.run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) + def _assert_has_no_errors(self, code, checker, filename=None): + self._assert_has_errors(code, checker, filename=filename) + def test_assert_equal_none(self): self.assertEqual(len(list(checks.assert_equal_none( "self.assertEqual(A, None)"))), 1) @@ -69,6 +69,40 @@ self.assertEqual( len(list(checks.assert_equal_none("self.assertIsNone()"))), 0) + def test_no_assert_equal_true_false(self): + code = """ + self.assertEqual(context_is_admin, True) + self.assertEqual(context_is_admin, False) + self.assertEqual(True, context_is_admin) + self.assertEqual(False, context_is_admin) + self.assertNotEqual(context_is_admin, True) + self.assertNotEqual(context_is_admin, False) + self.assertNotEqual(True, context_is_admin) + self.assertNotEqual(False, context_is_admin) + """ + errors = [(1, 0, 
'M319'), (2, 0, 'M319'), (3, 0, 'M319'), + (4, 0, 'M319'), (5, 0, 'M319'), (6, 0, 'M319'), + (7, 0, 'M319'), (8, 0, 'M319')] + self._assert_has_errors(code, checks.no_assert_equal_true_false, + expected_errors=errors) + code = """ + self.assertEqual(context_is_admin, stuff) + self.assertNotEqual(context_is_admin, stuff) + """ + self._assert_has_no_errors(code, checks.no_assert_equal_true_false) + + def test_no_assert_true_false_is_not(self): + code = """ + self.assertTrue(test is None) + self.assertTrue(False is my_variable) + self.assertFalse(None is test) + self.assertFalse(my_variable is False) + """ + errors = [(1, 0, 'M320'), (2, 0, 'M320'), (3, 0, 'M320'), + (4, 0, 'M320')] + self._assert_has_errors(code, checks.no_assert_true_false_is_not, + expected_errors=errors) + def test_check_python3_xrange(self): func = checks.check_python3_xrange self.assertEqual(1, len(list(func('for i in xrange(10)')))) @@ -76,6 +110,27 @@ self.assertEqual(0, len(list(func('for i in range(10)')))) self.assertEqual(0, len(list(func('for i in six.moves.range(10)')))) + def test_dict_iteritems(self): + self.assertEqual(1, len(list(checks.check_python3_no_iteritems( + "obj.iteritems()")))) + + self.assertEqual(0, len(list(checks.check_python3_no_iteritems( + "six.iteritems(ob))")))) + + def test_dict_iterkeys(self): + self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( + "obj.iterkeys()")))) + + self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( + "six.iterkeys(ob))")))) + + def test_dict_itervalues(self): + self.assertEqual(1, len(list(checks.check_python3_no_itervalues( + "obj.itervalues()")))) + + self.assertEqual(0, len(list(checks.check_python3_no_itervalues( + "six.itervalues(ob))")))) + class TestLoggingWithWarn(BaseLoggingCheckTest): @@ -84,4 +139,5 @@ code = self.code_ex.shared_imports + data['code'] errors = data['expected_errors'] - self.assert_has_errors(code, expected_errors=errors) + self._assert_has_errors(code, checks.CheckForLoggingIssues, + 
expected_errors=errors) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/test_spec_caching.py mistral-5.0.0~b2/mistral/tests/unit/lang/test_spec_caching.py --- mistral-4.0.0/mistral/tests/unit/lang/test_spec_caching.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/test_spec_caching.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,238 @@ +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mistral.db.v2 import api as db_api +from mistral.lang import parser as spec_parser +from mistral.services import workbooks as wb_service +from mistral.services import workflows as wf_service +from mistral.tests.unit import base +from mistral.workflow import states + + +class SpecificationCachingTest(base.DbTestCase): + def test_workflow_spec_caching(self): + wf_text = """ + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output="Echo" + """ + + wfs = wf_service.create_workflows(wf_text) + + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wfs[0].id, + wfs[0].updated_at + ) + + self.assertIsNotNone(wf_spec) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + def test_workflow_spec_cache_update_via_workflow_service(self): + wf_text = """ + version: '2.0' + + 
wf: + tasks: + task1: + action: std.echo output="Echo" + """ + + wfs = wf_service.create_workflows(wf_text) + + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wfs[0].id, + wfs[0].updated_at + ) + + self.assertEqual(1, len(wf_spec.get_tasks())) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + # Now update workflow definition and check that cache is updated too. + + wf_text = """ + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output="1" + + task2: + action: std.echo output="2" + """ + + wfs = wf_service.update_workflows(wf_text) + + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wfs[0].id, + wfs[0].updated_at + ) + + self.assertEqual(2, len(wf_spec.get_tasks())) + self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + + def test_workflow_spec_cache_update_via_workbook_service(self): + wb_text = """ + version: '2.0' + + name: wb + + workflows: + wf: + tasks: + task1: + action: std.echo output="Echo" + """ + + wb_service.create_workbook_v2(wb_text) + + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) + + wf = db_api.get_workflow_definition('wb.wf') + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wf.id, + wf.updated_at + ) + + self.assertEqual(1, len(wf_spec.get_tasks())) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + # Now update workflow definition and check that cache is updated too. 
+ + wb_text = """ + version: '2.0' + + name: wb + + workflows: + wf: + tasks: + task1: + action: std.echo output="1" + + task2: + action: std.echo output="2" + """ + + wb_service.update_workbook_v2(wb_text) + + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + wf = db_api.get_workflow_definition(wf.id) + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wf.id, + wf.updated_at + ) + + self.assertEqual(2, len(wf_spec.get_tasks())) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) + + def test_cache_workflow_spec_by_execution_id(self): + wf_text = """ + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output="Echo" + """ + + wfs = wf_service.create_workflows(wf_text) + + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) + + wf_def = wfs[0] + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wf_def.id, + wf_def.updated_at + ) + + self.assertEqual(1, len(wf_spec.get_tasks())) + self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + wf_ex = db_api.create_workflow_execution({ + 'id': '1-2-3-4', + 'name': 'wf', + 'workflow_id': wf_def.id, + 'spec': wf_spec.to_dict(), + 'state': states.RUNNING + }) + + # Check that we can get a valid spec by execution id. + + wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id( + wf_ex.id + ) + + self.assertEqual(1, len(wf_spec_by_exec_id.get_tasks())) + + # Now update workflow definition and check that cache is updated too. 
+ + wf_text = """ + version: '2.0' + + wf: + tasks: + task1: + action: std.echo output="1" + + task2: + action: std.echo output="2" + """ + + wfs = wf_service.update_workflows(wf_text) + + self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) + + wf_spec = spec_parser.get_workflow_spec_by_definition_id( + wfs[0].id, + wfs[0].updated_at + ) + + self.assertEqual(2, len(wf_spec.get_tasks())) + self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) + self.assertEqual(1, spec_parser.get_wf_execution_spec_cache_size()) + + # Now finally update execution cache and check that we can + # get a valid spec by execution id. + spec_parser.cache_workflow_spec_by_execution_id(wf_ex.id, wf_spec) + + wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id( + wf_ex.id + ) + + self.assertEqual(2, len(wf_spec_by_exec_id.get_tasks())) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/v2/base.py mistral-5.0.0~b2/mistral/tests/unit/lang/v2/base.py --- mistral-4.0.0/mistral/tests/unit/lang/v2/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/v2/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,118 @@ +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +import yaml + +from mistral import exceptions as exc +from mistral.lang import parser as spec_parser +from mistral.tests.unit import base +from mistral import utils + + +class WorkflowSpecValidationTestCase(base.BaseTest): + + def __init__(self, *args, **kwargs): + super(WorkflowSpecValidationTestCase, self).__init__(*args, **kwargs) + + # The relative resource path is ./mistral/tests/resources/workbook/v2. + self._resource_path = 'workbook/v2' + + self._spec_parser = spec_parser.get_workflow_list_spec_from_yaml + + self._dsl_blank = { + 'version': '2.0', + 'test': { + 'type': 'direct' + } + } + + self._dsl_tasks = { + 'get': { + 'action': 'std.http', + 'input': { + 'url': 'http://www.openstack.org' + } + }, + 'echo': { + 'action': 'std.echo', + 'input': { + 'output': 'This is a test.' + } + }, + 'email': { + 'action': 'std.email', + 'input': { + 'from_addr': 'mistral@example.com', + 'to_addrs': ['admin@example.com'], + 'subject': 'Test', + 'body': 'This is a test.', + 'smtp_server': 'localhost', + 'smtp_password': 'password' + } + } + } + + def _parse_dsl_spec(self, dsl_file=None, add_tasks=False, + changes=None, expect_error=False): + if dsl_file and add_tasks: + raise Exception('The add_tasks option is not a valid ' + 'combination with the dsl_file option.') + + if dsl_file: + dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file) + + if changes: + dsl_dict = yaml.safe_load(dsl_yaml) + utils.merge_dicts(dsl_dict, changes) + dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False) + else: + dsl_dict = copy.deepcopy(self._dsl_blank) + + if add_tasks: + dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks) + + if changes: + utils.merge_dicts(dsl_dict, changes) + + dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False) + + if not expect_error: + return self._spec_parser(dsl_yaml) + else: + return self.assertRaises( + exc.DSLParsingException, + self._spec_parser, + dsl_yaml + ) + + +class 
WorkbookSpecValidationTestCase(WorkflowSpecValidationTestCase): + + def __init__(self, *args, **kwargs): + super(WorkbookSpecValidationTestCase, self).__init__(*args, **kwargs) + + self._spec_parser = spec_parser.get_workbook_spec_from_yaml + + self._dsl_blank = { + 'version': '2.0', + 'name': 'test_wb' + } + + def _parse_dsl_spec(self, dsl_file=None, add_tasks=False, + changes=None, expect_error=False): + return super(WorkbookSpecValidationTestCase, self)._parse_dsl_spec( + dsl_file=dsl_file, add_tasks=False, changes=changes, + expect_error=expect_error) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/v2/test_actions.py mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_actions.py --- mistral-4.0.0/mistral/tests/unit/lang/v2/test_actions.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,126 @@ +# Copyright 2015 - StackStorm, Inc. +# Copyright 2016 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +from mistral.tests.unit.lang.v2 import base +from mistral import utils + + +class ActionSpecValidation(base.WorkbookSpecValidationTestCase): + + def test_base_required(self): + actions = {'actions': {'a1': {}}} + + exception = self._parse_dsl_spec(changes=actions, expect_error=True) + + self.assertIn("'base' is a required property", exception.message) + + def test_base(self): + tests = [ + ({'actions': {'a1': {'base': ''}}}, True), + ({'actions': {'a1': {'base': None}}}, True), + ({'actions': {'a1': {'base': 12345}}}, True), + ({'actions': {'a1': {'base': 'std.noop'}}}, False), + ({'actions': {'a1': {'base': 'std.echo output="foo"'}}}, False), + ({'actions': {'a1': {'base': 'std.echo output="<% $.x %>"'}}}, + False), + ({'actions': {'a1': {'base': 'std.echo output="<% * %>"'}}}, True), + ({'actions': {'a1': {'base': 'std.echo output="{{ _.x }}"'}}}, + False), + ({'actions': {'a1': {'base': 'std.echo output="{{ * }}"'}}}, True) + ] + + for actions, expect_error in tests: + self._parse_dsl_spec(changes=actions, expect_error=expect_error) + + def test_base_input(self): + tests = [ + ({'base-input': {}}, True), + ({'base-input': None}, True), + ({'base-input': {'k1': 'v1', 'k2': '<% $.v2 %>'}}, False), + ({'base-input': {'k1': 'v1', 'k2': '<% * %>'}}, True), + ({'base-input': {'k1': 'v1', 'k2': '{{ _.v2 }}'}}, False), + ({'base-input': {'k1': 'v1', 'k2': '{{ * }}'}}, True) + ] + + actions = { + 'a1': { + 'base': 'foobar' + } + } + + for base_inputs, expect_error in tests: + overlay = {'actions': copy.deepcopy(actions)} + + utils.merge_dicts(overlay['actions']['a1'], base_inputs) + + self._parse_dsl_spec(changes=overlay, expect_error=expect_error) + + def test_input(self): + tests = [ + ({'input': ''}, True), + ({'input': []}, True), + ({'input': ['']}, True), + ({'input': None}, True), + ({'input': ['k1', 'k2']}, False), + ({'input': ['k1', 12345]}, True), + ({'input': ['k1', {'k2': 2}]}, False), + ({'input': [{'k1': 1}, {'k2': 2}]}, False), + 
({'input': [{'k1': None}]}, False), + ({'input': [{'k1': 1}, {'k1': 1}]}, True), + ({'input': [{'k1': 1, 'k2': 2}]}, True) + ] + + actions = { + 'a1': { + 'base': 'foobar' + } + } + + for inputs, expect_error in tests: + overlay = {'actions': copy.deepcopy(actions)} + + utils.merge_dicts(overlay['actions']['a1'], inputs) + + self._parse_dsl_spec(changes=overlay, expect_error=expect_error) + + def test_output(self): + tests = [ + ({'output': None}, False), + ({'output': False}, False), + ({'output': 12345}, False), + ({'output': 0.12345}, False), + ({'output': 'foobar'}, False), + ({'output': '<% $.x %>'}, False), + ({'output': '<% * %>'}, True), + ({'output': '{{ _.x }}'}, False), + ({'output': '{{ * }}'}, True), + ({'output': ['v1']}, False), + ({'output': {'k1': 'v1'}}, False) + ] + + actions = { + 'a1': { + 'base': 'foobar' + } + } + + for outputs, expect_error in tests: + overlay = {'actions': copy.deepcopy(actions)} + + utils.merge_dicts(overlay['actions']['a1'], outputs) + + self._parse_dsl_spec(changes=overlay, expect_error=expect_error) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/v2/test_tasks.py mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_tasks.py --- mistral-4.0.0/mistral/tests/unit/lang/v2/test_tasks.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_tasks.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,715 @@ +# Copyright 2015 - Huawei Technologies Co. Ltd +# Copyright 2015 - StackStorm, Inc. +# Copyright 2016 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from mistral.lang.v2 import workflows +from mistral.tests.unit.lang.v2 import base as v2_base +from mistral import utils + + +class TaskSpecValidation(v2_base.WorkflowSpecValidationTestCase): + def test_type_injection(self): + tests = [ + ({'type': 'direct'}, False), + ({'type': 'reverse'}, False) + ] + + for wf_type, expect_error in tests: + overlay = {'test': wf_type} + wfs_spec = self._parse_dsl_spec(add_tasks=True, + changes=overlay, + expect_error=expect_error) + + if not expect_error: + self.assertIsInstance(wfs_spec, workflows.WorkflowListSpec) + self.assertEqual(1, len(wfs_spec.get_workflows())) + + wf_spec = wfs_spec.get_workflows()[0] + + self.assertEqual(wf_type['type'], wf_spec.get_type()) + + for task in wf_spec.get_tasks(): + self.assertEqual(task._data['type'], wf_type['type']) + + def test_action_or_workflow(self): + tests = [ + ({'action': 'std.noop'}, False), + ({'action': 'std.http url="openstack.org"'}, False), + ({'action': 'std.http url="openstack.org" timeout=10'}, False), + ({'action': 'std.http url=<% $.url %>'}, False), + ({'action': 'std.http url=<% $.url %> timeout=<% $.t %>'}, False), + ({'action': 'std.http url=<% * %>'}, True), + ({'action': 'std.http url={{ _.url }}'}, False), + ({'action': 'std.http url={{ _.url }} timeout={{ _.t }}'}, False), + ({'action': 'std.http url={{ $ }}'}, True), + ({'workflow': 'test.wf'}, False), + ({'workflow': 'test.wf k1="v1"'}, False), + ({'workflow': 'test.wf k1="v1" k2="v2"'}, False), + ({'workflow': 'test.wf k1=<% $.v1 %>'}, False), + ({'workflow': 'test.wf k1=<% $.v1 %> k2=<% $.v2 %>'}, False), + ({'workflow': 'test.wf k1=<% * %>'}, True), + ({'workflow': 'test.wf k1={{ _.v1 }}'}, False), + ({'workflow': 'test.wf k1={{ _.v1 }} k2={{ _.v2 }}'}, False), + ({'workflow': 'test.wf k1={{ $ }}'}, True), + ({'action': 'std.noop', 'workflow': 'test.wf'}, True), + ({'action': 123}, True), + 
({'workflow': 123}, True), + ({'action': ''}, True), + ({'workflow': ''}, True), + ({'action': None}, True), + ({'workflow': None}, True) + ] + + for task, expect_error in tests: + overlay = {'test': {'tasks': {'task1': task}}} + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=expect_error + ) + + def test_inputs(self): + tests = [ + ({'input': ''}, True), + ({'input': {}}, True), + ({'input': None}, True), + ({'input': {'k1': 'v1'}}, False), + ({'input': {'k1': '<% $.v1 %>'}}, False), + ({'input': {'k1': '<% 1 + 2 %>'}}, False), + ({'input': {'k1': '<% * %>'}}, True), + ({'input': {'k1': '{{ _.v1 }}'}}, False), + ({'input': {'k1': '{{ 1 + 2 }}'}}, False), + ({'input': {'k1': '{{ * }}'}}, True) + ] + + for task_input, expect_error in tests: + overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}} + + utils.merge_dicts(overlay['test']['tasks']['task1'], task_input) + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=expect_error + ) + + def test_with_items(self): + tests = [ + ({'with-items': ''}, True), + ({'with-items': []}, True), + ({'with-items': ['']}, True), + ({'with-items': None}, True), + ({'with-items': 12345}, True), + ({'with-items': 'x in y'}, True), + ({'with-items': '<% $.y %>'}, True), + ({'with-items': 'x in <% $.y %>'}, False), + ({'with-items': ['x in [1, 2, 3]']}, False), + ({'with-items': ['x in <% $.y %>']}, False), + ({'with-items': ['x in <% $.y %>', 'i in [1, 2, 3]']}, False), + ({'with-items': ['x in <% $.y %>', 'i in <% $.j %>']}, False), + ({'with-items': ['x in <% * %>']}, True), + ({'with-items': ['x in <% $.y %>', 'i in <% * %>']}, True), + ({'with-items': '{{ _.y }}'}, True), + ({'with-items': 'x in {{ _.y }}'}, False), + ({'with-items': ['x in [1, 2, 3]']}, False), + ({'with-items': ['x in {{ _.y }}']}, False), + ({'with-items': ['x in {{ _.y }}', 'i in [1, 2, 3]']}, False), + ({'with-items': ['x in {{ _.y }}', 'i in {{ _.j }}']}, False), + ({'with-items': ['x in {{ 
* }}']}, True), + ({'with-items': ['x in {{ _.y }}', 'i in {{ * }}']}, True) + ] + + for with_item, expect_error in tests: + overlay = {'test': {'tasks': {'get': with_item}}} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_publish(self): + tests = [ + ({'publish': ''}, True), + ({'publish': {}}, True), + ({'publish': None}, True), + ({'publish': {'k1': 'v1'}}, False), + ({'publish': {'k1': '<% $.v1 %>'}}, False), + ({'publish': {'k1': '<% 1 + 2 %>'}}, False), + ({'publish': {'k1': '<% * %>'}}, True), + ({'publish': {'k1': '{{ _.v1 }}'}}, False), + ({'publish': {'k1': '{{ 1 + 2 }}'}}, False), + ({'publish': {'k1': '{{ * }}'}}, True) + ] + + for output, expect_error in tests: + overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}} + + utils.merge_dicts(overlay['test']['tasks']['task1'], output) + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=expect_error + ) + + def test_publish_on_error(self): + tests = [ + ({'publish-on-error': ''}, True), + ({'publish-on-error': {}}, True), + ({'publish-on-error': None}, True), + ({'publish-on-error': {'k1': 'v1'}}, False), + ({'publish-on-error': {'k1': '<% $.v1 %>'}}, False), + ({'publish-on-error': {'k1': '<% 1 + 2 %>'}}, False), + ({'publish-on-error': {'k1': '<% * %>'}}, True), + ({'publish-on-error': {'k1': '{{ _.v1 }}'}}, False), + ({'publish-on-error': {'k1': '{{ 1 + 2 }}'}}, False), + ({'publish-on-error': {'k1': '{{ * }}'}}, True) + ] + + for output, expect_error in tests: + overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}} + + utils.merge_dicts(overlay['test']['tasks']['task1'], output) + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=expect_error + ) + + def test_policies(self): + tests = [ + ({'retry': {'count': 3, 'delay': 1}}, False), + ({'retry': { + 'continue-on': '<% 1 %>', 'delay': 2, + 'break-on': '<% 1 %>', 'count': 2 + }}, False), + ({'retry': { + 'count': 3, 
'delay': 1, 'continue-on': '<% 1 %>' + }}, False), + ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False), + ({'retry': {'count': '<% * %>', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False), + ({'retry': {'count': 3, 'delay': '<% * %>'}}, True), + ({'retry': { + 'continue-on': '{{ 1 }}', 'delay': 2, + 'break-on': '{{ 1 }}', 'count': 2 + }}, False), + ({'retry': { + 'count': 3, 'delay': 1, 'continue-on': '{{ 1 }}' + }}, False), + ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False), + ({'retry': {'count': '{{ * }}', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False), + ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True), + ({'retry': {'count': -3, 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': -1}}, True), + ({'retry': {'count': '3', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '1'}}, True), + ({'retry': 'count=3 delay=1 break-on=<% false %>'}, False), + ({'retry': 'count=3 delay=1 break-on={{ false }}'}, False), + ({'retry': 'count=3 delay=1'}, False), + ({'retry': 'coun=3 delay=1'}, True), + ({'retry': None}, True), + ({'wait-before': 1}, False), + ({'wait-before': '<% 1 %>'}, False), + ({'wait-before': '<% * %>'}, True), + ({'wait-before': '{{ 1 }}'}, False), + ({'wait-before': '{{ * }}'}, True), + ({'wait-before': -1}, True), + ({'wait-before': 1.0}, True), + ({'wait-before': '1'}, True), + ({'wait-after': 1}, False), + ({'wait-after': '<% 1 %>'}, False), + ({'wait-after': '<% * %>'}, True), + ({'wait-after': '{{ 1 }}'}, False), + ({'wait-after': '{{ * }}'}, True), + ({'wait-after': -1}, True), + ({'wait-after': 1.0}, True), + ({'wait-after': '1'}, True), + ({'timeout': 300}, False), + ({'timeout': '<% 300 %>'}, False), + ({'timeout': '<% * %>'}, True), + ({'timeout': '{{ 300 }}'}, False), + ({'timeout': '{{ * }}'}, True), + ({'timeout': -300}, True), + ({'timeout': 300.0}, True), + ({'timeout': '300'}, True), + ({'pause-before': False}, False), + ({'pause-before': '<% False %>'}, False), + 
({'pause-before': '<% * %>'}, True), + ({'pause-before': '{{ False }}'}, False), + ({'pause-before': '{{ * }}'}, True), + ({'pause-before': 'False'}, True), + ({'concurrency': 10}, False), + ({'concurrency': '<% 10 %>'}, False), + ({'concurrency': '<% * %>'}, True), + ({'concurrency': '{{ 10 }}'}, False), + ({'concurrency': '{{ * }}'}, True), + ({'concurrency': -10}, True), + ({'concurrency': 10.0}, True), + ({'concurrency': '10'}, True) + ] + + for policy, expect_error in tests: + overlay = {'test': {'tasks': {'get': policy}}} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_direct_transition(self): + tests = [ + ({'on-success': ['email']}, False), + ({'on-success': [{'email': '<% 1 %>'}]}, False), + ({'on-success': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False), + ({'on-success': [{'email': '<% * %>'}]}, True), + ({'on-success': [{'email': '{{ 1 }}'}]}, False), + ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-success': [{'email': '{{ * }}'}]}, True), + ({'on-success': 'email'}, False), + ({'on-success': None}, True), + ({'on-success': ['']}, True), + ({'on-success': []}, True), + ({'on-success': ['email', 'email']}, True), + ({'on-success': ['email', 12345]}, True), + ({'on-error': ['email']}, False), + ({'on-error': [{'email': '<% 1 %>'}]}, False), + ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False), + ({'on-error': [{'email': '<% * %>'}]}, True), + ({'on-error': [{'email': '{{ 1 }}'}]}, False), + ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-error': [{'email': '{{ * }}'}]}, True), + ({'on-error': 'email'}, False), + ({'on-error': None}, True), + ({'on-error': ['']}, True), + ({'on-error': []}, True), + ({'on-error': ['email', 
'email']}, True), + ({'on-error': ['email', 12345]}, True), + ({'on-complete': ['email']}, False), + ({'on-complete': [{'email': '<% 1 %>'}]}, False), + ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, False), + ({'on-complete': [{'email': '<% * %>'}]}, True), + ({'on-complete': [{'email': '{{ 1 }}'}]}, False), + ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-complete': [{'email': '{{ * }}'}]}, True), + ({'on-complete': 'email'}, False), + ({'on-complete': None}, True), + ({'on-complete': ['']}, True), + ({'on-complete': []}, True), + ({'on-complete': ['email', 'email']}, True), + ({'on-complete': ['email', 12345]}, True) + ] + + for transition, expect_error in tests: + overlay = {'test': {'tasks': {}}} + + utils.merge_dicts(overlay['test']['tasks'], {'get': transition}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_direct_transition_advanced_schema(self): + tests = [ + ({'on-success': {'publish': {'var1': 1234}}}, True), + ({'on-success': {'publish': {'branch': {'var1': 1234}}}}, False), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + False + ), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': '<% * %>'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + True + ), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': 'email' + } + }, + False + ), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': ['email'] + } + }, + False + ), + ( + { + 'on-success': { + 
'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% 1 %>'}] + } + }, + False + ), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% $.v1 and $.v2 %>'}] + } + }, + False + ), + ( + { + 'on-success': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% * %>'}] + } + }, + True + ), + ({'on-success': {'next': [{'email': '<% $.v1 %>'}]}}, False), + ({'on-success': {'next': 'email'}}, False), + ({'on-success': {'next': ['email']}}, False), + ({'on-success': {'next': [{'email': 'email'}]}}, True), + ({'on-error': {'publish': {'var1': 1234}}}, True), + ({'on-error': {'publish': {'branch': {'var1': 1234}}}}, False), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + False + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': '<% * %>'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + True + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': 'email' + } + }, + False + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': ['email'] + } + }, + False + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% 1 %>'}] + } + }, + False + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': 
{'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% $.v1 and $.v2 %>'}] + } + }, + False + ), + ( + { + 'on-error': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% * %>'}] + } + }, + True + ), + ({'on-error': {'next': [{'email': '<% $.v1 %>'}]}}, False), + ({'on-error': {'next': 'email'}}, False), + ({'on-error': {'next': ['email']}}, False), + ({'on-error': {'next': [{'email': 'email'}]}}, True), + ({'on-complete': {'publish': {'var1': 1234}}}, True), + ({'on-complete': {'publish': {'branch': {'var1': 1234}}}}, False), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + False + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': '<% * %>'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + } + } + }, + True + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': 'email' + } + }, + False + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': ['email'] + } + }, + False + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% 1 %>'}] + } + }, + False + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': {'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% $.v1 and $.v2 %>'}] + } + }, + False + ), + ( + { + 'on-complete': { + 'publish': { + 'branch': {'var1': 1234}, + 'global': {'global_var1': 'val'}, + 'atomic': 
{'atomic_var1': '<% my_func() %>'} + }, + 'next': [{'email': '<% * %>'}] + } + }, + True + ), + ({'on-complete': {'next': [{'email': '<% $.v1 %>'}]}}, False), + ({'on-complete': {'next': 'email'}}, False), + ({'on-complete': {'next': ['email']}}, False), + ({'on-complete': {'next': [{'email': 'email'}]}}, True) + ] + + for transition, expect_error in tests: + overlay = {'test': {'tasks': {}}} + + utils.merge_dicts(overlay['test']['tasks'], {'get': transition}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_join(self): + tests = [ + ({'join': ''}, True), + ({'join': None}, True), + ({'join': 'all'}, False), + ({'join': 'one'}, False), + ({'join': 0}, False), + ({'join': 2}, False), + ({'join': 3}, True), + ({'join': '3'}, True), + ({'join': -3}, True) + ] + + on_success = {'on-success': ['email']} + + for join, expect_error in tests: + overlay = {'test': {'tasks': {}}} + + utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'email': join}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_requires(self): + tests = [ + ({'requires': ''}, True), + ({'requires': []}, True), + ({'requires': ['']}, True), + ({'requires': None}, True), + ({'requires': 12345}, True), + ({'requires': ['echo']}, False), + ({'requires': ['echo', 'get']}, False), + ({'requires': 'echo'}, False), + ] + + for require, expect_error in tests: + overlay = {'test': {'tasks': {}}} + + utils.merge_dicts(overlay['test'], {'type': 'reverse'}) + utils.merge_dicts(overlay['test']['tasks'], {'email': require}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_keep_result(self): + tests = [ + ({'keep-result': ''}, True), + ({'keep-result': []}, True), + ({'keep-result': 'asd'}, True), + ({'keep-result': 
None}, True), + ({'keep-result': 12345}, True), + ({'keep-result': True}, False), + ({'keep-result': False}, False), + ({'keep-result': "<% 'a' in $.val %>"}, False), + ({'keep-result': '<% 1 + 2 %>'}, False), + ({'keep-result': '<% * %>'}, True), + ({'keep-result': "{{ 'a' in _.val }}"}, False), + ({'keep-result': '{{ 1 + 2 }}'}, False), + ({'keep-result': '{{ * }}'}, True) + ] + + for keep_result, expect_error in tests: + overlay = {'test': {'tasks': {}}} + + utils.merge_dicts(overlay['test']['tasks'], {'email': keep_result}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/v2/test_workbook.py mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_workbook.py --- mistral-4.0.0/mistral/tests/unit/lang/v2/test_workbook.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_workbook.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,447 @@ +# Copyright 2013 - Mirantis, Inc. +# Copyright 2015 - StackStorm, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import re + +import yaml + +from mistral import exceptions as exc +from mistral.lang.v2 import workbook +from mistral.tests.unit.lang.v2 import base + + +class WorkbookSpecValidation(base.WorkbookSpecValidationTestCase): + + def test_build_valid_workbook_spec(self): + wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml') + + # Workbook. 
+ act_specs = wb_spec.get_actions() + wf_specs = wb_spec.get_workflows() + + self.assertEqual('2.0', wb_spec.get_version()) + self.assertEqual('my_workbook', wb_spec.get_name()) + self.assertEqual('This is a test workbook', wb_spec.get_description()) + self.assertListEqual(['test', 'v2'], wb_spec.get_tags()) + self.assertIsNotNone(act_specs) + self.assertIsNotNone(wf_specs) + + # Actions. + action_spec = act_specs.get('action1') + + self.assertIsNotNone(action_spec) + self.assertEqual('2.0', action_spec.get_version()) + self.assertEqual('action1', action_spec.get_name()) + self.assertEqual( + 'This is a test ad-hoc action', + action_spec.get_description() + ) + self.assertListEqual(['test', 'v2'], action_spec.get_tags()) + self.assertEqual('std.echo', action_spec.get_base()) + self.assertDictEqual( + {'output': 'Hello <% $.name %>!'}, + action_spec.get_base_input() + ) + self.assertDictEqual({}, action_spec.get_input()) + self.assertEqual('<% $ %>', action_spec.get_output()) + + # Workflows. + + self.assertEqual(2, len(wf_specs)) + + wf1_spec = wf_specs.get('wf1') + + self.assertEqual('2.0', wf1_spec.get_version()) + self.assertEqual('wf1', wf1_spec.get_name()) + self.assertEqual( + 'This is a test workflow', + wf1_spec.get_description() + ) + self.assertListEqual(['test', 'v2'], wf1_spec.get_tags()) + self.assertEqual('reverse', wf1_spec.get_type()) + self.assertEqual(2, len(wf1_spec.get_tasks())) + + # Tasks. 
+ + task1_spec = wf1_spec.get_tasks().get('task1') + + self.assertIsNotNone(task1_spec) + self.assertEqual('2.0', task1_spec.get_version()) + self.assertEqual('task1', task1_spec.get_name()) + self.assertEqual('This is a test task', task1_spec.get_description()) + self.assertEqual('action1', task1_spec.get_action_name()) + self.assertEqual({'name': '<% $.name %>'}, task1_spec.get_input()) + + policies = task1_spec.get_policies() + + self.assertEqual(2, policies.get_wait_before()) + self.assertEqual(5, policies.get_wait_after()) + self.assertEqual(3, policies.get_concurrency()) + + retry_spec = policies.get_retry() + + self.assertEqual(10, retry_spec.get_count()) + self.assertEqual(30, retry_spec.get_delay()) + self.assertEqual('<% $.my_val = 10 %>', retry_spec.get_break_on()) + + task2_spec = wf1_spec.get_tasks().get('task2') + + self.assertIsNotNone(task2_spec) + self.assertEqual('2.0', task2_spec.get_version()) + self.assertEqual('task2', task2_spec.get_name()) + self.assertEqual('std.echo', task2_spec.get_action_name()) + self.assertIsNone(task2_spec.get_workflow_name()) + self.assertEqual( + {'output': 'Thanks <% $.name %>!'}, + task2_spec.get_input() + ) + + wf2_spec = wf_specs.get('wf2') + + self.assertEqual('2.0', wf2_spec.get_version()) + self.assertEqual('wf2', wf2_spec.get_name()) + self.assertListEqual(['test', 'v2'], wf2_spec.get_tags()) + self.assertEqual('direct', wf2_spec.get_type()) + self.assertEqual(11, len(wf2_spec.get_tasks())) + + task_defaults_spec = wf2_spec.get_task_defaults() + + self.assertListEqual( + [('fail', '<% $.my_val = 0 %>', {})], + task_defaults_spec.get_on_error().get_next() + ) + self.assertListEqual( + [('pause', '', {})], + task_defaults_spec.get_on_success().get_next() + ) + self.assertListEqual( + [('succeed', '', {})], + task_defaults_spec.get_on_complete().get_next() + ) + + task3_spec = wf2_spec.get_tasks().get('task3') + + self.assertIsNotNone(task3_spec) + self.assertEqual('2.0', task3_spec.get_version()) + 
self.assertEqual('task3', task3_spec.get_name()) + self.assertIsNone(task3_spec.get_action_name()) + self.assertEqual('wf1', task3_spec.get_workflow_name()) + self.assertEqual( + { + 'name': 'John Doe', + 'age': 32, + 'param1': None, + 'param2': False + }, + task3_spec.get_input() + ) + self.assertListEqual( + [('task4', '<% $.my_val = 1 %>', {})], + task3_spec.get_on_error().get_next() + ) + self.assertListEqual( + [('task5', '<% $.my_val = 2 %>', {})], + task3_spec.get_on_success().get_next() + ) + self.assertListEqual( + [('task6', '<% $.my_val = 3 %>', {})], + task3_spec.get_on_complete().get_next() + ) + + task7_spec = wf2_spec.get_tasks().get('task7') + + self.assertEqual( + { + 'is_true': True, + 'object_list': [1, None, 'str'], + 'is_string': '50' + }, + task7_spec.get_input() + ) + + self.assertEqual( + {'vm_info': '<% $.vms %>'}, + task7_spec.get_with_items() + ) + + task8_spec = wf2_spec.get_tasks().get('task8') + + self.assertEqual( + { + 'itemX': '<% $.arrayI %>', + "itemY": '<% $.arrayJ %>' + }, + task8_spec.get_with_items() + ) + + self.assertEqual( + { + 'expr_list': ['<% $.v %>', '<% $.k %>'], + 'expr': '<% $.value %>', + }, + task8_spec.get_input() + ) + + self.assertEqual('nova', task8_spec.get_target()) + + task9_spec = wf2_spec.get_tasks().get('task9') + + self.assertEqual('all', task9_spec.get_join()) + + task10_spec = wf2_spec.get_tasks().get('task10') + + self.assertEqual(2, task10_spec.get_join()) + + task11_spec = wf2_spec.get_tasks().get('task11') + + self.assertEqual('one', task11_spec.get_join()) + + task12_spec = wf2_spec.get_tasks().get('task12') + + self.assertDictEqual( + { + 'url': 'http://site.com?q=<% $.query %>', + 'params': '' + }, + task12_spec.get_input() + ) + + task13_spec = wf2_spec.get_tasks().get('task13') + + self.assertEqual('std.noop', task13_spec.get_action_name()) + self.assertEqual('No-op task', task13_spec.get_description()) + + def test_adhoc_action_with_base_in_one_string(self): + wb_spec = 
self._parse_dsl_spec(dsl_file='my_workbook.yaml') + + act_specs = wb_spec.get_actions() + action_spec = act_specs.get("action2") + + self.assertEqual('std.echo', action_spec.get_base()) + self.assertEqual( + {'output': 'Echo output'}, + action_spec.get_base_input() + ) + + def test_spec_to_dict(self): + wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml') + + d = wb_spec.to_dict() + + self.assertEqual('2.0', d['version']) + self.assertEqual('2.0', d['workflows']['version']) + self.assertEqual('2.0', d['workflows']['wf1']['version']) + + def test_version_required(self): + dsl_dict = copy.deepcopy(self._dsl_blank) + dsl_dict.pop('version', None) + + # TODO(m4dcoder): Check required property error when v1 is deprecated. + # The version property is not required for v1 workbook whereas it is + # a required property in v2. For backward compatibility, if no version + # is not provided, the workbook spec parser defaults to v1 and the + # required property exception is not triggered. However, a different + # spec validation error returns due to drastically different schema + # between workbook versions. 
+ self.assertRaises( + exc.DSLParsingException, + self._spec_parser, + yaml.safe_dump(dsl_dict) + ) + + def test_version(self): + tests = [ + ({'version': None}, True), + ({'version': ''}, True), + ({'version': '1.0'}, True), + ({'version': '2.0'}, False), + ({'version': 2.0}, False), + ({'version': 2}, False) + ] + + for version, expect_error in tests: + self._parse_dsl_spec(changes=version, expect_error=expect_error) + + def test_name_required(self): + dsl_dict = copy.deepcopy(self._dsl_blank) + dsl_dict.pop('name', None) + + exception = self.assertRaises( + exc.DSLParsingException, + self._spec_parser, + yaml.safe_dump(dsl_dict) + ) + + self.assertIn("'name' is a required property", exception.message) + + def test_name(self): + tests = [ + ({'name': ''}, True), + ({'name': None}, True), + ({'name': 12345}, True), + ({'name': 'foobar'}, False) + ] + + for name, expect_error in tests: + self._parse_dsl_spec(changes=name, expect_error=expect_error) + + def test_description(self): + tests = [ + ({'description': ''}, True), + ({'description': None}, True), + ({'description': 12345}, True), + ({'description': 'This is a test workflow.'}, False) + ] + + for description, expect_error in tests: + self._parse_dsl_spec( + changes=description, + expect_error=expect_error + ) + + def test_tags(self): + tests = [ + ({'tags': ''}, True), + ({'tags': ['']}, True), + ({'tags': None}, True), + ({'tags': 12345}, True), + ({'tags': ['foo', 'bar']}, False), + ({'tags': ['foobar', 'foobar']}, True) + ] + + for tags, expect_error in tests: + self._parse_dsl_spec(changes=tags, expect_error=expect_error) + + def test_actions(self): + actions = { + 'version': '2.0', + 'noop': { + 'base': 'std.noop' + }, + 'echo': { + 'base': 'std.echo' + } + } + + tests = [ + ({'actions': []}, True), + ({'actions': {}}, True), + ({'actions': None}, True), + ({'actions': {'version': None}}, True), + ({'actions': {'version': ''}}, True), + ({'actions': {'version': '1.0'}}, True), + ({'actions': {'version': 
'2.0'}}, False), + ({'actions': {'version': 2.0}}, False), + ({'actions': {'version': 2}}, False), + ({'actions': {'noop': actions['noop']}}, False), + ({'actions': {'version': '2.0', 'noop': 'std.noop'}}, True), + ({'actions': actions}, False) + ] + + for adhoc_actions, expect_error in tests: + self._parse_dsl_spec( + changes=adhoc_actions, + expect_error=expect_error + ) + + def test_workflows(self): + workflows = { + 'version': '2.0', + 'wf1': { + 'tasks': { + 'noop': { + 'action': 'std.noop' + } + } + }, + 'wf2': { + 'tasks': { + 'echo': { + 'action': 'std.echo output="This is a test."' + } + } + } + } + + tests = [ + # ({'workflows': []}, True), + # ({'workflows': {}}, True), + # ({'workflows': None}, True), + # ({'workflows': {'version': None}}, True), + # ({'workflows': {'version': ''}}, True), + # ({'workflows': {'version': '1.0'}}, True), + # ({'workflows': {'version': '2.0'}}, False), + # ({'workflows': {'version': 2.0}}, False), + # ({'workflows': {'version': 2}}, False), + # ({'workflows': {'wf1': workflows['wf1']}}, False), + ({'workflows': {'version': '2.0', 'wf1': 'wf1'}}, True), + ({'workflows': workflows}, False) + ] + + for workflows, expect_error in tests: + self._parse_dsl_spec(changes=workflows, expect_error=expect_error) + + def test_workflow_name_validation(self): + wb_spec = self._parse_dsl_spec(dsl_file='workbook_schema_test.yaml') + + d = wb_spec.to_dict() + + self.assertEqual('2.0', d['version']) + self.assertEqual('2.0', d['workflows']['version']) + + workflow_names = ['workflowversion', 'versionworkflow', + 'workflowversionworkflow', 'version_workflow'] + + action_names = ['actionversion', 'versionaction', + 'actionversionaction'] + + for name in workflow_names: + self.assertEqual('2.0', d['workflows'][name]['version']) + self.assertEqual(name, d['workflows'][name]['name']) + + for name in action_names: + self.assertEqual('2.0', d['actions'][name]['version']) + self.assertEqual(name, d['actions'][name]['name']) + + def 
test_name_regex(self): + # We want to match a string containing version at any point. + valid_names = ( + "workflowversion", + "versionworkflow", + "workflowversionworkflow", + "version_workflow", + "version-workflow", + ) + + for valid in valid_names: + result = re.match(workbook.NON_VERSION_WORD_REGEX, valid) + self.assertNotEqual( + None, + result, + "Expected match for: {}".format(valid) + ) + + # ... except, we don't want to match a string that isn't just one word + # or is exactly "version" + invalid_names = ("version", "my workflow") + + for invalid in invalid_names: + result = re.match(workbook.NON_VERSION_WORD_REGEX, invalid) + self.assertEqual( + None, + result, + "Didn't expected match for: {}".format(invalid) + ) diff -Nru mistral-4.0.0/mistral/tests/unit/lang/v2/test_workflows.py mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_workflows.py --- mistral-4.0.0/mistral/tests/unit/lang/v2/test_workflows.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/lang/v2/test_workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,427 @@ +# Copyright 2015 - StackStorm, Inc. +# Copyright 2016 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +import yaml + +from mistral import exceptions as exc +from mistral.tests.unit.lang.v2 import base +from mistral import utils + + +class WorkflowSpecValidation(base.WorkflowSpecValidationTestCase): + def test_workflow_types(self): + tests = [ + ({'type': 'direct'}, False), + ({'type': 'reverse'}, False), + ({'type': 'circular'}, True), + ({'type': None}, True) + ] + + for wf_type, expect_error in tests: + overlay = {'test': wf_type} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_direct_workflow(self): + overlay = {'test': {'type': 'direct', 'tasks': {}}} + join = {'join': 'all'} + on_success = {'on-success': ['email']} + + utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'email': join}) + + wfs_spec = self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=False + ) + + self.assertEqual(1, len(wfs_spec.get_workflows())) + self.assertEqual('test', wfs_spec.get_workflows()[0].get_name()) + self.assertEqual('direct', wfs_spec.get_workflows()[0].get_type()) + + def test_direct_workflow_invalid_task(self): + overlay = { + 'test': { + 'type': 'direct', + 'tasks': {} + } + } + requires = {'requires': ['echo', 'get']} + + utils.merge_dicts(overlay['test']['tasks'], {'email': requires}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=True + ) + + def test_direct_workflow_no_start_tasks(self): + overlay = { + 'test': { + 'type': 'direct', + 'tasks': { + 'task1': {'on-complete': 'task2'}, + 'task2': {'on-complete': 'task1'} + } + } + } + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=True + ) + + def test_direct_workflow_invalid_join(self): + tests = [ + ({'task3': {'join': 2}}, False), + ({'task3': {'join': 5}}, True), + ({'task3': {'join': 1}}, False), + ({'task3': {'join': 'one'}}, False), + 
({'task3': {'join': 'all'}}, False), + ({'task4': {'join': 'all'}}, True), + ({'task4': {'join': 1}}, True), + ({'task4': {'join': 'one'}}, True) + ] + + for test in tests: + overlay = { + 'test': { + 'type': 'direct', + 'tasks': { + 'task1': {'on-complete': 'task3'}, + 'task2': {'on-complete': 'task3'} + } + } + } + + utils.merge_dicts(overlay['test']['tasks'], test[0]) + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=test[1] + ) + + def test_reverse_workflow(self): + overlay = {'test': {'type': 'reverse', 'tasks': {}}} + require = {'requires': ['echo', 'get']} + + utils.merge_dicts(overlay['test']['tasks'], {'email': require}) + + wfs_spec = self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=False + ) + + self.assertEqual(1, len(wfs_spec.get_workflows())) + self.assertEqual('test', wfs_spec.get_workflows()[0].get_name()) + self.assertEqual('reverse', wfs_spec.get_workflows()[0].get_type()) + + def test_reverse_workflow_invalid_task(self): + overlay = {'test': {'type': 'reverse', 'tasks': {}}} + join = {'join': 'all'} + on_success = {'on-success': ['email']} + + utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) + utils.merge_dicts(overlay['test']['tasks'], {'email': join}) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=True + ) + + def test_version_required(self): + dsl_dict = copy.deepcopy(self._dsl_blank) + dsl_dict.pop('version', None) + + exception = self.assertRaises( + exc.DSLParsingException, + self._spec_parser, + yaml.safe_dump(dsl_dict) + ) + + self.assertIn("'version' is a required property", exception.message) + + def test_version(self): + tests = [ + ({'version': None}, True), + ({'version': ''}, True), + ({'version': '2.0'}, False), + ({'version': 2.0}, False), + ({'version': 2}, False) + ] + + for version, expect_error in tests: + self._parse_dsl_spec( + add_tasks=True, + 
changes=version, + expect_error=expect_error + ) + + def test_inputs(self): + tests = [ + ({'input': ['var1', 'var2']}, False), + ({'input': ['var1', 'var1']}, True), + ({'input': [12345]}, True), + ({'input': [None]}, True), + ({'input': ['']}, True), + ({'input': None}, True), + ({'input': []}, True), + ({'input': ['var1', {'var2': 2}]}, False), + ({'input': [{'var1': 1}, {'var2': 2}]}, False), + ({'input': [{'var1': None}]}, False), + ({'input': [{'var1': 1}, {'var1': 1}]}, True), + ({'input': [{'var1': 1, 'var2': 2}]}, True) + ] + + for wf_input, expect_error in tests: + overlay = {'test': wf_input} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_outputs(self): + tests = [ + ({'output': {'k1': 'a', 'k2': 1, 'k3': True, 'k4': None}}, False), + ({'output': {'k1': '<% $.v1 %>'}}, False), + ({'output': {'k1': '<% 1 + 2 %>'}}, False), + ({'output': {'k1': '<% * %>'}}, True), + ({'output': []}, True), + ({'output': 'whatever'}, True), + ({'output': None}, True), + ({'output': {}}, True) + ] + + for wf_output, expect_error in tests: + overlay = {'test': wf_output} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_vars(self): + tests = [ + ({'vars': {'v1': 'a', 'v2': 1, 'v3': True, 'v4': None}}, False), + ({'vars': {'v1': '<% $.input_var1 %>'}}, False), + ({'vars': {'v1': '<% 1 + 2 %>'}}, False), + ({'vars': {'v1': '<% * %>'}}, True), + ({'vars': {'v1': '{{ _.input_var1 }}'}}, False), + ({'vars': {'v1': '{{ 1 + 2 }}'}}, False), + ({'vars': {'v1': '{{ * }}'}}, True), + ({'vars': []}, True), + ({'vars': 'whatever'}, True), + ({'vars': None}, True), + ({'vars': {}}, True) + ] + + for wf_vars, expect_error in tests: + overlay = {'test': wf_vars} + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_tasks_required(self): + exception = self._parse_dsl_spec( + add_tasks=False, + expect_error=True + ) + + 
self.assertIn("'tasks' is a required property", exception.message) + + def test_tasks(self): + tests = [ + ({'tasks': {}}, True), + ({'tasks': None}, True), + ({'tasks': self._dsl_tasks}, False) + ] + + for wf_tasks, expect_error in tests: + overlay = {'test': wf_tasks} + + self._parse_dsl_spec( + add_tasks=False, + changes=overlay, + expect_error=expect_error + ) + + def test_task_defaults(self): + tests = [ + ({'on-success': ['email']}, False), + ({'on-success': [{'email': '<% 1 %>'}]}, False), + ({'on-success': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False), + ({'on-success': [{'email': '<% * %>'}]}, True), + ({'on-success': [{'email': '{{ 1 }}'}]}, False), + ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-success': [{'email': '{{ * }}'}]}, True), + ({'on-success': 'email'}, False), + ({'on-success': None}, True), + ({'on-success': ['']}, True), + ({'on-success': []}, True), + ({'on-success': ['email', 'email']}, True), + ({'on-success': ['email', 12345]}, True), + ({'on-error': ['email']}, False), + ({'on-error': [{'email': '<% 1 %>'}]}, False), + ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False), + ({'on-error': [{'email': '<% * %>'}]}, True), + ({'on-error': [{'email': '{{ 1 }}'}]}, False), + ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-error': [{'email': '{{ * }}'}]}, True), + ({'on-error': 'email'}, False), + ({'on-error': None}, True), + ({'on-error': ['']}, True), + ({'on-error': []}, True), + ({'on-error': ['email', 'email']}, True), + ({'on-error': ['email', 12345]}, True), + ({'on-complete': ['email']}, False), + ({'on-complete': [{'email': '<% 1 %>'}]}, False), + ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False), + ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, 
False), + ({'on-complete': [{'email': '<% * %>'}]}, True), + ({'on-complete': [{'email': '{{ 1 }}'}]}, False), + ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False), + ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), + ({'on-complete': [{'email': '{{ * }}'}]}, True), + ({'on-complete': 'email'}, False), + ({'on-complete': None}, True), + ({'on-complete': ['']}, True), + ({'on-complete': []}, True), + ({'on-complete': ['email', 'email']}, True), + ({'on-complete': ['email', 12345]}, True), + ({'requires': ''}, True), + ({'requires': []}, True), + ({'requires': ['']}, True), + ({'requires': None}, True), + ({'requires': 12345}, True), + ({'requires': ['echo']}, False), + ({'requires': ['echo', 'get']}, False), + ({'requires': 'echo'}, False), + ({'retry': {'count': 3, 'delay': 1}}, False), + ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False), + ({'retry': {'count': '<% * %>', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False), + ({'retry': {'count': 3, 'delay': '<% * %>'}}, True), + ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False), + ({'retry': {'count': '{{ * }}', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False), + ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True), + ({'retry': {'count': -3, 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': -1}}, True), + ({'retry': {'count': '3', 'delay': 1}}, True), + ({'retry': {'count': 3, 'delay': '1'}}, True), + ({'retry': None}, True), + ({'wait-before': 1}, False), + ({'wait-before': '<% 1 %>'}, False), + ({'wait-before': '<% * %>'}, True), + ({'wait-before': '{{ 1 }}'}, False), + ({'wait-before': '{{ * }}'}, True), + ({'wait-before': -1}, True), + ({'wait-before': 1.0}, True), + ({'wait-before': '1'}, True), + ({'wait-after': 1}, False), + ({'wait-after': '<% 1 %>'}, False), + ({'wait-after': '<% * %>'}, True), + ({'wait-after': '{{ 1 }}'}, False), + ({'wait-after': '{{ * }}'}, True), + ({'wait-after': -1}, True), + ({'wait-after': 1.0}, 
True), + ({'wait-after': '1'}, True), + ({'timeout': 300}, False), + ({'timeout': '<% 300 %>'}, False), + ({'timeout': '<% * %>'}, True), + ({'timeout': '{{ 300 }}'}, False), + ({'timeout': '{{ * }}'}, True), + ({'timeout': -300}, True), + ({'timeout': 300.0}, True), + ({'timeout': '300'}, True), + ({'pause-before': False}, False), + ({'pause-before': '<% False %>'}, False), + ({'pause-before': '<% * %>'}, True), + ({'pause-before': '{{ False }}'}, False), + ({'pause-before': '{{ * }}'}, True), + ({'pause-before': 'False'}, True), + ({'concurrency': 10}, False), + ({'concurrency': '<% 10 %>'}, False), + ({'concurrency': '<% * %>'}, True), + ({'concurrency': '{{ 10 }}'}, False), + ({'concurrency': '{{ * }}'}, True), + ({'concurrency': -10}, True), + ({'concurrency': 10.0}, True), + ({'concurrency': '10'}, True) + ] + + for default, expect_error in tests: + overlay = {'test': {'task-defaults': {}}} + + utils.merge_dicts(overlay['test']['task-defaults'], default) + + self._parse_dsl_spec( + add_tasks=True, + changes=overlay, + expect_error=expect_error + ) + + def test_invalid_item(self): + overlay = {'name': 'invalid'} + + exception = self._parse_dsl_spec(changes=overlay, expect_error=True) + + self.assertIn("Invalid DSL", exception.message) + + def test_invalid_name(self): + invalid_wf = { + 'version': '2.0', + 'b98180ba-48a0-4e26-ab2e-50dc224f6fd1': { + 'type': 'direct', + 'tasks': {'t1': {'action': 'std.noop'}} + } + } + + dsl_yaml = yaml.safe_dump(invalid_wf, default_flow_style=False) + + exception = self.assertRaises( + exc.InvalidModelException, + self._spec_parser, + dsl_yaml + ) + + self.assertIn( + "Workflow name cannot be in the format of UUID", + exception.message + ) diff -Nru mistral-4.0.0/mistral/tests/unit/mstrlfixtures/hacking.py mistral-5.0.0~b2/mistral/tests/unit/mstrlfixtures/hacking.py --- mistral-4.0.0/mistral/tests/unit/mstrlfixtures/hacking.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/mstrlfixtures/hacking.py 
2017-06-09 12:48:26.000000000 +0000 @@ -35,6 +35,6 @@ LOG.warn('text') """, 'expected_errors': [ - (4, 9, 'M001'), + (8, 9, 'M001'), ], } diff -Nru mistral-4.0.0/mistral/tests/unit/rpc/kombu/base.py mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/base.py --- mistral-4.0.0/mistral/tests/unit/rpc/kombu/base.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,28 @@ +# Copyright (c) 2016 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from mistral import config as cfg +from mistral.rpc.kombu import base as kombu_base +from mistral.tests.unit import base + + +class KombuTestCase(base.BaseTest): + + def setUp(self): + super(KombuTestCase, self).setUp() + + kombu_base.set_transport_options(check_backend=False) + + cfg.CONF.set_default('rpc_backend', 'kombu') diff -Nru mistral-4.0.0/mistral/tests/unit/rpc/kombu/fake_kombu.py mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/fake_kombu.py --- mistral-4.0.0/mistral/tests/unit/rpc/kombu/fake_kombu.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/fake_kombu.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,48 @@ +# Copyright (c) 2016 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from kombu import mixins as mx +import mock + + +# Hack for making tests works with kombu listener +mixins = mx + +producer = mock.MagicMock() + +producers = mock.MagicMock() +producers.__getitem__ = lambda *args, **kwargs: producer + +connection = mock.MagicMock() + +connections = mock.MagicMock() +connections.__getitem__ = lambda *args, **kwargs: connection + +serialization = mock.MagicMock() + + +def BrokerConnection(*args, **kwargs): + return mock.MagicMock() + + +def Exchange(*args, **kwargs): + return mock.MagicMock() + + +def Queue(*args, **kwargs): + return mock.MagicMock() + + +def Consumer(*args, **kwargs): + return mock.MagicMock() diff -Nru mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_client.py mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_client.py --- mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_client.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_client.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,96 @@ +# Copyright (c) 2016 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from mistral import exceptions as exc +from mistral.tests.unit.rpc.kombu import base +from mistral.tests.unit.rpc.kombu import fake_kombu + +import mock +from six import moves + +with mock.patch.dict('sys.modules', kombu=fake_kombu): + from mistral.rpc.kombu import base as kombu_base + from mistral.rpc.kombu import kombu_client + + +class TestException(exc.MistralException): + pass + + +class KombuClientTestCase(base.KombuTestCase): + + _RESPONSE = "response" + + def setUp(self): + super(KombuClientTestCase, self).setUp() + + conf = mock.MagicMock() + + listener_class = kombu_client.kombu_listener.KombuRPCListener + + kombu_client.kombu_listener.KombuRPCListener = mock.MagicMock() + + def restore_listener(): + kombu_client.kombu_listener.KombuRPCListener = listener_class + + self.addCleanup(restore_listener) + + self.client = kombu_client.KombuRPCClient(conf) + self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() + + def test_sync_call_result_get(self): + self.client._listener.get_result = mock.MagicMock( + return_value={ + kombu_base.TYPE: None, + kombu_base.RESULT: self.client._serialize_message({ + 'body': self._RESPONSE + }) + } + ) + + response = self.client.sync_call(self.ctx, 'method') + + self.assertEqual(response, self._RESPONSE) + + def test_sync_call_result_not_get(self): + self.client._listener.get_result = mock.MagicMock( + side_effect=moves.queue.Empty + ) + + self.assertRaises( + exc.MistralException, + self.client.sync_call, + self.ctx, + 'method_not_found' + ) + + def test_sync_call_result_type_error(self): + def side_effect(*args, **kwargs): + return { + kombu_base.TYPE: 'error', + kombu_base.RESULT: TestException() + } + + self.client._wait_for_result = mock.MagicMock(side_effect=side_effect) + + self.assertRaises( + TestException, + self.client.sync_call, + self.ctx, + 'method' + ) + + def test_async_call(self): + 
self.assertIsNone(self.client.async_call(self.ctx, 'method')) diff -Nru mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_listener.py mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_listener.py --- mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_listener.py 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_listener.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,220 @@ +# Copyright (c) 2017 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from mistral import exceptions as exc +from mistral.tests.unit.rpc.kombu import base +from mistral.tests.unit.rpc.kombu import fake_kombu +from mistral import utils + +import mock +from six import moves + +with mock.patch.dict('sys.modules', kombu=fake_kombu): + from mistral.rpc.kombu import base as kombu_base + from mistral.rpc.kombu import kombu_listener + + +class TestException(exc.MistralException): + pass + + +class KombuListenerTestCase(base.KombuTestCase): + + def setUp(self): + super(KombuListenerTestCase, self).setUp() + + self.listener = kombu_listener.KombuRPCListener( + [mock.MagicMock()], + mock.MagicMock() + ) + self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() + + def test_add_listener(self): + correlation_id = utils.generate_unicode_uuid() + + self.listener.add_listener(correlation_id) + + self.assertEqual( + type(self.listener._results.get(correlation_id)), + moves.queue.Queue + ) + + self.assertEqual(0, self.listener._results[correlation_id].qsize()) + + def test_remove_listener_correlation_id_in_results(self): + correlation_id = utils.generate_unicode_uuid() + + self.listener.add_listener(correlation_id) + + self.assertEqual( + type(self.listener._results.get(correlation_id)), + moves.queue.Queue + ) + + self.listener.remove_listener(correlation_id) + + self.assertEqual( + self.listener._results.get(correlation_id), + None + ) + + def test_remove_listener_correlation_id_not_in_results(self): + correlation_id = utils.generate_unicode_uuid() + + self.listener.add_listener(correlation_id) + + self.assertEqual( + type(self.listener._results.get(correlation_id)), + moves.queue.Queue + ) + + self.listener.remove_listener(utils.generate_unicode_uuid()) + + self.assertEqual( + type(self.listener._results.get(correlation_id)), + moves.queue.Queue + ) + + @mock.patch('threading.Thread') + def test_start_thread_not_set(self, thread_class_mock): + thread_mock = mock.MagicMock() + thread_class_mock.return_value = thread_mock + + 
self.listener.start() + + self.assertTrue(thread_mock.daemon) + self.assertEqual(thread_mock.start.call_count, 1) + + @mock.patch('threading.Thread') + def test_start_thread_set(self, thread_class_mock): + thread_mock = mock.MagicMock() + thread_class_mock.return_value = thread_mock + + self.listener._thread = mock.MagicMock() + self.listener.start() + + self.assertEqual(thread_mock.start.call_count, 0) + + def test_get_result_results_in_queue(self): + expected_result = 'abcd' + correlation_id = utils.generate_unicode_uuid() + + self.listener.add_listener(correlation_id) + self.listener._results.get(correlation_id).put(expected_result) + + result = self.listener.get_result(correlation_id, 5) + + self.assertEqual(result, expected_result) + + def test_get_result_not_in_queue(self): + correlation_id = utils.generate_unicode_uuid() + + self.listener.add_listener(correlation_id) + + self.assertRaises( + moves.queue.Empty, + self.listener.get_result, + correlation_id, + 1 # timeout + ) + + def test_get_result_lack_of_queue(self): + correlation_id = utils.generate_unicode_uuid() + + self.assertRaises( + KeyError, + self.listener.get_result, + correlation_id, + 1 # timeout + ) + + def test__on_response_message_ack_fail(self): + message = mock.MagicMock() + message.ack.side_effect = Exception('Test Exception') + response = 'response' + + kombu_listener.LOG = mock.MagicMock() + + self.listener.on_message(response, message) + self.assertEqual(kombu_listener.LOG.debug.call_count, 1) + self.assertEqual(kombu_listener.LOG.exception.call_count, 1) + + def test__on_response_message_ack_ok_corr_id_not_match(self): + message = mock.MagicMock() + message.properties = mock.MagicMock() + message.properties.__getitem__ = lambda *args, **kwargs: True + response = 'response' + + kombu_listener.LOG = mock.MagicMock() + + self.listener.on_message(response, message) + self.assertEqual(kombu_listener.LOG.debug.call_count, 3) + self.assertEqual(kombu_listener.LOG.exception.call_count, 0) + + 
def test__on_response_message_ack_ok_message_type_error(self):
+        correlation_id = utils.generate_unicode_uuid()
+
+        message = mock.MagicMock()
+        message.properties = dict()
+        message.properties['type'] = 'error'
+        message.properties['correlation_id'] = correlation_id
+
+        response = TestException('response')
+
+        kombu_listener.LOG = mock.MagicMock()
+
+        self.listener.add_listener(correlation_id)
+        self.listener.on_message(response, message)
+
+        self.assertEqual(kombu_listener.LOG.debug.call_count, 2)
+        self.assertEqual(kombu_listener.LOG.exception.call_count, 0)
+
+        result = self.listener.get_result(correlation_id, 5)
+
+        self.assertDictEqual(
+            result,
+            {
+                kombu_base.TYPE: 'error',
+                kombu_base.RESULT: response
+            }
+        )
+
+    def test__on_response_message_ack_ok(self):
+        correlation_id = utils.generate_unicode_uuid()
+
+        message = mock.MagicMock()
+        message.properties = dict()
+        message.properties['type'] = None
+        message.properties['correlation_id'] = correlation_id
+
+        response = 'response'
+
+        kombu_listener.LOG = mock.MagicMock()
+
+        self.listener.add_listener(correlation_id)
+        self.listener.on_message(response, message)
+
+        self.assertEqual(kombu_listener.LOG.debug.call_count, 2)
+        self.assertEqual(kombu_listener.LOG.exception.call_count, 0)
+
+        result = self.listener.get_result(correlation_id, 5)
+
+        self.assertDictEqual(
+            result,
+            {
+                kombu_base.TYPE: None,
+                kombu_base.RESULT: response
+            }
+        )
diff -Nru mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_server.py mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_server.py
--- mistral-4.0.0/mistral/tests/unit/rpc/kombu/test_kombu_server.py 1970-01-01 00:00:00.000000000 +0000
+++ mistral-5.0.0~b2/mistral/tests/unit/rpc/kombu/test_kombu_server.py 2017-06-09 12:48:26.000000000 +0000
@@ -0,0 +1,296 @@
+# Copyright (c) 2016 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from mistral import exceptions as exc +from mistral.tests.unit.rpc.kombu import base +from mistral.tests.unit.rpc.kombu import fake_kombu + +import mock +import socket +from stevedore import driver + +with mock.patch.dict('sys.modules', kombu=fake_kombu): + from mistral.rpc.kombu import kombu_server + + +class TestException(exc.MistralError): + pass + + +class KombuServerTestCase(base.KombuTestCase): + + def setUp(self): + super(KombuServerTestCase, self).setUp() + + self.conf = mock.MagicMock() + self.server = kombu_server.KombuRPCServer(self.conf) + self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() + + def test_is_running_is_running(self): + self.server._running.set() + self.assertTrue(self.server.is_running) + + def test_is_running_is_not_running(self): + self.server._running.clear() + self.assertFalse(self.server.is_running) + + def test_stop(self): + self.server.stop() + self.assertFalse(self.server.is_running) + + def test_publish_message(self): + body = 'body' + reply_to = 'reply_to' + corr_id = 'corr_id' + type = 'type' + + acquire_mock = mock.MagicMock() + fake_kombu.producer.acquire.return_value = acquire_mock + + enter_mock = mock.MagicMock() + acquire_mock.__enter__.return_value = enter_mock + + self.server.publish_message(body, reply_to, corr_id, type) + enter_mock.publish.assert_called_once_with( + body={'body': '"body"'}, + exchange='openstack', + routing_key=reply_to, + correlation_id=corr_id, + type=type, + serializer='json' + ) + + def test_run_launch_successfully(self): + acquire_mock = mock.MagicMock() + 
acquire_mock.drain_events.side_effect = TestException() + fake_kombu.connection.acquire.return_value = acquire_mock + + self.assertRaises(TestException, self.server.run) + self.assertTrue(self.server.is_running) + + def test_run_launch_successfully_than_stop(self): + + def side_effect(*args, **kwargs): + self.assertTrue(self.server.is_running) + raise KeyboardInterrupt + + acquire_mock = mock.MagicMock() + acquire_mock.drain_events.side_effect = side_effect + fake_kombu.connection.acquire.return_value = acquire_mock + + self.server.run() + self.assertFalse(self.server.is_running) + self.assertEqual(self.server._sleep_time, 1) + + def test_run_socket_error_reconnect(self): + + def side_effect(*args, **kwargs): + if acquire_mock.drain_events.call_count == 1: + raise socket.error() + raise TestException() + + acquire_mock = mock.MagicMock() + acquire_mock.drain_events.side_effect = side_effect + fake_kombu.connection.acquire.return_value = acquire_mock + + self.assertRaises(TestException, self.server.run) + self.assertEqual(self.server._sleep_time, 2) + + def test_run_socket_timeout_still_running(self): + + def side_effect(*args, **kwargs): + if acquire_mock.drain_events.call_count == 0: + raise socket.timeout() + raise TestException() + + acquire_mock = mock.MagicMock() + acquire_mock.drain_events.side_effect = side_effect + fake_kombu.connection.acquire.return_value = acquire_mock + + self.assertRaises( + TestException, + self.server.run + ) + self.assertTrue(self.server.is_running) + + def test_run_keyboard_interrupt_not_running(self): + acquire_mock = mock.MagicMock() + acquire_mock.drain_events.side_effect = KeyboardInterrupt() + fake_kombu.connection.acquire.return_value = acquire_mock + + self.assertIsNone(self.server.run()) + self.assertFalse(self.server.is_running) + + @mock.patch.object( + kombu_server.KombuRPCServer, + '_on_message', + mock.MagicMock() + ) + @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') + def 
test__on_message_safe_message_processing_ok(self, publish_message): + message = mock.MagicMock() + + self.server._on_message_safe(None, message) + + self.assertEqual(message.ack.call_count, 1) + self.assertEqual(publish_message.call_count, 0) + + @mock.patch.object(kombu_server.KombuRPCServer, '_on_message') + @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') + def test__on_message_safe_message_processing_raise( + self, + publish_message, + _on_message + ): + reply_to = 'reply_to' + correlation_id = 'corr_id' + message = mock.MagicMock() + message.properties = { + 'reply_to': reply_to, + 'correlation_id': correlation_id + } + + test_exception = TestException() + _on_message.side_effect = test_exception + + self.server._on_message_safe(None, message) + + self.assertEqual(message.ack.call_count, 1) + self.assertEqual(publish_message.call_count, 1) + + @mock.patch.object( + kombu_server.KombuRPCServer, + '_get_rpc_method', + mock.MagicMock(return_value=None) + ) + def test__on_message_rpc_method_not_found(self): + request = { + 'rpc_ctx': {}, + 'rpc_method': 'not_found_method', + 'arguments': {} + } + + message = mock.MagicMock() + message.properties = { + 'reply_to': None, + 'correlation_id': None + } + + self.assertRaises( + exc.MistralException, + self.server._on_message, + request, + message + ) + + @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') + @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') + @mock.patch('mistral.context.MistralContext') + def test__on_message_is_async(self, mistral_context, get_rpc_method, + publish_message): + result = 'result' + request = { + 'async': True, + 'rpc_ctx': {}, + 'rpc_method': 'found_method', + 'arguments': self.server._serialize_message({ + 'a': 1, + 'b': 2 + }) + } + + message = mock.MagicMock() + message.properties = { + 'reply_to': None, + 'correlation_id': None + } + message.delivery_info.get.return_value = False + + rpc_method = mock.MagicMock(return_value=result) + 
get_rpc_method.return_value = rpc_method + + self.server._on_message(request, message) + rpc_method.assert_called_once_with( + rpc_ctx=mistral_context(), + a=1, + b=2 + ) + self.assertEqual(publish_message.call_count, 0) + + @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') + @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') + @mock.patch('mistral.context.MistralContext') + def test__on_message_is_sync(self, mistral_context, get_rpc_method, + publish_message): + result = 'result' + request = { + 'async': False, + 'rpc_ctx': {}, + 'rpc_method': 'found_method', + 'arguments': self.server._serialize_message({ + 'a': 1, + 'b': 2 + }) + } + + reply_to = 'reply_to' + correlation_id = 'corr_id' + message = mock.MagicMock() + message.properties = { + 'reply_to': reply_to, + 'correlation_id': correlation_id + } + message.delivery_info.get.return_value = False + + rpc_method = mock.MagicMock(return_value=result) + get_rpc_method.return_value = rpc_method + + self.server._on_message(request, message) + rpc_method.assert_called_once_with( + rpc_ctx=mistral_context(), + a=1, + b=2 + ) + publish_message.assert_called_once_with( + result, + reply_to, + correlation_id + ) + + @mock.patch('stevedore.driver.DriverManager') + def test__prepare_worker(self, driver_manager_mock): + worker_mock = mock.MagicMock() + mgr_mock = mock.MagicMock() + mgr_mock.driver.return_value = worker_mock + + def side_effect(*args, **kwargs): + return mgr_mock + + driver_manager_mock.side_effect = side_effect + + self.server._prepare_worker('blocking') + + self.assertEqual(self.server._worker, worker_mock) + + @mock.patch('stevedore.driver.DriverManager') + def test__prepare_worker_no_valid_executor(self, driver_manager_mock): + + driver_manager_mock.side_effect = driver.NoMatches() + + self.assertRaises( + driver.NoMatches, + self.server._prepare_worker, + 'non_valid_executor' + ) diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_action_service.py 
mistral-5.0.0~b2/mistral/tests/unit/services/test_action_service.py --- mistral-4.0.0/mistral/tests/unit/services/test_action_service.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/services/test_action_service.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,10 +15,10 @@ from oslo_config import cfg from mistral.db.v2 import api as db_api +from mistral.lang import parser as spec_parser from mistral.services import actions as action_service from mistral.tests.unit import base from mistral import utils -from mistral.workbook import parser as spec_parser # Use the set_default method to set value otherwise in certain test cases diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_event_engine.py mistral-5.0.0~b2/mistral/tests/unit/services/test_event_engine.py --- mistral-4.0.0/mistral/tests/unit/services/test_event_engine.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/services/test_event_engine.py 2017-06-09 12:48:26.000000000 +0000 @@ -1,4 +1,5 @@ # Copyright 2016 Catalyst IT Ltd +# Copyright 2017 Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,8 +19,8 @@ from oslo_config import cfg from mistral.db.v2.sqlalchemy import api as db_api -from mistral.engine.rpc_backend import rpc -from mistral.event_engine import event_engine +from mistral.event_engine import default_event_engine as evt_eng +from mistral.rpc import clients as rpc from mistral.services import workflows from mistral.tests.unit import base @@ -61,7 +62,7 @@ @mock.patch.object(rpc, 'get_engine_client', mock.Mock()) def test_event_engine_start_with_no_triggers(self): - e_engine = event_engine.EventEngine() + e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) @@ -74,7 +75,7 @@ def test_event_engine_start_with_triggers(self, mock_start): trigger = db_api.create_event_trigger(EVENT_TRIGGER) - e_engine = event_engine.EventEngine() + e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) @@ -96,7 +97,7 @@ def test_process_event_queue(self, mock_start): db_api.create_event_trigger(EVENT_TRIGGER) - e_engine = event_engine.EventEngine() + e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) @@ -138,8 +139,8 @@ } ] - converter = event_engine.NotificationsConverter() - converter.definitions = [event_engine.EventDefinition(event_def) + converter = evt_eng.NotificationsConverter() + converter.definitions = [evt_eng.EventDefinition(event_def) for event_def in reversed(definition_cfg)] notification = { @@ -165,8 +166,8 @@ } ] - converter = event_engine.NotificationsConverter() - converter.definitions = [event_engine.EventDefinition(event_def) + converter = evt_eng.NotificationsConverter() + converter.definitions = [evt_eng.EventDefinition(event_def) for event_def in reversed(definition_cfg)] notification = { diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_expiration_policy.py mistral-5.0.0~b2/mistral/tests/unit/services/test_expiration_policy.py --- mistral-4.0.0/mistral/tests/unit/services/test_expiration_policy.py 1970-01-01 00:00:00.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/tests/unit/services/test_expiration_policy.py 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,409 @@ +# Copyright 2015 - Alcatel-lucent, Inc. +# Copyright 2015 - StackStorm, Inc. +# Copyright 2016 - Brocade Communications Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +from mistral import context as ctx +from mistral.db.v2 import api as db_api +from mistral.services import expiration_policy +from mistral.services.expiration_policy import ExecutionExpirationPolicy +from mistral.tests.unit import base +from mistral.tests.unit.base import get_context +from oslo_config import cfg + + +def _create_workflow_executions(): + time_now = datetime.datetime.utcnow() + + wf_execs = [ + { + 'id': 'success_expired', + 'name': 'success_expired', + 'created_at': time_now - datetime.timedelta(minutes=60), + 'updated_at': time_now - datetime.timedelta(minutes=59), + 'workflow_name': 'test_exec', + 'state': "SUCCESS", + }, + { + 'id': 'error_expired', + 'name': 'error_expired', + 'created_at': time_now - datetime.timedelta(days=3, minutes=10), + 'updated_at': time_now - datetime.timedelta(days=3), + 'workflow_name': 'test_exec', + 'state': "ERROR", + }, + { + 'id': 'running_not_expired', + 'name': 'running_not_expired', + 'created_at': time_now - datetime.timedelta(days=3, minutes=10), + 'updated_at': time_now - datetime.timedelta(days=3), + 'workflow_name': 'test_exec', + 'state': "RUNNING", + }, + { + 'id': 
'running_not_expired2', + 'name': 'running_not_expired2', + 'created_at': time_now - datetime.timedelta(days=3, minutes=10), + 'updated_at': time_now - datetime.timedelta(days=4), + 'workflow_name': 'test_exec', + 'state': "RUNNING", + }, + { + 'id': 'success_not_expired', + 'name': 'success_not_expired', + 'created_at': time_now - datetime.timedelta(minutes=15), + 'updated_at': time_now - datetime.timedelta(minutes=5), + 'workflow_name': 'test_exec', + 'state': "SUCCESS", + }, + { + 'id': 'abc', + 'name': 'cancelled_expired', + 'created_at': time_now - datetime.timedelta(minutes=60), + 'updated_at': time_now - datetime.timedelta(minutes=59), + 'workflow_name': 'test_exec', + 'state': "CANCELLED", + }, + { + 'id': 'cancelled_not_expired', + 'name': 'cancelled_not_expired', + 'created_at': time_now - datetime.timedelta(minutes=15), + 'updated_at': time_now - datetime.timedelta(minutes=6), + 'workflow_name': 'test_exec', + 'state': "CANCELLED", + } + ] + + for wf_exec in wf_execs: + db_api.create_workflow_execution(wf_exec) + + # Create a nested workflow execution. + + db_api.create_task_execution( + { + 'id': 'running_not_expired', + 'workflow_execution_id': 'success_not_expired', + 'name': 'my_task' + } + ) + + db_api.create_workflow_execution( + { + 'id': 'expired_but_not_a_parent', + 'name': 'expired_but_not_a_parent', + 'created_at': time_now - datetime.timedelta(days=15), + 'updated_at': time_now - datetime.timedelta(days=10), + 'workflow_name': 'test_exec', + 'state': "SUCCESS", + 'task_execution_id': 'running_not_expired' + } + ) + + +def _switch_context(is_default, is_admin): + ctx.set_ctx(get_context(is_default, is_admin)) + + +class ExpirationPolicyTest(base.DbTestCase): + def test_expiration_policy_for_executions_with_different_project_id(self): + # Delete execution uses a secured filtering and we need + # to verify that admin able to do that for other projects. 
+ cfg.CONF.set_default('auth_enable', True, group='pecan') + + # Since we are removing other projects execution, + # we want to load the executions with other project_id. + _switch_context(False, False) + + _create_workflow_executions() + + now = datetime.datetime.utcnow() + + # This execution has a parent wf and testing that we are + # querying only for parent wfs. + exec_child = db_api.get_workflow_execution('expired_but_not_a_parent') + + self.assertEqual('running_not_expired', exec_child.task_execution_id) + + # Call for all expired wfs execs. + execs = db_api.get_executions_to_clean(now) + + # Should be only 5, the RUNNING execution shouldn't return, + # so the child wf (that has parent task id). + self.assertEqual(5, len(execs)) + + # Switch context to Admin since expiration policy running as Admin. + _switch_context(True, True) + + _set_expiration_policy_config(1, 30, None) + expiration_policy.run_execution_expiration_policy(self, ctx) + + # Only non_expired available (update_at < older_than). + execs = db_api.get_executions_to_clean(now) + + self.assertEqual(2, len(execs)) + self.assertListEqual( + [ + 'cancelled_not_expired', + 'success_not_expired' + ], + sorted([ex.id for ex in execs]) + ) + + _set_expiration_policy_config(1, 5, None) + expiration_policy.run_execution_expiration_policy(self, ctx) + execs = db_api.get_executions_to_clean(now) + + self.assertEqual(0, len(execs)) + + def test_deletion_of_expired_executions_with_batch_size_scenario1(self): + """scenario1 + + This test will use batch_size of 3, + 5 expired executions and different values of "older_than" + which is 30 and 5 minutes respectively. + Expected_result: All expired executions are successfully deleted. + """ + + cfg.CONF.set_default( + 'batch_size', + 3, + group='execution_expiration_policy' + ) + + _create_workflow_executions() + + _set_expiration_policy_config(1, 30, None) + + # Call for all expired wfs execs. 
+ now = datetime.datetime.utcnow() + execs = db_api.get_executions_to_clean(now) + + # Should be only 5, the RUNNING execution shouldn't return, + # so the child wf (that has parent task id). + self.assertEqual(5, len(execs)) + + older_than = cfg.CONF.execution_expiration_policy.older_than + exp_time = (datetime.datetime.utcnow() + - datetime.timedelta(minutes=older_than)) + batch_size = cfg.CONF.execution_expiration_policy.batch_size + mfe = cfg.CONF.execution_expiration_policy.max_finished_executions + expiration_policy._delete_executions( + batch_size, + exp_time, + mfe + ) + execs = db_api.get_executions_to_clean(now) + self.assertEqual(2, len(execs)) + + _set_expiration_policy_config(1, 5, None) + expiration_policy.run_execution_expiration_policy(self, ctx) + execs = db_api.get_executions_to_clean(now) + self.assertEqual(0, len(execs)) + + def test_deletion_of_expired_executions_with_batch_size_scenario2(self): + """scenario2 + + This test will use batch_size of 2, 5 expired executions + with value of "older_than" that is 5 minutes. + Expected_result: All expired executions are successfully deleted. + """ + + cfg.CONF.set_default( + 'batch_size', + 2, + group='execution_expiration_policy' + ) + + _create_workflow_executions() + + _set_expiration_policy_config(1, 5, None) + + # Call for all expired wfs execs. + now = datetime.datetime.utcnow() + execs = db_api.get_executions_to_clean(now) + + # Should be only 5, the RUNNING execution shouldn't return, + # so the child wf (that has parent task id). 
+ self.assertEqual(5, len(execs)) + + older_than = cfg.CONF.execution_expiration_policy.older_than + exp_time = (datetime.datetime.utcnow() + - datetime.timedelta(minutes=older_than)) + batch_size = cfg.CONF.execution_expiration_policy.batch_size + mfe = cfg.CONF.execution_expiration_policy.max_finished_executions + expiration_policy._delete_executions( + batch_size, + exp_time, + mfe + ) + execs = db_api.get_executions_to_clean(now) + self.assertEqual(0, len(execs)) + + def test_expiration_policy_for_executions_with_max_executions_scen1(self): + """scenario1 + + Tests the max_executions logic with + max_finished_executions = + 'total not expired and completed executions' - 1 + """ + + _create_workflow_executions() + + now = datetime.datetime.utcnow() + + # Call for all expired wfs execs. + execs = db_api.get_executions_to_clean(now) + + # Should be only 5, the RUNNING execution shouldn't return, + # so the child wf (that has parent task id). + self.assertEqual(5, len(execs)) + + _set_expiration_policy_config(1, 30, 1) + expiration_policy.run_execution_expiration_policy(self, ctx) + + # Assert the two running executions + # (running_not_expired, running_not_expired2), + # the sub execution (expired_but_not_a_parent) and the one allowed + # finished execution (success_not_expired) are there. + execs = db_api.get_workflow_executions() + self.assertEqual(4, len(execs)) + self.assertListEqual( + [ + 'expired_but_not_a_parent', + 'running_not_expired', + 'running_not_expired2', + 'success_not_expired' + ], + sorted([ex.id for ex in execs]) + ) + + def test_expiration_policy_for_executions_with_max_executions_scen2(self): + """scenario2 + + Tests the max_executions logic with: + max_finished_executions > total completed executions + """ + + _create_workflow_executions() + + now = datetime.datetime.utcnow() + + # Call for all expired wfs execs. 
+ execs = db_api.get_executions_to_clean(now) + + # Should be only 5, the RUNNING execution shouldn't return, + # so the child wf (that has parent task id). + self.assertEqual(5, len(execs)) + + _set_expiration_policy_config(1, 30, 100) + expiration_policy.run_execution_expiration_policy(self, ctx) + + # Assert the two running executions + # (running_not_expired, running_not_expired2), the sub execution + # (expired_but_not_a_parent) and the all finished execution + # (success_not_expired, 'cancelled_not_expired') are there. + execs = db_api.get_workflow_executions() + self.assertEqual(5, len(execs)) + self.assertListEqual( + [ + 'cancelled_not_expired', + 'expired_but_not_a_parent', + 'running_not_expired', + 'running_not_expired2', + 'success_not_expired' + ], + sorted([ex.id for ex in execs]) + ) + + def test_negative_wrong_conf_values(self): + _set_expiration_policy_config(None, None, None) + e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) + + self.assertDictEqual({}, e_policy._periodic_spacing) + self.assertListEqual([], e_policy._periodic_tasks) + + _set_expiration_policy_config(None, 60, None) + e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) + + self.assertDictEqual({}, e_policy._periodic_spacing) + self.assertListEqual([], e_policy._periodic_tasks) + + _set_expiration_policy_config(60, None, None) + e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) + + self.assertDictEqual({}, e_policy._periodic_spacing) + self.assertListEqual([], e_policy._periodic_tasks) + + def test_periodic_task_parameters(self): + _set_expiration_policy_config(17, 13, None) + + e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) + + self.assertEqual( + 17 * 60, + e_policy._periodic_spacing['run_execution_expiration_policy'] + ) + + def test_periodic_task_scheduling(self): + def _assert_scheduling(expiration_policy_config, should_schedule): + ExecutionExpirationPolicy._periodic_tasks = [] + 
_set_expiration_policy_config(*expiration_policy_config) + e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) + + if should_schedule: + self.assertTrue( + e_policy._periodic_tasks, + "Periodic task should have been created." + ) + else: + self.assertFalse( + e_policy._periodic_tasks, + "Periodic task shouldn't have been created." + ) + + _assert_scheduling([1, 1, None], True) + _assert_scheduling([1, None, 1], True) + _assert_scheduling([1, 1, 1], True) + _assert_scheduling([1, None, None], False) + _assert_scheduling([None, 1, 1], False) + + def tearDown(self): + """Restores the size limit config to default.""" + super(ExpirationPolicyTest, self).tearDown() + + cfg.CONF.set_default('auth_enable', False, group='pecan') + + ctx.set_ctx(None) + + _set_expiration_policy_config(None, None, None) + + +def _set_expiration_policy_config(evaluation_interval, older_than, mfe): + cfg.CONF.set_default( + 'evaluation_interval', + evaluation_interval, + group='execution_expiration_policy' + ) + cfg.CONF.set_default( + 'older_than', + older_than, + group='execution_expiration_policy' + ) + cfg.CONF.set_default( + 'max_finished_executions', + mfe, + group='execution_expiration_policy' + ) diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_expired_executions_policy.py mistral-5.0.0~b2/mistral/tests/unit/services/test_expired_executions_policy.py --- mistral-4.0.0/mistral/tests/unit/services/test_expired_executions_policy.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/services/test_expired_executions_policy.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,210 +0,0 @@ -# Copyright 2015 - Alcatel-lucent, Inc. -# Copyright 2015 - StackStorm, Inc. -# Copyright 2016 - Brocade Communications Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime - -from mistral import context as ctx -from mistral.db.v2 import api as db_api -from mistral.services import expiration_policy -from mistral.tests.unit import base -from oslo_config import cfg - - -def _create_workflow_executions(): - time_now = datetime.datetime.now() - - wf_execs = [ - { - 'id': '123', - 'name': 'success_expired', - 'created_at': time_now - datetime.timedelta(minutes=60), - 'updated_at': time_now - datetime.timedelta(minutes=59), - 'workflow_name': 'test_exec', - 'state': "SUCCESS", - }, - { - 'id': '456', - 'name': 'error_expired', - 'created_at': time_now - datetime.timedelta(days=3, minutes=10), - 'updated_at': time_now - datetime.timedelta(days=3), - 'workflow_name': 'test_exec', - 'state': "ERROR", - }, - { - 'id': '789', - 'name': 'running_not_expired', - 'created_at': time_now - datetime.timedelta(days=3, minutes=10), - 'updated_at': time_now - datetime.timedelta(days=3), - 'workflow_name': 'test_exec', - 'state': "RUNNING", - }, - { - 'id': '987', - 'name': 'success_not_expired', - 'created_at': time_now - datetime.timedelta(minutes=15), - 'updated_at': time_now - datetime.timedelta(minutes=5), - 'workflow_name': 'test_exec', - 'state': "SUCCESS", - }, - { - 'id': 'abc', - 'name': 'cancelled_expired', - 'created_at': time_now - datetime.timedelta(minutes=60), - 'updated_at': time_now - datetime.timedelta(minutes=59), - 'workflow_name': 'test_exec', - 'state': "CANCELLED", - }, - { - 'id': 'def', - 'name': 'cancelled_not_expired', - 'created_at': time_now - datetime.timedelta(minutes=15), - 'updated_at': time_now 
- datetime.timedelta(minutes=5), - 'workflow_name': 'test_exec', - 'state': "CANCELLED", - } - ] - - for wf_exec in wf_execs: - db_api.create_workflow_execution(wf_exec) - - # Create a nested workflow execution. - - db_api.create_task_execution( - { - 'id': '789', - 'workflow_execution_id': '987', - 'name': 'my_task' - } - ) - - db_api.create_workflow_execution( - { - 'id': '654', - 'name': 'expired but not a parent', - 'created_at': time_now - datetime.timedelta(days=15), - 'updated_at': time_now - datetime.timedelta(days=10), - 'workflow_name': 'test_exec', - 'state': "SUCCESS", - 'task_execution_id': '789' - } - ) - - -def _switch_context(project_id, is_admin): - _ctx = ctx.MistralContext( - user_id=None, - project_id=project_id, - auth_token=None, - is_admin=is_admin - ) - - ctx.set_ctx(_ctx) - - -class ExpirationPolicyTest(base.DbTestCase): - def test_expiration_policy_for_executions(self): - # Delete execution uses a secured filtering and we need - # to verify that admin able to do that for other projects. - cfg.CONF.set_default('auth_enable', True, group='pecan') - - # Since we are removing other projects execution, - # we want to load the executions with other project_id. - _switch_context('non_admin_project', False) - - _create_workflow_executions() - - now = datetime.datetime.now() - - # This execution has a parent wf and testing that we are - # querying only for parent wfs. - exec_child = db_api.get_workflow_execution('654') - - self.assertEqual('789', exec_child.task_execution_id) - - # Call for all expired wfs execs. - execs = db_api.get_expired_executions(now) - - # Should be only 5, the RUNNING execution shouldn't return, - # so the child wf (that has parent task id). - self.assertEqual(5, len(execs)) - - # Switch context to Admin since expiration policy running as Admin. 
- _switch_context(None, True) - - _set_expiration_policy_config(1, 30) - expiration_policy.run_execution_expiration_policy(self, ctx) - - # Only non_expired available (update_at < older_than). - execs = db_api.get_expired_executions(now) - - self.assertEqual(2, len(execs)) - self.assertListEqual(['987', 'def'], sorted([ex.id for ex in execs])) - - _set_expiration_policy_config(1, 5) - expiration_policy.run_execution_expiration_policy(self, ctx) - execs = db_api.get_expired_executions(now) - - self.assertEqual(0, len(execs)) - - def test_negative_wrong_conf_values(self): - _set_expiration_policy_config(None, None) - e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) - - self.assertDictEqual({}, e_policy._periodic_spacing) - self.assertListEqual([], e_policy._periodic_tasks) - - _set_expiration_policy_config(None, 60) - e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) - - self.assertDictEqual({}, e_policy._periodic_spacing) - self.assertListEqual([], e_policy._periodic_tasks) - - _set_expiration_policy_config(60, None) - e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) - - self.assertDictEqual({}, e_policy._periodic_spacing) - self.assertListEqual([], e_policy._periodic_tasks) - - def test_periodic_task_parameters(self): - _set_expiration_policy_config(17, 13) - - e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) - - self.assertEqual(17 * 60, e_policy._periodic_spacing - ['run_execution_expiration_policy']) - - def tearDown(self): - """Restores the size limit config to default.""" - super(ExpirationPolicyTest, self).tearDown() - - cfg.CONF.set_default('auth_enable', False, group='pecan') - - ctx.set_ctx(None) - - _set_expiration_policy_config(None, None) - - -def _set_expiration_policy_config(evaluation_interval, older_than): - cfg.CONF.set_default( - 'evaluation_interval', - evaluation_interval, - group='execution_expiration_policy' - ) - cfg.CONF.set_default( - 'older_than', - older_than, - 
group='execution_expiration_policy' - ) diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_trigger_service.py mistral-5.0.0~b2/mistral/tests/unit/services/test_trigger_service.py --- mistral-4.0.0/mistral/tests/unit/services/test_trigger_service.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/services/test_trigger_service.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,10 +15,11 @@ import datetime import eventlet import mock + from oslo_config import cfg -from mistral.engine.rpc_backend import rpc from mistral import exceptions as exc +from mistral.rpc import clients as rpc from mistral.services import periodic from mistral.services import security from mistral.services import triggers as t_s @@ -224,31 +225,59 @@ self.assertEqual('my_trust_id', trigger.trust_id) - def test_get_trigger_in_correct_orders(self): - t1_name = 'trigger-%s' % utils.generate_unicode_uuid() + @mock.patch.object(security, 'create_trust', + type('trust', (object,), {'id': 'my_trust_id'})) + @mock.patch.object(security, 'create_context', mock.Mock()) + @mock.patch.object(rpc.EngineClient, 'start_workflow', mock.Mock()) + @mock.patch( + 'mistral.services.periodic.advance_cron_trigger', + mock.MagicMock(side_effect=new_advance_cron_trigger) + ) + @mock.patch.object(security, 'delete_trust') + def test_create_delete_trust_in_trigger(self, delete_trust): + cfg.CONF.set_default('auth_enable', True, group='pecan') + trigger_thread = periodic.setup() + self.addCleanup(trigger_thread.stop) + self.addCleanup( + cfg.CONF.set_default, 'auth_enable', + False, group='pecan' + ) t_s.create_cron_trigger( - t1_name, + 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, - '*/5 * * * *', - None, + '* * * * * *', None, + 1, datetime.datetime(2010, 8, 25) ) + self._await( + lambda: delete_trust.call_count == 1, timeout=10 + ) + self.assertEqual('my_trust_id', delete_trust.mock_calls[0][1][0]) + + def test_get_trigger_in_correct_orders(self): + t1_name = 
'trigger-%s' % utils.generate_unicode_uuid() + + t_s.create_cron_trigger( + t1_name, + self.wf.name, + {}, + pattern='*/5 * * * *', + start_time=datetime.datetime(2010, 8, 25) + ) + t2_name = 'trigger-%s' % utils.generate_unicode_uuid() t_s.create_cron_trigger( t2_name, self.wf.name, {}, - {}, - '*/1 * * * *', - None, - None, - datetime.datetime(2010, 8, 22) + pattern='*/1 * * * *', + start_time=datetime.datetime(2010, 8, 22) ) t3_name = 'trigger-%s' % utils.generate_unicode_uuid() @@ -257,11 +286,8 @@ t3_name, self.wf.name, {}, - {}, - '*/2 * * * *', - None, - None, - datetime.datetime(2010, 9, 21) + pattern='*/2 * * * *', + start_time=datetime.datetime(2010, 9, 21) ) t4_name = 'trigger-%s' % utils.generate_unicode_uuid() @@ -270,11 +296,8 @@ t4_name, self.wf.name, {}, - {}, - '*/3 * * * *', - None, - None, - datetime.datetime.now() + datetime.timedelta(0, 50) + pattern='*/3 * * * *', + start_time=datetime.datetime.utcnow() + datetime.timedelta(0, 50) ) trigger_names = [t.name for t in t_s.get_next_cron_triggers()] diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_workbook_service.py mistral-5.0.0~b2/mistral/tests/unit/services/test_workbook_service.py --- mistral-4.0.0/mistral/tests/unit/services/test_workbook_service.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/services/test_workbook_service.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,9 +15,9 @@ from oslo_config import cfg from mistral.db.v2 import api as db_api +from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit import base -from mistral.workbook import parser as spec_parser # Use the set_default method to set value otherwise in certain test cases diff -Nru mistral-4.0.0/mistral/tests/unit/services/test_workflow_service.py mistral-5.0.0~b2/mistral/tests/unit/services/test_workflow_service.py --- mistral-4.0.0/mistral/tests/unit/services/test_workflow_service.py 2017-02-22 13:40:59.000000000 +0000 
+++ mistral-5.0.0~b2/mistral/tests/unit/services/test_workflow_service.py 2017-06-09 12:48:26.000000000 +0000 @@ -19,10 +19,10 @@ from mistral.db.v2.sqlalchemy import api as db_api from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral import utils -from mistral.workbook import parser as spec_parser from mistral.workflow import states diff -Nru mistral-4.0.0/mistral/tests/unit/test_command_dispatcher.py mistral-5.0.0~b2/mistral/tests/unit/test_command_dispatcher.py --- mistral-4.0.0/mistral/tests/unit/test_command_dispatcher.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/test_command_dispatcher.py 2017-06-09 12:48:26.000000000 +0000 @@ -29,9 +29,6 @@ class CommandDispatcherTest(base.BaseTest): - def setUp(self): - super(CommandDispatcherTest, self).setUp() - def test_rearrange_commands(self): no_wait = commands.RunTask(None, None, None, None) fail = commands.FailWorkflow(None, None, None, None) diff -Nru mistral-4.0.0/mistral/tests/unit/test_serialization.py mistral-5.0.0~b2/mistral/tests/unit/test_serialization.py --- mistral-4.0.0/mistral/tests/unit/test_serialization.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/test_serialization.py 2017-06-09 12:48:26.000000000 +0000 @@ -42,7 +42,7 @@ serialization.register_serializer(MyClass, MyClassSerializer()) - self.addCleanup(serialization.cleanup) + self.addCleanup(serialization.unregister_serializer, MyClass) def test_dict_based_serializer(self): obj = MyClass('a', 'b') diff -Nru mistral-4.0.0/mistral/tests/unit/utils/test_inspect_utils.py mistral-5.0.0~b2/mistral/tests/unit/utils/test_inspect_utils.py --- mistral-4.0.0/mistral/tests/unit/utils/test_inspect_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/utils/test_inspect_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -20,6 +20,15 @@ 
from mistral.workflow import commands +class ClassWithProperties(object): + + a = 1 + + @property + def prop(self): + pass + + class InspectUtilsTest(base.BaseTest): def test_get_parameters_str(self): action_class = std_actions.HTTPAction @@ -38,7 +47,10 @@ clazz = commands.RunTask parameters_str = i_u.get_arg_list_as_str(clazz.__init__) - self.assertEqual('wf_ex, wf_spec, task_spec, ctx', parameters_str) + self.assertEqual( + 'wf_ex, wf_spec, task_spec, ctx, triggered_by=null', + parameters_str + ) def test_get_parameters_str_with_function_parameter(self): @@ -48,3 +60,9 @@ parameters_str = i_u.get_arg_list_as_str(test_func) self.assertEqual("foo, bar=null", parameters_str) + + def test_get_public_fields(self): + + attrs = i_u.get_public_fields(ClassWithProperties) + + self.assertEqual(attrs, {'a': 1}) diff -Nru mistral-4.0.0/mistral/tests/unit/utils/test_utils.py mistral-5.0.0~b2/mistral/tests/unit/utils/test_utils.py --- mistral-4.0.0/mistral/tests/unit/utils/test_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/utils/test_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -100,25 +100,26 @@ self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) - def test_get_input_dict(self): + def test_get_dict_from_entries(self): input = ['param1', {'param2': 2}] - input_dict = utils.get_input_dict(input) + input_dict = utils.get_dict_from_entries(input) self.assertIn('param1', input_dict) self.assertIn('param2', input_dict) self.assertEqual(2, input_dict.get('param2')) self.assertIs(input_dict.get('param1'), utils.NotDefined) - def test_get_input_dict_from_input_string(self): - input_string = 'param1, param2=2, param3="var3"' - input_dict = utils.get_dict_from_string(input_string) + def test_get_input_dict_from_string(self): + self.assertDictEqual( + { + 'param1': utils.NotDefined, + 'param2': 2, + 'param3': 'var3' + }, + utils.get_dict_from_string('param1, param2=2, param3="var3"') + ) - self.assertIn('param1', input_dict) - 
self.assertIn('param2', input_dict) - self.assertIn('param3', input_dict) - self.assertEqual(2, input_dict.get('param2')) - self.assertEqual('var3', input_dict.get('param3')) - self.assertIs(input_dict.get('param1'), utils.NotDefined) + self.assertDictEqual({}, utils.get_dict_from_string('')) def test_paramiko_to_private_key(self): self.assertRaises( diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/test_spec_caching.py mistral-5.0.0~b2/mistral/tests/unit/workbook/test_spec_caching.py --- mistral-4.0.0/mistral/tests/unit/workbook/test_spec_caching.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/test_spec_caching.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,238 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from mistral.db.v2 import api as db_api -from mistral.services import workbooks as wb_service -from mistral.services import workflows as wf_service -from mistral.tests.unit import base -from mistral.workbook import parser as spec_parser -from mistral.workflow import states - - -class SpecificationCachingTest(base.DbTestCase): - def test_workflow_spec_caching(self): - wf_text = """ - version: '2.0' - - wf: - tasks: - task1: - action: std.echo output="Echo" - """ - - wfs = wf_service.create_workflows(wf_text) - - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wfs[0].id, - wfs[0].updated_at - ) - - self.assertIsNotNone(wf_spec) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - def test_workflow_spec_cache_update_via_workflow_service(self): - wf_text = """ - version: '2.0' - - wf: - tasks: - task1: - action: std.echo output="Echo" - """ - - wfs = wf_service.create_workflows(wf_text) - - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wfs[0].id, - wfs[0].updated_at - ) - - self.assertEqual(1, len(wf_spec.get_tasks())) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - # Now update workflow definition and check that cache is updated too. 
- - wf_text = """ - version: '2.0' - - wf: - tasks: - task1: - action: std.echo output="1" - - task2: - action: std.echo output="2" - """ - - wfs = wf_service.update_workflows(wf_text) - - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wfs[0].id, - wfs[0].updated_at - ) - - self.assertEqual(2, len(wf_spec.get_tasks())) - self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - - def test_workflow_spec_cache_update_via_workbook_service(self): - wb_text = """ - version: '2.0' - - name: wb - - workflows: - wf: - tasks: - task1: - action: std.echo output="Echo" - """ - - wb_service.create_workbook_v2(wb_text) - - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) - - wf = db_api.get_workflow_definition('wb.wf') - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wf.id, - wf.updated_at - ) - - self.assertEqual(1, len(wf_spec.get_tasks())) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - # Now update workflow definition and check that cache is updated too. 
- - wb_text = """ - version: '2.0' - - name: wb - - workflows: - wf: - tasks: - task1: - action: std.echo output="1" - - task2: - action: std.echo output="2" - """ - - wb_service.update_workbook_v2(wb_text) - - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - wf = db_api.get_workflow_definition(wf.id) - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wf.id, - wf.updated_at - ) - - self.assertEqual(2, len(wf_spec.get_tasks())) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) - - def test_cache_workflow_spec_by_execution_id(self): - wf_text = """ - version: '2.0' - - wf: - tasks: - task1: - action: std.echo output="Echo" - """ - - wfs = wf_service.create_workflows(wf_text) - - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) - - wf_def = wfs[0] - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wf_def.id, - wf_def.updated_at - ) - - self.assertEqual(1, len(wf_spec.get_tasks())) - self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - wf_ex = db_api.create_workflow_execution({ - 'id': '1-2-3-4', - 'name': 'wf', - 'workflow_id': wf_def.id, - 'spec': wf_spec.to_dict(), - 'state': states.RUNNING - }) - - # Check that we can get a valid spec by execution id. - - wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id( - wf_ex.id - ) - - self.assertEqual(1, len(wf_spec_by_exec_id.get_tasks())) - - # Now update workflow definition and check that cache is updated too. 
- - wf_text = """ - version: '2.0' - - wf: - tasks: - task1: - action: std.echo output="1" - - task2: - action: std.echo output="2" - """ - - wfs = wf_service.update_workflows(wf_text) - - self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) - - wf_spec = spec_parser.get_workflow_spec_by_definition_id( - wfs[0].id, - wfs[0].updated_at - ) - - self.assertEqual(2, len(wf_spec.get_tasks())) - self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size()) - self.assertEqual(1, spec_parser.get_wf_execution_spec_cache_size()) - - # Now finally update execution cache and check that we can - # get a valid spec by execution id. - spec_parser.cache_workflow_spec_by_execution_id(wf_ex.id, wf_spec) - - wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id( - wf_ex.id - ) - - self.assertEqual(2, len(wf_spec_by_exec_id.get_tasks())) diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/v2/base.py mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/base.py --- mistral-4.0.0/mistral/tests/unit/workbook/v2/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy - -import yaml - -from mistral import exceptions as exc -from mistral.tests.unit import base -from mistral import utils -from mistral.workbook import parser as spec_parser - - -class WorkflowSpecValidationTestCase(base.BaseTest): - - def __init__(self, *args, **kwargs): - super(WorkflowSpecValidationTestCase, self).__init__(*args, **kwargs) - - # The relative resource path is ./mistral/tests/resources/workbook/v2. - self._resource_path = 'workbook/v2' - - self._spec_parser = spec_parser.get_workflow_list_spec_from_yaml - - self._dsl_blank = { - 'version': '2.0', - 'test': { - 'type': 'direct' - } - } - - self._dsl_tasks = { - 'get': { - 'action': 'std.http', - 'input': { - 'url': 'http://www.openstack.org' - } - }, - 'echo': { - 'action': 'std.echo', - 'input': { - 'output': 'This is a test.' - } - }, - 'email': { - 'action': 'std.email', - 'input': { - 'from_addr': 'mistral@example.com', - 'to_addrs': ['admin@example.com'], - 'subject': 'Test', - 'body': 'This is a test.', - 'smtp_server': 'localhost', - 'smtp_password': 'password' - } - } - } - - def _parse_dsl_spec(self, dsl_file=None, add_tasks=False, - changes=None, expect_error=False): - if dsl_file and add_tasks: - raise Exception('The add_tasks option is not a valid ' - 'combination with the dsl_file option.') - - if dsl_file: - dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file) - - if changes: - dsl_dict = yaml.safe_load(dsl_yaml) - utils.merge_dicts(dsl_dict, changes) - dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False) - else: - dsl_dict = copy.deepcopy(self._dsl_blank) - - if add_tasks: - dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks) - - if changes: - utils.merge_dicts(dsl_dict, changes) - - dsl_yaml = yaml.safe_dump(dsl_dict, default_flow_style=False) - - if not expect_error: - return self._spec_parser(dsl_yaml) - else: - return self.assertRaises( - exc.DSLParsingException, - self._spec_parser, - dsl_yaml - ) - - -class 
WorkbookSpecValidationTestCase(WorkflowSpecValidationTestCase): - - def __init__(self, *args, **kwargs): - super(WorkbookSpecValidationTestCase, self).__init__(*args, **kwargs) - - self._spec_parser = spec_parser.get_workbook_spec_from_yaml - - self._dsl_blank = { - 'version': '2.0', - 'name': 'test_wb' - } - - def _parse_dsl_spec(self, dsl_file=None, - changes=None, expect_error=False): - return super(WorkbookSpecValidationTestCase, self)._parse_dsl_spec( - dsl_file=dsl_file, add_tasks=False, changes=changes, - expect_error=expect_error) diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/v2/test_actions.py mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_actions.py --- mistral-4.0.0/mistral/tests/unit/workbook/v2/test_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_actions.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# Copyright 2016 - Brocade Communications Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy - -from mistral.tests.unit.workbook.v2 import base -from mistral import utils - - -class ActionSpecValidation(base.WorkbookSpecValidationTestCase): - - def test_base_required(self): - actions = {'actions': {'a1': {}}} - - exception = self._parse_dsl_spec(changes=actions, - expect_error=True) - - self.assertIn("'base' is a required property", exception.message) - - def test_base(self): - tests = [ - ({'actions': {'a1': {'base': ''}}}, True), - ({'actions': {'a1': {'base': None}}}, True), - ({'actions': {'a1': {'base': 12345}}}, True), - ({'actions': {'a1': {'base': 'std.noop'}}}, False), - ({'actions': {'a1': {'base': 'std.echo output="foo"'}}}, False), - ({'actions': {'a1': {'base': 'std.echo output="<% $.x %>"'}}}, - False), - ({'actions': {'a1': {'base': 'std.echo output="<% * %>"'}}}, True), - ({'actions': {'a1': {'base': 'std.echo output="{{ _.x }}"'}}}, - False), - ({'actions': {'a1': {'base': 'std.echo output="{{ * }}"'}}}, True) - ] - - for actions, expect_error in tests: - self._parse_dsl_spec(changes=actions, - expect_error=expect_error) - - def test_base_input(self): - tests = [ - ({'base-input': {}}, True), - ({'base-input': None}, True), - ({'base-input': {'k1': 'v1', 'k2': '<% $.v2 %>'}}, False), - ({'base-input': {'k1': 'v1', 'k2': '<% * %>'}}, True), - ({'base-input': {'k1': 'v1', 'k2': '{{ _.v2 }}'}}, False), - ({'base-input': {'k1': 'v1', 'k2': '{{ * }}'}}, True) - ] - - actions = { - 'a1': { - 'base': 'foobar' - } - } - - for base_inputs, expect_error in tests: - overlay = {'actions': copy.deepcopy(actions)} - utils.merge_dicts(overlay['actions']['a1'], base_inputs) - self._parse_dsl_spec(changes=overlay, - expect_error=expect_error) - - def test_input(self): - tests = [ - ({'input': ''}, True), - ({'input': []}, True), - ({'input': ['']}, True), - ({'input': None}, True), - ({'input': ['k1', 'k2']}, False), - ({'input': ['k1', 12345]}, True), - ({'input': ['k1', {'k2': 2}]}, False), - ({'input': [{'k1': 1}, {'k2': 2}]}, False), - 
({'input': [{'k1': None}]}, False), - ({'input': [{'k1': 1}, {'k1': 1}]}, True), - ({'input': [{'k1': 1, 'k2': 2}]}, True) - ] - - actions = { - 'a1': { - 'base': 'foobar' - } - } - - for inputs, expect_error in tests: - overlay = {'actions': copy.deepcopy(actions)} - utils.merge_dicts(overlay['actions']['a1'], inputs) - self._parse_dsl_spec(changes=overlay, - expect_error=expect_error) - - def test_output(self): - tests = [ - ({'output': None}, False), - ({'output': False}, False), - ({'output': 12345}, False), - ({'output': 0.12345}, False), - ({'output': 'foobar'}, False), - ({'output': '<% $.x %>'}, False), - ({'output': '<% * %>'}, True), - ({'output': '{{ _.x }}'}, False), - ({'output': '{{ * }}'}, True), - ({'output': ['v1']}, False), - ({'output': {'k1': 'v1'}}, False) - ] - - actions = { - 'a1': { - 'base': 'foobar' - } - } - - for outputs, expect_error in tests: - overlay = {'actions': copy.deepcopy(actions)} - utils.merge_dicts(overlay['actions']['a1'], outputs) - self._parse_dsl_spec(changes=overlay, - expect_error=expect_error) diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/v2/test_tasks.py mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_tasks.py --- mistral-4.0.0/mistral/tests/unit/workbook/v2/test_tasks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_tasks.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,390 +0,0 @@ -# Copyright 2015 - Huawei Technologies Co. Ltd -# Copyright 2015 - StackStorm, Inc. -# Copyright 2016 - Brocade Communications Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from mistral.tests.unit.workbook.v2 import base as v2_base -from mistral import utils -from mistral.workbook.v2 import workflows - - -class TaskSpecValidation(v2_base.WorkflowSpecValidationTestCase): - def test_type_injection(self): - tests = [ - ({'type': 'direct'}, False), - ({'type': 'reverse'}, False) - ] - - for wf_type, expect_error in tests: - overlay = {'test': wf_type} - wfs_spec = self._parse_dsl_spec(add_tasks=True, - changes=overlay, - expect_error=expect_error) - - if not expect_error: - self.assertIsInstance(wfs_spec, workflows.WorkflowListSpec) - self.assertEqual(1, len(wfs_spec.get_workflows())) - - wf_spec = wfs_spec.get_workflows()[0] - - self.assertEqual(wf_type['type'], wf_spec.get_type()) - - for task in wf_spec.get_tasks(): - self.assertEqual(task._data['type'], wf_type['type']) - - def test_action_or_workflow(self): - tests = [ - ({'action': 'std.noop'}, False), - ({'action': 'std.http url="openstack.org"'}, False), - ({'action': 'std.http url="openstack.org" timeout=10'}, False), - ({'action': 'std.http url=<% $.url %>'}, False), - ({'action': 'std.http url=<% $.url %> timeout=<% $.t %>'}, False), - ({'action': 'std.http url=<% * %>'}, True), - ({'action': 'std.http url={{ _.url }}'}, False), - ({'action': 'std.http url={{ _.url }} timeout={{ _.t }}'}, False), - ({'action': 'std.http url={{ $ }}'}, True), - ({'workflow': 'test.wf'}, False), - ({'workflow': 'test.wf k1="v1"'}, False), - ({'workflow': 'test.wf k1="v1" k2="v2"'}, False), - ({'workflow': 'test.wf k1=<% $.v1 %>'}, False), - ({'workflow': 'test.wf k1=<% $.v1 %> k2=<% $.v2 %>'}, False), - ({'workflow': 'test.wf k1=<% * %>'}, True), - ({'workflow': 'test.wf k1={{ _.v1 }}'}, False), - ({'workflow': 'test.wf k1={{ _.v1 }} k2={{ _.v2 }}'}, False), - ({'workflow': 'test.wf k1={{ $ }}'}, True), - ({'action': 'std.noop', 'workflow': 'test.wf'}, True), - ({'action': 123}, True), - 
({'workflow': 123}, True), - ({'action': ''}, True), - ({'workflow': ''}, True), - ({'action': None}, True), - ({'workflow': None}, True) - ] - - for task, expect_error in tests: - overlay = {'test': {'tasks': {'task1': task}}} - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=expect_error - ) - - def test_inputs(self): - tests = [ - ({'input': ''}, True), - ({'input': {}}, True), - ({'input': None}, True), - ({'input': {'k1': 'v1'}}, False), - ({'input': {'k1': '<% $.v1 %>'}}, False), - ({'input': {'k1': '<% 1 + 2 %>'}}, False), - ({'input': {'k1': '<% * %>'}}, True), - ({'input': {'k1': '{{ _.v1 }}'}}, False), - ({'input': {'k1': '{{ 1 + 2 }}'}}, False), - ({'input': {'k1': '{{ * }}'}}, True) - ] - - for task_input, expect_error in tests: - overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}} - - utils.merge_dicts(overlay['test']['tasks']['task1'], task_input) - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=expect_error - ) - - def test_with_items(self): - tests = [ - ({'with-items': ''}, True), - ({'with-items': []}, True), - ({'with-items': ['']}, True), - ({'with-items': None}, True), - ({'with-items': 12345}, True), - ({'with-items': 'x in y'}, True), - ({'with-items': '<% $.y %>'}, True), - ({'with-items': 'x in <% $.y %>'}, False), - ({'with-items': ['x in [1, 2, 3]']}, False), - ({'with-items': ['x in <% $.y %>']}, False), - ({'with-items': ['x in <% $.y %>', 'i in [1, 2, 3]']}, False), - ({'with-items': ['x in <% $.y %>', 'i in <% $.j %>']}, False), - ({'with-items': ['x in <% * %>']}, True), - ({'with-items': ['x in <% $.y %>', 'i in <% * %>']}, True), - ({'with-items': '{{ _.y }}'}, True), - ({'with-items': 'x in {{ _.y }}'}, False), - ({'with-items': ['x in [1, 2, 3]']}, False), - ({'with-items': ['x in {{ _.y }}']}, False), - ({'with-items': ['x in {{ _.y }}', 'i in [1, 2, 3]']}, False), - ({'with-items': ['x in {{ _.y }}', 'i in {{ _.j }}']}, False), - ({'with-items': ['x in {{ 
* }}']}, True), - ({'with-items': ['x in {{ _.y }}', 'i in {{ * }}']}, True) - ] - - for with_item, expect_error in tests: - overlay = {'test': {'tasks': {'get': with_item}}} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_publish(self): - tests = [ - ({'publish': ''}, True), - ({'publish': {}}, True), - ({'publish': None}, True), - ({'publish': {'k1': 'v1'}}, False), - ({'publish': {'k1': '<% $.v1 %>'}}, False), - ({'publish': {'k1': '<% 1 + 2 %>'}}, False), - ({'publish': {'k1': '<% * %>'}}, True), - ({'publish': {'k1': '{{ _.v1 }}'}}, False), - ({'publish': {'k1': '{{ 1 + 2 }}'}}, False), - ({'publish': {'k1': '{{ * }}'}}, True) - ] - - for output, expect_error in tests: - overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}} - - utils.merge_dicts(overlay['test']['tasks']['task1'], output) - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=expect_error - ) - - def test_policies(self): - tests = [ - ({'retry': {'count': 3, 'delay': 1}}, False), - ({'retry': { - 'continue-on': '<% 1 %>', 'delay': 2, - 'break-on': '<% 1 %>', 'count': 2 - }}, False), - ({'retry': { - 'count': 3, 'delay': 1, 'continue-on': '<% 1 %>' - }}, False), - ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False), - ({'retry': {'count': '<% * %>', 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False), - ({'retry': {'count': 3, 'delay': '<% * %>'}}, True), - ({'retry': { - 'continue-on': '{{ 1 }}', 'delay': 2, - 'break-on': '{{ 1 }}', 'count': 2 - }}, False), - ({'retry': { - 'count': 3, 'delay': 1, 'continue-on': '{{ 1 }}' - }}, False), - ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False), - ({'retry': {'count': '{{ * }}', 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False), - ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True), - ({'retry': {'count': -3, 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': -1}}, True), - ({'retry': {'count': '3', 
'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '1'}}, True), - ({'retry': 'count=3 delay=1 break-on=<% false %>'}, False), - ({'retry': 'count=3 delay=1 break-on={{ false }}'}, False), - ({'retry': 'count=3 delay=1'}, False), - ({'retry': 'coun=3 delay=1'}, True), - ({'retry': None}, True), - ({'wait-before': 1}, False), - ({'wait-before': '<% 1 %>'}, False), - ({'wait-before': '<% * %>'}, True), - ({'wait-before': '{{ 1 }}'}, False), - ({'wait-before': '{{ * }}'}, True), - ({'wait-before': -1}, True), - ({'wait-before': 1.0}, True), - ({'wait-before': '1'}, True), - ({'wait-after': 1}, False), - ({'wait-after': '<% 1 %>'}, False), - ({'wait-after': '<% * %>'}, True), - ({'wait-after': '{{ 1 }}'}, False), - ({'wait-after': '{{ * }}'}, True), - ({'wait-after': -1}, True), - ({'wait-after': 1.0}, True), - ({'wait-after': '1'}, True), - ({'timeout': 300}, False), - ({'timeout': '<% 300 %>'}, False), - ({'timeout': '<% * %>'}, True), - ({'timeout': '{{ 300 }}'}, False), - ({'timeout': '{{ * }}'}, True), - ({'timeout': -300}, True), - ({'timeout': 300.0}, True), - ({'timeout': '300'}, True), - ({'pause-before': False}, False), - ({'pause-before': '<% False %>'}, False), - ({'pause-before': '<% * %>'}, True), - ({'pause-before': '{{ False }}'}, False), - ({'pause-before': '{{ * }}'}, True), - ({'pause-before': 'False'}, True), - ({'concurrency': 10}, False), - ({'concurrency': '<% 10 %>'}, False), - ({'concurrency': '<% * %>'}, True), - ({'concurrency': '{{ 10 }}'}, False), - ({'concurrency': '{{ * }}'}, True), - ({'concurrency': -10}, True), - ({'concurrency': 10.0}, True), - ({'concurrency': '10'}, True) - ] - - for policy, expect_error in tests: - overlay = {'test': {'tasks': {'get': policy}}} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_direct_transition(self): - tests = [ - ({'on-success': ['email']}, False), - ({'on-success': [{'email': '<% 1 %>'}]}, False), - ({'on-success': [{'email': '<% 1 
%>'}, 'echo']}, False), - ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False), - ({'on-success': [{'email': '<% * %>'}]}, True), - ({'on-success': [{'email': '{{ 1 }}'}]}, False), - ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-success': [{'email': '{{ * }}'}]}, True), - ({'on-success': 'email'}, False), - ({'on-success': None}, True), - ({'on-success': ['']}, True), - ({'on-success': []}, True), - ({'on-success': ['email', 'email']}, True), - ({'on-success': ['email', 12345]}, True), - ({'on-error': ['email']}, False), - ({'on-error': [{'email': '<% 1 %>'}]}, False), - ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False), - ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False), - ({'on-error': [{'email': '<% * %>'}]}, True), - ({'on-error': [{'email': '{{ 1 }}'}]}, False), - ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-error': [{'email': '{{ * }}'}]}, True), - ({'on-error': 'email'}, False), - ({'on-error': None}, True), - ({'on-error': ['']}, True), - ({'on-error': []}, True), - ({'on-error': ['email', 'email']}, True), - ({'on-error': ['email', 12345]}, True), - ({'on-complete': ['email']}, False), - ({'on-complete': [{'email': '<% 1 %>'}]}, False), - ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False), - ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, False), - ({'on-complete': [{'email': '<% * %>'}]}, True), - ({'on-complete': [{'email': '{{ 1 }}'}]}, False), - ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-complete': [{'email': '{{ * }}'}]}, True), - ({'on-complete': 'email'}, False), - ({'on-complete': None}, True), - ({'on-complete': ['']}, True), - ({'on-complete': []}, True), - ({'on-complete': ['email', 'email']}, True), - ({'on-complete': ['email', 12345]}, True) - ] - - for transition, 
expect_error in tests: - overlay = {'test': {'tasks': {}}} - - utils.merge_dicts(overlay['test']['tasks'], {'get': transition}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_join(self): - tests = [ - ({'join': ''}, True), - ({'join': None}, True), - ({'join': 'all'}, False), - ({'join': 'one'}, False), - ({'join': 0}, False), - ({'join': 2}, False), - ({'join': 3}, True), - ({'join': '3'}, True), - ({'join': -3}, True) - ] - - on_success = {'on-success': ['email']} - - for join, expect_error in tests: - overlay = {'test': {'tasks': {}}} - - utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'email': join}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_requires(self): - tests = [ - ({'requires': ''}, True), - ({'requires': []}, True), - ({'requires': ['']}, True), - ({'requires': None}, True), - ({'requires': 12345}, True), - ({'requires': ['echo']}, False), - ({'requires': ['echo', 'get']}, False), - ({'requires': 'echo'}, False), - ] - - for require, expect_error in tests: - overlay = {'test': {'tasks': {}}} - - utils.merge_dicts(overlay['test'], {'type': 'reverse'}) - utils.merge_dicts(overlay['test']['tasks'], {'email': require}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_keep_result(self): - tests = [ - ({'keep-result': ''}, True), - ({'keep-result': []}, True), - ({'keep-result': 'asd'}, True), - ({'keep-result': None}, True), - ({'keep-result': 12345}, True), - ({'keep-result': True}, False), - ({'keep-result': False}, False), - ({'keep-result': "<% 'a' in $.val %>"}, False), - ({'keep-result': '<% 1 + 2 %>'}, False), - ({'keep-result': '<% * %>'}, True), - ({'keep-result': "{{ 'a' in _.val }}"}, False), - ({'keep-result': '{{ 1 + 2 }}'}, False), 
- ({'keep-result': '{{ * }}'}, True) - ] - - for keep_result, expect_error in tests: - overlay = {'test': {'tasks': {}}} - - utils.merge_dicts(overlay['test']['tasks'], {'email': keep_result}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/v2/test_workbook.py mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_workbook.py --- mistral-4.0.0/mistral/tests/unit/workbook/v2/test_workbook.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_workbook.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,433 +0,0 @@ -# Copyright 2013 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import re - -import yaml - -from mistral import exceptions as exc -from mistral.tests.unit.workbook.v2 import base -from mistral.workbook.v2 import workbook - - -class WorkbookSpecValidation(base.WorkbookSpecValidationTestCase): - - def test_build_valid_workbook_spec(self): - wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml') - - # Workbook. 
- act_specs = wb_spec.get_actions() - wf_specs = wb_spec.get_workflows() - - self.assertEqual('2.0', wb_spec.get_version()) - self.assertEqual('my_workbook', wb_spec.get_name()) - self.assertEqual('This is a test workbook', wb_spec.get_description()) - self.assertListEqual(['test', 'v2'], wb_spec.get_tags()) - self.assertIsNotNone(act_specs) - self.assertIsNotNone(wf_specs) - - # Actions. - action_spec = act_specs.get('action1') - - self.assertIsNotNone(action_spec) - self.assertEqual('2.0', action_spec.get_version()) - self.assertEqual('action1', action_spec.get_name()) - self.assertEqual( - 'This is a test ad-hoc action', - action_spec.get_description() - ) - self.assertListEqual(['test', 'v2'], action_spec.get_tags()) - self.assertEqual('std.echo', action_spec.get_base()) - self.assertDictEqual( - {'output': 'Hello <% $.name %>!'}, - action_spec.get_base_input() - ) - self.assertDictEqual({}, action_spec.get_input()) - self.assertEqual('<% $ %>', action_spec.get_output()) - - # Workflows. - - self.assertEqual(2, len(wf_specs)) - - wf1_spec = wf_specs.get('wf1') - - self.assertEqual('2.0', wf1_spec.get_version()) - self.assertEqual('wf1', wf1_spec.get_name()) - self.assertEqual( - 'This is a test workflow', - wf1_spec.get_description() - ) - self.assertListEqual(['test', 'v2'], wf1_spec.get_tags()) - self.assertEqual('reverse', wf1_spec.get_type()) - self.assertEqual(2, len(wf1_spec.get_tasks())) - - # Tasks. 
- - task1_spec = wf1_spec.get_tasks().get('task1') - - self.assertIsNotNone(task1_spec) - self.assertEqual('2.0', task1_spec.get_version()) - self.assertEqual('task1', task1_spec.get_name()) - self.assertEqual('This is a test task', task1_spec.get_description()) - self.assertEqual('action1', task1_spec.get_action_name()) - self.assertEqual({'name': '<% $.name %>'}, task1_spec.get_input()) - - policies = task1_spec.get_policies() - - self.assertEqual(2, policies.get_wait_before()) - self.assertEqual(5, policies.get_wait_after()) - self.assertEqual(3, policies.get_concurrency()) - - retry_spec = policies.get_retry() - - self.assertEqual(10, retry_spec.get_count()) - self.assertEqual(30, retry_spec.get_delay()) - self.assertEqual('<% $.my_val = 10 %>', retry_spec.get_break_on()) - - task2_spec = wf1_spec.get_tasks().get('task2') - - self.assertIsNotNone(task2_spec) - self.assertEqual('2.0', task2_spec.get_version()) - self.assertEqual('task2', task2_spec.get_name()) - self.assertEqual('std.echo', task2_spec.get_action_name()) - self.assertIsNone(task2_spec.get_workflow_name()) - self.assertEqual( - {'output': 'Thanks <% $.name %>!'}, - task2_spec.get_input() - ) - - wf2_spec = wf_specs.get('wf2') - - self.assertEqual('2.0', wf2_spec.get_version()) - self.assertEqual('wf2', wf2_spec.get_name()) - self.assertListEqual(['test', 'v2'], wf2_spec.get_tags()) - self.assertEqual('direct', wf2_spec.get_type()) - self.assertEqual(11, len(wf2_spec.get_tasks())) - - task_defaults_spec = wf2_spec.get_task_defaults() - - self.assertListEqual( - [('fail', '<% $.my_val = 0 %>', {})], - task_defaults_spec.get_on_error() - ) - self.assertListEqual( - [('pause', '', {})], - task_defaults_spec.get_on_success() - ) - self.assertListEqual( - [('succeed', '', {})], - task_defaults_spec.get_on_complete() - ) - - task3_spec = wf2_spec.get_tasks().get('task3') - - self.assertIsNotNone(task3_spec) - self.assertEqual('2.0', task3_spec.get_version()) - self.assertEqual('task3', 
task3_spec.get_name()) - self.assertIsNone(task3_spec.get_action_name()) - self.assertEqual('wf1', task3_spec.get_workflow_name()) - self.assertEqual( - { - 'name': 'John Doe', - 'age': 32, - 'param1': None, - 'param2': False - }, - task3_spec.get_input() - ) - self.assertListEqual( - [('task4', '<% $.my_val = 1 %>', {})], - task3_spec.get_on_error() - ) - self.assertListEqual( - [('task5', '<% $.my_val = 2 %>', {})], - task3_spec.get_on_success() - ) - self.assertListEqual( - [('task6', '<% $.my_val = 3 %>', {})], - task3_spec.get_on_complete() - ) - - task7_spec = wf2_spec.get_tasks().get('task7') - - self.assertEqual( - { - 'is_true': True, - 'object_list': [1, None, 'str'], - 'is_string': '50' - }, - task7_spec.get_input() - ) - - self.assertEqual( - {'vm_info': '<% $.vms %>'}, - task7_spec.get_with_items() - ) - - task8_spec = wf2_spec.get_tasks().get('task8') - - self.assertEqual( - {"itemX": '<% $.arrayI %>', "itemY": '<% $.arrayJ %>'}, - task8_spec.get_with_items() - ) - - self.assertEqual( - { - 'expr_list': ['<% $.v %>', '<% $.k %>'], - 'expr': '<% $.value %>', - }, - task8_spec.get_input() - ) - - self.assertEqual('nova', task8_spec.get_target()) - - task9_spec = wf2_spec.get_tasks().get('task9') - - self.assertEqual('all', task9_spec.get_join()) - - task10_spec = wf2_spec.get_tasks().get('task10') - - self.assertEqual(2, task10_spec.get_join()) - - task11_spec = wf2_spec.get_tasks().get('task11') - - self.assertEqual('one', task11_spec.get_join()) - - task12_spec = wf2_spec.get_tasks().get('task12') - - self.assertDictEqual( - {'url': 'http://site.com?q=<% $.query %>', 'params': ''}, - task12_spec.get_input() - ) - - task13_spec = wf2_spec.get_tasks().get('task13') - - self.assertEqual('std.noop', task13_spec.get_action_name()) - self.assertEqual('No-op task', task13_spec.get_description()) - - def test_adhoc_action_with_base_in_one_string(self): - wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml') - - act_specs = wb_spec.get_actions() - 
action_spec = act_specs.get("action2") - - self.assertEqual('std.echo', action_spec.get_base()) - self.assertEqual({'output': 'Echo output'}, - action_spec.get_base_input()) - - def test_spec_to_dict(self): - wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml') - - d = wb_spec.to_dict() - - self.assertEqual('2.0', d['version']) - self.assertEqual('2.0', d['workflows']['version']) - self.assertEqual('2.0', d['workflows']['wf1']['version']) - - def test_version_required(self): - dsl_dict = copy.deepcopy(self._dsl_blank) - dsl_dict.pop('version', None) - - # TODO(m4dcoder): Check required property error when v1 is deprecated. - # The version property is not required for v1 workbook whereas it is - # a required property in v2. For backward compatibility, if no version - # is not provided, the workbook spec parser defaults to v1 and the - # required property exception is not triggered. However, a different - # spec validation error returns due to drastically different schema - # between workbook versions. 
- self.assertRaises(exc.DSLParsingException, - self._spec_parser, - yaml.safe_dump(dsl_dict)) - - def test_version(self): - tests = [ - ({'version': None}, True), - ({'version': ''}, True), - ({'version': '1.0'}, True), - ({'version': '2.0'}, False), - ({'version': 2.0}, False), - ({'version': 2}, False) - ] - - for version, expect_error in tests: - self._parse_dsl_spec(changes=version, - expect_error=expect_error) - - def test_name_required(self): - dsl_dict = copy.deepcopy(self._dsl_blank) - dsl_dict.pop('name', None) - - exception = self.assertRaises(exc.DSLParsingException, - self._spec_parser, - yaml.safe_dump(dsl_dict)) - - self.assertIn("'name' is a required property", exception.message) - - def test_name(self): - tests = [ - ({'name': ''}, True), - ({'name': None}, True), - ({'name': 12345}, True), - ({'name': 'foobar'}, False) - ] - - for name, expect_error in tests: - self._parse_dsl_spec(changes=name, - expect_error=expect_error) - - def test_description(self): - tests = [ - ({'description': ''}, True), - ({'description': None}, True), - ({'description': 12345}, True), - ({'description': 'This is a test workflow.'}, False) - ] - - for description, expect_error in tests: - self._parse_dsl_spec(changes=description, - expect_error=expect_error) - - def test_tags(self): - tests = [ - ({'tags': ''}, True), - ({'tags': ['']}, True), - ({'tags': None}, True), - ({'tags': 12345}, True), - ({'tags': ['foo', 'bar']}, False), - ({'tags': ['foobar', 'foobar']}, True) - ] - - for tags, expect_error in tests: - self._parse_dsl_spec(changes=tags, - expect_error=expect_error) - - def test_actions(self): - actions = { - 'version': '2.0', - 'noop': { - 'base': 'std.noop' - }, - 'echo': { - 'base': 'std.echo' - } - } - - tests = [ - ({'actions': []}, True), - ({'actions': {}}, True), - ({'actions': None}, True), - ({'actions': {'version': None}}, True), - ({'actions': {'version': ''}}, True), - ({'actions': {'version': '1.0'}}, True), - ({'actions': {'version': '2.0'}}, 
False), - ({'actions': {'version': 2.0}}, False), - ({'actions': {'version': 2}}, False), - ({'actions': {'noop': actions['noop']}}, False), - ({'actions': {'version': '2.0', 'noop': 'std.noop'}}, True), - ({'actions': actions}, False) - ] - - for adhoc_actions, expect_error in tests: - self._parse_dsl_spec(changes=adhoc_actions, - expect_error=expect_error) - - def test_workflows(self): - workflows = { - 'version': '2.0', - 'wf1': { - 'tasks': { - 'noop': { - 'action': 'std.noop' - } - } - }, - 'wf2': { - 'tasks': { - 'echo': { - 'action': 'std.echo output="This is a test."' - } - } - } - } - - tests = [ - ({'workflows': []}, True), - ({'workflows': {}}, True), - ({'workflows': None}, True), - ({'workflows': {'version': None}}, True), - ({'workflows': {'version': ''}}, True), - ({'workflows': {'version': '1.0'}}, True), - ({'workflows': {'version': '2.0'}}, False), - ({'workflows': {'version': 2.0}}, False), - ({'workflows': {'version': 2}}, False), - ({'workflows': {'wf1': workflows['wf1']}}, False), - ({'workflows': {'version': '2.0', 'wf1': 'wf1'}}, True), - ({'workflows': workflows}, False) - ] - - for workflows, expect_error in tests: - self._parse_dsl_spec(changes=workflows, - expect_error=expect_error) - - def test_workflow_name_validation(self): - wb_spec = self._parse_dsl_spec(dsl_file='workbook_schema_test.yaml') - - d = wb_spec.to_dict() - - self.assertEqual('2.0', d['version']) - self.assertEqual('2.0', d['workflows']['version']) - - workflow_names = ['workflowversion', 'versionworkflow', - 'workflowversionworkflow', 'version_workflow'] - - action_names = ['actionversion', 'versionaction', - 'actionversionaction'] - - for name in workflow_names: - self.assertEqual('2.0', d['workflows'][name]['version']) - self.assertEqual(name, d['workflows'][name]['name']) - - for name in action_names: - self.assertEqual('2.0', d['actions'][name]['version']) - self.assertEqual(name, d['actions'][name]['name']) - - def test_name_regex(self): - - # We want to match a 
string containing version at any point. - valid_names = ( - "workflowversion", - "versionworkflow", - "workflowversionworkflow", - "version_workflow", - "version-workflow", - ) - - for valid in valid_names: - result = re.match(workbook.NON_VERSION_WORD_REGEX, valid) - self.assertNotEqual(None, result, - "Expected match for: {}".format(valid)) - - # ... except, we don't want to match a string that isn't just one word - # or is exactly "version" - invalid_names = ( - "version", - "my workflow", - ) - - for invalid in invalid_names: - result = re.match(workbook.NON_VERSION_WORD_REGEX, invalid) - self.assertEqual(None, result, - "Didn't expected match for: {}".format(invalid)) diff -Nru mistral-4.0.0/mistral/tests/unit/workbook/v2/test_workflows.py mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_workflows.py --- mistral-4.0.0/mistral/tests/unit/workbook/v2/test_workflows.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workbook/v2/test_workflows.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,427 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# Copyright 2016 - Brocade Communications Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy - -import yaml - -from mistral import exceptions as exc -from mistral.tests.unit.workbook.v2 import base -from mistral import utils - - -class WorkflowSpecValidation(base.WorkflowSpecValidationTestCase): - def test_workflow_types(self): - tests = [ - ({'type': 'direct'}, False), - ({'type': 'reverse'}, False), - ({'type': 'circular'}, True), - ({'type': None}, True) - ] - - for wf_type, expect_error in tests: - overlay = {'test': wf_type} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_direct_workflow(self): - overlay = {'test': {'type': 'direct', 'tasks': {}}} - join = {'join': 'all'} - on_success = {'on-success': ['email']} - - utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'email': join}) - - wfs_spec = self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=False - ) - - self.assertEqual(1, len(wfs_spec.get_workflows())) - self.assertEqual('test', wfs_spec.get_workflows()[0].get_name()) - self.assertEqual('direct', wfs_spec.get_workflows()[0].get_type()) - - def test_direct_workflow_invalid_task(self): - overlay = { - 'test': { - 'type': 'direct', - 'tasks': {} - } - } - requires = {'requires': ['echo', 'get']} - - utils.merge_dicts(overlay['test']['tasks'], {'email': requires}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=True - ) - - def test_direct_workflow_no_start_tasks(self): - overlay = { - 'test': { - 'type': 'direct', - 'tasks': { - 'task1': {'on-complete': 'task2'}, - 'task2': {'on-complete': 'task1'} - } - } - } - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=True - ) - - def test_direct_workflow_invalid_join(self): - tests = [ - ({'task3': {'join': 2}}, False), - ({'task3': {'join': 5}}, True), - ({'task3': {'join': 1}}, False), - ({'task3': {'join': 'one'}}, 
False), - ({'task3': {'join': 'all'}}, False), - ({'task4': {'join': 'all'}}, True), - ({'task4': {'join': 1}}, True), - ({'task4': {'join': 'one'}}, True) - ] - - for test in tests: - overlay = { - 'test': { - 'type': 'direct', - 'tasks': { - 'task1': {'on-complete': 'task3'}, - 'task2': {'on-complete': 'task3'} - } - } - } - - utils.merge_dicts(overlay['test']['tasks'], test[0]) - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=test[1] - ) - - def test_reverse_workflow(self): - overlay = {'test': {'type': 'reverse', 'tasks': {}}} - require = {'requires': ['echo', 'get']} - - utils.merge_dicts(overlay['test']['tasks'], {'email': require}) - - wfs_spec = self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=False - ) - - self.assertEqual(1, len(wfs_spec.get_workflows())) - self.assertEqual('test', wfs_spec.get_workflows()[0].get_name()) - self.assertEqual('reverse', wfs_spec.get_workflows()[0].get_type()) - - def test_reverse_workflow_invalid_task(self): - overlay = {'test': {'type': 'reverse', 'tasks': {}}} - join = {'join': 'all'} - on_success = {'on-success': ['email']} - - utils.merge_dicts(overlay['test']['tasks'], {'get': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success}) - utils.merge_dicts(overlay['test']['tasks'], {'email': join}) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=True - ) - - def test_version_required(self): - dsl_dict = copy.deepcopy(self._dsl_blank) - dsl_dict.pop('version', None) - - exception = self.assertRaises( - exc.DSLParsingException, - self._spec_parser, - yaml.safe_dump(dsl_dict) - ) - - self.assertIn("'version' is a required property", exception.message) - - def test_version(self): - tests = [ - ({'version': None}, True), - ({'version': ''}, True), - ({'version': '2.0'}, False), - ({'version': 2.0}, False), - ({'version': 2}, False) - ] - - for version, expect_error in tests: - self._parse_dsl_spec( - add_tasks=True, - 
changes=version, - expect_error=expect_error - ) - - def test_inputs(self): - tests = [ - ({'input': ['var1', 'var2']}, False), - ({'input': ['var1', 'var1']}, True), - ({'input': [12345]}, True), - ({'input': [None]}, True), - ({'input': ['']}, True), - ({'input': None}, True), - ({'input': []}, True), - ({'input': ['var1', {'var2': 2}]}, False), - ({'input': [{'var1': 1}, {'var2': 2}]}, False), - ({'input': [{'var1': None}]}, False), - ({'input': [{'var1': 1}, {'var1': 1}]}, True), - ({'input': [{'var1': 1, 'var2': 2}]}, True) - ] - - for wf_input, expect_error in tests: - overlay = {'test': wf_input} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_outputs(self): - tests = [ - ({'output': {'k1': 'a', 'k2': 1, 'k3': True, 'k4': None}}, False), - ({'output': {'k1': '<% $.v1 %>'}}, False), - ({'output': {'k1': '<% 1 + 2 %>'}}, False), - ({'output': {'k1': '<% * %>'}}, True), - ({'output': []}, True), - ({'output': 'whatever'}, True), - ({'output': None}, True), - ({'output': {}}, True) - ] - - for wf_output, expect_error in tests: - overlay = {'test': wf_output} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_vars(self): - tests = [ - ({'vars': {'v1': 'a', 'v2': 1, 'v3': True, 'v4': None}}, False), - ({'vars': {'v1': '<% $.input_var1 %>'}}, False), - ({'vars': {'v1': '<% 1 + 2 %>'}}, False), - ({'vars': {'v1': '<% * %>'}}, True), - ({'vars': {'v1': '{{ _.input_var1 }}'}}, False), - ({'vars': {'v1': '{{ 1 + 2 }}'}}, False), - ({'vars': {'v1': '{{ * }}'}}, True), - ({'vars': []}, True), - ({'vars': 'whatever'}, True), - ({'vars': None}, True), - ({'vars': {}}, True) - ] - - for wf_vars, expect_error in tests: - overlay = {'test': wf_vars} - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_tasks_required(self): - exception = self._parse_dsl_spec( - add_tasks=False, - expect_error=True - ) - - 
self.assertIn("'tasks' is a required property", exception.message) - - def test_tasks(self): - tests = [ - ({'tasks': {}}, True), - ({'tasks': None}, True), - ({'tasks': self._dsl_tasks}, False) - ] - - for wf_tasks, expect_error in tests: - overlay = {'test': wf_tasks} - - self._parse_dsl_spec( - add_tasks=False, - changes=overlay, - expect_error=expect_error - ) - - def test_task_defaults(self): - tests = [ - ({'on-success': ['email']}, False), - ({'on-success': [{'email': '<% 1 %>'}]}, False), - ({'on-success': [{'email': '<% 1 %>'}, 'echo']}, False), - ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False), - ({'on-success': [{'email': '<% * %>'}]}, True), - ({'on-success': [{'email': '{{ 1 }}'}]}, False), - ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-success': [{'email': '{{ * }}'}]}, True), - ({'on-success': 'email'}, False), - ({'on-success': None}, True), - ({'on-success': ['']}, True), - ({'on-success': []}, True), - ({'on-success': ['email', 'email']}, True), - ({'on-success': ['email', 12345]}, True), - ({'on-error': ['email']}, False), - ({'on-error': [{'email': '<% 1 %>'}]}, False), - ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False), - ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False), - ({'on-error': [{'email': '<% * %>'}]}, True), - ({'on-error': [{'email': '{{ 1 }}'}]}, False), - ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-error': [{'email': '{{ * }}'}]}, True), - ({'on-error': 'email'}, False), - ({'on-error': None}, True), - ({'on-error': ['']}, True), - ({'on-error': []}, True), - ({'on-error': ['email', 'email']}, True), - ({'on-error': ['email', 12345]}, True), - ({'on-complete': ['email']}, False), - ({'on-complete': [{'email': '<% 1 %>'}]}, False), - ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False), - ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, 
False), - ({'on-complete': [{'email': '<% * %>'}]}, True), - ({'on-complete': [{'email': '{{ 1 }}'}]}, False), - ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False), - ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False), - ({'on-complete': [{'email': '{{ * }}'}]}, True), - ({'on-complete': 'email'}, False), - ({'on-complete': None}, True), - ({'on-complete': ['']}, True), - ({'on-complete': []}, True), - ({'on-complete': ['email', 'email']}, True), - ({'on-complete': ['email', 12345]}, True), - ({'requires': ''}, True), - ({'requires': []}, True), - ({'requires': ['']}, True), - ({'requires': None}, True), - ({'requires': 12345}, True), - ({'requires': ['echo']}, False), - ({'requires': ['echo', 'get']}, False), - ({'requires': 'echo'}, False), - ({'retry': {'count': 3, 'delay': 1}}, False), - ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False), - ({'retry': {'count': '<% * %>', 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False), - ({'retry': {'count': 3, 'delay': '<% * %>'}}, True), - ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False), - ({'retry': {'count': '{{ * }}', 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False), - ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True), - ({'retry': {'count': -3, 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': -1}}, True), - ({'retry': {'count': '3', 'delay': 1}}, True), - ({'retry': {'count': 3, 'delay': '1'}}, True), - ({'retry': None}, True), - ({'wait-before': 1}, False), - ({'wait-before': '<% 1 %>'}, False), - ({'wait-before': '<% * %>'}, True), - ({'wait-before': '{{ 1 }}'}, False), - ({'wait-before': '{{ * }}'}, True), - ({'wait-before': -1}, True), - ({'wait-before': 1.0}, True), - ({'wait-before': '1'}, True), - ({'wait-after': 1}, False), - ({'wait-after': '<% 1 %>'}, False), - ({'wait-after': '<% * %>'}, True), - ({'wait-after': '{{ 1 }}'}, False), - ({'wait-after': '{{ * }}'}, True), - ({'wait-after': -1}, True), - ({'wait-after': 1.0}, 
True), - ({'wait-after': '1'}, True), - ({'timeout': 300}, False), - ({'timeout': '<% 300 %>'}, False), - ({'timeout': '<% * %>'}, True), - ({'timeout': '{{ 300 }}'}, False), - ({'timeout': '{{ * }}'}, True), - ({'timeout': -300}, True), - ({'timeout': 300.0}, True), - ({'timeout': '300'}, True), - ({'pause-before': False}, False), - ({'pause-before': '<% False %>'}, False), - ({'pause-before': '<% * %>'}, True), - ({'pause-before': '{{ False }}'}, False), - ({'pause-before': '{{ * }}'}, True), - ({'pause-before': 'False'}, True), - ({'concurrency': 10}, False), - ({'concurrency': '<% 10 %>'}, False), - ({'concurrency': '<% * %>'}, True), - ({'concurrency': '{{ 10 }}'}, False), - ({'concurrency': '{{ * }}'}, True), - ({'concurrency': -10}, True), - ({'concurrency': 10.0}, True), - ({'concurrency': '10'}, True) - ] - - for default, expect_error in tests: - overlay = {'test': {'task-defaults': {}}} - - utils.merge_dicts(overlay['test']['task-defaults'], default) - - self._parse_dsl_spec( - add_tasks=True, - changes=overlay, - expect_error=expect_error - ) - - def test_invalid_item(self): - overlay = {'name': 'invalid'} - - exception = self._parse_dsl_spec(changes=overlay, expect_error=True) - - self.assertIn("Invalid DSL", exception.message) - - def test_invalid_name(self): - invalid_wf = { - 'version': '2.0', - 'b98180ba-48a0-4e26-ab2e-50dc224f6fd1': { - 'type': 'direct', - 'tasks': {'t1': {'action': 'std.noop'}} - } - } - - dsl_yaml = yaml.safe_dump(invalid_wf, default_flow_style=False) - - exception = self.assertRaises( - exc.InvalidModelException, - self._spec_parser, - dsl_yaml - ) - - self.assertIn( - "Workflow name cannot be in the format of UUID", - exception.message - ) diff -Nru mistral-4.0.0/mistral/tests/unit/workflow/test_direct_workflow.py mistral-5.0.0~b2/mistral/tests/unit/workflow/test_direct_workflow.py --- mistral-4.0.0/mistral/tests/unit/workflow/test_direct_workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ 
mistral-5.0.0~b2/mistral/tests/unit/workflow/test_direct_workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -17,9 +17,9 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workflows as wf_service from mistral.tests.unit import base -from mistral.workbook import parser as spec_parser from mistral.workflow import direct_workflow as d_wf from mistral.workflow import states diff -Nru mistral-4.0.0/mistral/tests/unit/workflow/test_reverse_workflow.py mistral-5.0.0~b2/mistral/tests/unit/workflow/test_reverse_workflow.py --- mistral-4.0.0/mistral/tests/unit/workflow/test_reverse_workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workflow/test_reverse_workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,9 +15,9 @@ from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit import base -from mistral.workbook import parser as spec_parser from mistral.workflow import reverse_workflow as reverse_wf from mistral.workflow import states diff -Nru mistral-4.0.0/mistral/tests/unit/workflow/test_workflow_base.py mistral-5.0.0~b2/mistral/tests/unit/workflow/test_workflow_base.py --- mistral-4.0.0/mistral/tests/unit/workflow/test_workflow_base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/tests/unit/workflow/test_workflow_base.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,8 +13,8 @@ # limitations under the License. 
+from mistral.lang import parser as spec_parser from mistral.tests.unit import base -from mistral.workbook import parser as spec_parser from mistral.workflow import base as wf_base from mistral.workflow import direct_workflow as direct_wf from mistral.workflow import reverse_workflow as reverse_wf diff -Nru mistral-4.0.0/mistral/utils/expression_utils.py mistral-5.0.0~b2/mistral/utils/expression_utils.py --- mistral-4.0.0/mistral/utils/expression_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/expression_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -108,7 +108,8 @@ 'name': wf_ex.name, 'spec': wf_ex.spec, 'input': wf_ex.input, - 'params': wf_ex.params + 'params': wf_ex.params, + 'created_at': wf_ex.created_at.isoformat(' ') } @@ -261,3 +262,9 @@ def uuid_(context=None): return utils.generate_unicode_uuid() + + +def global_(context, var_name): + wf_ex = db_api.get_workflow_execution(context['__execution']['id']) + + return wf_ex.context.get(var_name) diff -Nru mistral-4.0.0/mistral/utils/filter_utils.py mistral-5.0.0~b2/mistral/utils/filter_utils.py --- mistral-4.0.0/mistral/utils/filter_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/filter_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -43,7 +43,7 @@ :param filter_type: filter type. Filter type can be 'eq', 'neq', 'gt', 'gte', 'lte', 'in', 'lt', 'nin'. Default is 'eq'. - :parma _filter: Optional. If provided same filter dictionary will + :param _filter: Optional. If provided same filter dictionary will be updated. :return: filter dictionary. diff -Nru mistral-4.0.0/mistral/utils/__init__.py mistral-5.0.0~b2/mistral/utils/__init__.py --- mistral-4.0.0/mistral/utils/__init__.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/__init__.py 2017-06-09 12:48:26.000000000 +0000 @@ -15,6 +15,7 @@ # limitations under the License. 
import contextlib +import datetime import functools import json import logging @@ -332,54 +333,61 @@ class NotDefined(object): - """This class is just a marker of input params without value.""" + """Marker of an empty value. + + In a number of cases None can't be used to express the semantics of + a not defined value because None is just a normal value rather than + a value set to denote that it's not defined. This class can be used + in such cases instead of None. + """ pass -def get_dict_from_string(input_string, delimiter=','): - if not input_string: +def get_dict_from_string(string, delimiter=','): + if not string: return {} - raw_inputs = input_string.split(delimiter) - - inputs = [] + kv_dicts = [] - for raw in raw_inputs: - input = raw.strip() - name_value = input.split('=') - - if len(name_value) > 1: + for kv_pair_str in string.split(delimiter): + kv_str = kv_pair_str.strip() + kv_list = kv_str.split('=') + if len(kv_list) > 1: try: - value = json.loads(name_value[1]) + value = json.loads(kv_list[1]) except ValueError: - value = name_value[1] + value = kv_list[1] - inputs += [{name_value[0]: value}] + kv_dicts += [{kv_list[0]: value}] else: - inputs += [name_value[0]] + kv_dicts += [kv_list[0]] - return get_input_dict(inputs) + return get_dict_from_entries(kv_dicts) -def get_input_dict(inputs): - """Transform input list to dictionary. +def get_dict_from_entries(entries): + """Transforms a list of entries into dictionary. - Ensure every input param has a default value(it will be a NotDefined - object if it's not provided). + :param entries: A list of entries. + If an entry is a dictionary the method simply updates the result + dictionary with its content. + If an entry is not a dict adds {entry, NotDefined} into the result. 
""" - input_dict = {} - for x in inputs: - if isinstance(x, dict): - input_dict.update(x) + + result = {} + + for e in entries: + if isinstance(e, dict): + result.update(e) else: - # NOTE(xylan): we put a NotDefined class here as the value of - # param without value specified, to distinguish from the valid - # values such as None, ''(empty string), etc. - input_dict[x] = NotDefined + # NOTE(kong): we put NotDefined here as the value of + # param without value specified, to distinguish from + # the valid values such as None, ''(empty string), etc. + result[e] = NotDefined - return input_dict + return result def get_process_identifier(): @@ -460,5 +468,33 @@ def utc_now_sec(): """Returns current time and drops microseconds.""" - d = timeutils.utcnow() - return d.replace(microsecond=0) + return timeutils.utcnow().replace(microsecond=0) + + +def datetime_to_str(val, sep=' '): + """Converts datetime value to string. + + If the given value is not an instance of datetime then the method + returns the same value. + + :param val: datetime value. + :param sep: Separator between date and time. + :return: Datetime as a string. + """ + if isinstance(val, datetime.datetime): + return val.isoformat(sep) + + return val + + +def datetime_to_str_in_dict(d, key, sep=' '): + """Converts datetime value in te given dict to string. + + :param d: A dictionary. + :param key: The key for which we need to convert the value. + :param sep: Separator between date and time. 
+ """ + val = d.get(key) + + if val is not None: + d[key] = datetime_to_str(d[key], sep=sep) diff -Nru mistral-4.0.0/mistral/utils/inspect_utils.py mistral-5.0.0~b2/mistral/utils/inspect_utils.py --- mistral-4.0.0/mistral/utils/inspect_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/inspect_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -28,7 +28,8 @@ attr = getattr(obj, attribute_str) is_field = not (inspect.isbuiltin(attr) or inspect.isfunction(attr) - or inspect.ismethod(attr)) + or inspect.ismethod(attr) + or isinstance(attr, property)) if is_field: public_fields[attribute_str] = attr diff -Nru mistral-4.0.0/mistral/utils/openstack/keystone.py mistral-5.0.0~b2/mistral/utils/openstack/keystone.py --- mistral-4.0.0/mistral/utils/openstack/keystone.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/openstack/keystone.py 2017-06-09 12:48:26.000000000 +0000 @@ -68,7 +68,8 @@ return _admin_client(trust_id=trust_id) -def get_endpoint_for_project(service_name=None, service_type=None): +def get_endpoint_for_project(service_name=None, service_type=None, + region_name=None): if service_name is None and service_type is None: raise exceptions.MistralException( "Either 'service_name' or 'service_type' must be provided." @@ -78,19 +79,32 @@ service_catalog = obtain_service_catalog(ctx) + # When region_name is not passed, first get from context as region_name + # could be passed to rest api in http header ('X-Region-Name'). Otherwise, + # just get region from mistral configuration. + region = (region_name or ctx.region_name) + if service_name == 'keystone': + # Determining keystone endpoint should be done using + # keystone_authtoken section as this option is special for keystone. 
+ region = region or CONF.keystone_authtoken.region_name + else: + region = region or CONF.openstack_actions.default_region + service_endpoints = service_catalog.get_endpoints( service_name=service_name, service_type=service_type, - region_name=ctx.region_name + region_name=region ) endpoint = None + os_actions_endpoint_type = CONF.openstack_actions.os_actions_endpoint_type + for endpoints in six.itervalues(service_endpoints): for ep in endpoints: # is V3 interface? if 'interface' in ep: interface_type = ep['interface'] - if CONF.os_actions_endpoint_type in interface_type: + if os_actions_endpoint_type in interface_type: endpoint = ks_endpoints.Endpoint( None, ep, @@ -114,7 +128,7 @@ raise exceptions.MistralException( "No endpoints found [service_name=%s, service_type=%s," " region_name=%s]" - % (service_name, service_type, ctx.region_name) + % (service_name, service_type, region) ) else: return endpoint diff -Nru mistral-4.0.0/mistral/utils/rest_utils.py mistral-5.0.0~b2/mistral/utils/rest_utils.py --- mistral-4.0.0/mistral/utils/rest_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/rest_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -44,6 +44,7 @@ pecan.response.translatable_error = e LOG.error('Error during API call: %s' % str(e)) + raise wsme_exc.ClientSideError( msg=six.text_type(e), status_code=e.http_code @@ -64,10 +65,12 @@ return func(*args, **kwargs) except (exc.MistralException, exc.MistralError) as e: LOG.error('Error during API call: %s' % str(e)) + return webob.Response( status=e.http_code, content_type='application/json', - body=json.dumps(dict(faultstring=six.text_type(e))) + body=json.dumps(dict(faultstring=six.text_type(e))), + charset='UTF-8' ) return wrapped @@ -128,8 +131,9 @@ all_projects=False, **filters): """Return a list of cls. - :param list_cls: Collection class (e.g.: Actions, Workflows, ...). - :param cls: Class (e.g.: Action, Workflow, ...). 
+ :param list_cls: REST Resource collection class (e.g.: Actions, + Workflows, ...) + :param cls: REST Resource class (e.g.: Action, Workflow, ...) :param get_all_function: Request function to get all elements with filtering (limit, marker, sort_keys, sort_dirs, fields) @@ -160,6 +164,7 @@ # Admin user can get all tenants resources, no matter they are private or # public. insecure = False + if (all_projects or (auth_ctx.ctx().is_admin and filters.get('project_id', ''))): insecure = True @@ -169,36 +174,11 @@ if marker: marker_obj = get_function(marker) - list_to_return = [] + rest_resources = [] - if resource_function: - with db_api.transaction(): - # do not filter fields yet, resource_function needs the ORM object - db_list = get_all_function( - limit=limit, - marker=marker_obj, - sort_keys=sort_keys, - sort_dirs=sort_dirs, - insecure=insecure, - **filters - ) - - for data in db_list: - obj = resource_function(data) - - # filter fields using a loop instead of the ORM - if fields: - data = [] - for f in fields: - if hasattr(obj, f): - data.append(getattr(obj, f)) - - dict_data = dict(zip(fields, data)) - else: - dict_data = obj.to_dict() - - list_to_return.append(cls.from_dict(dict_data)) - else: + # If only certain fields are requested then we ignore "resource_function" + # parameter because it doesn't make sense anymore. + if fields: db_list = get_all_function( limit=limit, marker=marker_obj, @@ -209,14 +189,33 @@ **filters ) - for data in db_list: - dict_data = (dict(zip(fields, data)) if fields else - data.to_dict()) + for obj_values in db_list: + # Note: in case if only certain fields have been requested + # "db_list" contains tuples with values of db objects. 
+ rest_resources.append( + cls.from_tuples(zip(fields, obj_values)) + ) + else: + with db_api.transaction(): + db_models = get_all_function( + limit=limit, + marker=marker_obj, + sort_keys=sort_keys, + sort_dirs=sort_dirs, + insecure=insecure, + **filters + ) + + for db_model in db_models: + if resource_function: + rest_resource = resource_function(db_model) + else: + rest_resource = cls.from_db_model(db_model) - list_to_return.append(cls.from_dict(dict_data)) + rest_resources.append(rest_resource) return list_cls.convert_with_links( - list_to_return, + rest_resources, limit, pecan.request.host_url, sort_keys=','.join(sort_keys), diff -Nru mistral-4.0.0/mistral/utils/rpc_utils.py mistral-5.0.0~b2/mistral/utils/rpc_utils.py --- mistral-4.0.0/mistral/utils/rpc_utils.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/utils/rpc_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -17,8 +17,8 @@ CONF = cfg.CONF -def get_rpc_backend(transport): - if transport: - return transport.transport +def get_rpc_backend(transport_url): + if transport_url: + return transport_url.transport return CONF.rpc_backend diff -Nru mistral-4.0.0/mistral/workbook/base.py mistral-5.0.0~b2/mistral/workbook/base.py --- mistral-4.0.0/mistral/workbook/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,376 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import json -import jsonschema -import re -import six - -from mistral import exceptions as exc -from mistral import expressions as expr -from mistral import utils -from mistral.workbook import types - - -CMD_PTRN = re.compile("^[\w\.]+[^=\(\s\"]*") - -EXPRESSION = '|'.join([expr.patterns[name] for name in expr.patterns]) -_ALL_IN_BRACKETS = "\[.*\]\s*" -_ALL_IN_QUOTES = "\"[^\"]*\"\s*" -_ALL_IN_APOSTROPHES = "'[^']*'\s*" -_DIGITS = "\d+" -_TRUE = "true" -_FALSE = "false" -_NULL = "null" - -ALL = ( - _ALL_IN_QUOTES, _ALL_IN_APOSTROPHES, EXPRESSION, - _ALL_IN_BRACKETS, _TRUE, _FALSE, _NULL, _DIGITS -) - -PARAMS_PTRN = re.compile("([-_\w]+)=(%s)" % "|".join(ALL)) - - -def instantiate_spec(spec_cls, data): - """Instantiates specification accounting for specification hierarchies. - - :param spec_cls: Specification concrete or base class. In case if base - class or the hierarchy is provided this method relies on attributes - _polymorphic_key and _polymorphic_value in order to find a concrete - class that needs to be instantiated. - :param data: Raw specification data as a dictionary. - """ - - if issubclass(spec_cls, BaseSpecList): - # Ignore polymorphic search for specification lists because - # it doesn't make sense for them. - return spec_cls(data) - - if not hasattr(spec_cls, '_polymorphic_key'): - spec = spec_cls(data) - - spec.validate_semantics() - - return spec - - key = spec_cls._polymorphic_key - - if not isinstance(key, tuple): - key_name = key - key_default = None - else: - key_name = key[0] - key_default = key[1] - - for cls in utils.iter_subclasses(spec_cls): - if not hasattr(cls, '_polymorphic_value'): - raise exc.DSLParsingException( - "Class '%s' is expected to have attribute '_polymorphic_value'" - " because it's a part of specification hierarchy inherited " - "from class '%s'." 
% (cls, spec_cls) - ) - - if cls._polymorphic_value == data.get(key_name, key_default): - spec = cls(data) - - spec.validate_semantics() - - return spec - - raise exc.DSLParsingException( - 'Failed to find a specification class to instantiate ' - '[spec_cls=%s, data=%s]' % (spec_cls, data) - ) - - -class BaseSpec(object): - """Base class for all DSL specifications. - - It represents a DSL entity such as workflow or task as a python object - providing more convenient API to analyse DSL than just working with raw - data in form of a dictionary. Specification classes also implement - all required validation logic by overriding instance method 'validate()'. - - Note that the specification mechanism allows to have polymorphic entities - in DSL. For example, if we find it more convenient to have separate - specification classes for different types of workflow (i.e. 'direct' and - 'reverse') we can do so. In this case, in order to instantiate them - correctly method 'instantiate_spec' must always be used where argument - 'spec_cls' must be a root class of the specification hierarchy containing - class attribute '_polymorhpic_key' pointing to a key in raw data relying - on which we can find a concrete class. Concrete classes then must all have - attribute '_polymorhpic_value' corresponding to a value in a raw data. - Attribute '_polymorhpic_key' can be either a string or a tuple of size two - where the first value is a key name itself and the second value is a - default polymorphic value that must be used if raw data doesn't contain - a configured key at all. An example of this situation is when we don't - specify a workflow type in DSL. In this case, we assume it's 'direct'. 
- """ - - # See http://json-schema.org - _schema = { - 'type': 'object' - } - - _meta_schema = { - 'type': 'object' - } - - _definitions = {} - - _version = '2.0' - - @classmethod - def get_schema(cls, includes=['meta', 'definitions']): - schema = copy.deepcopy(cls._schema) - - schema['properties'] = utils.merge_dicts( - schema.get('properties', {}), - cls._meta_schema.get('properties', {}), - overwrite=False - ) - - if includes and 'meta' in includes: - schema['required'] = list( - set(schema.get('required', []) + - cls._meta_schema.get('required', [])) - ) - - if includes and 'definitions' in includes: - schema['definitions'] = utils.merge_dicts( - schema.get('definitions', {}), - cls._definitions, - overwrite=False - ) - - return schema - - def __init__(self, data): - self._data = data - - self.validate_schema() - - def validate_schema(self): - """Validates DSL entity schema that this specification represents. - - By default, this method just validate schema of DSL entity that this - specification represents using "_schema" class attribute. - Additionally, child classes may implement additional logic to validate - more specific things like YAQL expressions in their fields. - - Note that this method is called before construction of specification - fields and validation logic should only rely on raw data provided as - a dictionary accessible through '_data' instance field. - """ - - try: - jsonschema.validate(self._data, self.get_schema()) - except jsonschema.ValidationError as e: - raise exc.InvalidModelException("Invalid DSL: %s" % e) - - def validate_semantics(self): - """Validates semantics of specification object. - - Child classes may implement validation logic to check things like - integrity of corresponding data structure (e.g. task graph) or - other things that can't be expressed in JSON schema. - - This method is called after specification has been built (i.e. 
- its initializer has finished it's work) so that validation logic - can rely on initialized specification fields. - """ - pass - - def validate_expr(self, dsl_part): - if isinstance(dsl_part, six.string_types): - expr.validate(dsl_part) - elif isinstance(dsl_part, list): - for expression in dsl_part: - if isinstance(expression, six.string_types): - expr.validate(expression) - elif isinstance(dsl_part, dict): - for expression in dsl_part.values(): - if isinstance(expression, six.string_types): - expr.validate(expression) - - def _spec_property(self, prop_name, spec_cls): - prop_val = self._data.get(prop_name) - - return instantiate_spec(spec_cls, prop_val) if prop_val else None - - def _group_spec(self, spec_cls, *prop_names): - if not prop_names: - return None - - data = {} - - for prop_name in prop_names: - prop_val = self._data.get(prop_name) - - if prop_val: - data[prop_name] = prop_val - - return instantiate_spec(spec_cls, data) - - def _inject_version(self, prop_names): - for prop_name in prop_names: - prop_data = self._data.get(prop_name) - - if isinstance(prop_data, dict): - prop_data['version'] = self._version - - def _as_dict(self, prop_name): - prop_val = self._data.get(prop_name) - - if not prop_val: - return {} - - if isinstance(prop_val, dict): - return prop_val - elif isinstance(prop_val, list): - result = {} - - for t in prop_val: - result.update(t if isinstance(t, dict) else {t: ''}) - - return result - elif isinstance(prop_val, six.string_types): - return {prop_val: ''} - - def _as_list_of_tuples(self, prop_name): - prop_val = self._data.get(prop_name) - - if not prop_val: - return [] - - if isinstance(prop_val, six.string_types): - return [self._as_tuple(prop_val)] - - return [self._as_tuple(item) for item in prop_val] - - @staticmethod - def _as_tuple(val): - return list(val.items())[0] if isinstance(val, dict) else (val, '') - - @staticmethod - def _parse_cmd_and_input(cmd_str): - # TODO(rakhmerov): Try to find a way with one expression. 
- cmd_matcher = CMD_PTRN.search(cmd_str) - - if not cmd_matcher: - msg = "Invalid action/workflow task property: %s" % cmd_str - raise exc.InvalidModelException(msg) - - cmd = cmd_matcher.group() - - params = {} - - for match in re.findall(PARAMS_PTRN, cmd_str): - k = match[0] - # Remove embracing quotes. - v = match[1].strip() - if v[0] == '"' or v[0] == "'": - v = v[1:-1] - else: - try: - v = json.loads(v) - except Exception: - pass - - params[k] = v - - return cmd, params - - def to_dict(self): - return self._data - - def get_version(self): - return self._version - - def __repr__(self): - return "%s %s" % (self.__class__.__name__, self.to_dict()) - - -class BaseListSpec(BaseSpec): - item_class = None - - _schema = { - "type": "object", - "properties": { - "version": types.VERSION - }, - "additionalProperties": types.NONEMPTY_DICT, - "required": ["version"], - } - - def __init__(self, data): - super(BaseListSpec, self).__init__(data) - - self.items = [] - - for k, v in data.items(): - if k != 'version': - v['name'] = k - self._inject_version([k]) - self.items.append(instantiate_spec(self.item_class, v)) - - def validate_schema(self): - super(BaseListSpec, self).validate_schema() - - if len(self._data.keys()) < 2: - raise exc.InvalidModelException( - 'At least one item must be in the list [data=%s].' 
% - self._data - ) - - def get_items(self): - return self.items - - def __getitem__(self, idx): - return self.items[idx] - - def __len__(self): - return len(self.items) - - -class BaseSpecList(object): - item_class = None - - _version = '2.0' - - def __init__(self, data): - self.items = {} - - for k, v in data.items(): - if k != 'version': - v['name'] = k - v['version'] = self._version - self.items[k] = instantiate_spec(self.item_class, v) - - def item_keys(self): - return self.items.keys() - - def __iter__(self): - return six.itervalues(self.items) - - def __getitem__(self, name): - return self.items.get(name) - - def __len__(self): - return len(self.items) - - def get(self, name): - return self.__getitem__(name) diff -Nru mistral-4.0.0/mistral/workbook/parser.py mistral-5.0.0~b2/mistral/workbook/parser.py --- mistral-4.0.0/mistral/workbook/parser.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/parser.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -# Copyright 2013 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import cachetools -import threading -import yaml -from yaml import error - -import six - -from mistral.db.v2 import api as db_api -from mistral import exceptions as exc -from mistral.workbook import base -from mistral.workbook.v2 import actions as actions_v2 -from mistral.workbook.v2 import tasks as tasks_v2 -from mistral.workbook.v2 import workbook as wb_v2 -from mistral.workbook.v2 import workflows as wf_v2 - -V2_0 = '2.0' - -ALL_VERSIONS = [V2_0] - - -_WF_EX_CACHE = cachetools.LRUCache(maxsize=100) -_WF_EX_CACHE_LOCK = threading.RLock() - -_WF_DEF_CACHE = cachetools.LRUCache(maxsize=100) -_WF_DEF_CACHE_LOCK = threading.RLock() - - -def parse_yaml(text): - """Loads a text in YAML format as dictionary object. - - :param text: YAML text. - :return: Parsed YAML document as dictionary. - """ - - try: - return yaml.safe_load(text) or {} - except error.YAMLError as e: - raise exc.DSLParsingException( - "Definition could not be parsed: %s\n" % e - ) - - -def _get_spec_version(spec_dict): - # If version is not specified it will '2.0' by default. - ver = V2_0 - - if 'version' in spec_dict: - ver = spec_dict['version'] - - def _raise(ver): - raise exc.DSLParsingException('Unsupported DSL version: %s' % ver) - try: - str_ver = str(float(ver)) - except (ValueError, TypeError): - _raise(ver) - - if not ver or str_ver not in ALL_VERSIONS: - _raise(ver) - - return ver - - -# Factory methods to get specifications either from raw YAML formatted text or -# from dictionaries parsed from YAML formatted text. 
- -def get_workbook_spec(spec_dict): - if _get_spec_version(spec_dict) == V2_0: - return base.instantiate_spec(wb_v2.WorkbookSpec, spec_dict) - - return None - - -def get_workbook_spec_from_yaml(text): - return get_workbook_spec(parse_yaml(text)) - - -def get_action_spec(spec_dict): - if _get_spec_version(spec_dict) == V2_0: - return base.instantiate_spec(actions_v2.ActionSpec, spec_dict) - - return None - - -def get_action_spec_from_yaml(text, action_name): - spec_dict = parse_yaml(text) - - spec_dict['name'] = action_name - - return get_action_spec(spec_dict) - - -def get_action_list_spec(spec_dict): - return base.instantiate_spec(actions_v2.ActionListSpec, spec_dict) - - -def get_action_list_spec_from_yaml(text): - return get_action_list_spec(parse_yaml(text)) - - -def get_workflow_spec(spec_dict): - """Get workflow specification object from dictionary. - - NOTE: For large workflows this method can work very long (seconds). - For this reason, method 'get_workflow_spec_by_definition_id' or - 'get_workflow_spec_by_execution_id' should be used whenever possible - because they cache specification objects. - - :param spec_dict: Raw specification dictionary. 
- """ - if _get_spec_version(spec_dict) == V2_0: - return base.instantiate_spec(wf_v2.WorkflowSpec, spec_dict) - - return None - - -def get_workflow_list_spec(spec_dict): - return base.instantiate_spec(wf_v2.WorkflowListSpec, spec_dict) - - -def get_workflow_spec_from_yaml(text): - return get_workflow_spec(parse_yaml(text)) - - -def get_workflow_list_spec_from_yaml(text): - return get_workflow_list_spec(parse_yaml(text)) - - -def get_task_spec(spec_dict): - if _get_spec_version(spec_dict) == V2_0: - return base.instantiate_spec(tasks_v2.TaskSpec, spec_dict) - - return None - - -def get_workflow_definition(wb_def, wf_name): - wf_name = wf_name + ":" - - return _parse_def_from_wb(wb_def, "workflows:", wf_name) - - -def get_action_definition(wb_def, action_name): - action_name += ":" - - return _parse_def_from_wb(wb_def, "actions:", action_name) - - -def _parse_def_from_wb(wb_def, section_name, item_name): - io = six.StringIO(wb_def[wb_def.index(section_name):]) - io.readline() - definition = [] - ident = 0 - # Get the indentation of the action/workflow name tag. - for line in io: - if item_name == line.strip(): - ident = line.index(item_name) - definition.append(line.lstrip()) - break - - # Add strings to list unless same/less indentation is found. - for line in io: - new_line = line.strip() - - if not new_line: - definition.append(line) - elif new_line.startswith("#"): - new_line = line if ident > line.index("#") else line[ident:] - definition.append(new_line) - else: - temp = line.index(line.lstrip()) - if ident < temp: - definition.append(line[ident:]) - else: - break - - io.close() - definition = ''.join(definition).rstrip() + '\n' - - return definition - - -# Methods for obtaining specifications in a more efficient way using -# caching techniques. - -@cachetools.cached(_WF_EX_CACHE, lock=_WF_EX_CACHE_LOCK) -def get_workflow_spec_by_execution_id(wf_ex_id): - """Gets workflow specification by workflow execution id. 
- - The idea is that when a workflow execution is running we - must be getting the same workflow specification even if - - :param wf_ex_id: Workflow execution id. - :return: Workflow specification. - """ - if not wf_ex_id: - return None - - wf_ex = db_api.get_workflow_execution(wf_ex_id) - - return get_workflow_spec(wf_ex.spec) - - -@cachetools.cached(_WF_DEF_CACHE, lock=_WF_DEF_CACHE_LOCK) -def get_workflow_spec_by_definition_id(wf_def_id, wf_def_updated_at): - """Gets specification by workflow definition id and its 'updated_at'. - - The idea of this method is to return a cached specification for the - given workflow id and workflow definition 'updated_at'. As long as the - given workflow definition remains the same in DB users of this method - will be getting a cached value. Once the workflow definition has - changed clients will be providing a different 'updated_at' value and - hence this method will be called and spec is updated for this combination - of parameters. Old cached values will be kicked out by LRU algorithm - if the cache runs out of space. - - :param wf_def_id: Workflow definition id. - :param wf_def_updated_at: Workflow definition 'updated_at' value. It - serves only as part of cache key and is not explicitly used in the - method. - :return: Workflow specification. 
- """ - if not wf_def_id: - return None - - wf_def = db_api.get_workflow_definition(wf_def_id) - - return get_workflow_spec(wf_def.spec) - - -def cache_workflow_spec_by_execution_id(wf_ex_id, wf_spec): - with _WF_EX_CACHE_LOCK: - _WF_EX_CACHE[cachetools.keys.hashkey(wf_ex_id)] = wf_spec - - -def get_wf_execution_spec_cache_size(): - return len(_WF_EX_CACHE) - - -def get_wf_definition_spec_cache_size(): - return len(_WF_DEF_CACHE) - - -def clear_caches(): - """Clears all specification caches.""" - with _WF_EX_CACHE_LOCK: - _WF_EX_CACHE.clear() - - with _WF_DEF_CACHE_LOCK: - _WF_DEF_CACHE.clear() diff -Nru mistral-4.0.0/mistral/workbook/types.py mistral-5.0.0~b2/mistral/workbook/types.py --- mistral-4.0.0/mistral/workbook/types.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/types.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from mistral import expressions - - -NONEMPTY_STRING = { - "type": "string", - "minLength": 1 -} - -UNIQUE_STRING_LIST = { - "type": "array", - "items": NONEMPTY_STRING, - "uniqueItems": True, - "minItems": 1 -} - -POSITIVE_INTEGER = { - "type": "integer", - "minimum": 0 -} - -POSITIVE_NUMBER = { - "type": "number", - "minimum": 0.0 -} - -EXPRESSION = { - "oneOf": [{ - "type": "string", - "pattern": "^%s\\s*$" % expressions.patterns[name] - } for name in expressions.patterns] -} - -EXPRESSION_CONDITION = { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^\w+$": EXPRESSION - } -} - -ANY = { - "anyOf": [ - {"type": "array"}, - {"type": "boolean"}, - {"type": "integer"}, - {"type": "number"}, - {"type": "object"}, - {"type": "string"} - ] -} - -ANY_NULLABLE = { - "anyOf": [ - {"type": "null"}, - {"type": "array"}, - {"type": "boolean"}, - {"type": "integer"}, - {"type": "number"}, - {"type": "object"}, - {"type": "string"} - ] -} - -NONEMPTY_DICT = { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^\w+$": ANY_NULLABLE - } -} - -ONE_KEY_DICT = { - "type": "object", - "minProperties": 1, - "maxProperties": 1, - "patternProperties": { - "^\w+$": ANY_NULLABLE - } -} - -STRING_OR_EXPRESSION_CONDITION = { - "oneOf": [ - NONEMPTY_STRING, - EXPRESSION_CONDITION - ] -} - -EXPRESSION_OR_POSITIVE_INTEGER = { - "oneOf": [ - EXPRESSION, - POSITIVE_INTEGER - ] -} - -EXPRESSION_OR_BOOLEAN = { - "oneOf": [ - EXPRESSION, - {"type": "boolean"} - ] -} - - -UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST = { - "type": "array", - "items": STRING_OR_EXPRESSION_CONDITION, - "uniqueItems": True, - "minItems": 1 -} - -VERSION = { - "anyOf": [ - NONEMPTY_STRING, - POSITIVE_INTEGER, - POSITIVE_NUMBER - ] -} - -WORKFLOW_TYPE = { - "enum": ["reverse", "direct"] -} - -STRING_OR_ONE_KEY_DICT = { - "oneOf": [ - NONEMPTY_STRING, - ONE_KEY_DICT - ] -} - -UNIQUE_STRING_OR_ONE_KEY_DICT_LIST = { - "type": "array", - "items": STRING_OR_ONE_KEY_DICT, - 
"uniqueItems": True, - "minItems": 1 -} diff -Nru mistral-4.0.0/mistral/workbook/v2/actions.py mistral-5.0.0~b2/mistral/workbook/v2/actions.py --- mistral-4.0.0/mistral/workbook/v2/actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/actions.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six - -from mistral import utils -from mistral.workbook import types -from mistral.workbook.v2 import base - - -class ActionSpec(base.BaseSpec): - # See http://json-schema.org - _schema = { - "type": "object", - "properties": { - "base": types.NONEMPTY_STRING, - "base-input": types.NONEMPTY_DICT, - "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST, - "output": types.ANY_NULLABLE, - }, - "required": ["base"], - "additionalProperties": False - } - - def __init__(self, data): - super(ActionSpec, self).__init__(data) - - self._name = data['name'] - self._description = data.get('description') - self._tags = data.get('tags', []) - self._base = data['base'] - self._base_input = data.get('base-input', {}) - self._input = utils.get_input_dict(data.get('input', [])) - self._output = data.get('output') - - self._base, _input = self._parse_cmd_and_input(self._base) - - utils.merge_dicts(self._base_input, _input) - - def validate_schema(self): - super(ActionSpec, self).validate_schema() - - # Validate YAQL expressions. 
- inline_params = self._parse_cmd_and_input(self._data.get('base'))[1] - self.validate_expr(inline_params) - - self.validate_expr(self._data.get('base-input', {})) - - if isinstance(self._data.get('output'), six.string_types): - self.validate_expr(self._data.get('output')) - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_tags(self): - return self._tags - - def get_base(self): - return self._base - - def get_base_input(self): - return self._base_input - - def get_input(self): - return self._input - - def get_output(self): - return self._output - - -class ActionSpecList(base.BaseSpecList): - item_class = ActionSpec - - -class ActionListSpec(base.BaseListSpec): - item_class = ActionSpec - - def get_actions(self): - return self.get_items() diff -Nru mistral-4.0.0/mistral/workbook/v2/base.py mistral-5.0.0~b2/mistral/workbook/v2/base.py --- mistral-4.0.0/mistral/workbook/v2/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/base.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from mistral.workbook import base -from mistral.workbook import types - - -class BaseSpec(base.BaseSpec): - _version = "2.0" - - _meta_schema = { - "type": "object", - "properties": { - "name": types.NONEMPTY_STRING, - "version": types.VERSION, - "description": types.NONEMPTY_STRING, - "tags": types.UNIQUE_STRING_LIST - }, - "required": ["name", "version"] - } - - -class BaseSpecList(base.BaseSpecList): - _version = "2.0" - - -class BaseListSpec(base.BaseListSpec): - _version = "2.0" diff -Nru mistral-4.0.0/mistral/workbook/v2/policies.py mistral-5.0.0~b2/mistral/workbook/v2/policies.py --- mistral-4.0.0/mistral/workbook/v2/policies.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/policies.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from mistral.workbook import types -from mistral.workbook.v2 import base -from mistral.workbook.v2 import retry_policy - - -RETRY_SCHEMA = retry_policy.RetrySpec.get_schema(includes=None) -WAIT_BEFORE_SCHEMA = types.EXPRESSION_OR_POSITIVE_INTEGER -WAIT_AFTER_SCHEMA = types.EXPRESSION_OR_POSITIVE_INTEGER -TIMEOUT_SCHEMA = types.EXPRESSION_OR_POSITIVE_INTEGER -PAUSE_BEFORE_SCHEMA = types.EXPRESSION_OR_BOOLEAN -CONCURRENCY_SCHEMA = types.EXPRESSION_OR_POSITIVE_INTEGER - - -class PoliciesSpec(base.BaseSpec): - # See http://json-schema.org - _schema = { - "type": "object", - "properties": { - "retry": RETRY_SCHEMA, - "wait-before": WAIT_BEFORE_SCHEMA, - "wait-after": WAIT_AFTER_SCHEMA, - "timeout": TIMEOUT_SCHEMA, - "pause-before": PAUSE_BEFORE_SCHEMA, - "concurrency": CONCURRENCY_SCHEMA, - }, - "additionalProperties": False - } - - @classmethod - def get_schema(cls, includes=['definitions']): - return super(PoliciesSpec, cls).get_schema(includes) - - def __init__(self, data): - super(PoliciesSpec, self).__init__(data) - - self._retry = self._spec_property('retry', retry_policy.RetrySpec) - self._wait_before = data.get('wait-before', 0) - self._wait_after = data.get('wait-after', 0) - self._timeout = data.get('timeout', 0) - self._pause_before = data.get('pause-before', False) - self._concurrency = data.get('concurrency', 0) - - def validate_schema(self): - super(PoliciesSpec, self).validate_schema() - - # Validate YAQL expressions. 
- self.validate_expr(self._data.get('wait-before', 0)) - self.validate_expr(self._data.get('wait-after', 0)) - self.validate_expr(self._data.get('timeout', 0)) - self.validate_expr(self._data.get('pause-before', False)) - self.validate_expr(self._data.get('concurrency', 0)) - - def get_retry(self): - return self._retry - - def get_wait_before(self): - return self._wait_before - - def get_wait_after(self): - return self._wait_after - - def get_timeout(self): - return self._timeout - - def get_pause_before(self): - return self._pause_before - - def get_concurrency(self): - return self._concurrency diff -Nru mistral-4.0.0/mistral/workbook/v2/retry_policy.py mistral-5.0.0~b2/mistral/workbook/v2/retry_policy.py --- mistral-4.0.0/mistral/workbook/v2/retry_policy.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/retry_policy.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import six - -from mistral.workbook import types -from mistral.workbook.v2 import base - - -class RetrySpec(base.BaseSpec): - # See http://json-schema.org - _retry_dict_schema = { - "type": "object", - "properties": { - "count": { - "oneOf": [ - types.EXPRESSION, - types.POSITIVE_INTEGER - ] - }, - "break-on": types.EXPRESSION, - "continue-on": types.EXPRESSION, - "delay": { - "oneOf": [ - types.EXPRESSION, - types.POSITIVE_INTEGER - ] - }, - }, - "required": ["delay", "count"], - "additionalProperties": False - } - - _schema = { - "oneOf": [ - _retry_dict_schema, - types.NONEMPTY_STRING - ] - } - - @classmethod - def get_schema(cls, includes=['definitions']): - return super(RetrySpec, cls).get_schema(includes) - - def __init__(self, data): - data = self._transform_retry_one_line(data) - super(RetrySpec, self).__init__(data) - - self._break_on = data.get('break-on') - self._count = data.get('count') - self._continue_on = data.get('continue-on') - self._delay = data['delay'] - - def _transform_retry_one_line(self, retry): - if isinstance(retry, six.string_types): - _, params = self._parse_cmd_and_input(retry) - return params - - return retry - - def validate_schema(self): - super(RetrySpec, self).validate_schema() - - # Validate YAQL expressions. 
- self.validate_expr(self._data.get('count')) - self.validate_expr(self._data.get('delay')) - self.validate_expr(self._data.get('break-on')) - self.validate_expr(self._data.get('continue-on')) - - def get_count(self): - return self._count - - def get_break_on(self): - return self._break_on - - def get_continue_on(self): - return self._continue_on - - def get_delay(self): - return self._delay diff -Nru mistral-4.0.0/mistral/workbook/v2/task_defaults.py mistral-5.0.0~b2/mistral/workbook/v2/task_defaults.py --- mistral-4.0.0/mistral/workbook/v2/task_defaults.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/task_defaults.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import six - -from mistral.workbook import types -from mistral.workbook.v2 import base -from mistral.workbook.v2 import policies -from mistral.workbook.v2 import tasks - - -direct_wf_ts = tasks.DirectWorkflowTaskSpec - - -class TaskDefaultsSpec(base.BaseSpec): - # See http://json-schema.org - _task_policies_schema = policies.PoliciesSpec.get_schema( - includes=None) - - _on_clause_type = { - "oneOf": [ - types.NONEMPTY_STRING, - types.UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST - ] - } - - _schema = { - "type": "object", - "properties": { - "retry": policies.RETRY_SCHEMA, - "wait-before": policies.WAIT_BEFORE_SCHEMA, - "wait-after": policies.WAIT_AFTER_SCHEMA, - "timeout": policies.TIMEOUT_SCHEMA, - "pause-before": policies.PAUSE_BEFORE_SCHEMA, - "concurrency": policies.CONCURRENCY_SCHEMA, - "on-complete": _on_clause_type, - "on-success": _on_clause_type, - "on-error": _on_clause_type, - "requires": { - "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST] - } - }, - "additionalProperties": False - } - - @classmethod - def get_schema(cls, includes=['definitions']): - return super(TaskDefaultsSpec, cls).get_schema(includes) - - def __init__(self, data): - super(TaskDefaultsSpec, self).__init__(data) - - self._policies = self._group_spec( - policies.PoliciesSpec, - 'retry', - 'wait-before', - 'wait-after', - 'timeout', - 'pause-before', - 'concurrency' - ) - self._on_complete = direct_wf_ts.prepare_on_clause( - self._as_list_of_tuples('on-complete') - ) - self._on_success = direct_wf_ts.prepare_on_clause( - self._as_list_of_tuples('on-success') - ) - self._on_error = direct_wf_ts.prepare_on_clause( - self._as_list_of_tuples('on-error') - ) - self._requires = data.get('requires', []) - - def validate_schema(self): - super(TaskDefaultsSpec, self).validate_schema() - - # Validate YAQL expressions. 
- self._validate_transitions('on-complete') - self._validate_transitions('on-success') - self._validate_transitions('on-error') - - def _validate_transitions(self, on_clause): - val = self._data.get(on_clause, []) - - [self.validate_expr(t) - for t in ([val] if isinstance(val, six.string_types) else val)] - - def get_policies(self): - return self._policies - - def get_on_complete(self): - return self._on_complete - - def get_on_success(self): - return self._on_success - - def get_on_error(self): - return self._on_error - - def get_requires(self): - if isinstance(self._requires, six.string_types): - return [self._requires] - - return self._requires diff -Nru mistral-4.0.0/mistral/workbook/v2/tasks.py mistral-5.0.0~b2/mistral/workbook/v2/tasks.py --- mistral-4.0.0/mistral/workbook/v2/tasks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/tasks.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,338 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import json -import re -import six - -from mistral import exceptions as exc -from mistral import expressions -from mistral import utils -from mistral.workbook import types -from mistral.workbook.v2 import base -from mistral.workbook.v2 import policies - -_expr_ptrns = [expressions.patterns[name] for name in expressions.patterns] -WITH_ITEMS_PTRN = re.compile( - "\s*([\w\d_\-]+)\s*in\s*(\[.+\]|%s)" % '|'.join(_expr_ptrns) -) -RESERVED_TASK_NAMES = [ - 'noop', - 'fail', - 'succeed', - 'pause' -] - - -class TaskSpec(base.BaseSpec): - # See http://json-schema.org - _polymorphic_key = ('type', 'direct') - - _schema = { - "type": "object", - "properties": { - "type": types.WORKFLOW_TYPE, - "action": types.NONEMPTY_STRING, - "workflow": types.NONEMPTY_STRING, - "input": types.NONEMPTY_DICT, - "with-items": { - "oneOf": [ - types.NONEMPTY_STRING, - types.UNIQUE_STRING_LIST - ] - }, - "publish": types.NONEMPTY_DICT, - "publish-on-error": types.NONEMPTY_DICT, - "retry": policies.RETRY_SCHEMA, - "wait-before": policies.WAIT_BEFORE_SCHEMA, - "wait-after": policies.WAIT_AFTER_SCHEMA, - "timeout": policies.TIMEOUT_SCHEMA, - "pause-before": policies.PAUSE_BEFORE_SCHEMA, - "concurrency": policies.CONCURRENCY_SCHEMA, - "target": types.NONEMPTY_STRING, - "keep-result": types.EXPRESSION_OR_BOOLEAN, - "safe-rerun": types.EXPRESSION_OR_BOOLEAN - }, - "additionalProperties": False, - "anyOf": [ - { - "not": { - "type": "object", - "required": ["action", "workflow"] - }, - }, - { - "oneOf": [ - { - "type": "object", - "required": ["action"] - }, - { - "type": "object", - "required": ["workflow"] - } - ] - } - ] - } - - def __init__(self, data): - super(TaskSpec, self).__init__(data) - - self._name = data['name'] - self._description = data.get('description') - self._action = data.get('action') - self._workflow = data.get('workflow') - self._input = data.get('input', {}) - self._with_items = self._transform_with_items() - self._publish = data.get('publish', {}) - 
self._publish_on_error = data.get('publish-on-error', {}) - self._policies = self._group_spec( - policies.PoliciesSpec, - 'retry', - 'wait-before', - 'wait-after', - 'timeout', - 'pause-before', - 'concurrency' - ) - self._target = data.get('target') - self._keep_result = data.get('keep-result', True) - self._safe_rerun = data.get('safe-rerun', False) - - self._process_action_and_workflow() - - def validate_schema(self): - super(TaskSpec, self).validate_schema() - - action = self._data.get('action') - workflow = self._data.get('workflow') - - # Validate YAQL expressions. - if action or workflow: - inline_params = self._parse_cmd_and_input(action or workflow)[1] - self.validate_expr(inline_params) - - self.validate_expr(self._data.get('input', {})) - self.validate_expr(self._data.get('publish', {})) - self.validate_expr(self._data.get('keep-result', {})) - self.validate_expr(self._data.get('safe-rerun', {})) - - def _transform_with_items(self): - raw = self._data.get('with-items', []) - with_items = {} - - if isinstance(raw, six.string_types): - raw = [raw] - - for item in raw: - if not isinstance(item, six.string_types): - raise exc.InvalidModelException("'with-items' elements should" - " be strings: %s" % self._data) - - match = re.match(WITH_ITEMS_PTRN, item) - - if not match: - msg = ("Wrong format of 'with-items' property. Please use " - "format 'var in {[some, list] | <%% $.array %%> }: " - "%s" % self._data) - raise exc.InvalidModelException(msg) - - match_groups = match.groups() - var_name = match_groups[0] - array = match_groups[1] - - # Validate YAQL expression that may follow after "in" for the - # with-items syntax "var in {[some, list] | <% $.array %> }". 
- self.validate_expr(array) - - if array.startswith('['): - try: - array = json.loads(array) - except Exception as e: - msg = ("Invalid array in 'with-items' clause: " - "%s, error: %s" % (array, str(e))) - raise exc.InvalidModelException(msg) - - with_items[var_name] = array - - return with_items - - def _process_action_and_workflow(self): - params = {} - - if self._action: - self._action, params = self._parse_cmd_and_input(self._action) - elif self._workflow: - self._workflow, params = self._parse_cmd_and_input( - self._workflow) - else: - self._action = 'std.noop' - - utils.merge_dicts(self._input, params) - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_action_name(self): - return self._action if self._action else None - - def get_workflow_name(self): - return self._workflow - - def get_input(self): - return self._input - - def get_with_items(self): - return self._with_items - - def get_policies(self): - return self._policies - - def get_target(self): - return self._target - - def get_publish(self): - return self._publish - - def get_publish_on_error(self): - return self._publish_on_error - - def get_keep_result(self): - return self._keep_result - - def get_safe_rerun(self): - return self._safe_rerun - - def get_type(self): - if self._workflow: - return utils.WORKFLOW_TASK_TYPE - return utils.ACTION_TASK_TYPE - - -class DirectWorkflowTaskSpec(TaskSpec): - _polymorphic_value = 'direct' - - _on_clause_type = { - "oneOf": [ - types.NONEMPTY_STRING, - types.UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST - ] - } - - _direct_workflow_schema = { - "type": "object", - "properties": { - "type": {"enum": [_polymorphic_value]}, - "join": { - "oneOf": [ - {"enum": ["all", "one"]}, - types.POSITIVE_INTEGER - ] - }, - "on-complete": _on_clause_type, - "on-success": _on_clause_type, - "on-error": _on_clause_type - } - } - - _schema = utils.merge_dicts(copy.deepcopy(TaskSpec._schema), - _direct_workflow_schema) - - 
def __init__(self, data): - super(DirectWorkflowTaskSpec, self).__init__(data) - - self._join = data.get('join') - self._on_complete = self.prepare_on_clause( - self._as_list_of_tuples('on-complete') - ) - self._on_success = self.prepare_on_clause( - self._as_list_of_tuples('on-success') - ) - self._on_error = self.prepare_on_clause( - self._as_list_of_tuples('on-error') - ) - - def validate_schema(self): - super(DirectWorkflowTaskSpec, self).validate_schema() - - # Validate YAQL expressions. - self._validate_transitions('on-complete') - self._validate_transitions('on-success') - self._validate_transitions('on-error') - - def _validate_transitions(self, on_clause): - val = self._data.get(on_clause, []) - - [self.validate_expr(t) - for t in ([val] if isinstance(val, six.string_types) else val)] - - @staticmethod - def prepare_on_clause(list_of_tuples): - for i, task in enumerate(list_of_tuples): - task_name, params = DirectWorkflowTaskSpec._parse_cmd_and_input( - task[0] - ) - list_of_tuples[i] = (task_name, task[1], params) - - return list_of_tuples - - def get_join(self): - return self._join - - def get_on_complete(self): - return self._on_complete - - def get_on_success(self): - return self._on_success - - def get_on_error(self): - return self._on_error - - -class ReverseWorkflowTaskSpec(TaskSpec): - _polymorphic_value = 'reverse' - - _reverse_workflow_schema = { - "type": "object", - "properties": { - "type": {"enum": [_polymorphic_value]}, - "requires": { - "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST] - } - } - } - - _schema = utils.merge_dicts(copy.deepcopy(TaskSpec._schema), - _reverse_workflow_schema) - - def __init__(self, data): - super(ReverseWorkflowTaskSpec, self).__init__(data) - - self._requires = data.get('requires', []) - - def get_requires(self): - if isinstance(self._requires, six.string_types): - return [self._requires] - - return self._requires - - -class TaskSpecList(base.BaseSpecList): - item_class = TaskSpec diff -Nru 
mistral-4.0.0/mistral/workbook/v2/workbook.py mistral-5.0.0~b2/mistral/workbook/v2/workbook.py --- mistral-4.0.0/mistral/workbook/v2/workbook.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/workbook.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -# Copyright 2014 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mistral.workbook.v2 import actions as act -from mistral.workbook.v2 import base -from mistral.workbook.v2 import workflows as wf - -# We want to match any single word that isn't exactly "version" -NON_VERSION_WORD_REGEX = "^(?!version$)[\w-]+$" - - -class WorkbookSpec(base.BaseSpec): - # See http://json-schema.org - - _action_schema = act.ActionSpec.get_schema(includes=None) - - _workflow_schema = wf.WorkflowSpec.get_schema(includes=None) - - _schema = { - "type": "object", - "properties": { - "version": {"enum": ["2.0", 2.0]}, - "actions": { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^version$": {"enum": ["2.0", 2.0]}, - NON_VERSION_WORD_REGEX: _action_schema - }, - "additionalProperties": False - }, - "workflows": { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^version$": {"enum": ["2.0", 2.0]}, - NON_VERSION_WORD_REGEX: _workflow_schema - }, - "additionalProperties": False - } - }, - "additionalProperties": False - } - - def __init__(self, data): - super(WorkbookSpec, self).__init__(data) - - 
self._inject_version(['actions', 'workflows', 'triggers']) - - self._name = data['name'] - self._description = data.get('description') - self._tags = data.get('tags', []) - self._actions = self._spec_property('actions', act.ActionSpecList) - self._workflows = self._spec_property('workflows', wf.WorkflowSpecList) - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_tags(self): - return self._tags - - def get_actions(self): - return self._actions - - def get_workflows(self): - return self._workflows diff -Nru mistral-4.0.0/mistral/workbook/v2/workflows.py mistral-5.0.0~b2/mistral/workbook/v2/workflows.py --- mistral-4.0.0/mistral/workbook/v2/workflows.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workbook/v2/workflows.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,388 +0,0 @@ -# Copyright 2015 - Mirantis, Inc. -# Copyright 2015 - StackStorm, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_utils import uuidutils -import six -import threading - -from mistral import exceptions as exc -from mistral import utils -from mistral.workbook import types -from mistral.workbook.v2 import base -from mistral.workbook.v2 import task_defaults -from mistral.workbook.v2 import tasks - - -class WorkflowSpec(base.BaseSpec): - # See http://json-schema.org - - _polymorphic_key = ('type', 'direct') - - _task_defaults_schema = task_defaults.TaskDefaultsSpec.get_schema( - includes=None) - - _meta_schema = { - "type": "object", - "properties": { - "type": types.WORKFLOW_TYPE, - "task-defaults": _task_defaults_schema, - "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST, - "output": types.NONEMPTY_DICT, - "output-on-error": types.NONEMPTY_DICT, - "vars": types.NONEMPTY_DICT - }, - "required": ["tasks"], - "additionalProperties": False - } - - def __init__(self, data): - super(WorkflowSpec, self).__init__(data) - - self._name = data['name'] - self._description = data.get('description') - self._tags = data.get('tags', []) - self._type = data['type'] if 'type' in data else 'direct' - self._input = utils.get_input_dict(data.get('input', [])) - self._output = data.get('output', {}) - self._output_on_error = data.get('output-on-error', {}) - self._vars = data.get('vars', {}) - - self._task_defaults = self._spec_property( - 'task-defaults', - task_defaults.TaskDefaultsSpec - ) - - # Inject 'type' here, so instantiate_spec function can recognize the - # specific subclass of TaskSpec. - for task in six.itervalues(self._data.get('tasks')): - task['type'] = self._type - - self._tasks = self._spec_property('tasks', tasks.TaskSpecList) - - def validate_schema(self): - super(WorkflowSpec, self).validate_schema() - - if not self._data.get('tasks'): - raise exc.InvalidModelException( - "Workflow doesn't have any tasks [data=%s]" % self._data - ) - - # Validate expressions. 
- self.validate_expr(self._data.get('output', {})) - self.validate_expr(self._data.get('vars', {})) - - def validate_semantics(self): - super(WorkflowSpec, self).validate_semantics() - - # Distinguish workflow name from workflow UUID. - if uuidutils.is_uuid_like(self._name): - raise exc.InvalidModelException( - "Workflow name cannot be in the format of UUID." - ) - - def _validate_task_link(self, task_name, allow_engine_cmds=True): - valid_task = self._task_exists(task_name) - - if allow_engine_cmds: - valid_task |= task_name in tasks.RESERVED_TASK_NAMES - - if not valid_task: - raise exc.InvalidModelException( - "Task '%s' not found." % task_name - ) - - def _task_exists(self, task_name): - return self.get_tasks()[task_name] is not None - - def get_name(self): - return self._name - - def get_description(self): - return self._description - - def get_tags(self): - return self._tags - - def get_type(self): - return self._type - - def get_input(self): - return self._input - - def get_output(self): - return self._output - - def get_output_on_error(self): - return self._output_on_error - - def get_vars(self): - return self._vars - - def get_task_defaults(self): - return self._task_defaults - - def get_tasks(self): - return self._tasks - - def get_task(self, name): - return self._tasks[name] - - -class DirectWorkflowSpec(WorkflowSpec): - _polymorphic_value = 'direct' - - _schema = { - "properties": { - "tasks": { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^\w+$": - tasks.DirectWorkflowTaskSpec.get_schema(includes=None) - } - }, - } - } - - def __init__(self, data): - super(DirectWorkflowSpec, self).__init__(data) - - # Init simple dictionary based caches for inbound and - # outbound task specifications. In fact, we don't need - # any special cache implementations here because these - # structures can't grow indefinitely. 
- self.inbound_tasks_cache_lock = threading.RLock() - self.inbound_tasks_cache = {} - self.outbound_tasks_cache_lock = threading.RLock() - self.outbound_tasks_cache = {} - - def validate_semantics(self): - super(DirectWorkflowSpec, self).validate_semantics() - - # Check if there are start tasks. - if not self.find_start_tasks(): - raise exc.DSLParsingException( - 'Failed to find start tasks in direct workflow. ' - 'There must be at least one task without inbound transition.' - '[workflow_name=%s]' % self._name - ) - - self._check_workflow_integrity() - self._check_join_tasks() - - def _check_workflow_integrity(self): - for t_s in self.get_tasks(): - out_task_names = self.find_outbound_task_names(t_s.get_name()) - - for out_t_name in out_task_names: - self._validate_task_link(out_t_name) - - def _check_join_tasks(self): - join_tasks = [t for t in self.get_tasks() if t.get_join()] - - err_msgs = [] - - for join_t in join_tasks: - t_name = join_t.get_name() - join_val = join_t.get_join() - - in_tasks = self.find_inbound_task_specs(join_t) - - if join_val == 'all': - if len(in_tasks) == 0: - err_msgs.append( - "No inbound tasks for task with 'join: all'" - " [task_name=%s]" % t_name - ) - - continue - - if join_val == 'one': - join_val = 1 - - if len(in_tasks) < join_val: - err_msgs.append( - "Not enough inbound tasks for task with 'join'" - " [task_name=%s, join=%s, inbound_tasks=%s]" % - (t_name, join_val, len(in_tasks)) - ) - - if len(err_msgs) > 0: - raise exc.InvalidModelException('\n'.join(err_msgs)) - - def find_start_tasks(self): - return [ - t_s for t_s in self.get_tasks() - if not self.has_inbound_transitions(t_s) - ] - - def find_inbound_task_specs(self, task_spec): - task_name = task_spec.get_name() - - with self.inbound_tasks_cache_lock: - specs = self.inbound_tasks_cache.get(task_name) - - if specs is not None: - return specs - - specs = [ - t_s for t_s in self.get_tasks() - if self.transition_exists(t_s.get_name(), task_name) - ] - - with 
self.inbound_tasks_cache_lock: - self.inbound_tasks_cache[task_name] = specs - - return specs - - def find_outbound_task_specs(self, task_spec): - task_name = task_spec.get_name() - - with self.outbound_tasks_cache_lock: - specs = self.outbound_tasks_cache.get(task_name) - - if specs is not None: - return specs - - specs = [ - t_s for t_s in self.get_tasks() - if self.transition_exists(task_name, t_s.get_name()) - ] - - with self.outbound_tasks_cache_lock: - self.outbound_tasks_cache[task_name] = specs - - return specs - - def has_inbound_transitions(self, task_spec): - return len(self.find_inbound_task_specs(task_spec)) > 0 - - def has_outbound_transitions(self, task_spec): - return len(self.find_outbound_task_specs(task_spec)) > 0 - - def find_outbound_task_names(self, task_name): - t_names = set() - - for tup in self.get_on_error_clause(task_name): - t_names.add(tup[0]) - - for tup in self.get_on_success_clause(task_name): - t_names.add(tup[0]) - - for tup in self.get_on_complete_clause(task_name): - t_names.add(tup[0]) - - return t_names - - def transition_exists(self, from_task_name, to_task_name): - t_names = self.find_outbound_task_names(from_task_name) - - return to_task_name in t_names - - def get_on_error_clause(self, t_name): - result = self.get_tasks()[t_name].get_on_error() - - if not result: - t_defaults = self.get_task_defaults() - - if t_defaults: - result = self._remove_task_from_clause( - t_defaults.get_on_error(), - t_name - ) - - return result - - def get_on_success_clause(self, t_name): - result = self.get_tasks()[t_name].get_on_success() - - if not result: - t_defaults = self.get_task_defaults() - - if t_defaults: - result = self._remove_task_from_clause( - t_defaults.get_on_success(), - t_name - ) - - return result - - def get_on_complete_clause(self, t_name): - result = self.get_tasks()[t_name].get_on_complete() - - if not result: - t_defaults = self.get_task_defaults() - - if t_defaults: - result = self._remove_task_from_clause( - 
t_defaults.get_on_complete(), - t_name - ) - - return result - - @staticmethod - def _remove_task_from_clause(on_clause, t_name): - return list([tup for tup in on_clause if tup[0] != t_name]) - - -class ReverseWorkflowSpec(WorkflowSpec): - _polymorphic_value = 'reverse' - - _schema = { - "properties": { - "tasks": { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^\w+$": - tasks.ReverseWorkflowTaskSpec.get_schema(includes=None) - } - }, - } - } - - def validate_semantics(self): - super(ReverseWorkflowSpec, self).validate_semantics() - - self._check_workflow_integrity() - - def _check_workflow_integrity(self): - for t_s in self.get_tasks(): - for req in self.get_task_requires(t_s): - self._validate_task_link(req, allow_engine_cmds=False) - - def get_task_requires(self, task_spec): - requires = set(task_spec.get_requires()) - - defaults = self.get_task_defaults() - - if defaults: - requires |= set(defaults.get_requires()) - - requires.discard(task_spec.get_name()) - - return list(requires) - - -class WorkflowSpecList(base.BaseSpecList): - item_class = WorkflowSpec - - -class WorkflowListSpec(base.BaseListSpec): - item_class = WorkflowSpec - - def get_workflows(self): - return self.get_items() diff -Nru mistral-4.0.0/mistral/workflow/base.py mistral-5.0.0~b2/mistral/workflow/base.py --- mistral-4.0.0/mistral/workflow/base.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workflow/base.py 2017-06-09 12:48:26.000000000 +0000 @@ -21,8 +21,8 @@ from osprofiler import profiler from mistral import exceptions as exc +from mistral.lang import parser as spec_parser from mistral import utils as u -from mistral.workbook import parser as spec_parser from mistral.workflow import commands from mistral.workflow import data_flow from mistral.workflow import lookup_utils @@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__) -@profiler.trace('wf-controller-get-controller') +@profiler.trace('wf-controller-get-controller', hide_args=True) def 
get_controller(wf_ex, wf_spec=None): """Gets a workflow controller instance by given workflow execution object. @@ -62,6 +62,33 @@ return ctrl_cls(wf_ex, wf_spec) +class TaskLogicalState(object): + """Task logical state. + + This data structure describes what state a task should have according + to the logic of the workflow type and state of other tasks. + """ + + def __init__(self, state, state_info=None, cardinality=0, + triggered_by=None): + self.state = state + self.state_info = state_info + self.cardinality = cardinality + self.triggered_by = triggered_by or [] + + def get_state(self): + return self.state + + def get_state_info(self): + return self.state_info + + def get_cardinality(self): + return self.cardinality + + def get_triggered_by(self): + return self.get_triggered_by + + class WorkflowController(object): """Workflow Controller base class. @@ -85,7 +112,7 @@ self.wf_spec = wf_spec - @profiler.trace('workflow-controller-continue-workflow') + @profiler.trace('workflow-controller-continue-workflow', hide_args=True) def continue_workflow(self, task_ex=None): """Calculates a list of commands to continue the workflow. diff -Nru mistral-4.0.0/mistral/workflow/commands.py mistral-5.0.0~b2/mistral/workflow/commands.py --- mistral-4.0.0/mistral/workflow/commands.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workflow/commands.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mistral.workbook import parser as spec_parser -from mistral.workbook.v2 import tasks +from mistral.lang import parser as spec_parser +from mistral.lang.v2 import tasks from mistral.workflow import states @@ -22,16 +22,17 @@ """Workflow command. A set of workflow commands form a communication protocol between workflow - handler and its clients. When workflow handler makes a decision about + controller and its clients. 
When workflow controller makes a decision about how to continue a workflow it returns a set of commands so that a caller knows what to do next. """ - def __init__(self, wf_ex, wf_spec, task_spec, ctx): + def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None): self.wf_ex = wf_ex self.wf_spec = wf_spec self.task_spec = task_spec self.ctx = ctx or {} + self.triggered_by = triggered_by class Noop(WorkflowCommand): @@ -44,8 +45,14 @@ class RunTask(WorkflowCommand): """Instruction to run a workflow task.""" - def __init__(self, wf_ex, wf_spec, task_spec, ctx): - super(RunTask, self).__init__(wf_ex, wf_spec, task_spec, ctx) + def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None): + super(RunTask, self).__init__( + wf_ex, + wf_spec, + task_spec, + ctx, + triggered_by=triggered_by + ) self.wait = False self.unique_key = None @@ -58,20 +65,26 @@ def __repr__(self): return ( - "Run task [workflow=%s, task=%s, waif_flag=%s]" - % (self.wf_ex.name, self.task_spec.get_name(), self.wait) + "Run task [workflow=%s, task=%s, waif_flag=%s, triggered_by=%s]" % + ( + self.wf_ex.name, + self.task_spec.get_name(), + self.wait, + self.triggered_by + ) ) class RunExistingTask(WorkflowCommand): """Command for running already existent task.""" - def __init__(self, wf_ex, wf_spec, task_ex, reset=True): + def __init__(self, wf_ex, wf_spec, task_ex, reset=True, triggered_by=None): super(RunExistingTask, self).__init__( wf_ex, wf_spec, spec_parser.get_task_spec(task_ex.spec), - task_ex.in_context + task_ex.in_context, + triggered_by=triggered_by ) self.task_ex = task_ex @@ -82,8 +95,15 @@ class SetWorkflowState(WorkflowCommand): """Instruction to change a workflow state.""" - def __init__(self, wf_ex, wf_spec, task_spec, ctx, new_state, msg): - super(SetWorkflowState, self).__init__(wf_ex, wf_spec, task_spec, ctx) + def __init__(self, wf_ex, wf_spec, task_spec, ctx, new_state, msg=None, + triggered_by=None): + super(SetWorkflowState, self).__init__( + wf_ex, + 
wf_spec, + task_spec, + ctx, + triggered_by=triggered_by + ) self.new_state = new_state self.msg = msg @@ -92,14 +112,16 @@ class FailWorkflow(SetWorkflowState): """Instruction to fail a workflow.""" - def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None): + def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None, + triggered_by=None): super(FailWorkflow, self).__init__( wf_ex, wf_spec, task_spec, ctx, states.ERROR, - msg + msg=msg, + triggered_by=triggered_by ) def __repr__(self): @@ -109,14 +131,16 @@ class SucceedWorkflow(SetWorkflowState): """Instruction to succeed a workflow.""" - def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None): + def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None, + triggered_by=None): super(SucceedWorkflow, self).__init__( wf_ex, wf_spec, task_spec, ctx, states.SUCCESS, - msg + msg=msg, + triggered_by=triggered_by ) def __repr__(self): @@ -126,14 +150,16 @@ class PauseWorkflow(SetWorkflowState): """Instruction to pause a workflow.""" - def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None): + def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None, + triggered_by=None): super(PauseWorkflow, self).__init__( wf_ex, wf_spec, task_spec, ctx, states.PAUSED, - msg + msg=msg, + triggered_by=triggered_by ) def __repr__(self): @@ -155,7 +181,7 @@ def create_command(cmd_name, wf_ex, wf_spec, task_spec, ctx, - explicit_params=None): + params=None, triggered_by=None): cmd_cls = get_command_class(cmd_name) or RunTask if issubclass(cmd_cls, SetWorkflowState): @@ -164,7 +190,14 @@ wf_spec, task_spec, ctx, - explicit_params.get('msg') + msg=params.get('msg'), + triggered_by=triggered_by ) else: - return cmd_cls(wf_ex, wf_spec, task_spec, ctx) + return cmd_cls( + wf_ex, + wf_spec, + task_spec, + ctx, + triggered_by=triggered_by + ) diff -Nru mistral-4.0.0/mistral/workflow/data_flow.py mistral-5.0.0~b2/mistral/workflow/data_flow.py --- mistral-4.0.0/mistral/workflow/data_flow.py 2017-02-22 13:40:59.000000000 +0000 
+++ mistral-5.0.0~b2/mistral/workflow/data_flow.py 2017-06-09 12:48:26.000000000 +0000 @@ -22,9 +22,9 @@ from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral import expressions as expr +from mistral.lang import parser as spec_parser from mistral import utils from mistral.utils import inspect_utils -from mistral.workbook import parser as spec_parser from mistral.workflow import states LOG = logging.getLogger(__name__) @@ -190,11 +190,7 @@ wf_ex = task_ex.workflow_execution - expr_ctx = ContextView( - task_ex.in_context, - wf_ex.context, - wf_ex.input - ) + expr_ctx = ContextView(task_ex.in_context, wf_ex.context, wf_ex.input) if task_ex.name in expr_ctx: LOG.warning( @@ -202,13 +198,29 @@ task_ex.name ) - data = ( - task_spec.get_publish() - if task_ex.state == states.SUCCESS - else task_spec.get_publish_on_error() + publish_spec = task_spec.get_publish(task_ex.state) + + if not publish_spec: + return + + # Publish branch variables. + branch_vars = publish_spec.get_branch() + + task_ex.published = expr.evaluate_recursively(branch_vars, expr_ctx) + + # Publish global variables. + global_vars = publish_spec.get_global() + + utils.merge_dicts( + task_ex.workflow_execution.context, + expr.evaluate_recursively(global_vars, expr_ctx) ) - task_ex.published = expr.evaluate_recursively(data, expr_ctx) + # TODO(rakhmerov): + # 1. Publish atomic variables. + # 2. 
Add the field "publish" in TaskExecution model similar to "published" + # but containing info as + # {'branch': {vars}, 'global': {vars}, 'atomic': {vars}} def evaluate_task_outbound_context(task_ex): @@ -224,8 +236,6 @@ if task_ex.in_context is not None else {} ) - remove_current_task_from_context(in_context) - return utils.update_dict(in_context, task_ex.published) @@ -256,7 +266,7 @@ return ctx -def remove_current_task_from_context(ctx): +def remove_internal_data_from_context(ctx): if '__task_execution' in ctx: del ctx['__task_execution'] @@ -267,8 +277,6 @@ if CONF.pecan.auth_enable: exec_ctx = auth_ctx.ctx() - LOG.debug('Data flow security context: %s' % exec_ctx) - if exec_ctx: wf_ex.context.update({'openstack': exec_ctx.to_dict()}) @@ -289,8 +297,14 @@ if 'env' in wf_ex.params: env = copy.deepcopy(wf_ex.params['env']) - # An env variable can be an expression of other env variables. - wf_ex.context['__env'] = expr.evaluate_recursively(env, {'__env': env}) + if ('evaluate_env' in wf_ex.params and + not wf_ex.params['evaluate_env']): + wf_ex.context['__env'] = env + else: + wf_ex.context['__env'] = expr.evaluate_recursively( + env, + {'__env': env} + ) def add_workflow_variables_to_context(wf_ex, wf_spec): diff -Nru mistral-4.0.0/mistral/workflow/direct_workflow.py mistral-5.0.0~b2/mistral/workflow/direct_workflow.py --- mistral-4.0.0/mistral/workflow/direct_workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workflow/direct_workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -61,8 +61,9 @@ if not t_spec.get_join(): return t_ex_candidate.processed - induced_state, _ = self._get_induced_join_state( + induced_state, _, _ = self._get_induced_join_state( self.wf_spec.get_tasks()[t_ex_candidate.name], + self._find_task_execution_by_name(t_ex_candidate.name), t_spec ) @@ -116,7 +117,7 @@ ctx = data_flow.evaluate_task_outbound_context(task_ex) - for t_n, params in self._find_next_tasks(task_ex, ctx=ctx): + for t_n, params, event_name in 
self._find_next_tasks(task_ex, ctx=ctx): t_s = self.wf_spec.get_tasks()[t_n] if not (t_s or t_n in commands.RESERVED_CMDS): @@ -124,13 +125,23 @@ elif not t_s: t_s = self.wf_spec.get_tasks()[task_ex.name] + data_flow.remove_internal_data_from_context(ctx) + + triggered_by = [ + { + 'task_id': task_ex.id, + 'event': event_name + } + ] + cmd = commands.create_command( t_n, self.wf_ex, self.wf_spec, t_s, ctx, - params + params=params, + triggered_by=triggered_by ) self._configure_if_join(cmd) @@ -159,12 +170,14 @@ def evaluate_workflow_final_context(self): ctx = {} - for t_ex in self._find_end_tasks(): + for t_ex in self._find_end_task_executions(): ctx = utils.merge_dicts( ctx, data_flow.evaluate_task_outbound_context(t_ex) ) + data_flow.remove_internal_data_from_context(ctx) + return ctx def get_logical_task_state(self, task_ex): @@ -174,7 +187,7 @@ # A simple 'non-join' task does not have any preconditions # based on state of other tasks so its logical state always # equals to its real state. - return task_ex.state, task_ex.state_info, 0 + return base.TaskLogicalState(task_ex.state, task_ex.state_info) return self._get_join_logical_state(task_spec) @@ -199,7 +212,7 @@ return True - def _find_end_tasks(self): + def _find_end_task_executions(self): def is_end_task(t_ex): try: return not self._has_outbound_tasks(t_ex) @@ -211,8 +224,10 @@ return True return list( - filter(is_end_task, - lookup_utils.find_completed_tasks(self.wf_ex.id)) + filter( + is_end_task, + lookup_utils.find_completed_task_executions(self.wf_ex.id) + ) ) def _has_outbound_tasks(self, task_ex): @@ -238,33 +253,33 @@ self.wf_ex.input ) - t_names_and_params = [] + # [(task_name, params, 'on-success'|'on-error'|'on-complete'), ...] 
+ result = [] - if states.is_completed(t_state) and not states.is_cancelled(t_state): - t_names_and_params += ( - self._find_next_tasks_for_clause( - self.wf_spec.get_on_complete_clause(t_name), - ctx_view - ) - ) + def process_clause(clause, event_name): + task_tuples = self._find_next_tasks_for_clause(clause, ctx_view) - if t_state == states.ERROR: - t_names_and_params += ( - self._find_next_tasks_for_clause( - self.wf_spec.get_on_error_clause(t_name), - ctx_view - ) + for t in task_tuples: + result.append((t[0], t[1], event_name)) + + if t_state == states.SUCCESS: + process_clause( + self.wf_spec.get_on_success_clause(t_name), + 'on-success' + ) + elif t_state == states.ERROR: + process_clause( + self.wf_spec.get_on_error_clause(t_name), + 'on-error' ) - elif t_state == states.SUCCESS: - t_names_and_params += ( - self._find_next_tasks_for_clause( - self.wf_spec.get_on_success_clause(t_name), - ctx_view - ) + if states.is_completed(t_state) and not states.is_cancelled(t_state): + process_clause( + self.wf_spec.get_on_complete_clause(t_name), + 'on-complete' ) - return t_names_and_params + return result @staticmethod def _find_next_tasks_for_clause(clause, ctx): @@ -273,7 +288,7 @@ This method finds next task(command) base on given {name: condition} dictionary. - :param clause: Dictionary {task_name: condition} taken from + :param clause: Tuple (task_name, condition, parameters) taken from 'on-complete', 'on-success' or 'on-error' clause. :param ctx: Context that clause expressions should be evaluated against of. @@ -293,18 +308,18 @@ """Evaluates logical state of 'join' task. :param task_spec: 'join' task specification. - :return: Tuple (state, state_info, spec_cardinality) where 'state' and - 'state_info' describe the logical state of the given 'join' - task and 'spec_cardinality' gives the remaining number of - unfulfilled preconditions. If logical state is not WAITING then - 'spec_cardinality' should always be 0. 
+ :return: TaskLogicalState (state, state_info, cardinality, + triggered_by) where 'state' and 'state_info' describe the logical + state of the given 'join' task and 'cardinality' gives the + remaining number of unfulfilled preconditions. If logical state + is not WAITING then 'cardinality' should always be 0. """ # TODO(rakhmerov): We need to use task_ex instead of task_spec # in order to cover a use case when there's more than one instance # of the same 'join' task in a workflow. # TODO(rakhmerov): In some cases this method will be expensive because - # it uses a multistep recursive search. We need to optimize it moving + # it uses a multi-step recursive search. We need to optimize it moving # forward (e.g. with Workflow Execution Graph). join_expr = task_spec.get_join() @@ -312,102 +327,142 @@ in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec) if not in_task_specs: - return states.RUNNING, None, 0 + return base.TaskLogicalState(states.RUNNING) - # List of tuples (task_name, (state, depth)). - induced_states = [ - (t_s.get_name(), self._get_induced_join_state(t_s, task_spec)) - for t_s in in_task_specs - ] + # List of tuples (task_name, task_ex, state, depth, event_name). 
+ induced_states = [] + + for t_s in in_task_specs: + t_ex = self._find_task_execution_by_name(t_s.get_name()) + + tup = self._get_induced_join_state(t_s, t_ex, task_spec) + + induced_states.append( + ( + t_s.get_name(), + t_ex, + tup[0], + tup[1], + tup[2] + ) + ) def count(state): cnt = 0 total_depth = 0 for s in induced_states: - if s[1][0] == state: + if s[2] == state: cnt += 1 - total_depth += s[1][1] + total_depth += s[3] return cnt, total_depth - errors_tuples = count(states.ERROR) + errors_tuple = count(states.ERROR) runnings_tuple = count(states.RUNNING) total_count = len(induced_states) def _blocked_message(): return ( 'Blocked by tasks: %s' % - [s[0] for s in induced_states if s[1][0] == states.WAITING] + [s[0] for s in induced_states if s[2] == states.WAITING] ) def _failed_message(): return ( 'Failed by tasks: %s' % - [s[0] for s in induced_states if s[1][0] == states.ERROR] + [s[0] for s in induced_states if s[2] == states.ERROR] ) + def _triggered_by(state): + return [ + {'task_id': s[1].id, 'event': s[4]} + for s in induced_states + if s[2] == state and s[1] is not None + ] + # If "join" is configured as a number or 'one'. if isinstance(join_expr, int) or join_expr == 'one': spec_cardinality = 1 if join_expr == 'one' else join_expr if runnings_tuple[0] >= spec_cardinality: - return states.RUNNING, None, 0 + return base.TaskLogicalState( + states.RUNNING, + triggered_by=_triggered_by(states.RUNNING) + ) # E.g. 'join: 3' with inbound [ERROR, ERROR, RUNNING, WAITING] # No chance to get 3 RUNNING states. - if errors_tuples[0] > (total_count - spec_cardinality): - return states.ERROR, _failed_message(), 0 + if errors_tuple[0] > (total_count - spec_cardinality): + return base.TaskLogicalState(states.ERROR, _failed_message()) # Calculate how many tasks need to finish to trigger this 'join'. 
cardinality = spec_cardinality - runnings_tuple[0] - return states.WAITING, _blocked_message(), cardinality + return base.TaskLogicalState( + states.WAITING, + _blocked_message(), + cardinality=cardinality + ) if join_expr == 'all': if total_count == runnings_tuple[0]: - return states.RUNNING, None, 0 + return base.TaskLogicalState( + states.RUNNING, + triggered_by=_triggered_by(states.RUNNING) + ) - if errors_tuples[0] > 0: - return states.ERROR, _failed_message(), 0 + if errors_tuple[0] > 0: + return base.TaskLogicalState( + states.ERROR, + _failed_message(), + triggered_by=_triggered_by(states.ERROR) + ) # Remaining cardinality is just a difference between all tasks and # a number of those tasks that induce RUNNING state. cardinality = total_count - runnings_tuple[1] - return states.WAITING, _blocked_message(), cardinality + return base.TaskLogicalState( + states.WAITING, + _blocked_message(), + cardinality=cardinality + ) raise RuntimeError('Unexpected join expression: %s' % join_expr) # TODO(rakhmerov): Method signature is incorrect given that # we may have multiple task executions for a task. It should # accept inbound task execution rather than a spec. 
- def _get_induced_join_state(self, inbound_task_spec, join_task_spec): + def _get_induced_join_state(self, in_task_spec, in_task_ex, + join_task_spec): join_task_name = join_task_spec.get_name() - in_task_ex = self._find_task_execution_by_name( - inbound_task_spec.get_name() - ) - if not in_task_ex: - possible, depth = self._possible_route(inbound_task_spec) + possible, depth = self._possible_route(in_task_spec) if possible: - return states.WAITING, depth + return states.WAITING, depth, None else: - return states.ERROR, depth + return states.ERROR, depth, 'impossible route' if not states.is_completed(in_task_ex.state): - return states.WAITING, 1 + return states.WAITING, 1, None - if join_task_name not in self._find_next_task_names(in_task_ex): - return states.ERROR, 1 + # [(task name, params, event name), ...] + next_tasks_tuples = self._find_next_tasks(in_task_ex) - return states.RUNNING, 1 + next_tasks_dict = {tup[0]: tup[2] for tup in next_tasks_tuples} + + if join_task_name not in next_tasks_dict: + return states.ERROR, 1, "not triggered" + + return states.RUNNING, 1, next_tasks_dict[join_task_name] def _find_task_execution_by_name(self, t_name): # Note: in case of 'join' completion check it's better to initialize # the entire task_executions collection to avoid too many DB queries. 
+ t_execs = lookup_utils.find_task_executions_by_name( self.wf_ex.id, t_name diff -Nru mistral-4.0.0/mistral/workflow/lookup_utils.py mistral-5.0.0~b2/mistral/workflow/lookup_utils.py --- mistral-4.0.0/mistral/workflow/lookup_utils.py 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workflow/lookup_utils.py 2017-06-09 12:48:26.000000000 +0000 @@ -35,8 +35,21 @@ from mistral.db.v2 import api as db_api from mistral.workflow import states -_TASK_EXECUTIONS_CACHE_LOCK = threading.RLock() -_TASK_EXECUTIONS_CACHE = cachetools.LRUCache(maxsize=20000) + +def _create_lru_cache_for_workflow_execution(wf_ex_id): + return cachetools.LRUCache(maxsize=500) + +# This is a two-level caching structure. +# First level: [ -> ] +# Second level (task execution cache): [ -> ] +# The first level (by workflow execution id) allows to invalidate +# needed cache entry when the workflow gets completed. +_TASK_EX_CACHE = cachetools.LRUCache( + maxsize=100, + missing=_create_lru_cache_for_workflow_execution +) + +_CACHE_LOCK = threading.RLock() def find_task_executions_by_name(wf_ex_id, task_name): @@ -46,10 +59,8 @@ :param task_name: Task name. :return: Task executions (possibly a cached value). 
""" - cache_key = (wf_ex_id, task_name) - - with _TASK_EXECUTIONS_CACHE_LOCK: - t_execs = _TASK_EXECUTIONS_CACHE.get(cache_key) + with _CACHE_LOCK: + t_execs = _TASK_EX_CACHE[wf_ex_id].get(task_name) if t_execs: return t_execs @@ -66,8 +77,8 @@ ) if all_finished: - with _TASK_EXECUTIONS_CACHE_LOCK: - _TASK_EXECUTIONS_CACHE[cache_key] = t_execs + with _CACHE_LOCK: + _TASK_EX_CACHE[wf_ex_id][task_name] = t_execs return t_execs @@ -104,10 +115,20 @@ return find_task_executions_with_state(wf_ex_id, states.CANCELLED) -def find_completed_tasks(wf_ex_id): +def find_completed_task_executions(wf_ex_id): return db_api.get_completed_task_executions(workflow_execution_id=wf_ex_id) -def clean_caches(): - with _TASK_EXECUTIONS_CACHE_LOCK: - _TASK_EXECUTIONS_CACHE.clear() +def get_task_execution_cache_size(): + return len(_TASK_EX_CACHE) + + +def invalidate_cached_task_executions(wf_ex_id): + with _CACHE_LOCK: + if wf_ex_id in _TASK_EX_CACHE: + del _TASK_EX_CACHE[wf_ex_id] + + +def clear_caches(): + with _CACHE_LOCK: + _TASK_EX_CACHE.clear() diff -Nru mistral-4.0.0/mistral/workflow/reverse_workflow.py mistral-5.0.0~b2/mistral/workflow/reverse_workflow.py --- mistral-4.0.0/mistral/workflow/reverse_workflow.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral/workflow/reverse_workflow.py 2017-06-09 12:48:26.000000000 +0000 @@ -116,7 +116,7 @@ def get_logical_task_state(self, task_ex): # TODO(rakhmerov): Implement. 
- return task_ex.state, task_ex.state_info, 0 + return base.TaskLogicalState(task_ex.state, task_ex.state_info) def is_error_handled_for(self, task_ex): return task_ex.state != states.ERROR diff -Nru mistral-4.0.0/mistral.egg-info/entry_points.txt mistral-5.0.0~b2/mistral.egg-info/entry_points.txt --- mistral-4.0.0/mistral.egg-info/entry_points.txt 2017-02-22 13:45:26.000000000 +0000 +++ mistral-5.0.0~b2/mistral.egg-info/entry_points.txt 2017-06-09 12:52:03.000000000 +0000 @@ -25,11 +25,9 @@ keycloak-oidc = mistral.auth.keycloak:KeycloakAuthHandler keystone = mistral.auth.keystone:KeystoneAuthHandler -[mistral.engine.rpc_backend] -kombu_client = mistral.engine.rpc_backend.kombu.kombu_client:KombuRPCClient -kombu_server = mistral.engine.rpc_backend.kombu.kombu_server:KombuRPCServer -oslo_client = mistral.engine.rpc_backend.oslo.oslo_client:OsloRPCClient -oslo_server = mistral.engine.rpc_backend.oslo.oslo_server:OsloRPCServer +[mistral.executors] +local = mistral.executors.default_executor:DefaultExecutor +remote = mistral.executors.remote_executor:RemoteExecutor [mistral.expression.evaluators] jinja = mistral.expressions.jinja_expression:InlineJinjaEvaluator @@ -38,17 +36,30 @@ [mistral.expression.functions] env = mistral.utils.expression_utils:env_ execution = mistral.utils.expression_utils:execution_ +global = mistral.utils.expression_utils:global_ json_pp = mistral.utils.expression_utils:json_pp_ task = mistral.utils.expression_utils:task_ tasks = mistral.utils.expression_utils:tasks_ uuid = mistral.utils.expression_utils:uuid_ +[mistral.rpc.backends] +kombu_client = mistral.rpc.kombu.kombu_client:KombuRPCClient +kombu_server = mistral.rpc.kombu.kombu_server:KombuRPCServer +oslo_client = mistral.rpc.oslo.oslo_client:OsloRPCClient +oslo_server = mistral.rpc.oslo.oslo_server:OsloRPCServer + [oslo.config.opts] mistral.config = mistral.config:list_opts [oslo.config.opts.defaults] mistral.config = mistral.config:set_cors_middleware_defaults +[pygments.lexers] +mistral 
= mistral.ext.pygmentplugin:MistralLexer + [tempest.test_plugins] mistral_test = mistral_tempest_tests.plugin:MistralTempestPlugin +[wsgi_scripts] +mistral-wsgi-api = mistral.api.app:init_wsgi + diff -Nru mistral-4.0.0/mistral.egg-info/pbr.json mistral-5.0.0~b2/mistral.egg-info/pbr.json --- mistral-4.0.0/mistral.egg-info/pbr.json 2017-02-22 13:45:26.000000000 +0000 +++ mistral-5.0.0~b2/mistral.egg-info/pbr.json 2017-06-09 12:52:03.000000000 +0000 @@ -1 +1 @@ -{"is_release": true, "git_version": "b8927d8"} \ No newline at end of file +{"git_version": "8d51753", "is_release": true} \ No newline at end of file diff -Nru mistral-4.0.0/mistral.egg-info/PKG-INFO mistral-5.0.0~b2/mistral.egg-info/PKG-INFO --- mistral-4.0.0/mistral.egg-info/PKG-INFO 2017-02-22 13:45:26.000000000 +0000 +++ mistral-5.0.0~b2/mistral.egg-info/PKG-INFO 2017-06-09 12:52:03.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: mistral -Version: 4.0.0 +Version: 5.0.0.0b2 Summary: Mistral Project Home-page: http://docs.openstack.org/developer/mistral Author: OpenStack Mistral Team @@ -10,41 +10,24 @@ Team and repository tags ======================== - .. image:: http://governance.openstack.org/badges/mistral.svg - :target: http://governance.openstack.org/reference/tags/index.html - - .. Change things from this point on + .. image:: https://governance.openstack.org/badges/mistral.svg + :target: https://governance.openstack.org/reference/tags/index.html Mistral ======= - Workflow Service for OpenStack cloud. + Workflow Service for OpenStack cloud. This project aims to provide a mechanism + to define tasks and workflows without writing code, manage and execute them in + the cloud environment. Installation ~~~~~~~~~~~~ - Prerequisites - ------------- - - It is necessary to install some specific system libs for installing Mistral. 
- They can be installed on most popular operating systems using their package - manager (for Ubuntu - *apt*, for Fedora, CentOS - *yum*, for Mac OS - *brew* - or *macports*). - - The list of needed packages is shown below: - - * **python-dev** - * **python-setuptools** - * **python-pip** - * **libffi-dev** - * **libxslt1-dev (or libxslt-dev)** - * **libxml2-dev** - * **libyaml-dev** - * **libssl-dev** + The following are the steps to install Mistral on debian-based systems. - In case of ubuntu, just run:: + To install Mistral, you have to install the following prerequisites:: - apt-get install python-dev python-setuptools libffi-dev \ + $ apt-get install python-dev python-setuptools libffi-dev \ libxslt1-dev libxml2-dev libyaml-dev libssl-dev **Mistral can be used without authentication at all or it can work with @@ -68,23 +51,6 @@ Information about how to install Mistral with devstack can be found `here `_. - **Virtualenv installation**:: - - $ tox - - This will install necessary virtual environments and run all the project tests. - Installing virtual environments may take significant time (~10-15 mins). - - **Local installation**:: - - $ pip install -e . - - or:: - - $ pip install -r requirements.txt - $ python setup.py install - - Configuring Mistral ~~~~~~~~~~~~~~~~~~~ @@ -111,9 +77,9 @@ * Create the database and grant privileges:: $ mysql -u root -p - CREATE DATABASE mistral; - USE mistral - GRANT ALL ON mistral.* TO 'root'@'localhost'; + mysql> CREATE DATABASE mistral; + mysql> USE mistral + mysql> GRANT ALL ON mistral.* TO 'root'@'localhost'; #. Generate ``mistral.conf`` file:: @@ -173,9 +139,8 @@ of OpenStack projects in your deployment. Please find more detailed information in the ``tools/get_action_list.py`` script. - Before the First Run - ~~~~~~~~~~~~~~~~~~~~ + -------------------- After local installation you will find the commands ``mistral-server`` and ``mistral-db-manage`` available in your environment. 
The ``mistral-db-manage`` @@ -188,50 +153,35 @@ $ mistral-db-manage --config-file upgrade head + To populate the database with standard actions and workflows, type:: + $ mistral-db-manage --config-file populate + For more detailed information about ``mistral-db-manage`` script please check file ``mistral/db/sqlalchemy/migration/alembic_migrations/README.md``. - ** NOTE: For users want a dry run with SQLite backend(not used in production), - ``mistral-db-manage`` is not recommended for database initialization due to - `SQLite limitations `_. Please use - ``sync_db`` script described below instead for database initialization. - - Before starting Mistral server, run ``sync_db`` script. It prepares the DB, - creates in it with all standard actions and standard workflows which Mistral - provides for all mistral users. - - If you are using virtualenv:: - - $ tools/sync_db.sh --config-file - - Or run ``sync_db`` directly:: - - $ python tools/sync_db.py --config-file - - Running Mistral API server - ~~~~~~~~~~~~~~~~~~~~~~~~~~ + -------------------------- To run Mistral API server:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api --config-file + --server api --config-file Running Mistral Engines - ~~~~~~~~~~~~~~~~~~~~~~~ + ----------------------- To run Mistral Engine:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server engine --config-file + --server engine --config-file Running Mistral Task Executors - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ------------------------------ To run Mistral Task Executor instance:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server executor --config-file + --server executor --config-file Note that at least one Engine instance and one Executor instance should be running in order for workflow tasks to be processed by Mistral. @@ -253,13 +203,13 @@ ... Workflow YAML ... 
Running Multiple Mistral Servers Under the Same Process - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ------------------------------------------------------- To run more than one server (API, Engine, or Task Executor) on the same process:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api,engine --config-file + --server api,engine --config-file The value for the ``--server`` option can be a comma-delimited list. The valid options are ``all`` (which is the default if not specified) or any combination @@ -270,78 +220,59 @@ servers are launched on the same process. Otherwise, messages do not get delivered because the ``fake`` transport is using an in-process queue. + Project Goals 2017 + ------------------ - Mistral Client - ~~~~~~~~~~~~~~ - - The Mistral command line tool is provided by the ``python-mistralclient`` - package which is available - `here `__. - - - Debugging - ~~~~~~~~~ - - To debug using a local engine and executor without dependencies such as - RabbitMQ, make sure your ``/etc/mistral/mistral.conf`` has the following settings:: - - [DEFAULT] - rpc_backend = fake - - [pecan] - auth_enable = False - - and run the following command in *pdb*, *PyDev* or *PyCharm*:: - - mistral/cmd/launch.py --server all --config-file /etc/mistral/mistral.conf --use-debugger - - .. note:: - - In PyCharm, you also need to enable the Gevent compatibility flag in - Settings -> Build, Execution, Deployment -> Python Debugger -> Gevent - compatible. Without this setting, PyCharm will not show variable values - and become unstable during debugging. - - - Running unit tests in PyCharm - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + #. **Complete Mistral documentation**. - In order to be able to conveniently run unit tests, you need to: + Mistral documentation should be more usable. It requires focused work to + make it well structured, eliminate gaps in API/Mistral Workflow Language + specifications, add more examples and tutorials. - 1. 
Set unit tests as the default runner: + *Definition of done*: + All capabilities are covered, all documentation topics are written using + the same style and structure principles. The obvious sub-goal of this goal + is to establish these principles. - Settings -> Tools -> Python Integrated Tools -> Default test runner: Unittests + #. **Complete Mistral Custom Actions API**. - 2. Enable test detection for all classes: + There has been the initiative in Mistral team since April of 2016 to + refactor Mistral actions subsystem in order to make the process of + developing Mistral actions easier and clearer. In 2017 we need to complete + this effort and make sure that all APIs are stable and it’s well-documented. - Run/Debug Configurations -> Defaults -> Python tests -> Unittests -> uncheck - Inspect only subclasses of unittest.TestCase + *Definition of done*: + All API interfaces are stable, existing actions are rewritten using this new + API, OpenStack actions are also rewritten based on the new API and moved to + mistral-extra repo. Everything is well documented and the doc has enough + examples. - Running examples - ~~~~~~~~~~~~~~~~ + #. **Finish Mistral multi-node mode**. - To run the examples find them in mistral-extra repository - (https://github.com/openstack/mistral-extra) and follow the instructions on - each example. + Mistral needs to be proven to work reliably in multi-node mode. In order + to achieve it we need to make a number of engine, executor and RPC + changes and configure a CI gate to run stress tests on multi-node Mistral. + *Definition of done*: + CI gate supports MySQL, all critically important functionality (join, + with-items, parallel workflows, sequential workflows) is covered by tests. - Tests - ~~~~~ + #. **Reduce workflow execution time**. - You can run some of the functional tests in non-openstack mode locally. To do - this: + *Definition of done*: Average workflow execution time reduced by 30%. - #. 
set ``auth_enable = False`` in the ``mistral.conf`` and restart Mistral - #. execute:: + Project Resources + ----------------- - $ ./run_functional_tests.sh + * `Mistral Official Documentation `_ - To run tests for only one version need to specify it:: + * Project status, bugs, and blueprints are tracked on + `Launchpad `_ - $ bash run_functional_tests.sh v1 + * Additional resources are linked from the project + `Wiki `_ page - More information about automated tests for Mistral can be found on - `Mistral Wiki `_. + * Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 Platform: UNKNOWN diff -Nru mistral-4.0.0/mistral.egg-info/requires.txt mistral-5.0.0~b2/mistral.egg-info/requires.txt --- mistral-4.0.0/mistral.egg-info/requires.txt 2017-02-22 13:45:26.000000000 +0000 +++ mistral-5.0.0~b2/mistral.egg-info/requires.txt 2017-06-09 12:52:03.000000000 +0000 @@ -1,41 +1,42 @@ alembic>=0.8.10 aodhclient>=0.7.0 -Babel>=2.3.4 +Babel!=2.4.0,>=2.3.4 croniter>=0.3.4 cachetools>=1.1.0 -eventlet!=0.18.3,>=0.18.2 +eventlet!=0.18.3,<0.21.0,>=0.18.2 gnocchiclient>=2.7.0 Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 jsonschema!=2.5.0,<3.0.0,>=2.0.0 keystonemiddleware>=4.12.0 +mistral-lib>=0.2.0 networkx>=1.10 oslo.concurrency>=3.8.0 -oslo.config!=3.18.0,>=3.14.0 -oslo.db>=4.15.0 -oslo.i18n>=2.1.0 -oslo.messaging>=5.14.0 -oslo.middleware>=3.0.0 +oslo.config>=4.0.0 +oslo.db>=4.21.1 +oslo.i18n!=3.15.2,>=2.1.0 +oslo.messaging!=5.25.0,>=5.24.2 +oslo.middleware>=3.27.0 oslo.policy>=1.17.0 -oslo.utils>=3.18.0 -oslo.log>=3.11.0 +oslo.utils>=3.20.0 +oslo.log>=3.22.0 oslo.serialization>=1.10.0 oslo.service>=1.10.0 osprofiler>=1.4.0 paramiko>=2.0 -pbr>=1.8 +pbr!=2.1.0,>=2.0.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 python-barbicanclient>=4.0.0 python-ceilometerclient>=2.5.0 -python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 +python-cinderclient>=2.1.0 python-designateclient>=1.5.0 -python-glanceclient>=2.5.0 +python-glanceclient>=2.7.0 python-heatclient>=1.6.1 
python-keystoneclient>=3.8.0 -python-mistralclient>=2.0.0 +python-mistralclient>=3.1.0 python-magnumclient>=2.0.0 python-muranoclient>=0.8.2 -python-neutronclient>=5.1.0 -python-novaclient!=7.0.0,>=6.0.0 +python-neutronclient>=6.3.0 +python-novaclient>=7.1.0 python-senlinclient>=1.1.0 python-swiftclient>=3.2.0 python-tackerclient>=0.8.0 @@ -44,12 +45,12 @@ python-ironic-inspector-client>=1.5.0 python-zaqarclient>=1.0.0 PyYAML>=3.10.0 -requests!=2.12.2,>=2.10.0 +requests!=2.12.2,!=2.13.0,>=2.10.0 tenacity>=3.2.1 -setuptools!=24.0.0,>=16.0 +setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,>=16.0 six>=1.9.0 -SQLAlchemy<1.1.0,>=1.0.10 -stevedore>=1.17.1 +SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 +stevedore>=1.20.0 WSME>=0.8 yaql>=1.1.0 tooz>=1.47.0 diff -Nru mistral-4.0.0/mistral.egg-info/SOURCES.txt mistral-5.0.0~b2/mistral.egg-info/SOURCES.txt --- mistral-4.0.0/mistral.egg-info/SOURCES.txt 2017-02-22 13:45:28.000000000 +0000 +++ mistral-5.0.0~b2/mistral.egg-info/SOURCES.txt 2017-06-09 12:52:05.000000000 +0000 @@ -25,6 +25,7 @@ devstack/README.rst devstack/plugin.sh devstack/settings +devstack/files/apache-mistral-api.template doc/README.md doc/source/README.rst doc/source/architecture.rst @@ -39,6 +40,7 @@ doc/source/_theme/theme.conf doc/source/developer/asynchronous_actions.rst doc/source/developer/creating_custom_action.rst +doc/source/developer/debug.rst doc/source/developer/devstack.rst doc/source/developer/extending_yaql.rst doc/source/developer/index.rst @@ -53,7 +55,6 @@ doc/source/guides/installation_guide.rst doc/source/guides/mistralclient_guide.rst doc/source/guides/upgrade_guide.rst -doc/source/guides/using_yaql.rst doc/source/img/Mistral_actions.png doc/source/img/Mistral_cron_trigger.png doc/source/img/Mistral_dashboard_debug_config.png @@ -184,43 +185,64 @@ mistral/engine/actions.py mistral/engine/base.py mistral/engine/default_engine.py -mistral/engine/default_executor.py 
mistral/engine/dispatcher.py mistral/engine/engine_server.py -mistral/engine/executor_server.py mistral/engine/policies.py mistral/engine/task_handler.py mistral/engine/tasks.py mistral/engine/utils.py mistral/engine/workflow_handler.py mistral/engine/workflows.py -mistral/engine/rpc_backend/__init__.py -mistral/engine/rpc_backend/base.py -mistral/engine/rpc_backend/rpc.py -mistral/engine/rpc_backend/kombu/__init__.py -mistral/engine/rpc_backend/kombu/base.py -mistral/engine/rpc_backend/kombu/kombu_client.py -mistral/engine/rpc_backend/kombu/kombu_hosts.py -mistral/engine/rpc_backend/kombu/kombu_listener.py -mistral/engine/rpc_backend/kombu/kombu_server.py -mistral/engine/rpc_backend/kombu/examples/__init__.py -mistral/engine/rpc_backend/kombu/examples/client.py -mistral/engine/rpc_backend/kombu/examples/server.py -mistral/engine/rpc_backend/oslo/__init__.py -mistral/engine/rpc_backend/oslo/oslo_client.py -mistral/engine/rpc_backend/oslo/oslo_server.py mistral/event_engine/__init__.py -mistral/event_engine/event_engine.py +mistral/event_engine/base.py +mistral/event_engine/default_event_engine.py mistral/event_engine/event_engine_server.py +mistral/executors/__init__.py +mistral/executors/base.py +mistral/executors/default_executor.py +mistral/executors/executor_server.py +mistral/executors/remote_executor.py mistral/expressions/__init__.py mistral/expressions/base_expression.py mistral/expressions/jinja_expression.py mistral/expressions/yaql_expression.py +mistral/ext/__init__.py +mistral/ext/pygmentplugin.py mistral/hacking/__init__.py mistral/hacking/checks.py +mistral/lang/__init__.py +mistral/lang/base.py +mistral/lang/parser.py +mistral/lang/types.py +mistral/lang/v2/__init__.py +mistral/lang/v2/actions.py +mistral/lang/v2/base.py +mistral/lang/v2/on_clause.py +mistral/lang/v2/policies.py +mistral/lang/v2/publish.py +mistral/lang/v2/retry_policy.py +mistral/lang/v2/task_defaults.py +mistral/lang/v2/tasks.py +mistral/lang/v2/workbook.py 
+mistral/lang/v2/workflows.py mistral/resources/actions/wait_ssh.yaml mistral/resources/workflows/create_instance.yaml mistral/resources/workflows/delete_instance.yaml +mistral/rpc/__init__.py +mistral/rpc/base.py +mistral/rpc/clients.py +mistral/rpc/kombu/__init__.py +mistral/rpc/kombu/base.py +mistral/rpc/kombu/kombu_client.py +mistral/rpc/kombu/kombu_hosts.py +mistral/rpc/kombu/kombu_listener.py +mistral/rpc/kombu/kombu_server.py +mistral/rpc/kombu/examples/__init__.py +mistral/rpc/kombu/examples/client.py +mistral/rpc/kombu/examples/server.py +mistral/rpc/oslo/__init__.py +mistral/rpc/oslo/oslo_client.py +mistral/rpc/oslo/oslo_server.py mistral/service/__init__.py mistral/service/base.py mistral/service/coordination.py @@ -237,6 +259,7 @@ mistral/tests/__init__.py mistral/tests/resources/action_jinja.yaml mistral/tests/resources/action_v2.yaml +mistral/tests/resources/single_wf.yaml mistral/tests/resources/wb_v1.yaml mistral/tests/resources/wb_v2.yaml mistral/tests/resources/wb_with_nested_wf.yaml @@ -245,6 +268,7 @@ mistral/tests/resources/wf_task_ex_concurrency.yaml mistral/tests/resources/wf_v2.yaml mistral/tests/resources/openstack/action_collection_wb.yaml +mistral/tests/resources/openstack/test_mapping.json mistral/tests/resources/workbook/v2/my_workbook.yaml mistral/tests/resources/workbook/v2/workbook_schema_test.yaml mistral/tests/unit/__init__.py @@ -271,6 +295,7 @@ mistral/tests/unit/api/test_access_control.py mistral/tests/unit/api/test_auth.py mistral/tests/unit/api/test_cors_middleware.py +mistral/tests/unit/api/test_resource_base.py mistral/tests/unit/api/test_service.py mistral/tests/unit/api/v2/__init__.py mistral/tests/unit/api/v2/test_action_executions.py @@ -288,6 +313,7 @@ mistral/tests/unit/api/v2/test_workflows.py mistral/tests/unit/db/__init__.py mistral/tests/unit/db/v2/__init__.py +mistral/tests/unit/db/v2/test_db_model.py mistral/tests/unit/db/v2/test_insert_or_ignore.py mistral/tests/unit/db/v2/test_locking.py 
mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py @@ -311,6 +337,7 @@ mistral/tests/unit/engine/test_execution_fields_size_limitation.py mistral/tests/unit/engine/test_javascript_action.py mistral/tests/unit/engine/test_join.py +mistral/tests/unit/engine/test_lookup_utils.py mistral/tests/unit/engine/test_noop_task.py mistral/tests/unit/engine/test_policies.py mistral/tests/unit/engine/test_profiler.py @@ -333,25 +360,37 @@ mistral/tests/unit/engine/test_workflow_stop.py mistral/tests/unit/engine/test_workflow_variables.py mistral/tests/unit/engine/test_yaql_functions.py -mistral/tests/unit/engine/rpc_backend/__init__.py -mistral/tests/unit/engine/rpc_backend/kombu/__init__.py -mistral/tests/unit/engine/rpc_backend/kombu/base.py -mistral/tests/unit/engine/rpc_backend/kombu/fake_kombu.py -mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_client.py -mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_listener.py -mistral/tests/unit/engine/rpc_backend/kombu/test_kombu_server.py +mistral/tests/unit/executors/__init__.py +mistral/tests/unit/executors/base.py +mistral/tests/unit/executors/test_local_executor.py +mistral/tests/unit/executors/test_plugins.py mistral/tests/unit/expressions/test_jinja_expression.py mistral/tests/unit/expressions/test_yaql_expression.py mistral/tests/unit/hacking/__init__.py mistral/tests/unit/hacking/test_checks.py +mistral/tests/unit/lang/__init__.py +mistral/tests/unit/lang/test_spec_caching.py +mistral/tests/unit/lang/v2/__init__.py +mistral/tests/unit/lang/v2/base.py +mistral/tests/unit/lang/v2/test_actions.py +mistral/tests/unit/lang/v2/test_tasks.py +mistral/tests/unit/lang/v2/test_workbook.py +mistral/tests/unit/lang/v2/test_workflows.py mistral/tests/unit/mstrlfixtures/__init__.py mistral/tests/unit/mstrlfixtures/hacking.py mistral/tests/unit/mstrlfixtures/policy_fixtures.py +mistral/tests/unit/rpc/__init__.py +mistral/tests/unit/rpc/kombu/__init__.py +mistral/tests/unit/rpc/kombu/base.py 
+mistral/tests/unit/rpc/kombu/fake_kombu.py +mistral/tests/unit/rpc/kombu/test_kombu_client.py +mistral/tests/unit/rpc/kombu/test_kombu_listener.py +mistral/tests/unit/rpc/kombu/test_kombu_server.py mistral/tests/unit/services/__init__.py mistral/tests/unit/services/test_action_manager.py mistral/tests/unit/services/test_action_service.py mistral/tests/unit/services/test_event_engine.py -mistral/tests/unit/services/test_expired_executions_policy.py +mistral/tests/unit/services/test_expiration_policy.py mistral/tests/unit/services/test_scheduler.py mistral/tests/unit/services/test_trigger_service.py mistral/tests/unit/services/test_workbook_service.py @@ -360,14 +399,6 @@ mistral/tests/unit/utils/test_inspect_utils.py mistral/tests/unit/utils/test_keystone_utils.py mistral/tests/unit/utils/test_utils.py -mistral/tests/unit/workbook/__init__.py -mistral/tests/unit/workbook/test_spec_caching.py -mistral/tests/unit/workbook/v2/__init__.py -mistral/tests/unit/workbook/v2/base.py -mistral/tests/unit/workbook/v2/test_actions.py -mistral/tests/unit/workbook/v2/test_tasks.py -mistral/tests/unit/workbook/v2/test_workbook.py -mistral/tests/unit/workbook/v2/test_workflows.py mistral/tests/unit/workflow/__init__.py mistral/tests/unit/workflow/test_direct_workflow.py mistral/tests/unit/workflow/test_reverse_workflow.py @@ -385,19 +416,6 @@ mistral/utils/wf_trace.py mistral/utils/openstack/__init__.py mistral/utils/openstack/keystone.py -mistral/workbook/__init__.py -mistral/workbook/base.py -mistral/workbook/parser.py -mistral/workbook/types.py -mistral/workbook/v2/__init__.py -mistral/workbook/v2/actions.py -mistral/workbook/v2/base.py -mistral/workbook/v2/policies.py -mistral/workbook/v2/retry_policy.py -mistral/workbook/v2/task_defaults.py -mistral/workbook/v2/tasks.py -mistral/workbook/v2/workbook.py -mistral/workbook/v2/workflows.py mistral/workflow/__init__.py mistral/workflow/base.py mistral/workflow/commands.py @@ -448,10 +466,14 @@ rally-jobs/plugins/README.rst 
rally-jobs/plugins/__init__.py releasenotes/notes/.placeholder +releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml releasenotes/notes/alternative-rpc-layer-21ca7f6171c8f628.yaml releasenotes/notes/changing-context-in-delayed-calls-78d8e9a622fe3fe9.yaml releasenotes/notes/changing-isolation-level-to-read-committed-7080833ad284b901.yaml +releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml +releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml releasenotes/notes/function-called-tasks-available-in-an-expression-17ca83d797ffb3ab.yaml +releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml releasenotes/notes/ironic-api-newton-9397da8135bb97b4.yaml releasenotes/notes/keycloak-auth-support-74131b49e2071762.yaml releasenotes/notes/magnum-actions-support-b131fa942b937fa5.yaml @@ -465,6 +487,9 @@ releasenotes/notes/mistral-tempest-plugin-2f6dcbceb4d27eb0.yaml releasenotes/notes/new-service-actions-support-47279bd649732632.yaml releasenotes/notes/region-name-support-9e4b4ccd963ace88.yaml +releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml +releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml +releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml releasenotes/notes/tacket-actions-support-2b4cee2644313cb3.yaml releasenotes/notes/transition-message-8dc4dd99240bd0f7.yaml releasenotes/notes/update-retry-policy-fb5e73ce717ed066.yaml diff -Nru mistral-4.0.0/mistral_tempest_tests/services/v2/mistral_client.py mistral-5.0.0~b2/mistral_tempest_tests/services/v2/mistral_client.py --- mistral-4.0.0/mistral_tempest_tests/services/v2/mistral_client.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/services/v2/mistral_client.py 2017-06-09 12:48:26.000000000 +0000 @@ -75,6 +75,9 @@ return resp, json.loads(body) + def get_action_execution(self, action_execution_id): + return 
self.get('action_executions/%s' % action_execution_id) + def create_execution(self, identifier, wf_input=None, params=None): if uuidutils.is_uuid_like(identifier): body = {"workflow_id": "%s" % identifier} @@ -130,9 +133,11 @@ return [t for t in all_tasks if t['workflow_name'] == wf_name] def create_action_execution(self, request_body, extra_headers={}): - resp, body = self.post_json('action_executions', - request_body, - extra_headers) + resp, body = self.post_json( + 'action_executions', + request_body, + extra_headers + ) params = json.loads(request_body.get('params', '{}')) if params.get('save_result', False): diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_action_executions.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_action_executions.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_action_executions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_action_executions.py 2017-06-09 12:48:26.000000000 +0000 @@ -16,6 +16,7 @@ import six from oslo_log import log as logging +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -29,6 +30,17 @@ _service = 'workflowv2' @classmethod + def resource_setup(cls): + super(ActionExecutionTestsV2, cls).resource_setup() + + cls.client.create_action_execution( + { + 'name': 'std.echo', + 'input': '{"output": "Hello, Mistral!"}' + } + ) + + @classmethod def resource_cleanup(cls): for action_ex in cls.client.action_executions: try: @@ -43,6 +55,7 @@ super(ActionExecutionTestsV2, cls).resource_cleanup() @test.attr(type='sanity') + @decorators.idempotent_id('a72603bd-5d49-4d92-9747-8da6322e867d') def test_run_action_execution(self): resp, body = self.client.create_action_execution( { @@ -59,6 +72,63 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('0623cb62-b20a-45c8-afd9-8da46e1bb3cb') + def test_list_action_executions(self): + resp, body = 
self.client.get_list_obj('action_executions') + + self.assertEqual(200, resp.status) + + @test.attr(type='sanity') + @decorators.idempotent_id('cd36ea00-7e22-4c3d-90c3-fb441b93cf12') + def test_output_appear_in_response_only_when_needed(self): + resp, body = self.client.get_list_obj('action_executions') + + self.assertEqual(200, resp.status) + action_execution = body['action_executions'][0] + self.assertNotIn("output", action_execution) + + resp, body = self.client.get_list_obj( + 'action_executions?include_output=True' + ) + + self.assertEqual(200, resp.status) + action_execution = body['action_executions'][0] + self.assertIn("output", action_execution) + + resp, body = self.client.get_action_execution(action_execution['id']) + self.assertIn("output", body) + + # Test when passing task execution ID + + resp, body = self.client.create_workflow('wf_v2.yaml') + wf_name = body['workflows'][0]['name'] + self.assertEqual(201, resp.status) + resp, body = self.client.create_execution(wf_name) + self.assertEqual(201, resp.status) + resp, body = self.client.get_list_obj('tasks') + self.assertEqual(200, resp.status) + task_id = body['tasks'][0]['id'] + + resp, body = self.client.get_list_obj( + 'action_executions?include_output=true&task_execution_id=%s' % + task_id + ) + + self.assertEqual(200, resp.status) + action_execution = body['action_executions'][0] + self.assertIn("output", action_execution) + + resp, body = self.client.get_list_obj( + 'action_executions?&task_execution_id=%s' % + task_id + ) + + self.assertEqual(200, resp.status) + action_execution = body['action_executions'][0] + self.assertNotIn("output", action_execution) + + @test.attr(type='sanity') + @decorators.idempotent_id('dc76aeda-9243-45cf-bfd2-141d3af8b28b') def test_run_action_std_http(self): resp, body = self.client.create_action_execution( { @@ -72,6 +142,7 @@ self.assertTrue(output['result']['status'] in range(200, 307)) @test.attr(type='sanity') + 
@decorators.idempotent_id('befa9b1c-01a4-41bc-b060-88cb1b147dfb') def test_run_action_std_http_error(self): resp, body = self.client.create_action_execution( { @@ -85,6 +156,23 @@ self.assertEqual(404, output['result']['status']) @test.attr(type='sanity') + @test.related_bug('1667415') + @decorators.idempotent_id('3c73de7a-4af0-4657-90d6-d7ebd3c7da18') + def test_run_action_std_http_non_utf8_response(self): + resp, body = self.client.create_action_execution( + { + 'name': 'std.http', + 'input': + '{"url": "https://www.google.co.il/search?q=testTest"}' + } + ) + + self.assertEqual(201, resp.status) + output = json.loads(body['output']) + self.assertEqual(200, output['result']['status']) + + @test.attr(type='sanity') + @decorators.idempotent_id('d98586bf-fdc4-44f6-9837-700d35b5f889') def test_create_action_execution(self): resp, body = self.client.create_action_execution( { @@ -112,6 +200,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('99f22c17-6fb4-4480-96d3-4a82672916b7') def test_delete_nonexistent_action_execution(self): self.assertRaises( exceptions.NotFound, @@ -121,6 +210,7 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('2dbd74ba-4950-4c52-8bd3-070d634dcd05') def test_create_action_execution_sync(self): token = self.client.auth_provider.get_token() resp, body = self.client.create_action_execution( diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_actions.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_actions.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. 
import datetime +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -37,6 +38,7 @@ super(ActionTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('2e1a578a-1c27-409a-96be-84b5c41498cd') def test_get_list_actions(self): resp, body = self.client.get_list_obj('actions') @@ -45,6 +47,7 @@ self.assertNotIn('next', body) @test.attr(type='smoke') + @decorators.idempotent_id('786ee85c-c32d-4ac9-8f45-79ab6bc47ef1') def test_get_list_actions_with_pagination(self): resp, body = self.client.get_list_obj( 'actions?limit=1&sort_keys=name&sort_dirs=desc' @@ -84,6 +87,7 @@ self.assertGreater(name_1, name_2) @test.attr(type='negative') + @decorators.idempotent_id('5148358e-200f-49c7-8e88-1ddeec61c6a9') def test_get_list_actions_nonexist_sort_dirs(self): context = self.assertRaises( exceptions.BadRequest, @@ -97,6 +101,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('85482ce8-70f4-47a6-9e80-de1ac22b6412') def test_get_list_actions_invalid_limit(self): context = self.assertRaises( exceptions.BadRequest, @@ -110,6 +115,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('a203e75b-2013-422f-b9eb-da4375041058') def test_get_list_actions_duplicate_sort_keys(self): context = self.assertRaises( exceptions.BadRequest, @@ -123,6 +129,7 @@ ) @test.attr(type='smoke') + @decorators.idempotent_id('9a53af71-8f1e-4ad5-b572-2c4c621715c0') def test_get_list_actions_equal_to_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -138,6 +145,7 @@ self.assertFalse(act['is_system']) @test.attr(type='smoke') + @decorators.idempotent_id('3c3d28ce-9490-41ae-a918-c28f843841e1') def test_get_list_actions_not_equal_to_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -153,6 +161,7 @@ self.assertTrue(act['is_system']) @test.attr(type='smoke') + 
@decorators.idempotent_id('84823a84-5caa-427d-8a2c-622a1d1893b1') def test_get_list_actions_in_list_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -169,6 +178,7 @@ self.assertListEqual(created_acts, action_names) @test.attr(type='smoke') + @decorators.idempotent_id('4b05dfcf-ef39-4032-9528-c8422c7329dd') def test_get_list_actions_not_in_list_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -186,6 +196,7 @@ self.assertNotIn(act, action_names) @test.attr(type='smoke') + @decorators.idempotent_id('20b3d527-447d-492b-8cb7-ac5e3757d7d5') def test_get_list_actions_greater_than_filter(self): time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") resp, body = self.client.get_list_obj( @@ -196,6 +207,7 @@ self.assertEqual([], body['actions']) @test.attr(type='smoke') + @decorators.idempotent_id('7f598dba-f169-47ec-a487-f0ed31484aff') def test_get_list_actions_greater_than_equal_to_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -212,6 +224,7 @@ self.assertIn(created_acts[0], actions) @test.attr(type='smoke') + @decorators.idempotent_id('874fb57d-a762-4dc3-841d-396657510d23') def test_get_list_actions_less_than_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -228,6 +241,7 @@ self.assertNotIn(created_acts[0], actions) @test.attr(type='smoke') + @decorators.idempotent_id('1fda6c31-b0c3-4b78-9f67-b920e1f6c973') def test_get_list_actions_less_than_equal_to_filter(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -244,6 +258,7 @@ self.assertIn(created_acts[0], actions) @test.attr(type='smoke') + @decorators.idempotent_id('cbb716f1-7fc7-4884-8fa9-6ff2bc35ee29') def test_get_list_actions_multiple_filter(self): resp, body = self.client.create_action('action_v2.yaml') 
self.assertEqual(201, resp.status) @@ -261,6 +276,7 @@ self.assertIn(created_acts[0], actions) @test.attr(type='negative') + @decorators.idempotent_id('45fdc1f3-4d89-4035-9b76-08ef94c92628') def test_get_list_actions_invalid_filter(self): self.assertRaises( exceptions.BadRequest, @@ -279,6 +295,7 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('5dbceaf3-6a32-4a4f-9427-1bbdb6f3c574') def test_create_and_delete_few_actions(self): resp, body = self.client.create_action('action_v2.yaml') self.assertEqual(201, resp.status) @@ -302,6 +319,7 @@ self.client.actions.remove(act) @test.attr(type='sanity') + @decorators.idempotent_id('d7dad5de-6b1f-4813-b789-78f075252639') def test_get_action(self): _, body = self.client.create_action('action_v2.yaml') action_name = body['actions'][0]['name'] @@ -311,6 +329,7 @@ self.assertEqual(action_name, body['name']) @test.attr(type='sanity') + @decorators.idempotent_id('21a031c8-8e2d-421f-8dfe-71a3b5e44381') def test_update_action(self): _, body = self.client.create_action('action_v2.yaml') action = body['actions'][0]['name'] @@ -334,6 +353,7 @@ for item in body['actions']])) @test.attr(type='sanity') + @decorators.idempotent_id('329b1030-c55c-45f0-8129-cc892bc23dcc') def test_get_action_definition(self): _, body = self.client.create_action('action_v2.yaml') act_name = body['actions'][0]['name'] @@ -344,6 +364,7 @@ self.assertIn(act_name, body) @test.attr(type='negative') + @decorators.idempotent_id('c2b5be88-571a-4855-922f-9a338dba6adb') def test_get_nonexistent_action(self): self.assertRaises( exceptions.NotFound, @@ -352,6 +373,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('fc2fafcb-9bb4-4a18-a507-3f9964f4a08a') def test_double_creation(self): self.client.create_action('action_v2.yaml') @@ -362,6 +384,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('0c456a73-9c39-4aeb-b3ca-3ea4338bc9ab') def test_create_action_invalid_def(self): self.assertRaises( exceptions.BadRequest, @@ -370,6 +393,7 @@ ) 
@test.attr(type='negative') + @decorators.idempotent_id('469677b5-22ab-4e2a-aee6-5bcc9dac93de') def test_update_action_invalid_def(self): self.assertRaises( exceptions.BadRequest, @@ -378,6 +402,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('ab444607-40fc-47cb-982f-83762d5b64c9') def test_delete_nonexistent_action(self): self.assertRaises( exceptions.NotFound, @@ -386,6 +411,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('74d0d480-793a-46ca-b88a-8336c1897f3a') def test_delete_standard_action(self): self.assertRaises( exceptions.BadRequest, diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_cron_triggers.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_cron_triggers.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_cron_triggers.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_cron_triggers.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. from oslo_concurrency.fixture import lockutils +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -42,6 +43,7 @@ super(CronTriggerTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('c53b44dd-59b3-4a4b-b22a-21abb4cecea0') def test_get_list_cron_triggers(self): resp, body = self.client.get_list_obj('cron_triggers') @@ -49,6 +51,7 @@ self.assertEqual([], body['cron_triggers']) @test.attr(type='sanity') + @decorators.idempotent_id('fbc641fa-8704-45b3-b259-136eb956394c') def test_create_and_delete_cron_triggers(self): tr_name = 'trigger' @@ -72,6 +75,7 @@ self.assertNotIn(tr_name, trs_names) @test.attr(type='sanity') + @decorators.idempotent_id('b8b9102e-b323-492f-af41-4f5368971e36') def test_create_and_delete_oneshot_cron_triggers(self): tr_name = 'trigger' @@ -96,6 +100,7 @@ self.assertNotIn(tr_name, trs_names) @test.attr(type='sanity') + @decorators.idempotent_id('5224359b-3c31-4fe7-a4eb-dc9da843137e') def 
test_create_two_cron_triggers_for_one_wf(self): tr_name_1 = 'trigger1' tr_name_2 = 'trigger2' @@ -118,6 +123,7 @@ self.assertIn(tr_name_2, trs_names) @test.attr(type='sanity') + @decorators.idempotent_id('967da6e3-f9a2-430a-9390-0d73f2143aba') def test_get_cron_trigger(self): tr_name = 'trigger' self.client.create_cron_trigger( @@ -129,36 +135,42 @@ self.assertEqual(tr_name, body['name']) @test.attr(type='negative') + @decorators.idempotent_id('d0e4d894-9a50-4919-a008-a9f255b6b6a3') def test_create_cron_trigger_nonexistent_wf(self): self.assertRaises(exceptions.NotFound, self.client.create_cron_trigger, 'trigger', 'nonexist', None, '5 * * * *') @test.attr(type='negative') + @decorators.idempotent_id('83f0d420-fd3c-4e75-87b1-854cefb28bda') def test_create_cron_trigger_invalid_count(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, 'trigger', 'nonexist', None, '5 * * * *', None, "q") @test.attr(type='negative') + @decorators.idempotent_id('4190e0af-3c64-4f57-a0b8-d9d3d41fd323') def test_create_cron_trigger_negative_count(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, 'trigger', 'nonexist', None, '5 * * * *', None, -1) @test.attr(type='negative') + @decorators.idempotent_id('210c37e8-990e-4260-b3b3-93f254e6a4d7') def test_create_cron_trigger_invalid_first_date(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, 'trigger', 'nonexist', None, '5 * * * *', "q") @test.attr(type='negative') + @decorators.idempotent_id('17990a39-8f66-4748-8ba5-ca87befbb198') def test_create_cron_trigger_count_only(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, 'trigger', 'nonexist', None, None, None, "42") @test.attr(type='negative') + @decorators.idempotent_id('029e0a1e-2252-4a37-b9bd-cfbe407c6ade') def test_create_cron_trigger_date_and_count_without_pattern(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, @@ -166,18 +178,21 @@ 
"4242-12-25 13:37", "42") @test.attr(type='negative') + @decorators.idempotent_id('54650d60-ec17-44b7-8732-2183852789ae') def test_get_nonexistent_cron_trigger(self): self.assertRaises(exceptions.NotFound, self.client.get_object, 'cron_triggers', 'trigger') @test.attr(type='negative') + @decorators.idempotent_id('c663599e-5cd7-49ff-9c0f-f82a5bcc5fdb') def test_delete_nonexistent_trigger(self): self.assertRaises(exceptions.NotFound, self.client.delete_obj, 'cron_triggers', 'trigger') @test.attr(type='negative') + @decorators.idempotent_id('d1328d2b-5dc2-4521-93ec-d734d5fb4df7') def test_create_two_cron_triggers_with_same_name(self): tr_name = 'trigger' self.client.create_cron_trigger( @@ -187,6 +202,7 @@ tr_name, self.wf_name, None, '5 * * * *') @test.attr(type='negative') + @decorators.idempotent_id('3e51fc44-ce38-4653-9e4e-08b077a1dbc5') def test_create_two_cron_triggers_with_same_pattern(self): self.client.create_cron_trigger( 'trigger1', @@ -208,12 +224,14 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('6f3c08f3-9498-410e-a44b-4f9c6c971405') def test_invalid_cron_pattern_not_enough_params(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, 'trigger', self.wf_name, None, '5 *') @test.attr(type='negative') + @decorators.idempotent_id('26cb52e7-1ef3-45a2-a870-1baec2382c55') def test_invalid_cron_pattern_out_of_range(self): self.assertRaises(exceptions.BadRequest, self.client.create_cron_trigger, diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_event_triggers.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_event_triggers.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_event_triggers.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_event_triggers.py 2017-06-09 12:48:26.000000000 +0000 @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -52,6 +53,7 @@ super(EventTriggerTestsV2, self).tearDown() @test.attr(type='sanity') + @decorators.idempotent_id('cfdf9aee-09ce-49bf-af05-97c5542bc131') def test_create_get_delete_event_trigger(self): name = 'my_event_trigger' @@ -78,18 +80,21 @@ self.assertNotIn(name, trs_names) @test.attr(type='negative') + @decorators.idempotent_id('20e547d6-9a16-4cac-9b1a-f3520c58cdd7') def test_create_event_trigger_without_necessary_param(self): self.assertRaises(exceptions.BadRequest, self.client.create_event_trigger, self.wf_id, EXCHANGE, EVENT_ENGINE_TOPIC, '') @test.attr(type='negative') + @decorators.idempotent_id('ed02f500-9436-4a7b-a135-f210e1c32b22') def test_create_event_trigger_with_nonexist_wf(self): self.assertRaises(exceptions.BadRequest, self.client.create_event_trigger, 'nonexist', EXCHANGE, EVENT_ENGINE_TOPIC, EVENT) @test.attr(type='negative') + @decorators.idempotent_id('0ab556b6-ab76-492e-8eef-c79955003a93') def test_create_event_trigger_duplicate(self): name = 'my_event_trigger' @@ -101,6 +106,7 @@ self.wf_id, EXCHANGE, EVENT_ENGINE_TOPIC, EVENT) @test.attr(type='negative') + @decorators.idempotent_id('56b90a90-9ff3-42f8-a9eb-04a77198710e') def test_get_nonexistent_event_trigger(self): fake_id = '123e4567-e89b-12d3-a456-426655440000' diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_executions.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_executions.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_executions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_executions.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. 
from oslo_concurrency.fixture import lockutils +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -47,12 +48,14 @@ super(ExecutionTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('c0b4b658-6f01-4680-b402-2f683b3d78b6') def test_get_list_executions(self): resp, body = self.client.get_list_obj('executions') self.assertEqual(200, resp.status) self.assertNotIn('next', body) @test.attr(type='smoke') + @decorators.idempotent_id('0bfcb4b0-b1e4-4499-b81b-0e86c8a2a841') def test_get_list_executions_with_pagination(self): resp, body = self.client.create_execution(self.direct_wf_name) exec_id_1 = body['id'] @@ -108,6 +111,7 @@ self.assertGreater(workflow_name_2, workflow_name_1) @test.attr(type='sanity') + @decorators.idempotent_id('5d8ebe04-8de6-414d-908f-213af59e4c6a') def test_create_execution_for_direct_wf(self): resp, body = self.client.create_execution(self.direct_wf_name) exec_id = body['id'] @@ -120,6 +124,7 @@ [ex_id['id'] for ex_id in body['executions']]) @test.attr(type='sanity') + @decorators.idempotent_id('101bfdff-8309-4add-9504-544b15f13d95') def test_create_execution_for_reverse_wf(self): resp, body = self.client.create_execution( self.reverse_wf['name'], @@ -143,6 +148,7 @@ self.assertEqual('SUCCESS', body['state']) @test.attr(type='sanity') + @decorators.idempotent_id('2df30966-9c45-4a2e-942d-e74bd92cb5aa') def test_create_execution_by_wf_id(self): resp, body = self.client.create_execution(self.direct_wf_id) exec_id = body['id'] @@ -157,6 +163,7 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('f7f50198-2dbd-4ca1-af51-d0eadc1108ac') def test_get_execution(self): _, execution = self.client.create_execution(self.direct_wf_name) @@ -169,6 +176,7 @@ self.assertEqual(execution['id'], body['id']) @test.attr(type='sanity') + @decorators.idempotent_id('2f142ba0-6b88-4d63-8544-05c3dbfe13cc') def test_update_execution_pause(self): _, execution = 
self.client.create_execution(self.direct_wf_name) resp, body = self.client.update_execution( @@ -178,6 +186,7 @@ self.assertEqual('PAUSED', body['state']) @test.attr(type='sanity') + @decorators.idempotent_id('f0557236-55ab-457d-9197-05bc2ae53e21') def test_update_execution_description(self): _, execution = self.client.create_execution(self.direct_wf_name) resp, body = self.client.update_execution( @@ -187,6 +196,7 @@ self.assertEqual('description', body['description']) @test.attr(type='sanity') + @decorators.idempotent_id('c54b4d68-b179-4339-bdab-a91cd6e819b7') def test_update_execution_fail(self): _, execution = self.client.create_execution(self.direct_wf_name) resp, body = self.client.update_execution( @@ -196,12 +206,41 @@ self.assertEqual('ERROR', body['state']) self.assertEqual('Forced', body['state_info']) + @test.attr(type='sanity') + @decorators.idempotent_id('b5ce0d18-7d78-45bb-813e-ed94cea65fd0') + def test_update_execution_by_admin(self): + _, execution = self.client.create_execution(self.direct_wf_name) + resp, body = self.admin_client.update_execution( + execution['id'], '{"description": "description set by admin"}') + + self.assertEqual(200, resp.status) + self.assertEqual('description set by admin', body['description']) + + resp, body = self.client.get_object('executions', execution['id']) + + self.assertEqual(200, resp.status) + self.assertEqual("description set by admin", body['description']) + + @test.attr(type='sanity') + @decorators.idempotent_id('c6247362-a082-49ad-a2c3-aaf12419a477') + def test_update_execution_by_other_fail(self): + _, execution = self.client.create_execution(self.direct_wf_name) + + self.assertRaises( + exceptions.NotFound, + self.alt_client.update_execution, + execution['id'], + '{"description": "description set by admin"}' + ) + @test.attr(type='negative') + @decorators.idempotent_id('d8bde271-6785-4ace-9173-a8a3a01d5eaa') def test_get_nonexistent_execution(self): self.assertRaises(exceptions.NotFound, 
self.client.get_object, 'executions', '1a2b3c') @test.attr(type='negative') + @decorators.idempotent_id('e26e31ba-88cf-4b90-8b3a-fd4ecc612252') def test_update_nonexistent_execution(self): put_body = '{"state": "STOPPED"}' @@ -210,18 +249,21 @@ '1a2b3c', put_body) @test.attr(type='negative') + @decorators.idempotent_id('b337e270-b3b6-41e2-8de2-05030b06fc37') def test_delete_nonexistent_execution(self): self.assertRaises(exceptions.NotFound, self.client.delete_obj, 'executions', 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('46f7b4b0-7d4a-4bdc-b2b6-46343cdd6f3a') def test_create_ex_for_nonexistent_wf(self): self.assertRaises(exceptions.NotFound, self.client.create_execution, 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('9d27247e-b4d4-40ab-9181-9986655a6be4') def test_create_execution_for_reverse_wf_invalid_start_task(self): self.assertRaises( exceptions.BadRequest, @@ -232,6 +274,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('0d6ac42b-4059-40ef-99d0-a65b3cd1837c') def test_create_execution_forgot_input_params(self): self.assertRaises( exceptions.BadRequest, @@ -241,6 +284,7 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('52779c73-7563-47b2-8231-a24d6bf531a7') def test_action_ex_concurrency(self): resp, wf = self.client.create_workflow("wf_action_ex_concurrency.yaml") self.assertEqual(201, resp.status) @@ -254,6 +298,7 @@ self.client.wait_execution_success(execution) @test.attr(type='sanity') + @decorators.idempotent_id('eb061c4d-2892-47f0-81e6-37ba15c376bb') def test_task_ex_concurrency(self): resp, wf = self.client.create_workflow("wf_task_ex_concurrency.yaml") self.assertEqual(201, resp.status) @@ -265,3 +310,21 @@ self.assertEqual('RUNNING', execution['state']) self.client.wait_execution(execution, target_state='ERROR') + + @test.attr(type='sanity') + @decorators.idempotent_id('acc8e401-2b26-4c41-9e79-8da791da85c0') + def test_delete_execution_by_admin(self): + _, body = 
self.client.create_execution(self.direct_wf_id) + exec_id = body['id'] + resp, _ = self.admin_client.delete_obj('executions', exec_id) + + self.assertEqual(204, resp.status) + + self.client.executions.remove(exec_id) + + self.assertRaises( + exceptions.NotFound, + self.client.get_object, + 'executions', + exec_id + ) diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_tasks.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_tasks.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_tasks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_tasks.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. from oslo_concurrency.fixture import lockutils +from tempest.lib import decorators from tempest import test from mistral_tempest_tests.tests import base @@ -28,6 +29,7 @@ _, body = self.client.create_workflow('wf_v2.yaml') self.direct_wf_name = body['workflows'][0]['name'] _, execution = self.client.create_execution(self.direct_wf_name) + self.execution_id = execution['id'] def tearDown(self): for wf in self.client.workflows: @@ -41,6 +43,7 @@ super(TasksTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('81159dce-3802-44ee-a8d4-5ddca106fd91') def test_get_tasks_list(self): resp, body = self.client.get_list_obj('tasks') @@ -48,6 +51,7 @@ self.assertNotEmpty(body['tasks']) @test.attr(type='sanity') + @decorators.idempotent_id('f62664de-bd2b-4153-8d0f-5a76d78abbad') def test_get_task(self): resp, body = self.client.get_list_obj('tasks') @@ -56,6 +60,18 @@ self.direct_wf_name, body['tasks'][-1]['workflow_name'] ) + @test.attr(type='sanity') + @decorators.idempotent_id('3230d694-40fd-4094-ad12-024f40a21b94') + def test_get_tasks_of_execution(self): + resp, body = self.client.get_list_obj( + 'tasks?workflow_execution_id=%s' % self.execution_id + ) + + self.assertEqual(200, resp.status) + self.assertEqual( + self.direct_wf_name, 
body['tasks'][-1]['workflow_name'] + ) + class TaskTypesTestsV2(base.TestCase): @@ -71,13 +87,14 @@ _, execution = self.client.create_execution(self.nested_wf_name) @test.attr(type='sanity') + @decorators.idempotent_id('1ac726eb-b945-4b82-8755-a2fb2dc009bc') def test_task_type(self): resp, body = self.client.get_list_obj('tasks') self.assertEqual(200, resp.status) bt = body['tasks'] - ll = [[v for k, v in d.iteritems() if 'type' in k] for d in bt] + ll = [[v for k, v in d.items() if 'type' in k] for d in bt] types_list = [item for sublist in ll for item in sublist] self.assertIn( diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_workbooks.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_workbooks.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_workbooks.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_workbooks.py 2017-06-09 12:48:26.000000000 +0000 @@ -13,6 +13,7 @@ # under the License. from oslo_concurrency.fixture import lockutils +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -31,6 +32,7 @@ super(WorkbookTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('4d8752b9-8d69-4d81-8710-5dd8ef699b95') def test_get_list_workbooks(self): resp, body = self.client.get_list_obj('workbooks') @@ -38,6 +40,7 @@ self.assertEqual([], body['workbooks']) @test.attr(type='sanity') + @decorators.idempotent_id('1a078ca2-bcf9-4eb9-8ed5-e3545038aa76') def test_create_and_delete_workbook(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) resp, body = self.client.create_workbook('wb_v2.yaml') @@ -55,6 +58,7 @@ self.assertEqual([], body['workbooks']) @test.attr(type='sanity') + @decorators.idempotent_id('80f7d7a6-2821-4ab0-b090-ca45c98258ba') def test_get_workbook(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workbook('wb_v2.yaml') @@ -65,6 +69,7 @@ 
self.assertEqual(name, body['name']) @test.attr(type='sanity') + @decorators.idempotent_id('4d3b1e43-a493-41be-9c8a-389511675403') def test_update_workbook(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workbook('wb_v2.yaml') @@ -75,6 +80,7 @@ self.assertEqual(name, body['name']) @test.attr(type='sanity') + @decorators.idempotent_id('506cdcc2-082f-4e1f-9ab2-717acd7f0eb5') def test_get_workbook_definition(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workbook('wb_v2.yaml') @@ -85,17 +91,20 @@ self.assertIsNotNone(body) @test.attr(type='negative') + @decorators.idempotent_id('d99f11c1-05a3-4d90-89c6-8d85558d3708') def test_get_nonexistent_workbook_definition(self): self.assertRaises(exceptions.NotFound, self.client.get_definition, 'workbooks', 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('61ed021e-ec56-42cb-ad05-eb6979aa00fd') def test_get_nonexistent_workbook(self): self.assertRaises(exceptions.NotFound, self.client.get_object, 'workbooks', 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('e3d76f8b-220d-4250-8238-0ba27fda6de9') def test_double_create_workbook(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workbook('wb_v2.yaml') @@ -111,6 +120,7 @@ self.assertEqual([], body['workbooks']) @test.attr(type='negative') + @decorators.idempotent_id('1cd6f6f7-b166-454e-96d2-bf1f95c23015') def test_create_wb_with_invalid_def(self): self.assertRaises( exceptions.BadRequest, @@ -119,6 +129,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('ac9a05d3-e285-4d88-91eb-fb9ad694a89a') def test_update_wb_with_invalid_def(self): self.assertRaises( exceptions.BadRequest, diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_workflows.py mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_workflows.py --- mistral-4.0.0/mistral_tempest_tests/tests/api/v2/test_workflows.py 
2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/api/v2/test_workflows.py 2017-06-09 12:48:26.000000000 +0000 @@ -14,6 +14,7 @@ import json from oslo_concurrency.fixture import lockutils +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -33,6 +34,7 @@ super(WorkflowTestsV2, self).tearDown() @test.attr(type='smoke') + @decorators.idempotent_id('e9cd6817-e8d1-4604-ba76-b0e17219f4c5') def test_get_list_workflows(self): resp, body = self.client.get_list_obj('workflows') self.assertEqual(200, resp.status) @@ -44,6 +46,7 @@ self.assertNotIn('next', body) @test.attr(type='smoke') + @decorators.idempotent_id('be8a4a44-eeb3-48e3-b11d-b83ba14dbf2c') def test_get_list_workflows_by_admin(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) @@ -60,6 +63,7 @@ self.assertIn(name, names) @test.attr(type='smoke') + @decorators.idempotent_id('c9e2ebbc-02aa-4c33-b244-e471c8266aa7') def test_get_list_workflows_with_project_by_admin(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) @@ -80,6 +84,7 @@ self.assertIn(name, names) @test.attr(type='smoke') + @decorators.idempotent_id('b8dc1b02-8509-45e2-9df7-4630cdcfa1ab') def test_get_list_other_project_private_workflows(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) @@ -100,6 +105,7 @@ self.assertNotIn(name, names) @test.attr(type='smoke') + @decorators.idempotent_id('2063143b-ced8-4037-9383-e2504be581e6') def test_get_list_workflows_with_fields(self): resp, body = self.client.get_list_obj('workflows?fields=name') @@ -109,6 +115,7 @@ self.assertListEqual(sorted(['id', 'name']), sorted(list(wf))) @test.attr(type='smoke') + @decorators.idempotent_id('81f28735-e74e-4dc1-8b94-b548f8a80556') def test_get_list_workflows_with_pagination(self): resp, body = self.client.get_list_obj( 'workflows?limit=1&sort_keys=name&sort_dirs=desc' @@ -148,6 +155,7 @@ self.assertGreater(name_1, name_2) @test.attr(type='negative') + 
@decorators.idempotent_id('cdb5586f-a72f-4371-88d1-1472675915c3') def test_get_list_workflows_nonexist_sort_dirs(self): context = self.assertRaises( exceptions.BadRequest, @@ -161,6 +169,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('ac41a0ea-2be6-4307-9003-6b8dd52b0bf9') def test_get_list_workflows_invalid_limit(self): context = self.assertRaises( exceptions.BadRequest, @@ -174,6 +183,7 @@ ) @test.attr(type='negative') + @decorators.idempotent_id('55759713-a8d7-44c2-aff1-2383f51136bd') def test_get_list_workflows_duplicate_sort_keys(self): context = self.assertRaises( exceptions.BadRequest, @@ -187,6 +197,7 @@ ) @test.attr(type='sanity') + @decorators.idempotent_id('e26b30b9-6699-4020-93a0-e25c2daca59a') def test_create_and_delete_workflow(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) resp, body = self.client.create_workflow('wf_v2.yaml') @@ -209,6 +220,7 @@ self.assertNotIn(name, names) @test.attr(type='sanity') + @decorators.idempotent_id('f5a4a771-79b2-4f28-bfac-940aa83990a4') def test_get_workflow(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workflow('wf_v2.yaml') @@ -220,17 +232,19 @@ self.assertEqual(name, body['name']) @test.attr(type='sanity') + @decorators.idempotent_id('f516aad0-9a50-4ace-a217-fa1931fd9335') def test_update_workflow(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) - _, body = self.client.create_workflow('wf_v2.yaml') + _, body = self.client.create_workflow('single_wf.yaml') name = body['workflows'][0]['name'] - resp, body = self.client.update_request('workflows', 'wf_v2.yaml') + resp, body = self.client.update_request('workflows', 'single_wf.yaml') self.assertEqual(200, resp.status) self.assertEqual(name, body['workflows'][0]['name']) @test.attr(type='sanity') + @decorators.idempotent_id('02bc1fc3-c31a-4e37-bb3d-eda46818505c') def test_get_workflow_definition(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = 
self.client.create_workflow('wf_v2.yaml') @@ -242,6 +256,7 @@ self.assertIsNotNone(body) @test.attr(type='sanity') + @decorators.idempotent_id('04fbd003-0e52-4034-858e-6634d4f84b29') def test_get_workflow_uploaded_in_wb(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workbook('wb_v2.yaml') @@ -254,17 +269,20 @@ self.assertNotEmpty(wf_names) @test.attr(type='negative') + @decorators.idempotent_id('5e5f0403-fb2c-41ae-bf6f-25c181515358') def test_get_nonexistent_workflow_definition(self): self.assertRaises(exceptions.NotFound, self.client.get_definition, 'workflows', 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('23c72d01-c3bb-43d6-ba15-9b49c15f800c') def test_get_nonexistent_workflow(self): self.assertRaises(exceptions.NotFound, self.client.get_object, 'workflows', 'nonexist') @test.attr(type='negative') + @decorators.idempotent_id('6b917213-7f11-423a-8fe0-55795dcf0fb2') def test_double_create_workflows(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) _, body = self.client.create_workflow('wf_v2.yaml') @@ -273,18 +291,21 @@ 'wf_v2.yaml') @test.attr(type='negative') + @decorators.idempotent_id('ffcd63d2-1104-4320-a67b-fadc4e2a0631') def test_create_wf_with_invalid_def(self): self.assertRaises(exceptions.BadRequest, self.client.create_workflow, 'wb_v1.yaml') @test.attr(type='negative') + @decorators.idempotent_id('eed46931-5485-436c-810f-1f63362223b9') def test_update_wf_with_invalid_def(self): self.assertRaises(exceptions.BadRequest, self.client.update_request, 'workflows', 'wb_v1.yaml') @test.attr(type='negative') + @decorators.idempotent_id('9b7f5b5a-cacd-4f98-a35a-decf065b8234') def test_delete_wf_with_trigger_associate(self): tr_name = 'trigger' resp, body = self.client.create_workflow('wf_v2.yaml') @@ -304,6 +325,7 @@ self.client.triggers.remove(tr_name) @test.attr(type='negative') + @decorators.idempotent_id('46325022-cbd2-48f3-95f3-e587aab3b655') def 
test_delete_wf_with_event_trigger_associate(self): _, body = self.client.create_workflow('wf_v2.yaml') wf_id = body['workflows'][0]['id'] @@ -323,6 +345,7 @@ self.client.event_triggers.remove(body['id']) @test.attr(type='negative') + @decorators.idempotent_id('1cb929e6-d375-4dcb-ab7c-73aa205af896') def test_delete_wf_with_trigger_associate_in_other_tenant(self): self.useFixture(lockutils.LockFixture('mistral-workflow')) tr_name = 'trigger' @@ -352,6 +375,7 @@ self.alt_client.triggers.remove(tr_name) @test.attr(type='negative') + @decorators.idempotent_id('f575713b-27fd-4ec8-b84f-468a7adf5ed2') def test_delete_nonexistent_wf(self): self.assertRaises(exceptions.NotFound, self.client.delete_obj, diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_multi_vim_authentication.py mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_multi_vim_authentication.py --- mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_multi_vim_authentication.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_multi_vim_authentication.py 2017-06-09 12:48:26.000000000 +0000 @@ -14,8 +14,10 @@ import base64 from urlparse import urlparse +from keystoneclient import service_catalog as ks_service_catalog from oslo_serialization import jsonutils from oslo_utils import uuidutils +from tempest.lib import decorators from tempest import test from mistral_tempest_tests.tests import base @@ -29,6 +31,7 @@ super(MultiVimActionsTests, cls).resource_setup() @test.attr(type='openstack') + @decorators.idempotent_id('dadc9960-9c03-41a9-9a9d-7e97d527e6dd') def test_multi_vim_support_target_headers(self): client_1 = self.alt_client client_2 = self.client @@ -56,6 +59,7 @@ ) @test.attr(type='openstack') + @decorators.idempotent_id('bc0e9b99-62b0-4d96-95c9-016a3f69b02a') def test_multi_vim_support_target_headers_and_service_catalog(self): client_1 = self.alt_client 
client_2 = self.client @@ -63,18 +67,35 @@ # List stacks with client1, but with the target headers of client2, # and additionally with an invalid X-Target-Service-Catalog. extra_headers = _extract_target_headers_from_client(client_2) - service_dict = dict(client_2.auth_provider.cache[1]) - for endpoint in service_dict['serviceCatalog']: - if endpoint['name'] == 'heat': - endpoint['endpoints'][0]['publicURL'] = "invalid" + # Use ServiceCatalog to eliminate differences between keystone v2 and + # v3. + token_data = client_2.auth_provider.auth_data[1] + service_catalog = ks_service_catalog.ServiceCatalog.factory( + token_data + ).get_data() + + for service in service_catalog: + if service['name'] == 'heat': + for ep in service['endpoints']: + if 'publicURL' in ep: + ep['publicURL'] = "invalid" + elif ep['interface'] == 'public': + ep['url'] = "invalid" + break + break + + if 'catalog' in token_data: + token_data['catalog'] = service_catalog + else: + token_data['serviceCatalog'] = service_catalog - service_catalog = { + invalid_service_catalog = { "X-Target-Service-Catalog": base64.b64encode( - jsonutils.dumps(service_dict) + jsonutils.dumps(token_data) ) } - extra_headers.update(service_catalog) + extra_headers.update(invalid_service_catalog) result = _execute_action( client_1, _get_list_stack_request(), diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_openstack_actions.py mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_openstack_actions.py --- mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_openstack_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_openstack_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from tempest.lib import decorators from tempest import test from mistral_tempest_tests.tests import base @@ -32,6 +33,7 @@ 'openstack/action_collection_wb.yaml') @test.attr(type='openstack') + @decorators.idempotent_id('9a999fc2-a089-4375-bc69-e1ed85b17a82') def test_nova_actions(self): wf_name = self.wb['name'] + '.nova' _, execution = self.client.create_execution(wf_name) @@ -41,15 +43,17 @@ self.assertEqual('SUCCESS', executed_task['state']) @test.attr(type='openstack') + @decorators.idempotent_id('81bdc1c9-cd9a-4c97-b8ce-e44f5211eace') def test_keystone_actions(self): wf_name = self.wb['name'] + '.keystone' - _, execution = self.client.create_execution(wf_name) - self.client.wait_execution_success(execution) - executed_task = self.client.get_wf_tasks(wf_name)[-1] + _, execution = self.admin_client.create_execution(wf_name) + self.admin_client.wait_execution_success(execution) + executed_task = self.admin_client.get_wf_tasks(wf_name)[-1] self.assertEqual('SUCCESS', executed_task['state']) @test.attr(type='openstack') + @decorators.idempotent_id('fde681b8-3e1b-4172-a4b8-2fcac1f070d9') def test_heat_actions(self): wf_name = self.wb['name'] + '.heat' _, execution = self.client.create_execution(wf_name) @@ -59,6 +63,7 @@ self.assertEqual('SUCCESS', executed_task['state']) @test.attr(type='openstack') + @decorators.idempotent_id('5981360d-f336-45ca-9d38-799c7a8ade26') def test_glance_actions(self): wf_name = self.wb['name'] + '.glance' _, execution = self.client.create_execution(wf_name) @@ -68,6 +73,7 @@ self.assertEqual('SUCCESS', executed_task['state']) @test.attr(type='openstack') + @decorators.idempotent_id('a1f71a72-3681-4d32-aad9-117068717b33') def test_cinder_actions(self): wf_name = self.wb['name'] + '.cinder' _, execution = self.client.create_execution(wf_name) @@ -77,6 +83,7 @@ self.assertEqual('SUCCESS', executed_task['state']) @test.attr(type='openstack') + @decorators.idempotent_id('586dd973-fc65-40e2-9a85-31418b22473a') def test_neutron_actions(self): 
wf_name = self.wb['name'] + '.neutron' _, execution = self.client.create_execution(wf_name) diff -Nru mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_ssh_actions.py mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_ssh_actions.py --- mistral-4.0.0/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_ssh_actions.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/mistral_tempest_tests/tests/scenario/engine/actions/v2/test_ssh_actions.py 2017-06-09 12:48:26.000000000 +0000 @@ -20,6 +20,7 @@ from oslo_log import log as logging from paramiko import ssh_exception from tempest import config +from tempest.lib import decorators from tempest.lib import exceptions from tempest import test @@ -246,6 +247,7 @@ super(SSHActionsTestsV2, cls).resource_cleanup() @test.attr(type='sanity') + @decorators.idempotent_id('3e12a2ad-5b10-46b0-ae1f-ed34d3cc6ae2') def test_run_ssh_action(self): input_data = { 'cmd': 'hostname', @@ -268,6 +270,7 @@ self.assertIn(self.public_vm['name'], output['result']) @test.attr(type='sanity') + @decorators.idempotent_id('6c09fb04-70b4-43a6-b5f8-a53ca92e66e0') def test_run_ssh_proxied_action(self): guest_vm_ip = self.guest_vm['addresses'].popitem()[1][0]['addr'] diff -Nru mistral-4.0.0/PKG-INFO mistral-5.0.0~b2/PKG-INFO --- mistral-4.0.0/PKG-INFO 2017-02-22 13:45:28.000000000 +0000 +++ mistral-5.0.0~b2/PKG-INFO 2017-06-09 12:52:05.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: mistral -Version: 4.0.0 +Version: 5.0.0.0b2 Summary: Mistral Project Home-page: http://docs.openstack.org/developer/mistral Author: OpenStack Mistral Team @@ -10,41 +10,24 @@ Team and repository tags ======================== - .. image:: http://governance.openstack.org/badges/mistral.svg - :target: http://governance.openstack.org/reference/tags/index.html - - .. Change things from this point on + .. 
image:: https://governance.openstack.org/badges/mistral.svg + :target: https://governance.openstack.org/reference/tags/index.html Mistral ======= - Workflow Service for OpenStack cloud. + Workflow Service for OpenStack cloud. This project aims to provide a mechanism + to define tasks and workflows without writing code, manage and execute them in + the cloud environment. Installation ~~~~~~~~~~~~ - Prerequisites - ------------- - - It is necessary to install some specific system libs for installing Mistral. - They can be installed on most popular operating systems using their package - manager (for Ubuntu - *apt*, for Fedora, CentOS - *yum*, for Mac OS - *brew* - or *macports*). - - The list of needed packages is shown below: - - * **python-dev** - * **python-setuptools** - * **python-pip** - * **libffi-dev** - * **libxslt1-dev (or libxslt-dev)** - * **libxml2-dev** - * **libyaml-dev** - * **libssl-dev** + The following are the steps to install Mistral on debian-based systems. - In case of ubuntu, just run:: + To install Mistral, you have to install the following prerequisites:: - apt-get install python-dev python-setuptools libffi-dev \ + $ apt-get install python-dev python-setuptools libffi-dev \ libxslt1-dev libxml2-dev libyaml-dev libssl-dev **Mistral can be used without authentication at all or it can work with @@ -68,23 +51,6 @@ Information about how to install Mistral with devstack can be found `here `_. - **Virtualenv installation**:: - - $ tox - - This will install necessary virtual environments and run all the project tests. - Installing virtual environments may take significant time (~10-15 mins). - - **Local installation**:: - - $ pip install -e . 
- - or:: - - $ pip install -r requirements.txt - $ python setup.py install - - Configuring Mistral ~~~~~~~~~~~~~~~~~~~ @@ -111,9 +77,9 @@ * Create the database and grant privileges:: $ mysql -u root -p - CREATE DATABASE mistral; - USE mistral - GRANT ALL ON mistral.* TO 'root'@'localhost'; + mysql> CREATE DATABASE mistral; + mysql> USE mistral + mysql> GRANT ALL ON mistral.* TO 'root'@'localhost'; #. Generate ``mistral.conf`` file:: @@ -173,9 +139,8 @@ of OpenStack projects in your deployment. Please find more detailed information in the ``tools/get_action_list.py`` script. - Before the First Run - ~~~~~~~~~~~~~~~~~~~~ + -------------------- After local installation you will find the commands ``mistral-server`` and ``mistral-db-manage`` available in your environment. The ``mistral-db-manage`` @@ -188,50 +153,35 @@ $ mistral-db-manage --config-file upgrade head + To populate the database with standard actions and workflows, type:: + $ mistral-db-manage --config-file populate + For more detailed information about ``mistral-db-manage`` script please check file ``mistral/db/sqlalchemy/migration/alembic_migrations/README.md``. - ** NOTE: For users want a dry run with SQLite backend(not used in production), - ``mistral-db-manage`` is not recommended for database initialization due to - `SQLite limitations `_. Please use - ``sync_db`` script described below instead for database initialization. - - Before starting Mistral server, run ``sync_db`` script. It prepares the DB, - creates in it with all standard actions and standard workflows which Mistral - provides for all mistral users. 
- - If you are using virtualenv:: - - $ tools/sync_db.sh --config-file - - Or run ``sync_db`` directly:: - - $ python tools/sync_db.py --config-file - - Running Mistral API server - ~~~~~~~~~~~~~~~~~~~~~~~~~~ + -------------------------- To run Mistral API server:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api --config-file + --server api --config-file Running Mistral Engines - ~~~~~~~~~~~~~~~~~~~~~~~ + ----------------------- To run Mistral Engine:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server engine --config-file + --server engine --config-file Running Mistral Task Executors - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ------------------------------ To run Mistral Task Executor instance:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server executor --config-file + --server executor --config-file Note that at least one Engine instance and one Executor instance should be running in order for workflow tasks to be processed by Mistral. @@ -253,13 +203,13 @@ ... Workflow YAML ... Running Multiple Mistral Servers Under the Same Process - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + ------------------------------------------------------- To run more than one server (API, Engine, or Task Executor) on the same process:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api,engine --config-file + --server api,engine --config-file The value for the ``--server`` option can be a comma-delimited list. The valid options are ``all`` (which is the default if not specified) or any combination @@ -270,78 +220,59 @@ servers are launched on the same process. Otherwise, messages do not get delivered because the ``fake`` transport is using an in-process queue. + Project Goals 2017 + ------------------ - Mistral Client - ~~~~~~~~~~~~~~ - - The Mistral command line tool is provided by the ``python-mistralclient`` - package which is available - `here `__. 
- - - Debugging - ~~~~~~~~~ - - To debug using a local engine and executor without dependencies such as - RabbitMQ, make sure your ``/etc/mistral/mistral.conf`` has the following settings:: - - [DEFAULT] - rpc_backend = fake - - [pecan] - auth_enable = False - - and run the following command in *pdb*, *PyDev* or *PyCharm*:: - - mistral/cmd/launch.py --server all --config-file /etc/mistral/mistral.conf --use-debugger - - .. note:: - - In PyCharm, you also need to enable the Gevent compatibility flag in - Settings -> Build, Execution, Deployment -> Python Debugger -> Gevent - compatible. Without this setting, PyCharm will not show variable values - and become unstable during debugging. - - - Running unit tests in PyCharm - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + #. **Complete Mistral documentation**. - In order to be able to conveniently run unit tests, you need to: + Mistral documentation should be more usable. It requires focused work to + make it well structured, eliminate gaps in API/Mistral Workflow Language + specifications, add more examples and tutorials. - 1. Set unit tests as the default runner: + *Definition of done*: + All capabilities are covered, all documentation topics are written using + the same style and structure principles. The obvious sub-goal of this goal + is to establish these principles. - Settings -> Tools -> Python Integrated Tools -> Default test runner: Unittests + #. **Complete Mistral Custom Actions API**. - 2. Enable test detection for all classes: + There has been the initiative in Mistral team since April of 2016 to + refactor Mistral actions subsystem in order to make the process of + developing Mistral actions easier and clearer. In 2017 we need to complete + this effort and make sure that all APIs are stable and it’s well-documented. 
- Run/Debug Configurations -> Defaults -> Python tests -> Unittests -> uncheck - Inspect only subclasses of unittest.TestCase + *Definition of done*: + All API interfaces are stable, existing actions are rewritten using this new + API, OpenStack actions are also rewritten based on the new API and moved to + mistral-extra repo. Everything is well documented and the doc has enough + examples. - Running examples - ~~~~~~~~~~~~~~~~ + #. **Finish Mistral multi-node mode**. - To run the examples find them in mistral-extra repository - (https://github.com/openstack/mistral-extra) and follow the instructions on - each example. + Mistral needs to be proven to work reliably in multi-node mode. In order + to achieve it we need to make a number of engine, executor and RPC + changes and configure a CI gate to run stress tests on multi-node Mistral. + *Definition of done*: + CI gate supports MySQL, all critically important functionality (join, + with-items, parallel workflows, sequential workflows) is covered by tests. - Tests - ~~~~~ + #. **Reduce workflow execution time**. - You can run some of the functional tests in non-openstack mode locally. To do - this: + *Definition of done*: Average workflow execution time reduced by 30%. - #. set ``auth_enable = False`` in the ``mistral.conf`` and restart Mistral - #. execute:: + Project Resources + ----------------- - $ ./run_functional_tests.sh + * `Mistral Official Documentation `_ - To run tests for only one version need to specify it:: + * Project status, bugs, and blueprints are tracked on + `Launchpad `_ - $ bash run_functional_tests.sh v1 + * Additional resources are linked from the project + `Wiki `_ page - More information about automated tests for Mistral can be found on - `Mistral Wiki `_. 
+ * Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 Platform: UNKNOWN diff -Nru mistral-4.0.0/README.rst mistral-5.0.0~b2/README.rst --- mistral-4.0.0/README.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/README.rst 2017-06-09 12:48:26.000000000 +0000 @@ -2,41 +2,24 @@ Team and repository tags ======================== -.. image:: http://governance.openstack.org/badges/mistral.svg - :target: http://governance.openstack.org/reference/tags/index.html - -.. Change things from this point on +.. image:: https://governance.openstack.org/badges/mistral.svg + :target: https://governance.openstack.org/reference/tags/index.html Mistral ======= -Workflow Service for OpenStack cloud. +Workflow Service for OpenStack cloud. This project aims to provide a mechanism +to define tasks and workflows without writing code, manage and execute them in +the cloud environment. Installation ~~~~~~~~~~~~ -Prerequisites -------------- - -It is necessary to install some specific system libs for installing Mistral. -They can be installed on most popular operating systems using their package -manager (for Ubuntu - *apt*, for Fedora, CentOS - *yum*, for Mac OS - *brew* -or *macports*). - -The list of needed packages is shown below: - -* **python-dev** -* **python-setuptools** -* **python-pip** -* **libffi-dev** -* **libxslt1-dev (or libxslt-dev)** -* **libxml2-dev** -* **libyaml-dev** -* **libssl-dev** +The following are the steps to install Mistral on debian-based systems. -In case of ubuntu, just run:: +To install Mistral, you have to install the following prerequisites:: - apt-get install python-dev python-setuptools libffi-dev \ + $ apt-get install python-dev python-setuptools libffi-dev \ libxslt1-dev libxml2-dev libyaml-dev libssl-dev **Mistral can be used without authentication at all or it can work with @@ -60,23 +43,6 @@ Information about how to install Mistral with devstack can be found `here `_. 
-**Virtualenv installation**:: - - $ tox - -This will install necessary virtual environments and run all the project tests. -Installing virtual environments may take significant time (~10-15 mins). - -**Local installation**:: - - $ pip install -e . - -or:: - - $ pip install -r requirements.txt - $ python setup.py install - - Configuring Mistral ~~~~~~~~~~~~~~~~~~~ @@ -103,9 +69,9 @@ * Create the database and grant privileges:: $ mysql -u root -p - CREATE DATABASE mistral; - USE mistral - GRANT ALL ON mistral.* TO 'root'@'localhost'; + mysql> CREATE DATABASE mistral; + mysql> USE mistral + mysql> GRANT ALL ON mistral.* TO 'root'@'localhost'; #. Generate ``mistral.conf`` file:: @@ -165,9 +131,8 @@ of OpenStack projects in your deployment. Please find more detailed information in the ``tools/get_action_list.py`` script. - Before the First Run -~~~~~~~~~~~~~~~~~~~~ +-------------------- After local installation you will find the commands ``mistral-server`` and ``mistral-db-manage`` available in your environment. The ``mistral-db-manage`` @@ -180,50 +145,35 @@ $ mistral-db-manage --config-file upgrade head +To populate the database with standard actions and workflows, type:: + $ mistral-db-manage --config-file populate + For more detailed information about ``mistral-db-manage`` script please check file ``mistral/db/sqlalchemy/migration/alembic_migrations/README.md``. -** NOTE: For users want a dry run with SQLite backend(not used in production), -``mistral-db-manage`` is not recommended for database initialization due to -`SQLite limitations `_. Please use -``sync_db`` script described below instead for database initialization. - -Before starting Mistral server, run ``sync_db`` script. It prepares the DB, -creates in it with all standard actions and standard workflows which Mistral -provides for all mistral users. 
- -If you are using virtualenv:: - - $ tools/sync_db.sh --config-file - -Or run ``sync_db`` directly:: - - $ python tools/sync_db.py --config-file - - Running Mistral API server -~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------- To run Mistral API server:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api --config-file + --server api --config-file Running Mistral Engines -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- To run Mistral Engine:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server engine --config-file + --server engine --config-file Running Mistral Task Executors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------ To run Mistral Task Executor instance:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server executor --config-file + --server executor --config-file Note that at least one Engine instance and one Executor instance should be running in order for workflow tasks to be processed by Mistral. @@ -245,13 +195,13 @@ ... Workflow YAML ... Running Multiple Mistral Servers Under the Same Process -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------- To run more than one server (API, Engine, or Task Executor) on the same process:: $ tox -evenv -- python mistral/cmd/launch.py \ - --server api,engine --config-file + --server api,engine --config-file The value for the ``--server`` option can be a comma-delimited list. The valid options are ``all`` (which is the default if not specified) or any combination @@ -262,75 +212,56 @@ servers are launched on the same process. Otherwise, messages do not get delivered because the ``fake`` transport is using an in-process queue. +Project Goals 2017 +------------------ -Mistral Client -~~~~~~~~~~~~~~ - -The Mistral command line tool is provided by the ``python-mistralclient`` -package which is available -`here `__. 
- - -Debugging -~~~~~~~~~ - -To debug using a local engine and executor without dependencies such as -RabbitMQ, make sure your ``/etc/mistral/mistral.conf`` has the following settings:: - - [DEFAULT] - rpc_backend = fake - - [pecan] - auth_enable = False - -and run the following command in *pdb*, *PyDev* or *PyCharm*:: - - mistral/cmd/launch.py --server all --config-file /etc/mistral/mistral.conf --use-debugger - -.. note:: - - In PyCharm, you also need to enable the Gevent compatibility flag in - Settings -> Build, Execution, Deployment -> Python Debugger -> Gevent - compatible. Without this setting, PyCharm will not show variable values - and become unstable during debugging. - - -Running unit tests in PyCharm -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#. **Complete Mistral documentation**. -In order to be able to conveniently run unit tests, you need to: + Mistral documentation should be more usable. It requires focused work to + make it well structured, eliminate gaps in API/Mistral Workflow Language + specifications, add more examples and tutorials. -1. Set unit tests as the default runner: + *Definition of done*: + All capabilities are covered, all documentation topics are written using + the same style and structure principles. The obvious sub-goal of this goal + is to establish these principles. - Settings -> Tools -> Python Integrated Tools -> Default test runner: Unittests +#. **Complete Mistral Custom Actions API**. -2. Enable test detection for all classes: + There has been the initiative in Mistral team since April of 2016 to + refactor Mistral actions subsystem in order to make the process of + developing Mistral actions easier and clearer. In 2017 we need to complete + this effort and make sure that all APIs are stable and it’s well-documented. 
- Run/Debug Configurations -> Defaults -> Python tests -> Unittests -> uncheck - Inspect only subclasses of unittest.TestCase + *Definition of done*: + All API interfaces are stable, existing actions are rewritten using this new + API, OpenStack actions are also rewritten based on the new API and moved to + mistral-extra repo. Everything is well documented and the doc has enough + examples. -Running examples -~~~~~~~~~~~~~~~~ +#. **Finish Mistral multi-node mode**. -To run the examples find them in mistral-extra repository -(https://github.com/openstack/mistral-extra) and follow the instructions on -each example. + Mistral needs to be proven to work reliably in multi-node mode. In order + to achieve it we need to make a number of engine, executor and RPC + changes and configure a CI gate to run stress tests on multi-node Mistral. + *Definition of done*: + CI gate supports MySQL, all critically important functionality (join, + with-items, parallel workflows, sequential workflows) is covered by tests. -Tests -~~~~~ +#. **Reduce workflow execution time**. -You can run some of the functional tests in non-openstack mode locally. To do -this: + *Definition of done*: Average workflow execution time reduced by 30%. -#. set ``auth_enable = False`` in the ``mistral.conf`` and restart Mistral -#. execute:: +Project Resources +----------------- - $ ./run_functional_tests.sh +* `Mistral Official Documentation `_ -To run tests for only one version need to specify it:: +* Project status, bugs, and blueprints are tracked on + `Launchpad `_ - $ bash run_functional_tests.sh v1 +* Additional resources are linked from the project + `Wiki `_ page -More information about automated tests for Mistral can be found on -`Mistral Wiki `_. 
+* Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 diff -Nru mistral-4.0.0/releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml mistral-5.0.0~b2/releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml --- mistral-4.0.0/releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,11 @@ +--- +features: + - Support to specify 'action_region' for OpenStack actions so that it's + possible to operate different resources in different regions in one single + workflow. +upgrade: + - Run ``python tools/sync_db.py --config-file `` to + re-populate database. +deprecations: + - The config option 'os-actions-endpoint-type' is moved from DEFAULT group + to 'openstack_actions' group. diff -Nru mistral-4.0.0/releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml mistral-5.0.0~b2/releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml --- mistral-4.0.0/releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,16 @@ +--- +fixes: + - When we pass a workflow environment to workflow parameters + using 'env' Mistral first evaluates it assuming that it + can contain expressions (YAQL/Jinja) For example, one + environment variable can be expressed through the other. + In some cases it causes problems. For example, if the + environment is too big and has many expressions, especially + something like <% $ %> or <% env() %>. Also, in some cases + we don't want any evaluations to happen if we want to + have some informative text in the environment containing + expressions. 
+ In order to address that the 'evaluate_env' workflow parameter + was added, defaulting to True for backwards compatibility. + If it's set to False then it disables evaluation of + expressions in the environment. diff -Nru mistral-4.0.0/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml mistral-5.0.0~b2/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml --- mistral-4.0.0/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,9 @@ +--- +features: + - External OpenStack action mapping file could be specified at sync_db.sh or + mistral-db-mange script. + For more details see 'sync_db.sh --help' or 'mistral-db-manage --help'. + + - From now it is optional to list openstack modules in mapping file which + you would not include into supported action set. + diff -Nru mistral-4.0.0/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml mistral-5.0.0~b2/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml --- mistral-4.0.0/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,16 @@ +--- + +features: + - | + + New parameter called 'include_output' added to action execution api. + By default output field does not return when calling list action executions + API + +critical: + - | + + By default, output field will not return when calling list action + executions. 
In the previous version it did, so if a user used this, and/or + wants to get output field when calling list action executions API, it will + be possible only by using the new include output parameter. diff -Nru mistral-4.0.0/releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml mistral-5.0.0~b2/releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml --- mistral-4.0.0/releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,4 @@ +--- +features: + - By default, admin user could get/list/update/delete other projects' + resources. In Pike, only workflow/execution are supported. diff -Nru mistral-4.0.0/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml mistral-5.0.0~b2/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml --- mistral-4.0.0/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,4 @@ +--- +features: + - Mistral action developer can get the start time of a workflow execution by + using ``<% execution().created_at %>``. 
diff -Nru mistral-4.0.0/releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml mistral-5.0.0~b2/releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml --- mistral-4.0.0/releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml 2017-06-09 12:48:26.000000000 +0000 @@ -0,0 +1,5 @@ +--- +fixes: + - Added support for referencing task and workflow context data, including + environment variables via env(), when using YAQL/Jinja2 expressions inside AdHoc Actions. + YAQL/Jinja2 expressions can reference env() and other context data in the base-input section. diff -Nru mistral-4.0.0/releasenotes/source/index.rst mistral-5.0.0~b2/releasenotes/source/index.rst --- mistral-4.0.0/releasenotes/source/index.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/releasenotes/source/index.rst 2017-06-09 12:48:26.000000000 +0000 @@ -1,3 +1,16 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ ====================== Mistral Release Notes ====================== diff -Nru mistral-4.0.0/requirements.txt mistral-5.0.0~b2/requirements.txt --- mistral-4.0.0/requirements.txt 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/requirements.txt 2017-06-09 12:48:26.000000000 +0000 @@ -4,42 +4,43 @@ alembic>=0.8.10 # MIT aodhclient>=0.7.0 # Apache-2.0 -Babel>=2.3.4 # BSD +Babel!=2.4.0,>=2.3.4 # BSD croniter>=0.3.4 # MIT License cachetools>=1.1.0 # MIT License -eventlet!=0.18.3,>=0.18.2 # MIT +eventlet!=0.18.3,<0.21.0,>=0.18.2 # MIT gnocchiclient>=2.7.0 # Apache-2.0 Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause) jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT keystonemiddleware>=4.12.0 # Apache-2.0 +mistral-lib>=0.2.0 # Apache-2.0 networkx>=1.10 # BSD oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config!=3.18.0,>=3.14.0 # Apache-2.0 -oslo.db>=4.15.0 # Apache-2.0 -oslo.i18n>=2.1.0 # Apache-2.0 -oslo.messaging>=5.14.0 # Apache-2.0 -oslo.middleware>=3.0.0 # Apache-2.0 +oslo.config>=4.0.0 # Apache-2.0 +oslo.db>=4.21.1 # Apache-2.0 +oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 +oslo.messaging!=5.25.0,>=5.24.2 # Apache-2.0 +oslo.middleware>=3.27.0 # Apache-2.0 oslo.policy>=1.17.0 # Apache-2.0 -oslo.utils>=3.18.0 # Apache-2.0 -oslo.log>=3.11.0 # Apache-2.0 +oslo.utils>=3.20.0 # Apache-2.0 +oslo.log>=3.22.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 paramiko>=2.0 # LGPLv2.1+ -pbr>=1.8 # Apache-2.0 +pbr!=2.1.0,>=2.0.0 # Apache-2.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD python-barbicanclient>=4.0.0 # Apache-2.0 python-ceilometerclient>=2.5.0 # Apache-2.0 -python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 # Apache-2.0 +python-cinderclient>=2.1.0 # Apache-2.0 python-designateclient>=1.5.0 # Apache-2.0 -python-glanceclient>=2.5.0 # Apache-2.0 +python-glanceclient>=2.7.0 # Apache-2.0 python-heatclient>=1.6.1 # Apache-2.0 python-keystoneclient>=3.8.0 # Apache-2.0 
-python-mistralclient>=2.0.0 # Apache-2.0 +python-mistralclient>=3.1.0 # Apache-2.0 python-magnumclient>=2.0.0 # Apache-2.0 python-muranoclient>=0.8.2 # Apache-2.0 -python-neutronclient>=5.1.0 # Apache-2.0 -python-novaclient!=7.0.0,>=6.0.0 # Apache-2.0 +python-neutronclient>=6.3.0 # Apache-2.0 +python-novaclient>=7.1.0 # Apache-2.0 python-senlinclient>=1.1.0 # Apache-2.0 python-swiftclient>=3.2.0 # Apache-2.0 python-tackerclient>=0.8.0 # Apache-2.0 @@ -48,12 +49,12 @@ python-ironic-inspector-client>=1.5.0 # Apache-2.0 python-zaqarclient>=1.0.0 # Apache-2.0 PyYAML>=3.10.0 # MIT -requests!=2.12.2,>=2.10.0 # Apache-2.0 +requests!=2.12.2,!=2.13.0,>=2.10.0 # Apache-2.0 tenacity>=3.2.1 # Apache-2.0 -setuptools!=24.0.0,>=16.0 # PSF/ZPL +setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,>=16.0 # PSF/ZPL six>=1.9.0 # MIT -SQLAlchemy<1.1.0,>=1.0.10 # MIT -stevedore>=1.17.1 # Apache-2.0 +SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT +stevedore>=1.20.0 # Apache-2.0 WSME>=0.8 # MIT yaql>=1.1.0 # Apache 2.0 License tooz>=1.47.0 # Apache-2.0 diff -Nru mistral-4.0.0/setup.cfg mistral-5.0.0~b2/setup.cfg --- mistral-4.0.0/setup.cfg 2017-02-22 13:45:28.000000000 +0000 +++ mistral-5.0.0~b2/setup.cfg 2017-06-09 12:52:05.000000000 +0000 @@ -29,9 +29,6 @@ build-dir = doc/build all_files = 1 -[pbr] -autodoc_tree_index_modules = True - [upload_sphinx] upload-dir = doc/build/html @@ -39,11 +36,13 @@ console_scripts = mistral-server = mistral.cmd.launch:main mistral-db-manage = mistral.db.sqlalchemy.migration.cli:main -mistral.engine.rpc_backend = - oslo_client = mistral.engine.rpc_backend.oslo.oslo_client:OsloRPCClient - oslo_server = mistral.engine.rpc_backend.oslo.oslo_server:OsloRPCServer - kombu_client = mistral.engine.rpc_backend.kombu.kombu_client:KombuRPCClient - kombu_server = mistral.engine.rpc_backend.kombu.kombu_server:KombuRPCServer +wsgi_scripts = + mistral-wsgi-api = mistral.api.app:init_wsgi 
+mistral.rpc.backends = + oslo_client = mistral.rpc.oslo.oslo_client:OsloRPCClient + oslo_server = mistral.rpc.oslo.oslo_server:OsloRPCServer + kombu_client = mistral.rpc.kombu.kombu_client:KombuRPCClient + kombu_server = mistral.rpc.kombu.kombu_server:KombuRPCServer oslo.config.opts = mistral.config = mistral.config:list_opts oslo.config.opts.defaults = @@ -64,7 +63,11 @@ std.js = mistral.actions.std_actions:JavaScriptAction std.sleep = mistral.actions.std_actions:SleepAction std.test_dict = mistral.actions.std_actions:TestDictAction +mistral.executors = + local = mistral.executors.default_executor:DefaultExecutor + remote = mistral.executors.remote_executor:RemoteExecutor mistral.expression.functions = + global = mistral.utils.expression_utils:global_ json_pp = mistral.utils.expression_utils:json_pp_ task = mistral.utils.expression_utils:task_ tasks = mistral.utils.expression_utils:tasks_ @@ -80,6 +83,8 @@ kombu_driver.executors = blocking = futurist:SynchronousExecutor threading = futurist:ThreadPoolExecutor +pygments.lexers = + mistral = mistral.ext.pygmentplugin:MistralLexer [egg_info] tag_build = diff -Nru mistral-4.0.0/setup.py mistral-5.0.0~b2/setup.py --- mistral-4.0.0/setup.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/setup.py 2017-06-09 12:48:26.000000000 +0000 @@ -25,5 +25,5 @@ pass setuptools.setup( - setup_requires=['pbr>=1.8'], + setup_requires=['pbr>=2.0.0'], pbr=True) diff -Nru mistral-4.0.0/.testr.conf mistral-5.0.0~b2/.testr.conf --- mistral-4.0.0/.testr.conf 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/.testr.conf 2017-06-09 12:48:26.000000000 +0000 @@ -7,3 +7,4 @@ test_id_option=--load-list $IDFILE test_list_option=--list +test_run_concurrency=echo ${TEST_RUN_CONCURRENCY:-0} diff -Nru mistral-4.0.0/test-requirements.txt mistral-5.0.0~b2/test-requirements.txt --- mistral-4.0.0/test-requirements.txt 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/test-requirements.txt 2017-06-09 12:48:26.000000000 +0000 @@ -1,9 
+1,9 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -coverage>=4.0 # Apache-2.0 +coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD -hacking<0.11,>=0.10.0 +hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 nose # LGPL oslosphinx>=4.7.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 @@ -11,11 +11,12 @@ pyflakes==0.8.1 # MIT mock>=2.0 # BSD requests-mock>=1.1 # Apache-2.0 -sphinx!=1.3b1,<1.4,>=1.2.1 # BSD +sphinx!=1.6.1,>=1.5.1 # BSD sphinxcontrib-httpdomain # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 openstackdocstheme>=1.5.0 # Apache-2.0 +tempest>=14.0.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT unittest2 # BSD -reno>=1.8.0 # Apache-2.0 +reno!=2.3.1,>=1.8.0 # Apache-2.0 diff -Nru mistral-4.0.0/tools/cover.sh mistral-5.0.0~b2/tools/cover.sh --- mistral-4.0.0/tools/cover.sh 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/cover.sh 2017-06-09 12:48:26.000000000 +0000 @@ -19,31 +19,36 @@ diff -U 0 $1 $2 | sed 1,2d } -# Stash uncommitted changes, checkout master and save coverage report +# Stash uncommitted changes, checkout previous commit and save coverage report uncommitted=$(git status --porcelain | grep -v "^??") [[ -n $uncommitted ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t mistral_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" -coverage report > $baseline_report +coverage report -m > $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) +previous_sha=$(git rev-parse HEAD); # Checkout back and unstash uncommitted changes (if any) git checkout - [[ -n $uncommitted ]] && git stash pop > /dev/null +# Erase previously collected coverage data. 
+coverage erase; + # Generate and save coverage report current_report=$(mktemp -t mistral_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" -coverage report > $current_report +coverage report -m > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" -echo "Missing lines in master : ${baseline_missing}" +echo "Compared against ${previous_sha}"; +echo "Missing lines in previous commit : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -gt $current_missing ]; diff -Nru mistral-4.0.0/tools/docker/Dockerfile mistral-5.0.0~b2/tools/docker/Dockerfile --- mistral-4.0.0/tools/docker/Dockerfile 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/docker/Dockerfile 2017-06-09 12:48:26.000000000 +0000 @@ -1,4 +1,4 @@ -FROM ubuntu:14.04 +FROM ubuntu:16.04 MAINTAINER hardik.parekh@nectechnologies.in ADD . /opt/stack/mistral diff -Nru mistral-4.0.0/tools/docker/Dockerfile_script.sh mistral-5.0.0~b2/tools/docker/Dockerfile_script.sh --- mistral-4.0.0/tools/docker/Dockerfile_script.sh 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/docker/Dockerfile_script.sh 2017-06-09 12:48:26.000000000 +0000 @@ -14,6 +14,7 @@ python-dev \ python-pip \ python-setuptools \ + sudo sudo pip install tox==1.6.1 python-mistralclient diff -Nru mistral-4.0.0/tools/docker/DOCKER_README.rst mistral-5.0.0~b2/tools/docker/DOCKER_README.rst --- mistral-4.0.0/tools/docker/DOCKER_README.rst 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/docker/DOCKER_README.rst 2017-06-09 12:48:26.000000000 +0000 @@ -20,7 +20,6 @@ `_.
- Build the Mistral image ----------------------- @@ -36,7 +35,7 @@ Start Mistral:: - docker run -d -p 8989:8989 --name mistral mistral-all + docker run -d --link rabbitmq:rabbitmq -p 8989:8989 --name mistral mistral-all To execute commands inside the container:: @@ -47,11 +46,40 @@ mistral workflow-list +Running Mistral From A Volume +----------------------------- + +A scenario you may find useful for development is to clone a Mistral git repo +and link it into the container via a volume. This will allow you to make changes +to the Mistral source on your local machine and execute them immediately in the +container. + +The following example illustrates launching the container from the local +directory of a git repo clone of Mistral.:: + + docker run -d --link rabbitmq:rabbitmq -v $(pwd):/opt/stack/mistral:Z -p 8989:8989 --name mistral mistral-all + +You might want to mount an additional drive to move files easily between your +development computer and the container. An easy way to do this is to mount an +additional volume that maps to /home/mistral/ in the container. + +Since the directory is already being used to store the mistral.conf and +mistral.sqlite files, you will want to copy these to the local directory you +intend to use for the mount. This example assumes the directory to mount is +"/tmp/mistral". You should change this to the actual directory you intend to +use.:: + + docker cp mistral:/home/mistral/mistral.conf /tmp/mistral/mistral.conf + docker cp mistral:/home/mistral/mistral.sqlite /tmp/mistral/mistral.sqlite + + docker run -d --link rabbitmq:rabbitmq -v $(pwd):/opt/stack/mistral:Z -v /tmp/mistral:/home/mistral:Z -p 8989:8989 --name mistral mistral-all + + Running Mistral with MySQL -------------------------- Other than the simplest use cases will very probably fail with various errors -due to the default Sqlight database. It is highly recommended that, for +due to the default Sqlite database. 
It is highly recommended that, for example, MySQL is used as database backend. The `start_mistral_rabbit_mysql.sh` script sets up a rabbitmq container, a diff -Nru mistral-4.0.0/tools/get_action_list.py mistral-5.0.0~b2/tools/get_action_list.py --- mistral-4.0.0/tools/get_action_list.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/get_action_list.py 2017-06-09 12:48:26.000000000 +0000 @@ -23,13 +23,13 @@ from barbicanclient import base as barbican_base from barbicanclient import client as barbicanclient from ceilometerclient.v2 import client as ceilometerclient -from cinderclient.openstack.common.apiclient import base as cinder_base +from cinderclient.apiclient import base as cinder_base from cinderclient.v2 import client as cinderclient from designateclient import client as designateclient from glanceclient.v2 import client as glanceclient from gnocchiclient.v1 import base as gnocchi_base from gnocchiclient.v1 import client as gnocchiclient -from heatclient.openstack.common.apiclient import base as heat_base +from heatclient.common import base as heat_base from heatclient.v1 import client as heatclient from ironicclient.common import base as ironic_base from ironicclient.v1 import client as ironicclient diff -Nru mistral-4.0.0/tools/sync_db.py mistral-5.0.0~b2/tools/sync_db.py --- mistral-4.0.0/tools/sync_db.py 2017-02-22 13:40:59.000000000 +0000 +++ mistral-5.0.0~b2/tools/sync_db.py 2017-06-09 12:48:26.000000000 +0000 @@ -23,8 +23,8 @@ from mistral.services import action_manager from mistral.services import workflows - CONF = cfg.CONF +LOG = logging.getLogger(__name__) def main(): @@ -36,16 +36,23 @@ for group, opts in keystonemw_opts.list_auth_token_opts(): CONF.register_opts(opts, group=group) + CONF.register_cli_opt(config.os_actions_mapping_path) + + logging.register_options(CONF) + config.parse_args() if len(CONF.config_file) == 0: print("Usage: sync_db --config-file ") return exit(1) - logging.setup(CONF, 'Mistral') + LOG.info("Starting 
db_sync") + + LOG.debug("Setting up db") db_api.setup_db() + LOG.debug("populating db") action_manager.sync_db() workflows.sync_db() diff -Nru mistral-4.0.0/tox.ini mistral-5.0.0~b2/tox.ini --- mistral-4.0.0/tox.ini 2017-02-22 13:41:01.000000000 +0000 +++ mistral-5.0.0~b2/tox.ini 2017-06-09 12:48:26.000000000 +0000 @@ -5,7 +5,7 @@ [testenv] usedevelop = True -install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/ocata} {opts} {packages} +install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE = 1 PYTHONWARNINGS=default::DeprecationWarning @@ -29,7 +29,10 @@ commands = ./run_tests.sh -N --db-type mysql [testenv:pep8] -commands = flake8 {posargs} . {toxinidir}/tools/get_action_list.py {toxinidir}/tools/sync_db.py +basepython = python2.7 +commands = + flake8 {posargs} . {toxinidir}/tools/get_action_list.py {toxinidir}/tools/sync_db.py + check-uuid --package mistral_tempest_tests [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those @@ -45,11 +48,13 @@ #set PYTHONHASHSEED=0 to prevent wsmeext.sphinxext from randomly failing. [testenv:venv] +basepython = python2.7 setenv = PYTHONHASHSEED=0 commands = {posargs} #set PYTHONHASHSEED=0 to prevent wsmeext.sphinxext from randomly failing. [testenv:docs] +basepython = python2.7 setenv = PYTHONHASHSEED=0 commands = python setup.py build_sphinx