commit 0892db6fa778b1dab2ad5bf748331ba3df138757 Author: Jon Herron Date: Wed Oct 12 10:19:53 2022 -0400 Squashed 'SpiffWorkflow/' content from commit 63db3e4 git-subtree-dir: SpiffWorkflow git-subtree-split: 63db3e45947ec66b8d0efc2c74064004f8ff482c diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..6a1ab4596 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,9 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +max_line_length = 79 diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..ba297ee6d --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] + diff --git a/.github/workflows/publish-on-release.yml b/.github/workflows/publish-on-release.yml new file mode 100644 index 000000000..33a70c989 --- /dev/null +++ b/.github/workflows/publish-on-release.yml @@ -0,0 +1,33 @@ +name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI +on: + release: + types: [published] +jobs: + build-n-publish: + name: Build and publish Python 🐍 distributions 📦 to PyPI + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up Python 3.9 + uses: actions/setup-python@v1 + with: + python-version: 3.9 + - name: Install pypa/build + run: >- + python -m + pip install + build + --user + - name: Build a binary wheel and a source tarball + run: >- + python -m + build + --sdist + --wheel + --outdir dist/ + - name: Publish distribution 📦 to PyPI + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@master + with: + username: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..d16de1656 --- /dev/null +++ b/.gitignore @@ -0,0 +1,279 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/python,intellij+all,macos +# Edit at https://www.toptal.com/developers/gitignore?templates=python,intellij+all,macos + +### Intellij+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### Intellij+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# End of https://www.toptal.com/developers/gitignore/api/python,intellij+all,macos + +*.py[co] +*.swp +dist +/build +*.egg-info +unit_test.cfg +nosetests.xml +.coverage +coverage.xml +.c9revisions +.idea +/venv diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 000000000..9d94bbeaf --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,16 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + + +build: + image: latest + +# Optionally build your docs in additional formats such as PDF +formats: [] + +python: + pip_install: true + version: 3.7 + extra_requirements: + - docs diff --git a/.sonarcloud.properties b/.sonarcloud.properties new file mode 100644 index 000000000..5b04a73e9 --- /dev/null +++ b/.sonarcloud.properties @@ -0,0 +1,7 @@ +sonar.organization=sartography +sonar.projectKey=sartography_SpiffWorkflow +sonar.host.url=https://sonarcloud.io +sonar.exclusions=*.bpmn,*.dmn,doc/** +sonar.sources=SpiffWorkflow +sonar.test.inclusions=tests +sonar.python.coverage.reportPaths=tests/SpiffWorkflow/coverage.xml diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 000000000..7e78d9af5 --- /dev/null +++ b/.tool-versions @@ -0,0 +1 @@ +python 3.10.4 diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..526088a6a --- /dev/null +++ b/.travis.yml @@ -0,0 +1,36 @@ +dist: focal +language: python + +python: + - 3.7 + - 3.8 + - 3.9 + - 3.10 + +addons: + sonarcloud: + organization: sartography + +install: + - pip install -r requirements.txt + - pip install celery + +script: + - cd tests/SpiffWorkflow + - coverage run --source=SpiffWorkflow -m unittest discover -v . "*Test.py" + - coverage xml -i + - cd ../.. + +after_success: + - sonar-scanner + +git: + depth: false + +jobs: + include: + - python: 3.7 + - python: 3.8 + - python: 3.9 + - python: 3.10 + env: RUN_QUALITY_GATES=true diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 000000000..05623d342 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,5 @@ +Samuel Abels +Ziad Sawalha +Matthew Hampton +Kelly McDonald +Dan Funk diff --git a/CONTRIB b/CONTRIB new file mode 100644 index 000000000..bb3f13ad2 --- /dev/null +++ b/CONTRIB @@ -0,0 +1,60 @@ +Guide for Contributors +####################### + +Coding style: + + Please follow PEP8: http://www.python.org/dev/peps/pep-0008/ + +Testing: + + Non-public classes and methods MUST be prefixed by _. This is also important + because the test and API documentation machinery makes assumptions based on + this convention. + + Every added public class MUST have a corresponding unit test. The tests are + placed in the following directory: tests/SpiffWorkflow/ + The test directory layout mirrors the source code directory layout, e.g. + SpiffWorkflow/specs/Join.py + has a corresponding test in + tests/SpiffWorkflow/specs/JoinTest.py + + The unit test for each class MUST have a CORRELATE class attribute that points + to the tested class. 
(The test machinery uses this attribute to find untested + methods.) + + Each commit MUST NOT break functionality. In other words, the code in the + repository should function at any time, and all test MUST pass. + +Documentation: + + Every public class and function or method MUST include API documentation. The + documentation MUST cover the method's arguments and return values. + + Write inline documentation generously. + +Repository: + + Make sure that each commit contains related changes only. E.g. don't fix + two unrelated bugs in one commit, or introduce a new feature while refactoring + another part of the program in the same commit. When in doubt, use multiple + small commits. In general, most commits should be relatively small unless they + are plain additions. + +Licensing: + + You have to agree to licensing under the lGPLv3, and every added file MUST + include a copyright header. + + If you modify a file and add a chunk of at least 7 lines in size, please add + yourself to the copyright header of that file. + +## Releases +For you dev op folks who release builds to the larger community ... + +Be sure to edit the conf.py, and update the release tag: doc/conf.py +And also edit setup.py and assure that has the same release tag. +New versions of SpiffWorkflow are automatically published to PyPi whenever +a maintainer of our GitHub repository creates a new release on GitHub. This +is managed through GitHub's actions. The configuration of which can be +found in .github/workflows/.... +Just create a release in GitHub that mathches the release number in doc/conf.py diff --git a/COPYING b/COPYING new file mode 100644 index 000000000..aad50338b --- /dev/null +++ b/COPYING @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. 
Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. 
(If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..ca5f172f6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,4 @@ +FROM python:3.6 +RUN apt-get -y update && apt-get upgrade -yu +COPY . /tmp/SpiffWorkflow +RUN cd /tmp/SpiffWorkflow && make wheel && pip install dist/SpiffWorkflow*.whl diff --git a/INSTALL b/INSTALL new file mode 100644 index 000000000..3e8479788 --- /dev/null +++ b/INSTALL @@ -0,0 +1,3 @@ +To install this package, run + + sudo python setup.py install --prefix /usr/local diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..180691381 --- /dev/null +++ b/Makefile @@ -0,0 +1,88 @@ +NAME=SpiffWorkflow +VERSION=`python setup.py --version` +PREFIX=/usr/local/ +BIN_DIR=$(PREFIX)/bin +SITE_DIR=$(PREFIX)`python -c "import sys; from distutils.sysconfig import get_python_lib; print get_python_lib()[len(sys.prefix):]"` + +################################################################### +# Standard targets. +################################################################### +.PHONY : clean +clean: + find . -name "*.pyc" -o -name "*.pyo" | xargs -rn1 rm -f + find . 
-name "*.egg-info" | xargs -rn1 rm -r + rm -Rf build + cd doc; make clean + +.PHONY : dist-clean +dist-clean: clean + rm -Rf dist + +.PHONY : doc +doc: + cd doc; make + +install: + mkdir -p $(SITE_DIR) + ./version.sh + export PYTHONPATH=$(SITE_DIR):$(PYTHONPATH); \ + python setup.py install --prefix $(PREFIX) \ + --install-scripts $(BIN_DIR) \ + --install-lib $(SITE_DIR) + ./version.sh --reset + +uninstall: + # Sorry, Python's distutils support no such action yet. + +.PHONY : tests +tests: + cd tests/$(NAME) + PYTHONPATH=../.. python -m unittest discover -v . "*Test.py" + +.PHONY : tests-cov +tests-cov: + cd tests/$(NAME) + coverage run --source=$(NAME) -m unittest discover -v . "*Test.py" + +.PHONY : tests-ind +tests-ind: + cd tests/$(NAME) + @PYTHONPATH=../.. find . -name "*Test.py" -printf '%p' -exec python -m unittest {} \; + +.PHONY : tests-timing +tests-timing: + @make tests-ind 2>&1 | ./scripts/test_times.py + +################################################################### +# Package builders. +################################################################### +targz: clean + ./version.sh + python setup.py sdist --formats gztar + ./version.sh --reset + +tarbz: clean + ./version.sh + python setup.py sdist --formats bztar + ./version.sh --reset + +wheel: clean + ./version.sh + python setup.py bdist_wheel --universal + ./version.sh --reset + +deb: clean + ./version.sh + debuild -S -sa + cd ..; sudo pbuilder build $(NAME)_$(VERSION)-0ubuntu1.dsc; cd - + ./version.sh --reset + +dist: targz tarbz wheel + +################################################################### +# Publishers. +################################################################### +dist-publish: + ./version.sh + python setup.py bdist_wheel --universal upload + ./version.sh --reset diff --git a/README.md b/README.md new file mode 100644 index 000000000..a5ba1c772 --- /dev/null +++ b/README.md @@ -0,0 +1,121 @@ +## SpiffWorkflow +![Logo](./graphics/logo_med.png) + +Spiff Workflow is a workflow engine implemented in pure Python. It is based on +the excellent work of the Workflow Patterns initiative. In 2020 and 2021, +extensive support was added for BPMN / DMN processing. + +## Motivation +We created SpiffWorkflow to support the development of low-code business +applications in Python. Using BPMN will allow non-developers to describe +complex workflow processes in a visual diagram, coupled with a powerful python +script engine that works seamlessly within the diagrams. SpiffWorkflow can parse +these diagrams and execute them. The ability for businesses to create +clear, coherent diagrams that drive an application has far reaching potential. +While multiple tools exist for doing this in Java, we believe that wide +adoption of the Python Language, and it's ease of use, create a winning +strategy for building Low-Code applications. 
+
+
+## Build status
+[![Build Status](https://travis-ci.com/sartography/SpiffWorkflow.svg?branch=master)](https://travis-ci.org/sartography/SpiffWorkflow)
+[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=sartography_SpiffWorkflow&metric=alert_status)](https://sonarcloud.io/dashboard?id=sartography_SpiffWorkflow)
+[![Coverage](https://sonarcloud.io/api/project_badges/measure?project=sartography_SpiffWorkflow&metric=coverage)](https://sonarcloud.io/dashboard?id=sartography_SpiffWorkflow)
+[![Maintainability Rating](https://sonarcloud.io/api/project_badges/measure?project=sartography_SpiffWorkflow&metric=sqale_rating)](https://sonarcloud.io/dashboard?id=sartography_SpiffWorkflow)
+[![Documentation Status](https://readthedocs.org/projects/spiffworkflow/badge/?version=latest)](http://spiffworkflow.readthedocs.io/en/latest/?badge=latest)
+[![Issues](https://img.shields.io/github/issues/sartography/spiffworkflow)](https://github.com/sartography/SpiffWorkflow/issues)
+[![Pull Requests](https://img.shields.io/github/issues-pr/sartography/spiffworkflow)](https://github.com/sartography/SpiffWorkflow/pulls)
+
+## Code style
+
+[![PEP8](https://img.shields.io/badge/code%20style-pep8-orange.svg)](https://www.python.org/dev/peps/pep-0008/)
+
+
+## Dependencies
+We've worked to minimize external dependencies. We rely on lxml for parsing
+XML documents, and there is some legacy support for Celery, but it is not
+core to the implementation; it is just a way to interconnect these systems.
+Built with
+- [lxml](https://lxml.de/)
+- [celery](https://docs.celeryproject.org/en/stable/)
+
+## Features
+* __BPMN__ - support for parsing BPMN diagrams, including the more complex
+components, like pools and lanes, multi-instance tasks, sub-workflows, timer
+events, signals, messages, boundary events and looping.
+* __DMN__ - We have a baseline implementation of DMN that is well integrated
+with our Python Execution Engine.
+* __Forms__ - forms, including text fields, selection lists, and most everything
+else that can be extracted from the Camunda XML extension, returned as
+JSON data that can be used to generate forms on the command line, or in web
+applications (we've used Formly with good success).
+* __Python Workflows__ - We've retained support for building workflows directly
+in code, or running workflows based on an internal JSON data structure.
+
+_A complete list of the latest features is available with our [release notes](https://github.com/sartography/SpiffWorkflow/releases/tag/1.0) for
+version 1.0._
+
+## Code Examples and Documentation
+Detailed documentation is available on [ReadTheDocs](https://spiffworkflow.readthedocs.io/en/latest/).
+Also, check out our [example application](https://github.com/sartography/spiff-example-cli), which we
+reference extensively from the documentation.
+
+## Installation
+```
+pip install spiffworkflow
+```
+
+## Tests
+```
+cd tests/SpiffWorkflow
+coverage run --source=SpiffWorkflow -m unittest discover -v . "*Test.py"
+```
+
+## Support
+You can find us on Discord at https://discord.gg/zDEBEnrF
+
+Commercial support for SpiffWorkflow is available from
+[Sartography](https://sartography.com).
+
+## Contribute
+Pull Requests are and always will be welcome!
+
+Please check your formatting, ensure that all tests are passing, and include
+any additional tests that can demonstrate the new code you created is working
+as expected. If applicable, please reference the issue number in your pull
+request.
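+
+## A Minimal Example
+The sketch below shows roughly what driving a workflow from Python looks like.
+The file name and process id are placeholders, it assumes a diagram with at
+least one user task, and the hard-coded answer stands in for however your
+application collects data; see the documentation and example application above
+for complete, working code.
+
+```python
+from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser
+from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
+
+# Parse a BPMN file and build a workflow from one of its processes.
+parser = BpmnParser()
+parser.add_bpmn_file('my_workflow.bpmn')                # placeholder file name
+workflow = BpmnWorkflow(parser.get_spec('my_process'))  # placeholder process id
+
+# Run the automated tasks, then hand any ready user tasks to the application.
+workflow.do_engine_steps()
+while not workflow.is_completed():
+    for task in workflow.get_ready_user_tasks():
+        task.data['answer'] = 42  # collect data however your application likes
+        task.complete()
+    workflow.do_engine_steps()
+```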
+ +## Credits and Thanks + +Samuel Abels (@knipknap) for creating SpiffWorkflow and maintaining it for over +a decade. + +Matthew Hampton (@matthewhampton) for his initial contributions around BPMN +parsing and execution. + +The University of Virginia for allowing us to take on the mammoth task of +building a general-purpose workflow system for BPMN, and allowing us to +contribute that back to the open source community. In particular, we would like +to thank [Ron Hutchins](https://www.linkedin.com/in/ron-hutchins-b19603123/), +for his trust and support. Without him our efforts would not be possible. + +Bruce Silver, the author of BPMN Quick and Easy Using Method and Style, whose +work we referenced extensively as we made implementation decisions and +educated ourselves on the BPMN and DMN standards. + +The BPMN.js library, without which we would not have the tools to effectively +build out our models, embed an editor in our application, and pull this mad +mess together. + +Kelly McDonald (@w4kpm) who dove deeper into the core of SpiffWorkflow than +anyone else, and was instrumental in helping us get some of these major +enhancements working correctly. + +Thanks also to the many contributions from our community. Large and small. +From Ziad (@ziadsawalha) in the early days to Elizabeth (@essweine) more +recently. It is good to be a part of this long lived and strong +community. + + +## License +GNU LESSER GENERAL PUBLIC LICENSE diff --git a/TODO b/TODO new file mode 100644 index 000000000..05ca4b422 --- /dev/null +++ b/TODO @@ -0,0 +1,5 @@ +* Write an asynchronous server. +* As soon as it is possible to trigger an action twice without + creating another branch (some kind of asynchronous notification, + perhaps), make sure to test the generalized AND-join with that + in xml/patterns/generalized_and_join.xml. diff --git a/VERSION.in b/VERSION.in new file mode 100644 index 000000000..97cfe6074 --- /dev/null +++ b/VERSION.in @@ -0,0 +1,4 @@ +""" +Warning: This file is automatically generated. +""" +__version__ = '@VERSION@' diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 000000000..e35d8850c --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1 @@ +_build diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 000000000..0bf4a3052 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,221 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help +help: + @echo "Please use \`make ' where is one of" + @echo " apidoc to build in the api documentation" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + +.PHONY: clean +clean: + rm -rf $(BUILDDIR)/* + +.PHONY: apidoc +apidoc: + sphinx-apidoc -d5 -Mefo . ../SpiffWorkflow + +.PHONY: html +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +.PHONY: dirhtml +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: singlehtml +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +.PHONY: pickle +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +.PHONY: json +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +.PHONY: htmlhelp +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SpiffWorkflow.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SpiffWorkflow.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/SpiffWorkflow" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SpiffWorkflow" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/doc/bpmn/Makefile b/doc/bpmn/Makefile new file mode 100644 index 000000000..391c73bf0 --- /dev/null +++ b/doc/bpmn/Makefile @@ -0,0 +1,25 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +.PHONY: apidoc +apidoc: + sphinx-apidoc -d5 -Mefo . ../venv/lib/python3.7/site-packages/SpiffWorkflow + + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/bpmn/advanced.rst b/doc/bpmn/advanced.rst new file mode 100644 index 000000000..c42f38910 --- /dev/null +++ b/doc/bpmn/advanced.rst @@ -0,0 +1,423 @@ +A More In-Depth Look at Some of SpiffWorkflow's Features +======================================================== + +Displaying Workflow State +------------------------- + +Filtering Tasks +^^^^^^^^^^^^^^^ + +In our earlier example, all we did was check the lane a task was in and display +it along with the task name and state. + +Lets take a look at a sample workflow with lanes: + +.. figure:: figures/lanes.png + :scale: 30% + :align: center + + Workflow with lanes + +To get all of the tasks that are ready for the 'Customer' workflow, we could +specify the lane when retrieving ready user tasks: + +.. code:: python + + ready_tasks = workflow.get_ready_user_tasks(lane='Customer') + +If there were no tasks ready for the 'Customer' lane, you would get an empty list, +and of course if you had no lane that was labeled 'Customer' you would *always* get an +empty list. + +We can also get a list of tasks by state. + +We need to import the :code:`Task` object (unless you want to memorize which numbers +correspond to which states). + +.. code:: python + + from SpiffWorkflow.task import Task + +To get a list of completed tasks + +.. code:: python + + tasks = workflow.get_tasks(Task.COMPLETED) + +The tasks themselves are not particularly intuitive to work with. So SpiffWorkflow +provides some facilities for obtaining a more user-friendly version of upcoming tasks. + +Nav(igation) List +^^^^^^^^^^^^^^^^^ + +In order to get the navigation list, we can call the workflow.get_nav_list() function. This +will return a list of dictionaries with information about each task and decision point in the +workflow. Each item in this list returns some information about the tasks that are in the workflow, +and how it relates to the other tasks. + +To give you an idea of what is in the list I'll include a segment from the documentation:: + + id - TaskSpec or Sequence flow id + task_id - The uuid of the actual task instance, if it exists. + name - The name of the task spec (or sequence) + description - Text description + backtracks - Boolean, if this backtracks back up the list or not + level - Depth in the tree - probably not needed + indent - A hint for indentation + child_count - The number of children that should be associated with + this item. + lane - This is the swimlane for the task if indicated. 
+ state - Text based state (may be half baked in the case that we have + more than one state for a task spec - but I don't think those + are being reported in the list, so it may not matter) + Any task with a blank or None as the description are excluded from the list (i.e. gateways) + + +Because the output from this list may be used in a variety of contexts, the implementation is left to the user. + +MultiInstance Notes +------------------- + +**loopCardinality** - This variable can be a text representation of a +number - for example '2' or it can be the name of a variable in +task.data that resolves to a text representation of a number. +It can also be a collection such as a list or a dictionary. In the +case that it is a list, the loop cardinality is equal to the length of +the list and in the case of a dictionary, it is equal to the list of +the keys of the dictionary. + +If loopCardinality is left blank and the Collection is defined, or if +loopCardinality and Collection are the same collection, then the +MultiInstance will loop over the collection and update each element of +that collection with the new information. In this case, it is assumed +that the incoming collection is a dictionary, currently behavior for +working with a list in this manner is not defined and will raise an error. + +**Collection** This is the name of the collection that is created from +the data generated when the task is run. Examples of this would be +form data that is generated from a UserTask or data that is generated +from a script that is run. Currently the collection is built up to be +a dictionary with a numeric key that corresponds to the place in the +loopCardinality. For example, if we set the loopCardinality to be a +list such as ['a','b','c] the resulting collection would be {1:'result +from a',2:'result from b',3:'result from c'} - and this would be true +even if it is a parallel MultiInstance where it was filled out in a +different order. + +**Element Variable** This is the variable name for the current +iteration of the MultiInstance. In the case of the loopCardinality +being just a number, this would be 1,2,3, . . . If the +loopCardinality variable is mapped to a collection it would be either +the list value from that position, or it would be the value from the +dictionary where the keys are in sorted order. It is the content of the +element variable that should be updated in the task.data. This content +will then be added to the collection each time the task is completed. + +Example: + In a sequential MultiInstance, loop cardinality is ['a','b','c'] and elementVariable is 'myvar' + then in the case of a sequential multiinstance the first call would + have 'myvar':'a' in the first run of the task and 'myvar':'b' in the + second. + +Example: + In a Parallel MultiInstance, Loop cardinality is a variable that contains + {'a':'A','b':'B','c':'C'} and elementVariable is 'myvar' - when the multiinstance is ready, there + will be 3 tasks. If we choose the second task, the task.data will + contain 'myvar':'B'. + +Custom Script Engines +--------------------- + +You may need to modify the default script engine, whether because you need to make additional +functionality available to it, or because you might want to restrict its capabilities for +security reasons. + +.. warning:: + + The default script engine does little to no sanitization and uses :code:`eval` + and :code:`exec`! If you have security concerns, you should definitely investigate + replacing the default with your own implementation. 
+ +The default script engine imports the following objects: + +- :code:`timedelta` +- :code:`datetime` +- :code:`dateparser` +- :code:`pytz` + +You could add other functions or classes from the standard python modules or any code you've +implemented yourself. + +In our example models so far, we've been using DMN tables to obtain product information. DMN +tables have a **lot** of uses so we wanted to feature them prominently, but in a simple way. + +If a customer was selecting a product, we would surely have information about how the product +could be customized in a database somewhere. We would not hard code product information in +our diagram (although it is much easier to modify the BPMN diagram than to change the code +itself!). Our shipping costs would not be static, but would depend on the size of the order and +where it was being shipped -- maybe we'd query an API provided by our shipper. + +SpiffWorkflow is obviously **not** going to know how to make a call to **your** database or +make API calls to **your** vendors. However, you can implement the calls yourself and make them +available as a method that can be used within a script task. + +We are not going to actually include a database or API and write code for connecting to and querying +it, but we can model our database with a simple dictionary lookup since we only have 7 products +and just return the same static info for shipping for the purposes of the tutorial. + +.. code:: python + + from collections import namedtuple + + from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine + + ProductInfo = namedtuple('ProductInfo', ['color', 'size', 'style', 'price']) + + INVENTORY = { + 'product_a': ProductInfo(False, False, False, 15.00), + 'product_b': ProductInfo(False, False, False, 15.00), + 'product_c': ProductInfo(True, False, False, 25.00), + 'product_d': ProductInfo(True, True, False, 20.00), + 'product_e': ProductInfo(True, True, True, 25.00), + 'product_f': ProductInfo(True, True, True, 30.00), + 'product_g': ProductInfo(False, False, True, 25.00), + } + + def lookup_product_info(product_name): + return INVENTORY[product_name] + + def lookup_shipping_cost(shipping_method): + return 25.00 if shipping_method == 'Overnight' else 5.00 + + additions = { + 'lookup_product_info': lookup_product_info, + 'lookup_shipping_cost': lookup_shipping_cost + } + + CustomScriptEngine = PythonScriptEngine(scriptingAdditions=additions) + +We pass the script engine we created to the workflow when we load it. + +.. code:: python + + return BpmnWorkflow(parser.get_spec(process), script_engine=CustomScriptEngine) + +We can use the custom functions in script tasks like any normal function: + +.. figure:: figures/custom_script_usage.png + :scale: 30% + :align: center + + Workflow with lanes + +And we can simplify our 'Call Activity' flows: + +.. figure:: figures/call_activity_script_flow.png + :scale: 30% + :align: center + + Workflow with lanes + +To run this workflow: + +.. code-block:: console + + ./run.py -p order_product -b bpmn/call_activity_script.bpmn bpmn/top_level_script.bpmn + +We have also done some work using `Restricted Python `_ +to provide more secure alternatives to standard python functions. + +Serialization +------------- + +.. warning:: + + Serialization Changed in Version 1.1.7. Support for pre-1.1.7 serialization will be dropped in 1.2. + The old serialization method still works but it is deprecated. + To migrate your system to the new version, see "Migrating between + serialization versions" below. 
+
+So far, we've only considered the context where we will run the workflow from beginning to end in one
+setting. This may not always be the case: we may be executing the workflow in the context of a web server, where
+a user might request a page that opens a specific workflow we are in the middle of, complete one step of
+that workflow, and then come back in a few minutes, or maybe a few hours, depending on the application.
+
+To accomplish this, we can import the serializer:
+
+.. code:: python
+
+    from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
+
+This class contains a serializer for a workflow containing only standard BPMN Tasks. Since we are using custom task
+classes (the Camunda :code:`UserTask` and the DMN :code:`BusinessRuleTask`), we'll need to import serializers for
+those task specs as well.
+
+.. code:: python
+
+    from SpiffWorkflow.camunda.serializer import UserTaskConverter
+    from SpiffWorkflow.dmn.serializer import BusinessRuleTaskConverter
+
+Strictly speaking, these are not serializers per se: they actually convert the tasks into dictionaries of
+JSON-serializable objects. Conversion to JSON is done only as the last step and could easily be replaced with some
+other output format.
+
+We'll need to configure a Workflow Spec Converter with our custom classes:
+
+.. code:: python
+
+    wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter(
+        [ UserTaskConverter, BusinessRuleTaskConverter ])
+
+We create a serializer that can handle our extended task specs:
+
+.. code:: python
+
+    serializer = BpmnWorkflowSerializer(wf_spec_converter)
+
+We'll give the user the option of dumping the workflow at any time.
+
+.. code:: python
+
+    filename = input('Enter filename: ')
+    state = serializer.serialize_json(workflow)
+    with open(filename, 'w') as dump:
+        dump.write(state)
+
+We'll ask them for a filename and use the serializer to dump the state to that file.
+
+To restore the workflow:
+
+.. code:: python
+
+    if args.restore is not None:
+        with open(args.restore) as state:
+            wf = serializer.deserialize_json(state.read())
+
+The workflow serializer is designed to be flexible and modular and as such is a little complicated. It has
+two components:
+
+- a workflow spec converter (which handles workflow and task specs)
+- a data converter (which handles workflow and task data).
+
+The default workflow spec converter is likely to meet your needs, either on its own, or with the inclusion of
+:code:`UserTask` and :code:`BusinessRuleTask` in the :code:`camunda` and :code:`dmn` subpackages of this
+library, and all you'll need to do is add them to the list of task converters, as we did above.
+
+However, the default data converter is very simple, adding only JSON-serializable conversions of :code:`datetime`
+and :code:`timedelta` objects (we make these available in our default script engine) and UUIDs. If your
+workflow or task data contains objects that are not JSON-serializable, you'll need to extend ours, or extend
+its base class to create one of your own.
+
+To extend ours:
+
+1. Subclass the base data converter
+2. Register classes along with functions for converting them to and from dictionaries
+
+.. code:: python
+
+    from SpiffWorkflow.bpmn.serializer.dictionary import DictionaryConverter
+
+    class MyDataConverter(DictionaryConverter):
+
+        def __init__(self):
+            super().__init__()
+            self.register(MyClass, self.my_class_to_dict, self.my_class_from_dict)
+
+        def my_class_to_dict(self, obj):
+            return obj.__dict__
+
+        def my_class_from_dict(self, dct):
+            return MyClass(**dct)
+
+More information can be found in the class documentation for the
+`default converter `_
+and its `base class `_.
+
+You can also replace ours entirely with one of your own. If you do so, you'll need to implement `convert` and
+`restore` methods. The former should return a JSON-serializable representation of your workflow data; the
+latter should recreate your data from the serialization.
+
+If you have written any custom task specs, you'll need to implement task spec converters for those as well.
+
+Task Spec converters are also based on the :code:`DictionaryConverter`. You should be able to use the
+`BpmnTaskSpecConverter `_
+as a basis for your custom specs. It provides some methods for extracting attributes from Spiff base classes as well as
+standard BPMN attributes from tasks that inherit from :code:`BpmnSpecMixin`.
+
+The `Camunda User Task Converter `_
+should provide a simple example of how you might create such a converter.
+
+Migrating Between Serialization Versions
+----------------------------------------
+
+Old (Non-Versioned) Serializer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Prior to Spiff 1.1.7, the serialized output did not contain a version number.
+
+.. code:: python
+
+    old_serializer = BpmnSerializer() # the deprecated serializer.
+    # new serializer, which can be customized as described above.
+    serializer = BpmnWorkflowSerializer(version="MY_APP_V_1.0")
+
+The new serializer has a :code:`get_version` method that will read the version
+back out of the serialized json. If the version isn't found, it will return
+:code:`None`, and you can then assume it is using the old style serializer.
+
+.. code:: python
+
+    version = serializer.get_version(some_json)
+    if version == "MY_APP_V_1.0":
+        workflow = serializer.deserialize_json(some_json)
+    else:
+        workflow = old_serializer.deserialize_workflow(some_json, workflow_spec=spec)
+
+If you are not using any custom tasks and do not require custom serialization, then you'll be able to
+serialize the workflow in the new format:
+
+.. code:: python
+
+    new_json = serializer.serialize_json(workflow)
+
+However, if you use custom tasks or data serialization, you'll also need to specify workflow spec or data
+serializers, as in the examples in the previous section, before you'll be able to serialize with the new serializer.
+The code would then look more like this:
+
+.. code:: python
+
+    from SpiffWorkflow.camunda.serializer import UserTaskConverter
+
+    old_serializer = BpmnSerializer() # the deprecated serializer.
+ + # new serializer, with customizations + wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([UserTaskConverter]) + data_converter = MyDataConverter + serializer = BpmnWorkflowSerializer(wf_spec_converter, data_converter, version="MY_APP_V_1.0") + + version = serializer.get_version(some_json) + if version == "MY_APP_V_1.0": + workflow = serializer.deserialize_json(some_json) + else: + workflow = old_serializer.deserialize_workflow(some_json, workflow_spec=spec) + + new_json = serializer.serialize_json(workflow) + +Because the serializer is highly customizable, we've made it possible for you to manage your own versions of the +serialization. You can do this by passing a version number into the serializer, which will be embedded in the +json of all workflows. This allow you to modify the serialization and customize it over time, and still manage +the different forms as you make adjustments without leaving people behind. + +Versioned Serializer +^^^^^^^^^^^^^^^^^^^^ + +As we make changes to Spiff, we may change the serialization format. For example, in 1.1.8, we changed +how subprocesses were handled interally in BPMN workflows and updated how they are serialized. If you have +not overridden our version number with one of your own, the serializer will transform the 1.0 format to the +new 1.1 format. + +If you've overridden the serializer version, you may need to incorporate our serialization changes with +your own. You can find our conversions in +`version_migrations.py `_ diff --git a/doc/bpmn/conf.py b/doc/bpmn/conf.py new file mode 100755 index 000000000..996b2d8f3 --- /dev/null +++ b/doc/bpmn/conf.py @@ -0,0 +1,60 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +numfig = True + +# -- Project information ----------------------------------------------------- + +project = 'SpiffWorkflow-BPMN Documentation' +copyright = '2020, Sartography' +author = 'Sartography' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. + +extensions = ['sphinx.ext.autodoc', # 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', + 'sphinx.ext.autosummary', + #'sphinx.ext.intersphinx', + ] + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = 'default' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/bpmn/events.rst b/doc/bpmn/events.rst new file mode 100644 index 000000000..63fc697a2 --- /dev/null +++ b/doc/bpmn/events.rst @@ -0,0 +1,260 @@ +Events +====== + +BPMN Model +---------- + +We'll be using the following files from `spiff-example-cli `_. + +- `transaction `_ workflow +- `signal_event `_ workflow +- `events `_ workflow +- `call activity `_ workflow +- `product_prices `_ DMN table +- `shipping_costs `_ DMN table + +A general overview of events in BPMN can be found in the :doc:`/intro` +section of the documentation. + +SpiffWorkflow supports the following Event Definitions: + +- `Cancel Events`_ +- `Signal Events`_ +- `Terminate Events`_ +- `Error Events`_ +- `Escalation Events`_ +- `Timer Events`_ +- `Message Events`_ + +We'll include examples of all of these types in this section. + +Transactions +^^^^^^^^^^^^ + +We also need to introduce the concept of a Transaction, bceause certain events +can only be used in that context. A Transaction is essentially a subprocess, but +it must fully complete before it affects its outer workflow. + +We'll make our customer's ordering process through the point they review their order +into a Transaction. If they do not complete their order, then product selections and +customizations will be discarded; if they place the order, the workflow will proceed +as before. + +We'll also introduce our first event type, the Cancel Event. Cancel Events can +only be used in Transactions. + +Cancel Events +^^^^^^^^^^^^^ + +.. figure:: figures/transaction.png + :scale: 30% + :align: center + + Workflow with a transaction and Cancel Event + +We changed our 'Review Order' Task to be a User Task and have added a form, so +that we can give the customer the option of cancelling the order. If the customer +answers 'Y', then the workflow ends normally and we proceed to collecting +payment information. + +However, if the user elects to cancel their order, we use a 'Cancel End Event' +instead, which generates a Cancel Event. We can then attach a 'Cancel Boundary +Event' to the Transaction, and execute that path if the event occurs. Instead of +asking the customer for their payment info, we'll direct them to a form and ask +them why they cancelled their order. + +If the order is placed, the workflow will contain the order data; if it is +cancelled, it will contain the reason for cancellation instead. + +To run this workflow + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/transaction.bpmn bpmn/call_activity.bpmn + + +Signal Events +^^^^^^^^^^^^^ + +.. figure:: figures/signal_event.png + :scale: 30% + :align: center + + Workflow with Signal Events + +Suppose we also want to give our customer the ability to cancel their order at +any time up until they are charged. We need to throw an event after the charge +is placed and catch this event before the user completes the 'Cancel Order' task. +Once the charge is placed, the task that provides the option to cancel will +itself be cancelled when the charge event is received. 
+
+We'll also need to detect the case that the customer cancels their order and
+cancel the charge task if it occurs; we'll use a separate signal for that.
+
+Multiple tasks can catch the same signal event. Suppose we add a Manager role
+to our workflow, and allow the Employee to refer unsuccessful charges to the
+Manager for resolution. The Manager's task will also need to catch the 'Order
+Cancelled' signal event.
+
+Signals are referred to by name.
+
+.. figure:: figures/throw_signal_event.png
+   :scale: 30%
+   :align: center
+
+   Signal Event configuration
+
+.. Terminate Events:
+
+Terminate Events
+^^^^^^^^^^^^^^^^
+
+We also added a Terminate Event to the Manager Workflow. A regular End Event
+simply marks the end of a path. A Terminate Event indicates that the
+entire workflow is complete and any remaining tasks should be cancelled. Our
+customer cannot cancel an order that has already been cancelled, and we won't ask
+them for feedback about it (we know it wasn't completed), so we do not want to
+execute either of those tasks.
+
+We'll now modify our workflow to add an example of each of the other types of
+events that SpiffWorkflow supports.
+
+To run this workflow:
+
+.. code-block:: console
+
+   ./run.py -p order_product \
+       -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \
+       -b bpmn/signal_event.bpmn bpmn/call_activity.bpmn
+
+Error Events
+^^^^^^^^^^^^
+
+Let's turn to our order fulfillment subprocess. Either of its steps (retrieving
+the product and shipping the order) could potentially fail, and we may want to
+handle each case differently.
+
+.. figure:: figures/events.png
+   :scale: 30%
+   :align: center
+
+   Workflow with multiple event types
+
+One potential failure is that our product is unavailable. This might actually be
+a temporary problem, but we'll assume that it is a showstopper for the sake of
+this tutorial.
+
+We ask the Employee to verify that they were able to retrieve the product; if they
+were unable to do so, then we generate an Error End Event, which we will handle
+with an Interrupting Error Boundary Event (Error Events are *always* Interrupting).
+
+If the product is unavailable, our Manager will notify the customer, issue a refund,
+and cancel the order.
+
+Escalation Events
+^^^^^^^^^^^^^^^^^
+
+Escalation Events are a lot like Error Events; as far as I can tell, which one
+to use comes down to preference, with two caveats: if you want to use an Intermediate
+Event, you'll have to use Escalation, because BPMN does not allow Intermediate Error
+Events, and Error Events cannot be Non-Interrupting.
+
+In our example, we'll assume that if we failed to ship the product, we can try again later,
+so we will not end the Subprocess (Escalation Events can be either Interrupting or
+Non-Interrupting).
+
+However, we still want to notify our customer of a delay, so we use a Non-Interrupting
+Escalation Boundary Event.
+
+Both Error and Escalation Events can optionally be associated with a code. Here is
+the Throw Event for our `product_not_shipped` Escalation.
+
+.. figure:: figures/throw_escalation_event.png
+   :scale: 30%
+   :align: center
+
+   Throw Escalation Event configuration
+
+Error Event configuration is similar.
+
+If no code is provided in a Catch Event, any event of the corresponding type will catch
+the event.
+
+Timer Events
+^^^^^^^^^^^^
+
+In the previous section, we mentioned that we would try again later if we were unable
+to ship the order. We can use a Duration Timer Event to force our workflow to wait a certain
+amount of time before continuing.
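As a preview of how such a Duration Timer Event is configured, the duration is written as a Python :code:`timedelta` expression (the exact value below is illustrative; the configuration itself is shown just below):

.. code:: python

    from datetime import timedelta

    # The kind of value entered in the timer's Duration field; one day is illustrative.
    timedelta(days=1)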
We can use this as a regular Intermediate Event (in
+'Try Again Later') or a Boundary Event. Timer Boundary Events can be Interrupting, but in
+this case, we simply want to notify the customer of the delay while continuing to process
+their order, so we use a Non-Interrupting Event.
+
+.. figure:: figures/timer_event.png
+   :scale: 30%
+   :align: center
+
+   Duration Timer Event configuration
+
+We express the duration as a Python :code:`timedelta`; the configuration shown here is for
+the Boundary Event.
+
+It is also possible to use a static datetime to trigger an event. It will need to be parseable
+as a date by Python.
+
+Timer events can only be caught, that is, waited on. The timer begins implicitly when we
+reach the event.
+
+Message Events
+^^^^^^^^^^^^^^
+
+.. sidebar:: QA Lane
+
+   Ideally, this lane would be a process independent from the ordering process (we don't want
+   it to be cancelled just because an order eventually completes). However, limitations of how
+   SpiffWorkflow handles processes preclude multiple top-level processes.
+
+In BPMN, Messages are used to communicate across processes and cannot be used within a
+workflow, but SpiffWorkflow allows message communication between lanes as well as between
+parent and child workflows. We'll use the first scenario in our example.
+
+We've added a QA lane to our ordering process, whose job is investigating order delays
+and recommending improvements. This portion of our process will only be started when an
+appropriate message is received.
+
+Messages are similar to signals, in that they are referenced by name, but they have the
+additional property that they may contain a payload.
+
+.. note::
+
+   We currently depend on some Camunda-specific features in our implementation, but we
+   intend to replace this with our own.
+
+.. figure:: figures/throw_message_event.png
+   :scale: 30%
+   :align: center
+
+   Throw Message Event configuration
+
+The Throw Message Event Implementation should be 'Expression' and the Expression should
+be a Python statement that can be evaluated. In this example, we'll just send the contents
+of the :code:`reason_delayed` variable, which contains the response from the 'Investigate Delay'
+Task.
+
+We can provide a name for the result variable, but I have not done that here, as it does not
+make sense to me for the generator of the event to tell the handler what to call the value.
+If you *do* specify a result variable, the message payload (the expression evaluated in the
+context of the Throwing task) will be added to the handling task's data in a variable of that
+name; if you leave it blank, SpiffWorkflow will create a variable of the form _Response.
+
+Running The Model
+^^^^^^^^^^^^^^^^^
+
+..
code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/events.bpmn bpmn/call_activity.bpmn + diff --git a/doc/bpmn/figures/business_rule_task.png b/doc/bpmn/figures/business_rule_task.png new file mode 100644 index 000000000..f76b8f62b Binary files /dev/null and b/doc/bpmn/figures/business_rule_task.png differ diff --git a/doc/bpmn/figures/call_activity.png b/doc/bpmn/figures/call_activity.png new file mode 100644 index 000000000..2bb55b957 Binary files /dev/null and b/doc/bpmn/figures/call_activity.png differ diff --git a/doc/bpmn/figures/call_activity_multi.png b/doc/bpmn/figures/call_activity_multi.png new file mode 100644 index 000000000..b2ae27da2 Binary files /dev/null and b/doc/bpmn/figures/call_activity_multi.png differ diff --git a/doc/bpmn/figures/call_activity_script_flow.png b/doc/bpmn/figures/call_activity_script_flow.png new file mode 100644 index 000000000..68f555890 Binary files /dev/null and b/doc/bpmn/figures/call_activity_script_flow.png differ diff --git a/doc/bpmn/figures/catch_esclalation_event.png b/doc/bpmn/figures/catch_esclalation_event.png new file mode 100644 index 000000000..646dc5d32 Binary files /dev/null and b/doc/bpmn/figures/catch_esclalation_event.png differ diff --git a/doc/bpmn/figures/custom_script_usage.png b/doc/bpmn/figures/custom_script_usage.png new file mode 100644 index 000000000..4d7a6fa77 Binary files /dev/null and b/doc/bpmn/figures/custom_script_usage.png differ diff --git a/doc/bpmn/figures/dmn_table.png b/doc/bpmn/figures/dmn_table.png new file mode 100644 index 000000000..75e21c5e8 Binary files /dev/null and b/doc/bpmn/figures/dmn_table.png differ diff --git a/doc/bpmn/figures/dmn_table_updated.png b/doc/bpmn/figures/dmn_table_updated.png new file mode 100644 index 000000000..ae1049f4e Binary files /dev/null and b/doc/bpmn/figures/dmn_table_updated.png differ diff --git a/doc/bpmn/figures/documentation.png b/doc/bpmn/figures/documentation.png new file mode 100644 index 000000000..c5d52f5c4 Binary files /dev/null and b/doc/bpmn/figures/documentation.png differ diff --git a/doc/bpmn/figures/documentation_multi.png b/doc/bpmn/figures/documentation_multi.png new file mode 100644 index 000000000..c9ee56915 Binary files /dev/null and b/doc/bpmn/figures/documentation_multi.png differ diff --git a/doc/bpmn/figures/events.png b/doc/bpmn/figures/events.png new file mode 100644 index 000000000..61041eb31 Binary files /dev/null and b/doc/bpmn/figures/events.png differ diff --git a/doc/bpmn/figures/exclusive_gateway.png b/doc/bpmn/figures/exclusive_gateway.png new file mode 100644 index 000000000..21bea4fe7 Binary files /dev/null and b/doc/bpmn/figures/exclusive_gateway.png differ diff --git a/doc/bpmn/figures/lanes.png b/doc/bpmn/figures/lanes.png new file mode 100644 index 000000000..c44dd80bc Binary files /dev/null and b/doc/bpmn/figures/lanes.png differ diff --git a/doc/bpmn/figures/message_start_event.png b/doc/bpmn/figures/message_start_event.png new file mode 100644 index 000000000..956d7dc4a Binary files /dev/null and b/doc/bpmn/figures/message_start_event.png differ diff --git a/doc/bpmn/figures/multiinstance_flow_configuration.png b/doc/bpmn/figures/multiinstance_flow_configuration.png new file mode 100644 index 000000000..d8b96c2ae Binary files /dev/null and b/doc/bpmn/figures/multiinstance_flow_configuration.png differ diff --git a/doc/bpmn/figures/multiinstance_form_configuration.png b/doc/bpmn/figures/multiinstance_form_configuration.png new file mode 100644 index 
000000000..ea27d2462 Binary files /dev/null and b/doc/bpmn/figures/multiinstance_form_configuration.png differ diff --git a/doc/bpmn/figures/multiinstance_task_configuration.png b/doc/bpmn/figures/multiinstance_task_configuration.png new file mode 100644 index 000000000..926a4b25e Binary files /dev/null and b/doc/bpmn/figures/multiinstance_task_configuration.png differ diff --git a/doc/bpmn/figures/parallel_gateway.png b/doc/bpmn/figures/parallel_gateway.png new file mode 100644 index 000000000..dab512bf4 Binary files /dev/null and b/doc/bpmn/figures/parallel_gateway.png differ diff --git a/doc/bpmn/figures/script_task.png b/doc/bpmn/figures/script_task.png new file mode 100644 index 000000000..769863730 Binary files /dev/null and b/doc/bpmn/figures/script_task.png differ diff --git a/doc/bpmn/figures/signal_event.png b/doc/bpmn/figures/signal_event.png new file mode 100644 index 000000000..934e63da9 Binary files /dev/null and b/doc/bpmn/figures/signal_event.png differ diff --git a/doc/bpmn/figures/throw_escalation_event.png b/doc/bpmn/figures/throw_escalation_event.png new file mode 100644 index 000000000..790fbbfb0 Binary files /dev/null and b/doc/bpmn/figures/throw_escalation_event.png differ diff --git a/doc/bpmn/figures/throw_message_event.png b/doc/bpmn/figures/throw_message_event.png new file mode 100644 index 000000000..cfc3722ba Binary files /dev/null and b/doc/bpmn/figures/throw_message_event.png differ diff --git a/doc/bpmn/figures/throw_scalation_event.png b/doc/bpmn/figures/throw_scalation_event.png new file mode 100644 index 000000000..b3aee96cb Binary files /dev/null and b/doc/bpmn/figures/throw_scalation_event.png differ diff --git a/doc/bpmn/figures/throw_signal_event.png b/doc/bpmn/figures/throw_signal_event.png new file mode 100644 index 000000000..6810f76aa Binary files /dev/null and b/doc/bpmn/figures/throw_signal_event.png differ diff --git a/doc/bpmn/figures/timer_event.png b/doc/bpmn/figures/timer_event.png new file mode 100644 index 000000000..bd61b68d9 Binary files /dev/null and b/doc/bpmn/figures/timer_event.png differ diff --git a/doc/bpmn/figures/top_level.png b/doc/bpmn/figures/top_level.png new file mode 100644 index 000000000..da70d32a0 Binary files /dev/null and b/doc/bpmn/figures/top_level.png differ diff --git a/doc/bpmn/figures/transaction.png b/doc/bpmn/figures/transaction.png new file mode 100644 index 000000000..100c6f76e Binary files /dev/null and b/doc/bpmn/figures/transaction.png differ diff --git a/doc/bpmn/figures/user_task.png b/doc/bpmn/figures/user_task.png new file mode 100644 index 000000000..a9c0fe4e1 Binary files /dev/null and b/doc/bpmn/figures/user_task.png differ diff --git a/doc/bpmn/gateways.rst b/doc/bpmn/gateways.rst new file mode 100644 index 000000000..7b9265332 --- /dev/null +++ b/doc/bpmn/gateways.rst @@ -0,0 +1,73 @@ +Gateways +======== + +BPMN Model +---------- + +In this section, we'll expand our model by creating alternate paths through the +workflow depending on the current workflow state, in this case, answers provided +by the user through forms. + +We've also added a second DMN table to find the cost of the selected shipping +method, and we updated our order total calculations to incorporate that cost. + +We'll be using the following files from `spiff-example-cli `_. + +- `gateway_types `_ workflow +- `product_prices `_ DMN table +- `shipping_costs `_ DMN table + +Exclusive Gateway +^^^^^^^^^^^^^^^^^ + +Exclusive gateways are used when exactly one alternative can be selected. 
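The choice of outgoing flow is driven by a condition attached to each flow; in SpiffWorkflow this condition is a simple Python expression evaluated against the task data. A hypothetical condition (the variable name is illustrative, not taken from the example model):

.. code:: python

    # Condition expression attached to the 'Customizable' outgoing flow;
    # 'product_name' is an illustrative variable collected by the earlier form.
    product_name == 'product_c'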
+ +Suppose our products are T-shirts and we offer product C in several colors. After +the user selects a product, we check to see it if is customizable. Our default +branch will be 'Not Customizable', but we'll direct the user to a second form +if they select 'C'; our condition for choosing this branch is a simple python +expression. + +.. figure:: figures/exclusive_gateway.png + :scale: 30% + :align: center + + Flow configuration + +Parallel Gateway +^^^^^^^^^^^^^^^^ + +.. sidebar:: IDs vs Names + + We've assigned descriptive names to all our tasks so far. Text added to + the Name field will appear in the diagram, so sometimes it's better to + leave it blank to avoid visual clutter. I've put a description of the + gateway into the ID field instead. + +Parallel gateways are used when the subsequent tasks do not need to be completed +in any particular order. The user can complete them in any sequence and the +workflow will wait for all tasks to be finished before advancing. + +We do not care whether the user chooses a shipping method or enters their +address first, but they'll need to complete both tasks before continuing. + +We don't need to do any particular configuration for this gateway type. + +.. figure:: figures/parallel_gateway.png + :scale: 30% + :align: center + + Parallel Gateway example + +Running The Model +^^^^^^^^^^^^^^^^^ + +If you have set up our example repository, this model can be run with the +following command: + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/gateway_types.bpmn + diff --git a/doc/bpmn/index.rst b/doc/bpmn/index.rst new file mode 100644 index 000000000..591986b96 --- /dev/null +++ b/doc/bpmn/index.rst @@ -0,0 +1,77 @@ +BPMN Workflows +============== + +The basic idea of SpiffWorkflow is that you can use it to write an interpreter +in Python that creates business applications from BPMN models. In this section, +we'll develop a model of an example process and as well as a +simple workflow runner. + +We expect that readers will fall into two general categories: + +- People with a background in BPMN who might not be very familiar Python +- Python developers who might not know much about BPMN + +This section of the documentation provides an example that (hopefully) serves +the needs of both groups. We will introduce the BPMN elements that SpiffWorkflow +supports and show how to build a simple workflow runner around them. + +SpiffWorkflow does heavy-lifting such as keeping track of task dependencies and +states and providing the ability to serialize or deserialize a workflow that +has not been completed. The developer will write code for displaying workflow +state and presenting tasks to users of their application. + +All the Python code and BPMN models used here are available in an example +project called `spiff-example-cli `_. + +Quickstart +---------- + +Check out the code in `spiff-example-cli `_ +and follow the instructions to set up an environment to run it in. + +Run the sample workflow we built up using our example application with the following +command: + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/{product_prices,shipping_costs}.dmn \ + -b bpmn/{multiinstance,call_activity_multi}.bpmn + + +For a full description of program options: + +.. code-block:: console + + ./run.py --help + +The code in the workflow runner and the models in the bpmn directory of the +repository will be discussed in the remainder of this tutorial. 
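If you'd like a preview of the Python side before reading about the individual elements, the sketch below compresses what the runner does into a few lines, using the APIs described in the rest of this tutorial (user task handling is simplified, and the real runner also mixes in the Camunda parser for form support, as shown in the Putting It All Together section):

.. code:: python

    from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
    from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser

    # Parse the BPMN and DMN files and build a workflow for the top-level process.
    parser = BpmnDmnParser()
    parser.add_bpmn_files(['bpmn/multiinstance.bpmn', 'bpmn/call_activity_multi.bpmn'])
    parser.add_dmn_files(['bpmn/product_prices.dmn', 'bpmn/shipping_costs.dmn'])
    workflow = BpmnWorkflow(parser.get_spec('order_product'), parser.get_process_specs())

    # Alternate between engine steps and handing ready tasks to the user.
    workflow.do_engine_steps()
    while not workflow.is_completed():
        for task in workflow.get_ready_user_tasks():
            # ... present the task and collect any form data here ...
            task.complete()
        workflow.refresh_waiting_tasks()
        workflow.do_engine_steps()

    print(workflow.data)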
+ +Supported BPMN Elements +----------------------- + +.. toctree:: + :maxdepth: 3 + + tasks + gateways + organization + events + multiinstance + +Putting it All Together +----------------------- + +.. toctree:: + :maxdepth: 2 + + synthesis + +Features in More Depth +---------------------- + +.. toctree:: + :maxdepth: 2 + + advanced diff --git a/doc/bpmn/make.bat b/doc/bpmn/make.bat new file mode 100644 index 000000000..922152e96 --- /dev/null +++ b/doc/bpmn/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/bpmn/multiinstance.rst b/doc/bpmn/multiinstance.rst new file mode 100644 index 000000000..7cdb6f1a5 --- /dev/null +++ b/doc/bpmn/multiinstance.rst @@ -0,0 +1,112 @@ +MultiInstance Tasks +=================== + +BPMN Model +---------- + +We'll be using the following files from `spiff-example-cli `_. + +- `multiinstance `_ workflow +- `call activity multi `_ workflow +- `product_prices `_ DMN table +- `shipping_costs `_ DMN table + +Suppose we want our customer to be able to select more than one product. + +If we knew how many products they would select at the beginning of the workflow, we could +configure 'Select and Customize Product' as a Sequential MultiInstance Task. We would +specify the name of the collection and each iteration of the task would add a new item +to it. + +Since we can't know in advance how many products the order, we'll need to modify that +workflow to ask them whether they want to continue shopping and maintain their product +selections in a collection. + +.. figure:: figures/call_activity_multi.png + :scale: 30% + :align: center + + Selecting more than one product + +We'll also need to update our element docmentation to display all products. + +.. figure:: figures/documentation_multi.png + :scale: 30% + :align: center + + Updated Documentation for 'Review Order' + +.. note:: + + Note that we are using a dot instead of the typical python dictionary access to obtain + the values. Spiff automatically generates such a representation, which simplifies creating the + documentation strings; however regular Python syntax will work as well. + +Parallel MultiInstance +^^^^^^^^^^^^^^^^^^^^^^ + +We'll also update our 'Retrieve Product' task and 'Product Not Available' flows to +accommodate multiple products. We can use a Parallel MultiInstance for this, since +it does not matter what order our Employee retrieves the products in. + +.. figure:: figures/multiinstance_task_configuration.png + :scale: 30% + :align: center + + MultiInstance task configuration + +Spiff will generate a task for each of the items in the collection. Because of the way +SpiffWorkflow manages the data for these tasks, the collection MUST be a dictionary. 
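For example, once the customer has finished shopping, the collection in the task data might look something like this (the key and field names are illustrative):

.. code:: python

    # An illustrative 'products' collection: each key identifies an item, and each
    # value holds the data the MultiInstance task will work on for that item.
    products = {
        'product_1': {'product_name': 'product_a', 'product_quantity': 2},
        'product_2': {'product_name': 'product_c', 'product_color': 'red', 'product_quantity': 1},
    }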
+ +Each value in the dictionary will be copied into a variable with the name specified in +the 'Element Variable' field, so you'll need to specify this as well. + +.. figure:: figures/multiinstance_form_configuration.png + :scale: 30% + :align: center + + MultiInstance form configuration + +We'll also need to update the form field id so that the results will be added to the +item of the collection rather than the top level of the task data. This is where the +'Element Variable' field comes in: we'll need to change `product_available` to +`product.product_available`, because we set up `product` as our reference to the +current item. + +.. figure:: figures/multiinstance_flow_configuration.png + :scale: 30% + :align: center + + Product available flow configuration + +Finally, we'll need to update our 'No' flow to check all items in the collection for +availability. + +.. note:: + + In our form configuration, we used `product.product_available` but when we reference + it in the flow, we use the standard python dictionary syntax. We can't use that + notation in form fields, so in this case we need to use SpiffWorkflow's dot notation + conversion. + +Sequential MultiInstance +^^^^^^^^^^^^^^^^^^^^^^^^ + +SpiffWorkflow also supports Sequential MultiInstance Tasks for previously defined +collections, or if the loopCardinality is known in advance, although we have not added an +example of this to our workflow. + +For more information about MultiInstance Tasks and SpiffWorkflow, see :doc:`/bpmn/advanced`. + +Running The Model +^^^^^^^^^^^^^^^^^ + +If you have set up our example repository, this model can be run with the +following command: + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/multiinstance.bpmn bpmn/call_activity_multi.bpmn + diff --git a/doc/bpmn/organization.rst b/doc/bpmn/organization.rst new file mode 100644 index 000000000..261b30cfc --- /dev/null +++ b/doc/bpmn/organization.rst @@ -0,0 +1,128 @@ +Organizing More Complex Workflows +================================= + +BPMN Model +---------- + +We'll be using the following files from `spiff-example-cli `_. + +- `lanes `_ workflow +- `top_level `_ workflow +- `call_activity `_ workflow +- `product_prices `_ DMN table +- `shipping_costs `_ DMN table + +Lanes +^^^^^ + +Lanes are a method in BPMN to distinguish roles for the workflow and who is +responsible for which actions. In some cases this will be different business +units, and in some cases this will be different individuals - it really depends +on the nature of the workflow. Within a BPMN editor, this is done by choosing the +'Create pool/participant' option from the toolbar on the left hand side. + +We'll modify our workflow to get the customer's payment information and send it +to an employee who will charge the customer and fulfill the order. + +.. figure:: figures/lanes.png + :scale: 30% + :align: center + + Workflow with lanes + +To run this workflow + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/lanes.bpmn + +For a simple code example of displaying a tasks lane, see `Handling Lanes`_ + +Subprocesses +^^^^^^^^^^^^ + +In general, subprocesses are a way of grouping work into smaller units. This, in +theory, will help us to re-use sections of business logic, but it will also allow +us to treat groups of work as a unit. + +Subprocesses come in two different flavors. In this workflow we see an Expanded +Subprocess. 
Unfortunately, we can't collapse an expanded subprocess within BPMN.js, +so expanded subprocesses are mainly useful for conceptualizing a group of tasks as +a unit. + +It also possible to refer to external subprocesses via a Call Activity Task. This +allows us to 'call' a separate workflow in a different file by referencing the ID of +the called workflow, which can simplify business logic and make it re-usable. + +We'll expand 'Fulfill Order' into sub tasks -- retrieving the product and shipping +the order -- and create an Expanded Subprocess. + +We'll also expand our selection of products, adding several new products and the ability +to customize certain products by size and style in addition to color. + +.. figure:: figures/dmn_table_updated.png + :scale: 30% + :align: center + + Updated Product List + +.. note:: + + I've added what customizations are available for each product in the 'Annotations' + column of the DMN table. This is not actually used by Spiff; it simply provides + the option of documenting the decisions contained in the table. + +Since adding gateways for navigating the new options will add a certain amount of +clutter to our diagram, we'll create a separate workflow around selecting and +customizing products and refer to that in our main workflow. + +.. figure:: figures/call_activity.png + :scale: 30% + :align: center + + Subworkflow for product selection + +When configuring the subworkflow, we need to make sure the 'CallActivity Type' of the +parent workflow is 'BPMN' and the 'Called Element' matches the ID we assigned in the +subworkflow. + +.. figure:: figures/top_level.png + :scale: 30% + :align: center + + Parent workflow + +Running the Model +^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + ./run.py -p order_product \ + -d bpmn/product_prices.dmn bpmn/shipping_costs.dmn \ + -b bpmn/top_level.bpmn bpmn/call_activity.bpmn + +Example Application Code +------------------------ + +Handling Lanes +^^^^^^^^^^^^^^ + +We are not required to handle lanes in our application, but most of the time we'll +probably want a way of filtering on lanes and selectively displaying tasks. In +our sample application, we'll simply display which lane a task belongs to. + +.. code:: python + + if hasattr(task.task_spec, 'lane') and task.task_spec.lane is not None: + lane = f'[{task.task_spec.lane}]' + else: + lane = '' + +The tasks lane can be obtained from :code:`task.task_spec.lane`. Not all tasks +will have a :code:`lane` attribute, so we need to check to make sure it exists +before attempting to access it (this is true for many task attributes). + +See the Filtering Tasks Section of :doc:`advanced` more information +about working with lanes in Spiff. diff --git a/doc/bpmn/synthesis.rst b/doc/bpmn/synthesis.rst new file mode 100644 index 000000000..140833f50 --- /dev/null +++ b/doc/bpmn/synthesis.rst @@ -0,0 +1,223 @@ +Putting it All Together +======================= + +In this section we'll be discussing the overall structure of the workflow +runner we developed in `spiff-example-cli `_. + +Loading a Workflow +------------------- + +We'll need the following imports: + +.. code:: python + + from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser + from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser + + from custom_script_engine import CustomScriptEngine + +We need to create a parser. 
We could have imported :code:`BpmnParser`, which +these parsers inherit from, but we need some additional features that the base +parser does not provide. + +.. code:: python + + class Parser(BpmnDmnParser): + OVERRIDE_PARSER_CLASSES = BpmnDmnParser.OVERRIDE_PARSER_CLASSES + OVERRIDE_PARSER_CLASSES.update(CamundaParser.OVERRIDE_PARSER_CLASSES) + +We'll use :code:`BpmnDmnParser` as our base class, because we would like the ability +to use DMN tables in our application. The :code:`BpmnDmnParser` provides a task +parser for Business Rule Tasks, which the underlying :code:`BpmnParser` it inherits from +does not contain. + +We also imported the :code:`CamundaParser` so that we can parse some Camunda +specific features we'll use (forms in User Tasks). The :code:`CamundaParser` User +Task parser will override the default parser. + +In general, any task parser can be replaced with a custom parser of your +own design if you have a BPMN modeller that produces XML not handled by the +BPMN parsers in SpiffWorkflow. + +.. code:: python + + def parse(process, bpmn_files, dmn_files): + parser = Parser() + parser.add_bpmn_files(bpmn_files) + if dmn_files: + parser.add_dmn_files(dmn_files) + top_level = parser.get_spec(process) + subprocesses = parser.get_process_specs() + return BpmnWorkflow(top_level, subprocesses, script_engine=CustomScriptEngine) + +We create an instance of our previously defined parser, add the BPMN files to it, and +optionally add any DMN files, if they were supplied. + +We'll obtain the workflow specification from the parser for the top level process +using :code:`parser.get_spec()`. + +We'll get the specs of all the processes that were parsed with :code:`parser.get_process_specs()` +and provide these to the workflow as well. If your entire workflow is contained in your +top-level process, you can omit this argument, but if your workflow contains call activities, +you'll need to include it. + +We also provide an enhanced script engine to our workflow. More information about how and +why you might want to do this is covered in :doc:`advanced`. The :code:`script_engine` +argument is optional and the default will be used if none is supplied. + +We return a :code:`BpmnWorkflow` based on the specs that uses the our custom script engine +to execute script tasks and evaluate expressions. + +Running a Workflow +------------------ + +This is our application's :code:`run()` method. + +The :code:`step` argument is a boolean that indicates whether we want the option of seeing +a more detailed representation of the state at each step, which we'll discuss in the +section following this one. + +.. code:: python + + def run(workflow, step): + + workflow.do_engine_steps() + + while not workflow.is_completed(): + + ready_tasks = workflow.get_ready_user_tasks() + options = { } + print() + for idx, task in enumerate(ready_tasks): + option = format_task(task, False) + options[str(idx + 1)] = task + print(f'{idx + 1}. 
{option}') + + selected = None + while selected not in options and selected not in ['', 'D', 'd']: + selected = input('Select task to complete, enter to wait, or D to dump the workflow state: ') + + if selected.lower() == 'd': + filename = input('Enter filename: ') + state = BpmnSerializer().serialize_workflow(workflow, include_spec=True) + with open(filename, 'w') as dump: + dump.write(state) + elif selected != '': + next_task = options[selected] + if isinstance(next_task.task_spec, UserTask): + complete_user_task(next_task) + next_task.complete() + elif isinstance(next_task.task_spec, ManualTask): + complete_manual_task(next_task) + next_task.complete() + else: + next_task.complete() + + workflow.refresh_waiting_tasks() + workflow.do_engine_steps() + if step: + print_state(workflow) + + print('\nWorkflow Data') + print(json.dumps(workflow.data, indent=2, separators=[ ', ', ': ' ])) + +The first line of this function is the one that does the bulk of the work in +SpiffWorkflow. Calling :code:`workflow.do_engine_steps()` causes Spiff to repeatedly +look for and execute any engine tasks that are ready. + +An **engine task** does not require user interaction. For instance, it could be +a Script task or selection of a flow from a gateway. Execution will +stop when only interactive tasks remain or the workflow is completed. + +A SpiffWorkflow application will call :code:`workflow.do_engine_steps()` to start the +workflow and then enter a loop that will + +- check for ready user tasks +- present the tasks to the user to complete +- complete the tasks +- refresh any waiting tasks +- complete any engine tasks that have been reached via user interactions + +until the workflow completes. + +When a workflow completes, the task data (just a dictionary passed from one task to the +next, and optionally modified by each task) is copied into the workflow data. We display +the end state of the workflow on completion. + +The rest of the code is all about presenting the tasks to the user and dumping the +workflow state. We've covered former in the BPMN Elements section of :doc:`index` +and will cover the latter in :doc:`advanced`. + +Handling task presentation is what **you** will be developing when you use SpiffWorkflow. + +Examining the Workflow State +---------------------------- + +When this application is run and we want to present steps to the user, we'll need +to be able to examine the workflow and task states and associated data. We'll cover +the basics of this in this section. + +The code below is a simple method for displaying information about a task. We use +this in two ways + +- presenting a list of tasks to a user (in this case the state will always be ready, so we won't include it) +- presenting the state of each task while stepping through the workflow (in this case you most likely do want to know the state). + +.. code:: python + + def format_task(task, include_state=True): + if hasattr(task.task_spec, 'lane') and task.task_spec.lane is not None: + lane = f'[{task.task_spec.lane}]' + else: + lane = '' + state = f'[{task.get_state_name()}]' if include_state else '' + return f'{lane} {task.task_spec.description} ({task.task_spec.name}) {state}' + +We previously went over obtaining the lane information in :doc:`organization`. + +We can call :code:`task.get_state_name()` to get a human-readable representation of +a task's state. 
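To tie this back to the runner, here is a sketch of how the helper is used in both situations (the names in the sample output are illustrative):

.. code:: python

    # Presenting ready tasks to the user: they are all READY, so omit the state.
    for idx, task in enumerate(workflow.get_ready_user_tasks()):
        print(f'{idx + 1}. {format_task(task, include_state=False)}')

    # Stepping through the workflow: include the state.
    for task in workflow.get_tasks():
        print(format_task(task))
        # e.g. "[customer] Select and Customize Product (select_product) [COMPLETED]"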
+ +We store the value provided in the :code:`name` attribute of the task (the text +entered in the 'Name' field in our sample models) in :code:`task.task_spec.description`. + +Here is the code we use for examining the workflow state. + +.. code:: python + + def print_state(workflow): + + task = workflow.last_task + print('\nLast Task') + print(format_task(task)) + print(json.dumps(task.data, indent=2, separators=[ ', ', ': ' ])) + + display_types = (UserTask, ManualTask, ScriptTask, ThrowingEvent, CatchingEvent) + all_tasks = [ task for task in workflow.get_tasks() if isinstance(task.task_spec, display_types) ] + upcoming_tasks = [ task for task in all_tasks if task.state in [Task.READY, Task.WAITING] ] + + print('\nUpcoming Tasks') + for idx, task in enumerate(upcoming_tasks): + print(format_task(task)) + + if input('\nShow all tasks? ').lower() == 'y': + for idx, task in enumerate(all_tasks): + print(format_task(task)) + +We can find out what the last task was with :code:`workflow.last_task`. We'll print +its information as described above, as well as a dump of its data. + +We can get a list of all tasks regardless of type or state with :code:`workflow.get_tasks()`. + +The actual list of tasks will get quite long (some tasks are expanded internally by Spiff into +multiple tasks, and all gateways and events are also treated as "tasks"). So we're filtering +the tasks to only display the ones that would have salience to a user here. + +We'll further filter those tasks for :code:`READY` and :code:`WAITING` tasks for a more +compact display, and only show all tasks when explicitly called for. + +This is a very simple application, so our interactions with tasks are very basic. You will +definitely want to see the 'Navigation List' section of :doc:`advanced` for more sophisticated +ways of managing workflow state. + diff --git a/doc/bpmn/tasks.rst b/doc/bpmn/tasks.rst new file mode 100644 index 000000000..b00c9160d --- /dev/null +++ b/doc/bpmn/tasks.rst @@ -0,0 +1,222 @@ +Tasks +===== + +BPMN Model +---------- + +In this example, we'll model a customer selecting a product to illustrate +the basic task types that can be used with SpiffWorkflow. + +We'll be using the following files from `spiff-example-cli `_. + +- `task_types `_ workflow +- `product_prices `_ DMN table + +User Tasks +^^^^^^^^^^ + +User tasks would typically be used in the case where the task would be +completed from within the application. + +User tasks can include forms that ask the user questions. When you click on a +user task in a BPMN modeler, the Properties Panel includes a form tab. Use this +tab to build your questions. + +We'll ask our hypothetical user to choose a product and quantity. + +The following example shows how a form might be set up in Camumda. + +.. figure:: figures/user_task.png + :scale: 30% + :align: center + + User Task configuration + +.. note:: + + SpiffWorkflow has some basic support for the free Camunda modeler, to use its + form building capabilities, but we intend to encapsulate this support in an + extension module and remove it from the core library eventually. + +See the `Handling User Tasks`_ section for a discussion of sample code. + +Business Rule Tasks +^^^^^^^^^^^^^^^^^^^ + +In our business rule task, we'll use a DMN table to look up the price of the +product the user chose. + +We'll need to create a DMN table. + +What is DMN? +++++++++++++ + +Decision Model and Notation (DMN) is a standard for business decision +modeling. 
DMN allows modelers to separate decision logic from process logic +and maintain it in a table format. DMN is linked into BPMN with a *decision +task*. + +With DMN, business analysts can model the rules that lead to a decision +in an easy to read table. Those tables can be executed directly by SpiffWorkflow. + +This minimizes the risk of misunderstandings between business analysts and +developers, and allows rapid changes in production. + +BPMN includes a decision task that refers to the decision table. The outcome of +the decision lookup allows the next gateway or activity to route the flow. + +Our Business Rule Task will make use of a DMN table. + +.. figure:: figures/dmn_table.png + :scale: 30% + :align: center + + DMN Table + +.. note:: + We add quote marks around the product names in the table. Spiff will + create an expression based on the exact contents of the table, so if + the quotes are omitted, the content will be interpreted as a variable + rather than a string. + +Then we'll refer to this table in the task configuration. + +.. figure:: figures/business_rule_task.png + :scale: 30% + :align: center + + Business Rule Task configuration + +Script Tasks +^^^^^^^^^^^^ + +The total order cost will need to be calculated on the fly. We can do this in +a script task. We'll configure the task with some simple Python code. + +.. figure:: figures/script_task.png + :scale: 30% + :align: center + + Script Task configuration + +The code in the script will have access to the task data, so variables that +have been defined previously will be available to it. + +Manual Tasks +^^^^^^^^^^^^ + +Our final task type is a manual task. We would use this task in the situation +where the application might simply need to mark a task that requires user +involvement complete without gathering any additional information from them. + +There is no special configuration for manual tasks. However, this is a good +place to note that we can use the BPMN element Documentation field to display +more information about the context of the item. + +Spiff is set up in a way that you could use any templating library you want, but +we have used `Jinja `_. + +In this example, we'll present an order summary to our customer. + +.. figure:: figures/documentation.png + :scale: 30% + :align: center + + Element Documentation + +See the `Handling Manual Tasks`_ section for a discussion of sample code. + +Running The Model +^^^^^^^^^^^^^^^^^ + +If you have set up our example repository, this model can be run with the +following command: + +.. code-block:: console + + ./run.py -p order_product -d bpmn/product_prices.dmn -b bpmn/task_types.bpmn + +Example Application Code +------------------------ + +Handling User Tasks +^^^^^^^^^^^^^^^^^^^ + +We will need to provide a way to display the form data and collect the user's +responses. + +.. code:: python + + for field in task.task_spec.form.fields: + if isinstance(field, EnumFormField): + option_map = dict([ (opt.name, opt.id) for opt in field.options ]) + options = "(" + ', '.join(option_map) + ")" + prompt = f"{field.label} {options} " + option = select_option(prompt, option_map.keys()) + response = option_map[option] + else: + response = input(f"{field.label} ") + if field.type == "long": + response = int(response) + task.update_data_var(field.id, response) + +The list of form fields for a task is stored in :code:`task.task_spec.form_fields`. + +For Enumerated fields, we want to get the possible options and present them to the +user. 
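The :code:`select_option` helper called in this loop is not shown in the excerpt; a minimal version might look like the following sketch (the helper in spiff-example-cli may differ):

.. code:: python

    def select_option(prompt, options):
        """Repeat the prompt until the user enters one of the allowed options."""
        response = input(prompt)
        while response not in options:
            response = input(prompt)
        return response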
The variable names of the fields were stored in :code:`field.id`, but since +we set labels for each of the fields, we'd like to display those instead, and map +the user's selection back to the variable name. + +Our :code:`select_option` function simply repeats the prompt until the user +enters a value contained in the option list. + +For other fields, we'll just store whatever the user enters, although in the case +where they data type was specified to be a :code:`long`, we'll convert it to a +number. + +Finally, we need to explicitly store the user-provided response in a variable +with the expected name with :code:`task.update_data_var(field.id, response)`. + + +Handling Business Rule Tasks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We do not need to do any special configuration to handle these business rule +tasks. SpiffWorkflow does it all for us. + +Handling Script Tasks +^^^^^^^^^^^^^^^^^^^^^ + +We do not need to do any special configuration to handle script tasks, although it +is possible to implement a custom script engine. We demonstrate that process in +Custom Script Engines section :doc:`advanced` features. However, the default script +engine will work in many cases. + +Handling Manual Tasks +^^^^^^^^^^^^^^^^^^^^^ + +Our code for manual tasks simply asks the user to confirm that the task has been +completed. + +.. code:: python + + def complete_manual_task(task): + display_task(task) + input("Press any key to mark task complete") + +:code:`display_task()` is the code for converting the Documentation property of the task +into something that can be presented to the user. + +.. code:: python + + def display_task(task): + print(f'\n{task.task_spec.description}') + if task.task_spec.documentation is not None: + template = Template(task.task_spec.documentation) + print(template.render(task.data)) + +The template string can be obtained from :code:`task.task_spec.documentation`. + +As noted above, our template class comes from Jinja. We render the template +using the task data, which is just a dictionary. + diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 000000000..ae9037ba0 --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,64 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'SpiffWorkflow' +copyright = '2022, Sartography' +author = 'Sartography' + +# The full version, including alpha/beta/rc tags + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.autodoc', # 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', + 'sphinx.ext.autosummary', + 'sphinx_rtd_theme', + #'sphinx.ext.intersphinx', + ] + +# Add any paths that contain templates here, relative to this directory. 
+templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "sphinx_rtd_theme" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Set the master index file. +master_doc = 'index' + +# Set the fav-icon +html_favicon = 'favicon.ico' diff --git a/doc/development.rst b/doc/development.rst new file mode 100644 index 000000000..175446e93 --- /dev/null +++ b/doc/development.rst @@ -0,0 +1,99 @@ +SpiffWorkflow Concepts +==================================== + +Specification vs. Workflow Instance +----------------------------------- + +One critical concept to know about SpiffWorkflow is the difference between a +:class:`SpiffWorkflow.specs.WorkflowSpec` and :class:`SpiffWorkflow.Workflow` and +the difference between a :class:`SpiffWorkflow.specs.TaskSpec` and :class:`SpiffWorkflow.Task`. + +In order to understand how to handle a running workflow consider the following process:: + + Choose product -> Choose amount -> Produce product A + `--> Produce product B + +As you can see, in this case the process resembles a simple tree. *Choose product*, +*Choose amount*, *Produce product A*, and *Produce product B* are all specific kinds +of *task specifications*, and the whole process is a *workflow specification*. + +But when you execute the workflow, the path taken does not necessarily have the same shape. For example, if the user chooses to produce 3 items of product A, the path taken looks like the following:: + + Choose product -> Choose amount -> Produce product A + |--> Produce product A + `--> Produce product A + +This is the reason why you will find two different categories of objects in Spiff Workflow: + +- **Specification objects** (WorkflowSpec and TaskSpec) represent the workflow definition, and +- **derivation tree objects** (Workflow and Task) model the task tree that represents the state of a running workflow. + +Understanding task states +------------------------- + +The following task states exist: + +.. image:: figures/state-diagram.png + +The states are reached in a strict order and the lines in the diagram show the possible state transitions. + +The order of these state transitions is violated only in one case: A *Trigger* task may add additional work to a task that was already COMPLETED, causing it to change the state back to FUTURE. + +- **MAYBE** means that the task will possibly, but not necessarily run at a future time. This means that it can not yet be fully determined as to whether or not it may run, for example, because the execution still depends on the outcome of an ExclusiveChoice task in the path that leads towards it. + +- **LIKELY** is like MAYBE, except it is considered to have a higher probability of being reached because the path leading towards it is the default choice in an ExclusiveChoice task. 
+
+- **FUTURE** means that the processor has predicted that this path will be taken and this task will, at some point, definitely run. (Unless the task is explicitly set to CANCELLED, which can not be predicted.) If a task is waiting on predecessors to run then it is in FUTURE state (not WAITING).
+
+- **WAITING** means *I am in the process of doing my work and have not finished. When the work is finished, then I will be READY for completion and will go to READY state*. WAITING is an optional state.
+
+- **READY** means "the preconditions for marking this task as complete are met".
+
+- **COMPLETED** means that the task is done.
+
+- **CANCELLED** means that the task was explicitly cancelled, for example by a CancelTask operation.
+
+Associating data with a workflow
+--------------------------------
+
+The difference between *specification objects* and *derivation tree objects* is also important when choosing how to store data in a workflow. Spiff Workflow supports storing data in two ways:
+
+- **Task spec data** is stored in the TaskSpec object. In other words, if a task causes task spec data to change, that change is reflected to all other instances in the derivation tree that use the TaskSpec object.
+- **Task data** is local to the Task object, but is carried along to the children of each Task object in the derivation tree as the workflow progresses.
+
+Internal Details
+----------------
+
+A **derivation tree** is created based off of the spec using a hierarchy of
+:class:`SpiffWorkflow.Task` objects (not :class:`SpiffWorkflow.specs.TaskSpec` objects!).
+Each Task contains a reference to the TaskSpec that generated it.
+
+Think of a derivation tree as a tree of execution paths (some, but not all, of
+which will end up executing). Each Task object is basically a node in the
+derivation tree. Each task in the tree links back to its parent (there are
+no connection objects). The processing is done by walking down the
+derivation tree one Task at a time and moving the task (and its
+children) through the sequence of states towards completion.
+
+You can serialize/deserialize specs. You can also
+serialize/deserialize a running workflow (it will pull in its spec as well).
+
+There's a decent eventing model that allows you to tie in to and receive
+events (for each task, you can get event notifications from its TaskSpec).
+The events correspond with how the processing is going in the derivation
+tree, not necessarily how the workflow as a whole is moving.
+See :class:`SpiffWorkflow.specs.TaskSpec` for docs on events.
+
+You can nest workflows (using the :class:`SpiffWorkflow.specs.SubWorkflowSpec`).
+
+The serialization code is done well, which makes it easy to add new formats
+if we need to support them.
+
+
+Other documentation
+-------------------
+
+**API documentation** is currently embedded into the Spiff Workflow source code and not yet made available in a prettier form.
+
+If you need more help, please create an issue in our
+`issue tracker `_.
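To make the distinction between specification objects and derivation tree objects concrete, here is a small sketch using the core (non-BPMN) API; the constructor arguments and task names are assumptions made for illustration:

.. code:: python

    from SpiffWorkflow import Workflow
    from SpiffWorkflow.specs import WorkflowSpec, Simple

    # One specification: Start -> choose_product -> produce_product_a
    spec = WorkflowSpec('example')
    choose = Simple(spec, 'choose_product')
    spec.start.connect(choose)
    produce = Simple(spec, 'produce_product_a')
    choose.connect(produce)

    # Two independent derivation trees built from the same specification.
    wf1 = Workflow(spec)
    wf2 = Workflow(spec)

    # Each node in the derivation tree is a Task that points back at its TaskSpec.
    for task in wf1.get_tasks():
        print(task.task_spec.name, task.get_state_name())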
diff --git a/doc/favicon.ico b/doc/favicon.ico new file mode 100644 index 000000000..d1bb90073 Binary files /dev/null and b/doc/favicon.ico differ diff --git a/doc/figures/ExclusiveGateway.png b/doc/figures/ExclusiveGateway.png new file mode 100644 index 000000000..1dec0e966 Binary files /dev/null and b/doc/figures/ExclusiveGateway.png differ diff --git a/doc/figures/action-management.png b/doc/figures/action-management.png new file mode 100644 index 000000000..bb7e2d8ce Binary files /dev/null and b/doc/figures/action-management.png differ diff --git a/doc/figures/bpmnbook.jpg b/doc/figures/bpmnbook.jpg new file mode 100644 index 000000000..e5b0a3936 Binary files /dev/null and b/doc/figures/bpmnbook.jpg differ diff --git a/doc/figures/classes.png b/doc/figures/classes.png new file mode 100644 index 000000000..07214a523 Binary files /dev/null and b/doc/figures/classes.png differ diff --git a/doc/figures/events.png b/doc/figures/events.png new file mode 100644 index 000000000..dc63fb2af Binary files /dev/null and b/doc/figures/events.png differ diff --git a/doc/figures/interaction.png b/doc/figures/interaction.png new file mode 100644 index 000000000..c92df5d6f Binary files /dev/null and b/doc/figures/interaction.png differ diff --git a/doc/figures/nuclear_strike.bpmn b/doc/figures/nuclear_strike.bpmn new file mode 100644 index 000000000..d3fbd7a94 --- /dev/null +++ b/doc/figures/nuclear_strike.bpmn @@ -0,0 +1,71 @@ + + + + + + + + + + SequenceFlow_1xrbp0m + + + SequenceFlow_1xrbp0m + SequenceFlow_1vwfrws + SequenceFlow_0x0u589 + + + SequenceFlow_1vwfrws + + + SequenceFlow_0x0u589 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/figures/simplestworkflow.png b/doc/figures/simplestworkflow.png new file mode 100644 index 000000000..0b910ac35 Binary files /dev/null and b/doc/figures/simplestworkflow.png differ diff --git a/doc/figures/state-diagram.png b/doc/figures/state-diagram.png new file mode 100644 index 000000000..cd55ef679 Binary files /dev/null and b/doc/figures/state-diagram.png differ diff --git a/doc/figures/state-diagram.svg b/doc/figures/state-diagram.svg new file mode 100644 index 000000000..021373cbf --- /dev/null +++ b/doc/figures/state-diagram.svg @@ -0,0 +1,3 @@ + + +2012-04-06 20:40ZCanvas 1Layer 1FUTUREMAYBELIKELYREADYWAITINGCOMPLETEMight runMight run (default path)Will definitely runRunning(not done)Done!Done(and acknowledged as done - we've moved on)CANCELLEDCan be cancelled any time diff --git a/doc/images/logo.png b/doc/images/logo.png new file mode 100644 index 000000000..d9a1067f6 Binary files /dev/null and b/doc/images/logo.png differ diff --git a/doc/index.rst b/doc/index.rst new file mode 100644 index 000000000..74bccd055 --- /dev/null +++ b/doc/index.rst @@ -0,0 +1,54 @@ +.. image:: https://travis-ci.com/sartography/SpiffWorkflow.svg?branch=master + :target: https://travis-ci.org/sartography/SpiffWorkflow + +.. image:: https://sonarcloud.io/api/project_badges/measure?project=sartography_SpiffWorkflow&metric=alert_status + :target: https://sonarcloud.io/dashboard?id=sartography_SpiffWorkflow + +.. image:: https://sonarcloud.io/api/project_badges/measure?project=sartography_SpiffWorkflow&metric=coverage + :target: https://sonarcloud.io/dashboard?id=sartography_SpiffWorkflow + :alt: Coverage + +.. image:: https://img.shields.io/github/stars/sartography/SpiffWorkflow.svg + :target: https://github.com/sartography/SpiffWorkflow/stargazers + +.. 
image:: https://img.shields.io/github/license/sartography/SpiffWorkflow.svg + :target: https://github.com/sartography/SpiffWorkflow/blob/master/COPYING + +What is SpiffWorkflow? +====================== +.. image:: images/logo.png + :align: center + +SpiffWorkflow allows your python application to process BPMN diagrams (think +of them as very powerful flow charts, See :doc:`intro`.) to accomplish +what would otherwise require writing a lot of complex business logic in your +code. You can use these diagrams to accomplish a number of tasks, such as: + + - Creating a questionnaire with multiple complex paths + - Implement an approval process that requires input from multiple users + - Allow non-programmers to modify the flow and behavior of your application. + +License +------- +Spiff Workflow is published under the terms of the +`GNU Lesser General Public License (LGPL) Version 3 `_. + + +Support +------- +You can find us on `our Discord Channel `_ + +Commercial support for SpiffWorkflow is available from +`Sartography `_ + + +Contents +-------- +.. toctree:: + :maxdepth: 2 + + intro + bpmn/index + development + non-bpmn/index + diff --git a/doc/intro.rst b/doc/intro.rst new file mode 100644 index 000000000..7077814ec --- /dev/null +++ b/doc/intro.rst @@ -0,0 +1,125 @@ +Overview +======== + +BPMN and SpiffWorkflow +---------------------- + +.. sidebar:: BPMN Resources + + This guide is a mere introduction to BPMN. + For more serious modeling, we recommend looking for more comprehensive + resources. We have used the `books by Bruce Silver `_ + as a guide for our BPMN modeling. + + .. image:: figures/bpmnbook.jpg + :align: center + +Business Process Model and Notation (BPMN) is a diagramming language for +specifying business processes. BPMN links the realms of business and IT, and +creates a common process language that can be shared between the two. + +BPMN describes details of process behaviors efficiently in a diagram. The +meaning is precise enough to describe the technical details that control +process execution in an automation engine. SpiffWorkflow allows you to create +code to directly execute a BPMN diagram. + +When using SpiffWorkflow, a client can manipulate the BPMN diagram and still +have their product work without a need for you to edit the Python code, +improving response and turnaround time. + +Today, nearly every process modeling tool supports BPMN in some fashion making +it a great tool to learn and use. + +To use SpiffWorkflow, you need at least a basic understanding of BPMN. +This page offers a brief overview. There are many resources for additional +information about BPMN. + +.. sidebar:: BPMN Modelers + + There are a number of modelers in existence, and any BPMN compliant modeler should work. + SpiffWorkflow has some basic support for the free Camunda modeler, to use it's form building + capabilities, but we intend to encapsulate this support in an extension module and remove + it from the core library eventually. It does help for making some examples and demonstrating + how one might implement user tasks in an online environment. + +In these examples and throughout the documentation we use the +`BPMN.js `_ BPMN Modeler. + + +A Simple Workflow +----------------- + +All BPMN models have a start event and at least one end event. The start event +is represented with a single thin border circle. An end event is represented +by a single thick border circle. + +The following example also has one task, represented by the rectangle with curved corners. + + +.. 
figure:: figures/simplestworkflow.png + :scale: 25% + :align: center + + A simple workflow. + + +The sequence flow is represented with a solid line connector. When the node at +the tail of a sequence flow completes, the node at the arrowhead is enabled to start. + + +A More Complicated Workflow +--------------------------- + +.. figure:: figures/ExclusiveGateway.png + :scale: 25% + :align: center + + A workflow with a gateway + + +In this example, the diamond shape is called a gateway. It represents a branch +point in our flow. This gateway is an exclusive data-based gateway (also +called an XOR gateway). With an exclusive gateway, you must take one path or +the other based on some data condition. BPMN has other gateway types. + +The important point is that we can use a gateway to add a branch in the +workflow **without** creating an explicit branch in our Python code. + +Events +------ + +In the above simple workflows, all of the transitions are deterministic and we +have direct connections between tasks. We need to handle the cases where an event +may or may not happen and link these events in different parts of the workflow. + +BPMN has a comprehensive suite of event elements that can used for this purpose. +SpiffWorkflow does not support every single BPMN event type, but it can handle +many of them. + +.. figure:: figures/events.png + :scale: 25% + :align: center + + A workflow containing events + + +We've already seen plain Start and End Events. BPMN also include the concepts +of Intermediate Events (standalone events that may be Throwing or Catching) as well +as Boundary Events (which can only be Caught). + +All Start Events are inherently Catching Events (a workflow can be initiated if a +particular event occurs) and all End Events are Throwing Events (they can convey +the final state of a workflow or path to other tasks and workflows). + +If an Intermediate Throwing Event is added to a flow, the event it represents +will be generated and the flow will continue immediately. If an Intermediate +Catching Event is added to a flow, the workflow will wait to catch the event it +represents before advancing. + +A Boundary Event represents an event that may be caught only while a particular task +is being executed and comes in two types: Interrupting (in which case the task it is +attached to will be cancelled if the event is received) or Non-Interrupting (in +which case the task will continue). In both cases, flows may emanate from the +Boundary Event, which will trigger those paths if the events occur while the task +is being executed. + diff --git a/doc/make.bat b/doc/make.bat new file mode 100644 index 000000000..922152e96 --- /dev/null +++ b/doc/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/non-bpmn/custom-tasks/index.rst b/doc/non-bpmn/custom-tasks/index.rst new file mode 100644 index 000000000..3a41fdad3 --- /dev/null +++ b/doc/non-bpmn/custom-tasks/index.rst @@ -0,0 +1,65 @@ +Implementing Custom Tasks +========================= + +Introduction +------------ + +In this second tutorial we are going to implement our own task, and +use serialization and deserialization to store and restore it. + +If you haven't already, you should complete the first +:doc:`../tutorial/index`. +We are also assuming that you are familiar with the :doc:`../basics`. + +Implementing the custom task +---------------------------- + +The first step is to create a :class:`SpiffWorkflow.specs.TaskSpec` that +fires the rocket:: + + from SpiffWorkflow.specs import Simple + + class NuclearStrike(Simple): + def _on_complete_hook(self, my_task): + print("Rocket sent!") + +Save this file as ``strike.py``. + +Now, before we are ready to define the workflow using XML or JSON, we will +also have extend the serializer to let SpiffWorkflow know how to represent +your NuclearStrike first. + +Preparing a serializer +---------------------- + +Before we can use JSON to specify a workflow, we first need to teach +SpiffWorkflow what our custom `NuclearChoice` looks like in JSON. +We do this by extending the +:mod:`SpiffWorkflow.serializer.json.JSONSerializer`. + +.. literalinclude:: serializer.py + +We save the serializer as ``serializer.py``. +We also need to update ``strike.py`` as follows: + +We also implement the deserializer: + +.. literalinclude:: strike.py + +That is all! You are now ready to create the specification from JSON. + +Creating a workflow specification (using JSON) +---------------------------------------------- + +Now we can use the NuclearStrike in the workflow specification in JSON. +Note that this specification is the same as in our first tutorial, +except that it references our class `strike.NuclearStrike`. + +.. literalinclude:: nuclear.json + +Using the custom serializer and task +------------------------------------ + +Here we use our brand new serializer in practice: + +.. 
literalinclude:: start.py diff --git a/doc/non-bpmn/custom-tasks/nuclear.json b/doc/non-bpmn/custom-tasks/nuclear.json new file mode 100644 index 000000000..46623f3a9 --- /dev/null +++ b/doc/non-bpmn/custom-tasks/nuclear.json @@ -0,0 +1,93 @@ +{ + "task_specs": { + "Start": { + "class": "SpiffWorkflow.specs.StartTask.StartTask", + "manual": false, + "outputs": [ + "general" + ] + }, + "general": { + "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", + "name": "general", + "manual": true, + "inputs": [ + "Start" + ], + "outputs": [ + "workflow_aborted", + "president" + ], + "choice": null, + "default_task_spec": "workflow_aborted", + "cond_task_specs": [ + [ + [ + "SpiffWorkflow.operators.Equal", + [ + [ + "Attrib", + "confirmation" + ], + [ + "value", + "yes" + ] + ] + ], + "president" + ] + ] + }, + "president": { + "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", + "name": "president", + "manual": true, + "inputs": [ + "general" + ], + "outputs": [ + "workflow_aborted", + "nuclear_strike" + ], + "choice": null, + "default_task_spec": "workflow_aborted", + "cond_task_specs": [ + [ + [ + "SpiffWorkflow.operators.Equal", + [ + [ + "Attrib", + "confirmation" + ], + [ + "value", + "yes" + ] + ] + ], + "nuclear_strike" + ] + ] + }, + "nuclear_strike": { + "class": "strike.NuclearStrike", + "name": "nuclear_strike", + "inputs": [ + "president" + ] + }, + "workflow_aborted": { + "class": "SpiffWorkflow.specs.Cancel.Cancel", + "name": "workflow_aborted", + "inputs": [ + "general", + "president" + ] + } + }, + "description": "", + "file": null, + "name": "" +} diff --git a/doc/non-bpmn/custom-tasks/serializer.py b/doc/non-bpmn/custom-tasks/serializer.py new file mode 100644 index 000000000..593728707 --- /dev/null +++ b/doc/non-bpmn/custom-tasks/serializer.py @@ -0,0 +1,11 @@ +from SpiffWorkflow.serializer.json import JSONSerializer +from strike import NuclearStrike + +class NuclearSerializer(JSONSerializer): + def serialize_nuclear_strike(self, task_spec): + return self.serialize_task_spec(task_spec) + + def deserialize_nuclear_strike(self, wf_spec, s_state): + spec = NuclearStrike(wf_spec, s_state['name']) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec diff --git a/doc/non-bpmn/custom-tasks/start.py b/doc/non-bpmn/custom-tasks/start.py new file mode 100644 index 000000000..472d20997 --- /dev/null +++ b/doc/non-bpmn/custom-tasks/start.py @@ -0,0 +1,18 @@ +import json +from SpiffWorkflow import Workflow +from SpiffWorkflow.specs import WorkflowSpec +from serializer import NuclearSerializer + +# Load from JSON +with open('nuclear.json') as fp: + workflow_json = fp.read() +serializer = NuclearSerializer() +spec = WorkflowSpec.deserialize(serializer, workflow_json) + +# Create the workflow. +workflow = Workflow(spec) + +# Execute until all tasks are done or require manual intervention. +# For the sake of this tutorial, we ignore the "manual" flag on the +# tasks. In practice, you probably don't want to do that. 
+workflow.complete_all(halt_on_manual=False) diff --git a/doc/non-bpmn/custom-tasks/strike.py b/doc/non-bpmn/custom-tasks/strike.py new file mode 100644 index 000000000..8dbaf2a24 --- /dev/null +++ b/doc/non-bpmn/custom-tasks/strike.py @@ -0,0 +1,12 @@ +from SpiffWorkflow.specs import Simple + +class NuclearStrike(Simple): + def _on_complete_hook(self, my_task): + print((self.my_variable, "sent!")) + + def serialize(self, serializer): + return serializer.serialize_nuclear_strike(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_nuclear_strike(wf_spec, s_state) diff --git a/doc/non-bpmn/index.rst b/doc/non-bpmn/index.rst new file mode 100644 index 000000000..60b763ef8 --- /dev/null +++ b/doc/non-bpmn/index.rst @@ -0,0 +1,12 @@ +Non-BPMN support +================ + +We have maintained support for legacy non-BPMN workflows, but we recommend using +SpiffWorkflow with BPMN, as this is where current development is focused. + +.. toctree:: + :maxdepth: 2 + + tutorial/index + custom-tasks/index + patterns \ No newline at end of file diff --git a/doc/non-bpmn/patterns.rst b/doc/non-bpmn/patterns.rst new file mode 100644 index 000000000..088e1c632 --- /dev/null +++ b/doc/non-bpmn/patterns.rst @@ -0,0 +1,77 @@ +.. _patterns: + +Supported Workflow Patterns +=========================== + +.. HINT:: + All examples are located + `here `_. + +Control-Flow Patterns +--------------------- + +1. Sequence [control-flow/sequence.xml] +2. Parallel Split [control-flow/parallel_split.xml] +3. Synchronization [control-flow/synchronization.xml] +4. Exclusive Choice [control-flow/exclusive_choice.xml] +5. Simple Merge [control-flow/simple_merge.xml] +6. Multi-Choice [control-flow/multi_choice.xml] +7. Structured Synchronizing Merge [control-flow/structured_synchronizing_merge.xml] +8. Multi-Merge [control-flow/multi_merge.xml] +9. Structured Discriminator [control-flow/structured_discriminator.xml] +10. Arbitrary Cycles [control-flow/arbitrary_cycles.xml] +11. Implicit Termination [control-flow/implicit_termination.xml] +12. Multiple Instances without Synchronization [control-flow/multi_instance_without_synch.xml] +13. Multiple Instances with a Priori Design-Time Knowledge [control-flow/multi_instance_with_a_priori_design_time_knowledge.xml] +14. Multiple Instances with a Priori Run-Time Knowledge [control-flow/multi_instance_with_a_priori_run_time_knowledge.xml] +15. Multiple Instances without a Priori Run-Time Knowledge [control-flow/multi_instance_without_a_priori.xml] +16. Deferred Choice [control-flow/deferred_choice.xml] +17. Interleaved Parallel Routing [control-flow/interleaved_parallel_routing.xml] +18. Milestone [control-flow/milestone.xml] +19. Cancel Task [control-flow/cancel_task.xml] +20. Cancel Case [control-flow/cancel_case.xml] +21. *NOT IMPLEMENTED* +22. Recursion [control-flow/recursion.xml] +23. Transient Trigger [control-flow/transient_trigger.xml] +24. Persistent Trigger [control-flow/persistent_trigger.xml] +25. Cancel Region [control-flow/cancel_region.xml] +26. Cancel Multiple Instance Task [control-flow/cancel_multi_instance_task.xml] +27. Complete Multiple Instance Task [control-flow/complete_multiple_instance_activity.xml] +28. Blocking Discriminator [control-flow/blocking_discriminator.xml] +29. Cancelling Discriminator [control-flow/cancelling_discriminator.xml] +30. Structured Partial Join [control-flow/structured_partial_join.xml] +31. Blocking Partial Join [control-flow/blocking_partial_join.xml] +32. 
Cancelling Partial Join [control-flow/cancelling_partial_join.xml] +33. Generalized AND-Join [control-flow/generalized_and_join.xml] +34. Static Partial Join for Multiple Instances [control-flow/static_partial_join_for_multi_instance.xml] +35. Cancelling Partial Join for Multiple Instances [control-flow/cancelling_partial_join_for_multi_instance.xml] +36. Dynamic Partial Join for Multiple Instances [control-flow/dynamic_partial_join_for_multi_instance.xml] +37. Acyclic Synchronizing Merge [control-flow/acyclic_synchronizing_merge.xml] +38. General Synchronizing Merge [control-flow/general_synchronizing_merge.xml] +39. Critical Section [control-flow/critical_section.xml] +40. Interleaved Routing [control-flow/interleaved_routing.xml] +41. Thread Merge [control-flow/thread_merge.xml] +42. Thread Split [control-flow/thread_split.xml] +43. Explicit Termination [control-flow/explicit_termination.xml] + +Workflow Data Patterns +---------------------- + +1. Task Data [data/task_data.xml] +2. Block Data [data/block_data.xml] +3. *NOT IMPLEMENTED* +4. *NOT IMPLEMENTED* +5. *NOT IMPLEMENTED* +6. *NOT IMPLEMENTED* +7. *NOT IMPLEMENTED* +8. *NOT IMPLEMENTED* +9. Task to Task [data/task_to_task.xml] +10. Block Task to Sub-Workflow Decomposition [data/block_to_subworkflow.xml] +11. Sub-Workflow Decomposition to Block Task [data/subworkflow_to_block.xml] + +Specs that have no corresponding workflow pattern on workflowpatterns.com +------------------------------------------------------------------------- + +- Execute - spawns a subprocess and waits for the results +- Transform - executes commands that can be used for data transforms +- Celery - executes a Celery task (see http://celeryproject.org/) diff --git a/doc/non-bpmn/tutorial/deserialize-wf.py b/doc/non-bpmn/tutorial/deserialize-wf.py new file mode 100644 index 000000000..f45beb307 --- /dev/null +++ b/doc/non-bpmn/tutorial/deserialize-wf.py @@ -0,0 +1,7 @@ +from SpiffWorkflow import Workflow +from SpiffWorkflow.serializer.json import JSONSerializer + +serializer = JSONSerializer() +with open('workflow.json') as fp: + workflow_json = fp.read() +workflow = Workflow.deserialize(serializer, workflow_json) diff --git a/doc/non-bpmn/tutorial/deserialize.py b/doc/non-bpmn/tutorial/deserialize.py new file mode 100644 index 000000000..166f27222 --- /dev/null +++ b/doc/non-bpmn/tutorial/deserialize.py @@ -0,0 +1,7 @@ +from SpiffWorkflow.specs import WorkflowSpec +from SpiffWorkflow.serializer.json import JSONSerializer + +serializer = JSONSerializer() +with open('workflow-spec.json') as fp: + workflow_json = fp.read() +spec = WorkflowSpec.deserialize(serializer, workflow_json) diff --git a/doc/non-bpmn/tutorial/index.rst b/doc/non-bpmn/tutorial/index.rst new file mode 100644 index 000000000..2ba301184 --- /dev/null +++ b/doc/non-bpmn/tutorial/index.rst @@ -0,0 +1,104 @@ +Tutorial - Non-BPMN +=================== + +Introduction +------------ + +In this chapter we are going to use Spiff Workflow to solve a real-world +problem: We will create a workflow for triggering a nuclear strike. + +We are assuming that you are familiar with the :doc:`../basics`. + +Assume you want to send the rockets, but only after both the president and +a general have signed off on it. + +There are two different ways of defining a workflow: Either by deserializing +(from XML or JSON), or using Python. + +Creating the workflow specification (using Python) +-------------------------------------------------- + +As a first step, we are going to create a simple workflow in code. 
+In Python, the workflow is defined as follows: + +.. literalinclude:: nuclear.py + +Hopefully the code is self explaining. +Using Python to write a workflow can quickly become tedious. It is +usually a better idea to use another format. + +Creating a workflow specification (using JSON) +---------------------------------------------- + +Once you have completed the serializer as shown above, you can +write the specification in JSON. + +Here is an example that is doing exactly the same as the Python +WorkflowSpec above: + +.. literalinclude:: nuclear.json + +Creating a workflow out of the specification +-------------------------------------------- + +Now it is time to get started and actually create and execute +a workflow according to the specification. + +Since we included *manual* tasks in the specification, you will want +to implement a user interface in practice, but we are just going to +assume that all tasks are automatic for this tutorial. +Note that the *manual* flag has no effect on the control flow; it is +just a flag that a user interface may use to identify tasks that +require a user input. + +.. literalinclude:: start.py + +:meth:`SpiffWorkflow.Workflow.complete_all` completes all tasks in +accordance to the specification, until no further tasks are READY +for being executed. +Note that this does not mean that the workflow is completed after +calling :meth:`SpiffWorkflow.Workflow.complete_all`, since some +tasks may be WAITING, or may be blocked by another WAITING task, +for example. + + +Serializing a workflow +---------------------- + +If you want to store a :class:`SpiffWorkflow.specs.WorkflowSpec`, you can +use :meth:`SpiffWorkflow.specs.WorkflowSpec.serialize`: + +.. literalinclude:: serialize.py + +If you want to store a :class:`SpiffWorkflow.Workflow`, use +use :meth:`SpiffWorkflow.Workflow.serialize`: + +.. literalinclude:: serialize-wf.py + +Deserializing a workflow +------------------------ + +The following example shows how to restore a +:class:`SpiffWorkflow.specs.WorkflowSpec` using +:meth:`SpiffWorkflow.specs.WorkflowSpec.serialize`. + +.. literalinclude:: deserialize.py + +To restore a :class:`SpiffWorkflow.Workflow`, use +:meth:`SpiffWorkflow.Workflow.serialize` instead: + +.. literalinclude:: deserialize-wf.py + +Where to go from here? +---------------------- + +This first tutorial actually has a problem: If you want to save the workflow, +SpiffWorkflow won't be able to re-connect the signals because it can not +save the reference to your code. + +So after deserializing the workflow, you will need to re-connect the signals +yourself. + +If you would rather have it such that SpiffWorkflow handles this for you, +you need to create a custom task and tell SpiffWorkflow how to +serialize and deserialize it. The next tutorial shows how this is done. 
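[Editor's note — illustration for the tutorial above, not part of the original files.] The tutorial ends by pointing out that signals must be re-connected by hand after deserializing a workflow. A minimal sketch of what that might look like, assuming the ``workflow.json`` produced earlier and the ``my_nuclear_strike`` callback from ``nuclear.py``; it uses ``WorkflowSpec.get_task_spec_from_name``, which SpiffWorkflow provides, but treat the exact wiring as a sketch rather than the library's prescribed recipe::

    from SpiffWorkflow import Workflow
    from SpiffWorkflow.serializer.json import JSONSerializer
    from nuclear import my_nuclear_strike

    serializer = JSONSerializer()
    with open('workflow.json') as fp:
        workflow = Workflow.deserialize(serializer, fp.read())

    # The Python callback cannot be stored in JSON, so it has to be
    # re-attached to the restored task spec after every deserialization.
    strike_spec = workflow.spec.get_task_spec_from_name('nuclear_strike')
    strike_spec.completed_event.connect(my_nuclear_strike)

    workflow.complete_all(halt_on_manual=False)

This is exactly the bookkeeping that the custom-task tutorial (``strike.NuclearStrike`` plus its serializer) removes, since the custom spec knows how to serialize and deserialize itself.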
diff --git a/doc/non-bpmn/tutorial/nuclear.json b/doc/non-bpmn/tutorial/nuclear.json new file mode 100644 index 000000000..7f0efb2a8 --- /dev/null +++ b/doc/non-bpmn/tutorial/nuclear.json @@ -0,0 +1,98 @@ +{ + "task_specs": { + "Start": { + "class": "SpiffWorkflow.specs.StartTask.StartTask", + "id" : 1, + "manual": false, + "outputs": [ + 2 + ] + }, + "general": { + "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", + "name": "general", + "id" : 2, + "manual": true, + "inputs": [ + 1 + ], + "outputs": [ + 5, + 3 + ], + "choice": null, + "default_task_spec": "workflow_aborted", + "cond_task_specs": [ + [ + [ + "SpiffWorkflow.operators.Equal", + [ + [ + "Attrib", + "confirmation" + ], + [ + "value", + "yes" + ] + ] + ], + "president" + ] + ] + }, + "president": { + "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", + "name": "president", + "id" : 3, + "manual": true, + "inputs": [ + 2 + ], + "outputs": [ + 5, + 4 + ], + "choice": null, + "default_task_spec": "workflow_aborted", + "cond_task_specs": [ + [ + [ + "SpiffWorkflow.operators.Equal", + [ + [ + "Attrib", + "confirmation" + ], + [ + "value", + "yes" + ] + ] + ], + "nuclear_strike" + ] + ] + }, + "nuclear_strike": { + "id" : 4, + "class": "SpiffWorkflow.specs.Simple.Simple", + "name": "nuclear_strike", + "inputs": [ + 3 + ] + }, + "workflow_aborted": { + "id" : 5, + "class": "SpiffWorkflow.specs.Cancel.Cancel", + "name": "workflow_aborted", + "inputs": [ + 2, + 3 + ] + } + }, + "description": "", + "file": null, + "name": "" +} diff --git a/doc/non-bpmn/tutorial/nuclear.py b/doc/non-bpmn/tutorial/nuclear.py new file mode 100644 index 000000000..237901b4f --- /dev/null +++ b/doc/non-bpmn/tutorial/nuclear.py @@ -0,0 +1,36 @@ +from SpiffWorkflow.specs import WorkflowSpec, ExclusiveChoice, Simple, Cancel +from SpiffWorkflow.operators import Equal, Attrib + +def my_nuclear_strike(msg): + print("Launched:", msg) + +class NuclearStrikeWorkflowSpec(WorkflowSpec): + def __init__(self): + WorkflowSpec.__init__(self) + + # The first step of our workflow is to let the general confirm + # the nuclear strike. + general_choice = ExclusiveChoice(self, 'general') + self.start.connect(general_choice) + + # The default choice of the general is to abort. + cancel = Cancel(self, 'workflow_aborted') + general_choice.connect(cancel) + + # Otherwise, we will ask the president to confirm. + president_choice = ExclusiveChoice(self, 'president') + cond = Equal(Attrib('confirmation'), 'yes') + general_choice.connect_if(cond, president_choice) + + # The default choice of the president is to abort. + president_choice.connect(cancel) + + # Otherwise, we will perform the nuclear strike. + strike = Simple(self, 'nuclear_strike') + president_choice.connect_if(cond, strike) + + # Now we connect our Python function to the Task named 'nuclear_strike' + strike.completed_event.connect(my_nuclear_strike) + + # As soon as all tasks are either "completed" or "aborted", the + # workflow implicitely ends. 
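[Editor's note — illustrative sketch, not part of the original files.] To see what the Python-defined ``NuclearStrikeWorkflowSpec`` above produces, one might run something like the following. ``get_dump()``, ``complete_all()`` and ``last_task.get_name()`` all appear elsewhere in this repository's code; seeding ``confirmation`` on the root task is an assumption about how task data flows down to the choice tasks, not documented behaviour::

    from SpiffWorkflow import Workflow
    from nuclear import NuclearStrikeWorkflowSpec

    workflow = Workflow(NuclearStrikeWorkflowSpec())

    # Inspect the task tree that SpiffWorkflow derives from the spec.
    print(workflow.get_dump())

    # The ExclusiveChoice conditions compare Attrib('confirmation') to 'yes'.
    # Assumption: setting the data on the root task lets it reach the choice
    # tasks as they become ready, so the strike path is taken.
    workflow.task_tree.set_data(confirmation='yes')

    workflow.complete_all(halt_on_manual=False)
    print(workflow.last_task.get_name())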
diff --git a/doc/non-bpmn/tutorial/serialize-wf.py b/doc/non-bpmn/tutorial/serialize-wf.py new file mode 100644 index 000000000..73189b1b3 --- /dev/null +++ b/doc/non-bpmn/tutorial/serialize-wf.py @@ -0,0 +1,14 @@ +import json +from SpiffWorkflow import Workflow +from SpiffWorkflow.serializer.json import JSONSerializer +from nuclear import NuclearStrikeWorkflowSpec + +serializer = JSONSerializer() +spec = NuclearStrikeWorkflowSpec() +workflow = Workflow(spec) +data = workflow.serialize(serializer) + +# This next line is unnecessary in practice; it just makes the JSON pretty. +pretty = json.dumps(json.loads(data), indent=4, separators=(',', ': ')) + +open('workflow.json', 'w').write(pretty) diff --git a/doc/non-bpmn/tutorial/serialize.py b/doc/non-bpmn/tutorial/serialize.py new file mode 100644 index 000000000..2e7475ff1 --- /dev/null +++ b/doc/non-bpmn/tutorial/serialize.py @@ -0,0 +1,12 @@ +import json +from SpiffWorkflow.serializer.json import JSONSerializer +from nuclear import NuclearStrikeWorkflowSpec + +serializer = JSONSerializer() +spec = NuclearStrikeWorkflowSpec() +data = spec.serialize(serializer) + +# This next line is unnecessary in practice; it just makes the JSON pretty. +pretty = json.dumps(json.loads(data), indent=4, separators=(',', ': ')) + +open('workflow-spec.json', 'w').write(pretty) diff --git a/doc/non-bpmn/tutorial/start.py b/doc/non-bpmn/tutorial/start.py new file mode 100644 index 000000000..e52b66c63 --- /dev/null +++ b/doc/non-bpmn/tutorial/start.py @@ -0,0 +1,25 @@ +import json +from SpiffWorkflow.workflow import Workflow +from SpiffWorkflow.specs import WorkflowSpec +from SpiffWorkflow.serializer.json import JSONSerializer + +# Load from JSON +with open('nuclear.json') as fp: + workflow_json = fp.read() +serializer = JSONSerializer() +spec = WorkflowSpec.deserialize(serializer, workflow_json) + +# Alternatively, create an instance of the Python based specification. +#from nuclear import NuclearStrikeWorkflowSpec +#spec = NuclearStrikeWorkflowSpec() + +# Create the workflow. +workflow = Workflow(spec) + +# Execute until all tasks are done or require manual intervention. +# For the sake of this tutorial, we ignore the "manual" flag on the +# tasks. In practice, you probably don't want to do that. +workflow.complete_all(halt_on_manual=False) + +# Alternatively, this is what a UI would do for a manual task. +#workflow.complete_task_from_id(...) 
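[Editor's note — illustrative sketch, not part of the original files.] The commented-out alternative at the end of ``start.py`` hints at what a user interface would do instead of ``complete_all()``. A rough sketch of such a loop, completing one READY task at a time and using only calls that appear in this repository's tests (``get_tasks``, ``complete_task_from_id``)::

    from SpiffWorkflow import Workflow
    from SpiffWorkflow.task import TaskState
    from nuclear import NuclearStrikeWorkflowSpec

    workflow = Workflow(NuclearStrikeWorkflowSpec())

    while True:
        ready_tasks = workflow.get_tasks(TaskState.READY)
        if not ready_tasks:
            break  # nothing READY: the workflow is finished or WAITING
        for task in ready_tasks:
            # A real UI would prompt the user here for task specs marked
            # "manual" (and would set data such as 'confirmation') before
            # completing the task.
            workflow.complete_task_from_id(task.id)

Unlike ``complete_all()``, this loop gives the caller a chance to collect user input between steps, which is the whole point of the *manual* flag discussed in the tutorial.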
diff --git a/graphics/business_end.svg b/graphics/business_end.svg new file mode 100644 index 000000000..f01382663 --- /dev/null +++ b/graphics/business_end.svg @@ -0,0 +1,6255 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FRONT END + BACK END + BUSINESS END + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FRONT + + BUSINESS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BACK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BUSINESS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BACK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FRONT + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + FRONT + + BUSINESS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BACK + spiffworkflow-frontend + + spiffworkflow-backend + + + diff --git a/graphics/color_pallet.png b/graphics/color_pallet.png new file mode 100644 index 000000000..56b584e00 Binary files /dev/null and b/graphics/color_pallet.png differ diff --git a/graphics/favicon.ico b/graphics/favicon.ico new file mode 100644 index 000000000..d1bb90073 Binary files /dev/null and b/graphics/favicon.ico differ diff --git a/graphics/favicon.png b/graphics/favicon.png new file mode 100644 index 000000000..df94e58d2 Binary files /dev/null and b/graphics/favicon.png differ diff 
--git a/graphics/favicon.svg b/graphics/favicon.svg new file mode 100644 index 000000000..0bf1899be --- /dev/null +++ b/graphics/favicon.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/graphics/logo.png b/graphics/logo.png new file mode 100644 index 000000000..024caf2f2 Binary files /dev/null and b/graphics/logo.png differ diff --git a/graphics/logo.svg b/graphics/logo.svg new file mode 100644 index 000000000..824a9d6f6 --- /dev/null +++ b/graphics/logo.svg @@ -0,0 +1,254 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Draw the code + + diff --git a/graphics/logo2.png b/graphics/logo2.png new file mode 100644 index 000000000..78b4bcf2d Binary files /dev/null and b/graphics/logo2.png differ diff --git a/graphics/logo_icon.png b/graphics/logo_icon.png new file mode 100644 index 000000000..4e102b6d3 Binary files /dev/null and b/graphics/logo_icon.png differ diff --git a/graphics/logo_med.png b/graphics/logo_med.png new file mode 100644 index 000000000..12ced9f65 Binary files /dev/null and b/graphics/logo_med.png differ diff --git a/graphics/logo_symbol_only.svg b/graphics/logo_symbol_only.svg new file mode 100644 index 000000000..c0aac4688 --- /dev/null +++ b/graphics/logo_symbol_only.svg @@ -0,0 +1,150 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/graphics/spiffworkflow_logo_ideas.svg b/graphics/spiffworkflow_logo_ideas.svg new file mode 100644 index 000000000..28e5d8ac5 --- /dev/null +++ b/graphics/spiffworkflow_logo_ideas.svg @@ -0,0 +1,1033 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SPIFF + + + + + + + + + + + + piffWorkflow + + + + + piffWorkflow + + Draw the code. + + + piffWorkflow + SPIFFWORKFLOW + + Draw the code. + Draw the code. + + + + piffWorkflow + + Draw the code. + + + + piffWorkflow + + Draw the code. + + + + + + + + + SpiffWorkflow + SpiffWorkflow + Draw the code + SpiffWorkflow + piffWorkflow + piffworkflow + Draw the code + Draw the code + + + + + + + + + + + + + + + + + + + + piffWorkflow + Draw the code + + + + + + + + + + + + + + + + + + + + + + Draw the code + + + + + + + + + + + + + + + + Draw the code + + + + + + + + + + + + + + + Draw the code + + + + + + + + + + + + + + + + Draw the code + + + + + Draw the code + + + Draw the code + piff Workflow + + + diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..864b334a8 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta:__legacy__" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..63b4abb42 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +# Celery locked to 5.2.3 due to https://github.com/celery/celery/issues/7409 +# Can remove this limitation when bug is resolved, or we drop support for python 3.7 +celery==5.2.3 +coverage +lxml +dateparser +pytz +. 
diff --git a/scripts/test_times.py b/scripts/test_times.py new file mode 100755 index 000000000..737983fa5 --- /dev/null +++ b/scripts/test_times.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import re +import sys + +def regex_line_parser(pattern, handler): + regex = re.compile(pattern) + def parser(line): + match = regex.match(line) + if match: + return handler(match) + return None + return parser + +def rstripped(match): + return match.group(0).rstrip() + +def tupled(match): + return (match.group(1), match.group(2)) + + +def parse(lines): + test_file = None + timing = None + test_file_timings = [] + + test_file_line_parser = regex_line_parser('.*?Test.py', rstripped) + timing_line_parser = regex_line_parser('Ran (.*) tests? in (.*)', tupled) + + for line in lines: + if test_file is None: + test_file = test_file_line_parser(line) + elif timing is None: + timing = timing_line_parser(line) + + if test_file is not None and timing is not None: + test_file_timings.append((test_file, timing)) + test_file = None + timing = None + + return test_file_timings + +def report(parsed_data): + lines = [ + '| Method | Time | Tests Ran |', + '|----|----|----|', + ] + + sorted_data = sorted(parsed_data, key=lambda d: d[1][1], reverse=True) + for d in sorted_data: + lines.append(f'| {d[0]} | {d[1][1]} | {d[1][0]} |') + + print('\n'.join(lines)) + +if __name__ == '__main__': + data = sys.stdin.readlines() + parsed_data = parse(data) + report(parsed_data) diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..03a0e4eeb --- /dev/null +++ b/setup.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +import pathlib +import sys + +sys.path.insert(0, '.') +sys.path.insert(0, 'SpiffWorkflow') +from setuptools import setup, find_packages + +# The directory containing this file +HERE = pathlib.Path(__file__).parent + +# The text of the README file +README = (HERE / "README.md").read_text() + +setup(name='SpiffWorkflow', + version='1.1.7', + description='A workflow framework and BPMN/DMN Processor', + long_description=README, + long_description_content_type="text/markdown", + author='Sartography', + author_email='dan@sartography.com', + license='lGPLv2', + packages=find_packages(exclude=['tests', 'tests.*']), + install_requires=['configparser', 'lxml', 'celery', 'dateparser', 'pytz', + # required for python 3.7 - https://stackoverflow.com/a/73932581 + 'importlib-metadata<5.0'], + keywords='spiff workflow bpmn engine', + url='https://github.com/sartography/SpiffWorkflow', + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', + 'Programming Language :: Python', + 'Topic :: Other/Nonlisted Topic', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules' + ]) diff --git a/sonar-project.properties b/sonar-project.properties new file mode 120000 index 000000000..56b26ee9b --- /dev/null +++ b/sonar-project.properties @@ -0,0 +1 @@ +.sonarcloud.properties \ No newline at end of file diff --git a/tests/SpiffWorkflow/ExecuteProcessMock.py b/tests/SpiffWorkflow/ExecuteProcessMock.py new file mode 100644 index 000000000..787ccf415 --- /dev/null +++ b/tests/SpiffWorkflow/ExecuteProcessMock.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- + +import time + + +def main(): + time.sleep(0.5) + print("127.0.0.1") + +if __name__ == "__main__": + main() diff --git a/tests/SpiffWorkflow/PatternTest.py b/tests/SpiffWorkflow/PatternTest.py 
new file mode 100644 index 000000000..8d11528fd --- /dev/null +++ b/tests/SpiffWorkflow/PatternTest.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +from builtins import object +import sys +import unittest +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + +from SpiffWorkflow.specs import WorkflowSpec +from SpiffWorkflow.task import Task +from SpiffWorkflow.serializer.prettyxml import XmlSerializer +from tests.SpiffWorkflow.util import run_workflow + + +class WorkflowTestData(object): + + def __init__(self, filename, spec, path, data): + self.filename = filename + self.spec = spec + self.path = path + self.data = data + + +class PatternTest(unittest.TestCase): + maxDiff = None + + def setUp(self): + Task.id_pool = 0 + Task.thread_id_pool = 0 + self.xml_path = ['data/spiff/control-flow', + 'data/spiff/data', + 'data/spiff/resource', + 'data/spiff'] + self.workflows = [] + + for basedir in self.xml_path: + dirname = os.path.join(os.path.dirname(__file__), basedir) + + for filename in os.listdir(dirname): + if not filename.endswith(('.xml', '.py')): + continue + if filename.endswith('__.py'): + continue + filename = os.path.join(dirname, filename) + self.load_workflow_spec(filename) + + def load_workflow_spec(self, filename): + # Load the .path file. + path_file = os.path.splitext(filename)[0] + '.path' + if os.path.exists(path_file): + with open(path_file) as fp: + expected_path = fp.read() + else: + expected_path = None + + # Load the .data file. + data_file = os.path.splitext(filename)[0] + '.data' + if os.path.exists(data_file): + with open(data_file) as fp: + expected_data = fp.read() + else: + expected_data = None + + # Test patterns that are defined in XML format. + if filename.endswith('.xml'): + with open(filename) as fp: + xml = fp.read() + serializer = XmlSerializer() + wf_spec = WorkflowSpec.deserialize( + serializer, xml, filename=filename) + + # Test patterns that are defined in Python. 
+ elif filename.endswith('.py'): + with open(filename) as fp: + code = compile(fp.read(), filename, 'exec') + thedict = {} + result = eval(code, thedict) + wf_spec = thedict['TestWorkflowSpec']() + + else: + raise Exception('unsuported specification format', filename) + + test_data = WorkflowTestData( + filename, wf_spec, expected_path, expected_data) + self.workflows.append(test_data) + + def testWorkflowSpec(self): + for test in self.workflows: + print(test.filename) + run_workflow(self, test.spec, test.path, test.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(PatternTest) +if __name__ == '__main__': + if len(sys.argv) == 2: + test = PatternTest('run_pattern') + test.setUp() + test.run_pattern(sys.argv[1]) + sys.exit(0) + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/PersistSmallWorkflowTest.py b/tests/SpiffWorkflow/PersistSmallWorkflowTest.py new file mode 100644 index 000000000..c580c87d7 --- /dev/null +++ b/tests/SpiffWorkflow/PersistSmallWorkflowTest.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os.path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + +from SpiffWorkflow.workflow import Workflow +from SpiffWorkflow.specs import Join, MultiChoice, WorkflowSpec +from SpiffWorkflow.operators import Attrib, Equal, PathAttrib +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.specs.Simple import Simple +from SpiffWorkflow.serializer.dict import DictionarySerializer + + +class ASmallWorkflow(WorkflowSpec): + + def __init__(self): + super(ASmallWorkflow, self).__init__(name="asmallworkflow") + + multichoice = MultiChoice(self, 'multi_choice_1') + self.start.connect(multichoice) + + a1 = Simple(self, 'task_a1') + multichoice.connect(a1) + + a2 = Simple(self, 'task_a2') + cond = Equal(Attrib('test_attribute1'), PathAttrib('test/attribute2')) + multichoice.connect_if(cond, a2) + + syncmerge = Join(self, 'struct_synch_merge_1', 'multi_choice_1') + a1.connect(syncmerge) + a2.connect(syncmerge) + + end = Simple(self, 'End') + syncmerge.connect(end) + + +class PersistSmallWorkflowTest(unittest.TestCase): + + """Runs persistency tests agains a small and easy to inspect workflowdefinition""" + + def setUp(self): + self.wf_spec = ASmallWorkflow() + self.workflow = self._advance_to_a1(self.wf_spec) + + def _advance_to_a1(self, wf_spec): + workflow = Workflow(wf_spec) + + tasks = workflow.get_tasks(TaskState.READY) + task_start = tasks[0] + workflow.complete_task_from_id(task_start.id) + + tasks = workflow.get_tasks(TaskState.READY) + multichoice = tasks[0] + workflow.complete_task_from_id(multichoice.id) + + tasks = workflow.get_tasks(TaskState.READY) + task_a1 = tasks[0] + workflow.complete_task_from_id(task_a1.id) + return workflow + + def testDictionarySerializer(self): + """ + Tests the SelectivePickler serializer for persisting Workflows and Tasks. 
+ """ + old_workflow = self.workflow + serializer = DictionarySerializer() + serialized_workflow = old_workflow.serialize(serializer) + + serializer = DictionarySerializer() + new_workflow = Workflow.deserialize(serializer, serialized_workflow) + + before = old_workflow.get_dump() + after = new_workflow.get_dump() + self.assertEqual(before, after) + + def testDeserialization(self): + """ + Tests the that deserialized workflow matches the original workflow + """ + old_workflow = self.workflow + old_workflow.spec.start.set_data(marker=True) + serializer = DictionarySerializer() + serialized_workflow = old_workflow.serialize(serializer) + + serializer = DictionarySerializer() + new_workflow = Workflow.deserialize(serializer, serialized_workflow) + + self.assertEqual( + len(new_workflow.get_tasks()), len(old_workflow.get_tasks())) + self.assertEqual(new_workflow.spec.start.get_data( + 'marker'), old_workflow.spec.start.get_data('marker')) + self.assertEqual( + 1, len([t for t in new_workflow.get_tasks() if t.task_spec.name == 'Start'])) + self.assertEqual( + 1, len([t for t in new_workflow.get_tasks() if t.task_spec.name == 'Root'])) + + def testDeserialization(self): + """ + Tests the that deserialized workflow can be completed. + """ + old_workflow = self.workflow + + old_workflow.complete_next() + self.assertEqual('task_a2', old_workflow.last_task.get_name()) + serializer = DictionarySerializer() + serialized_workflow = old_workflow.serialize(serializer) + + serializer = DictionarySerializer() + new_workflow = Workflow.deserialize(serializer, serialized_workflow) + self.assertEqual('task_a2', old_workflow.last_task.get_name()) + new_workflow.complete_all() + self.assertEqual('task_a2', old_workflow.last_task.get_name()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(PersistSmallWorkflowTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/TaskTest.py b/tests/SpiffWorkflow/TaskTest.py new file mode 100644 index 000000000..af4f28e33 --- /dev/null +++ b/tests/SpiffWorkflow/TaskTest.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import re +import os.path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + +from SpiffWorkflow.task import Task, TaskState, updateDotDict +from SpiffWorkflow.specs import WorkflowSpec, Simple + + +class MockWorkflow(object): + def __init__(self, spec): + self.spec = spec + +class UpdateDotDictTest(unittest.TestCase): + def test_update(self): + res = updateDotDict({}, 'some.thing.here', 'avalue') + self.assertEqual(res, {'some':{'thing': {'here': 'avalue'}}}) + +class TaskTest(unittest.TestCase): + + def setUp(self): + Task.id_pool = 0 + Task.thread_id_pool = 0 + + def testTree(self): + # Build a tree. 
+ spec = WorkflowSpec(name='Mock Workflow') + workflow = MockWorkflow(spec) + task1 = Simple(spec, 'Simple 1') + task2 = Simple(spec, 'Simple 2') + task3 = Simple(spec, 'Simple 3') + task4 = Simple(spec, 'Simple 4') + task5 = Simple(spec, 'Simple 5') + task6 = Simple(spec, 'Simple 6') + task7 = Simple(spec, 'Simple 7') + task8 = Simple(spec, 'Simple 8') + task9 = Simple(spec, 'Simple 9') + root = Task(workflow, task1) + c1 = root._add_child(task2) + c11 = c1._add_child(task3) + c111 = c11._add_child(task4) + c1111 = Task(workflow, task5, c111) + c112 = Task(workflow, task6, c11) + c12 = Task(workflow, task7, c1) + c2 = Task(workflow, task8, root) + c3 = Task(workflow, task9, root) + c3.state = TaskState.COMPLETED + + # Check whether the tree is built properly. + expected = """!/0: Task of Simple 1 State: MAYBE Children: 3 + !/0: Task of Simple 2 State: MAYBE Children: 2 + !/0: Task of Simple 3 State: MAYBE Children: 2 + !/0: Task of Simple 4 State: MAYBE Children: 1 + !/0: Task of Simple 5 State: MAYBE Children: 0 + !/0: Task of Simple 6 State: MAYBE Children: 0 + !/0: Task of Simple 7 State: MAYBE Children: 0 + !/0: Task of Simple 8 State: MAYBE Children: 0 + !/0: Task of Simple 9 State: COMPLETED Children: 0""" + expected = re.compile(expected.replace('!', r'([0-9a-f\-]+)')) + self.assertTrue(expected.match(root.get_dump()), + 'Expected:\n' + repr(expected.pattern) + '\n' + + 'but got:\n' + repr(root.get_dump())) + + # Now remove one line from the expected output for testing the + # filtered iterator. + expected2 = '' + for line in expected.pattern.split('\n'): + if line.find('Simple 9') >= 0: + continue + expected2 += line.lstrip() + '\n' + expected2 = re.compile(expected2) + + # Run the iterator test. + result = '' + for thetask in Task.Iterator(root, Task.MAYBE): + result += thetask.get_dump(0, False) + '\n' + self.assertTrue(expected2.match(result), + 'Expected:\n' + repr(expected2.pattern) + '\n' + + 'but got:\n' + repr(result)) + + +def suite(): + taskSuite = unittest.TestLoader().loadTestsFromTestCase(TaskTest) + updateDotSuite = unittest.TestLoader().loadTestsFromTestCase(UpdateDotDictTest) + return unittest.TestSuite([taskSuite, updateDotSuite]) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/WorkflowTest.py b/tests/SpiffWorkflow/WorkflowTest.py new file mode 100644 index 000000000..e47f069da --- /dev/null +++ b/tests/SpiffWorkflow/WorkflowTest.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +data_dir = os.path.join(os.path.dirname(__file__), 'data') +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) + +from SpiffWorkflow.workflow import Workflow +from SpiffWorkflow.specs import Cancel, Simple, WorkflowSpec +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.serializer.prettyxml import XmlSerializer + + +class WorkflowTest(unittest.TestCase): + + def testConstructor(self): + wf_spec = WorkflowSpec() + wf_spec.start.connect(Cancel(wf_spec, 'name')) + workflow = Workflow(wf_spec) + + def testBeginWorkflowStepByStep(self): + """ + Simulates interactive calls, as would be issued by a user. 
+ """ + xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml') + with open(xml_file) as fp: + xml = fp.read() + wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml) + workflow = Workflow(wf_spec) + + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 1) + self.assertEqual(tasks[0].task_spec.name, 'Start') + workflow.complete_task_from_id(tasks[0].id) + self.assertEqual(tasks[0].state, TaskState.COMPLETED) + + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 2) + task_a1 = tasks[0] + task_b1 = tasks[1] + self.assertEqual(task_a1.task_spec.__class__, Simple) + self.assertEqual(task_a1.task_spec.name, 'task_a1') + self.assertEqual(task_b1.task_spec.__class__, Simple) + self.assertEqual(task_b1.task_spec.name, 'task_b1') + workflow.complete_task_from_id(task_a1.id) + self.assertEqual(task_a1.state, TaskState.COMPLETED) + + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 2) + self.assertTrue(task_b1 in tasks) + task_a2 = tasks[0] + self.assertEqual(task_a2.task_spec.__class__, Simple) + self.assertEqual(task_a2.task_spec.name, 'task_a2') + workflow.complete_task_from_id(task_a2.id) + + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 1) + self.assertTrue(task_b1 in tasks) + + workflow.complete_task_from_id(task_b1.id) + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 1) + workflow.complete_task_from_id(tasks[0].id) + + tasks = workflow.get_tasks(TaskState.READY) + self.assertEqual(len(tasks), 1) + self.assertEqual(tasks[0].task_spec.name, 'synch_1') + # haven't reached the end of the workflow, but stopping at "synch_1" + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(WorkflowTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/__init__.py b/tests/SpiffWorkflow/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py b/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py new file mode 100644 index 000000000..c398ca834 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + + + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.exceptions import WorkflowException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class AntiLoopTaskTest(BpmnWorkflowTestCase): + """The example bpmn is actually a MultiInstance. 
It should not report that it is a looping task and + it should fail when we try to terminate the loop""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('bpmnAntiLoopTask.bpmn','LoopTaskTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + self.assertFalse(ready_tasks[0].task_spec.is_loop_task()) + try: + ready_tasks[0].terminate_loop() + self.fail("Terminate Loop should throw and error when called on a non-loop MultiInstance") + except WorkflowException as ex: + self.assertTrue( + 'The method terminate_loop should only be called in the case of a BPMN Loop Task' in ( + '%r' % ex), + '\'The method terminate_loop should only be called in the case of a BPMN Loop Task\' should be a substring of error message: \'%r\'' % ex) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(AntiLoopTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ApprovalsTest.py b/tests/SpiffWorkflow/bpmn/ApprovalsTest.py new file mode 100644 index 000000000..19f493806 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ApprovalsTest.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ApprovalsTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Approvals.bpmn', 'Approvals') + # Start (StartTask:0xb6b4204cL) + # --> Approvals.First_Approval_Wins (CallActivity) + # --> Start (StartTask:0xb6b4266cL) + # | --> First_Approval_Wins.Supervisor_Approval (ManualTask) + # | | --> First_Approval_Wins.Supervisor_Approved (EndEvent) + # | | --> First_Approval_Wins.EndJoin (EndJoin) + # | | --> End (Simple) + # | --> First_Approval_Wins.Manager_Approval (ManualTask) + # | --> First_Approval_Wins.Manager_Approved (EndEvent) + # | --> [shown earlier] First_Approval_Wins.EndJoin (EndJoin) + # --> Approvals.First_Approval_Wins_Done (ManualTask) + # --> Approvals.Gateway4 (ParallelGateway) + # --> Approvals.Manager_Approval__P_ (ManualTask) + # | --> Approvals.Gateway5 (ParallelGateway) + # | --> Approvals.Parallel_Approvals_Done (ManualTask) + # | --> Approvals.Parallel_SP (CallActivity) + # | --> Start (StartTask) + # | | --> Parallel_Approvals_SP.Step1 (ManualTask) + # | | | --> Parallel_Approvals_SP.Supervisor_Approval (ManualTask) + # | | | --> Parallel_Approvals_SP.End2 (EndEvent) + # | | | --> Parallel_Approvals_SP.EndJoin (EndJoin) + # | | | --> End (Simple) + # | | --> Parallel_Approvals_SP.Manager_Approval (ManualTask) + # | | --> [shown earlier] Parallel_Approvals_SP.End2 (EndEvent) + # | --> Approvals.Parallel_SP_Done (ManualTask) + # | --> Approvals.End1 (EndEvent) + # | --> Approvals.EndJoin (EndJoin) + # | --> End (Simple) + # --> Approvals.Supervisor_Approval__P_ (ManualTask) + # --> [shown earlier] Approvals.Gateway5 (ParallelGateway) + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.do_next_named_step('First_Approval_Wins.Manager_Approval') + self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done') + + self.do_next_named_step('Approvals.Manager_Approval__P_') + 
self.do_next_named_step('Approvals.Supervisor_Approval__P_') + self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done') + + self.do_next_named_step('Parallel_Approvals_SP.Step1') + self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval') + self.do_next_named_step('Parallel_Approvals_SP.Supervisor_Approval') + self.do_next_exclusive_step('Approvals.Parallel_SP_Done') + + def testRunThroughHappyOtherOrders(self): + + self.do_next_named_step('First_Approval_Wins.Supervisor_Approval') + self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done') + + self.do_next_named_step('Approvals.Supervisor_Approval__P_') + self.do_next_named_step('Approvals.Manager_Approval__P_') + self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done') + + self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval') + self.do_next_named_step('Parallel_Approvals_SP.Step1') + self.do_next_named_step('Parallel_Approvals_SP.Supervisor_Approval') + self.do_next_exclusive_step('Approvals.Parallel_SP_Done') + + def testSaveRestore(self): + + self.do_next_named_step('First_Approval_Wins.Manager_Approval') + self.save_restore() + self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done') + + self.save_restore() + self.do_next_named_step('Approvals.Supervisor_Approval__P_') + self.do_next_named_step('Approvals.Manager_Approval__P_') + self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done') + + self.save_restore() + self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval') + self.do_next_exclusive_step('Parallel_Approvals_SP.Step1') + self.do_next_exclusive_step( + 'Parallel_Approvals_SP.Supervisor_Approval') + self.do_next_exclusive_step('Approvals.Parallel_SP_Done') + + def testSaveRestoreWaiting(self): + + self.do_next_named_step('First_Approval_Wins.Manager_Approval') + self.save_restore() + self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done') + + self.save_restore() + self.do_next_named_step('Approvals.Supervisor_Approval__P_') + self.save_restore() + self.do_next_named_step('Approvals.Manager_Approval__P_') + self.save_restore() + self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done') + + self.save_restore() + self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval') + self.save_restore() + self.do_next_exclusive_step('Parallel_Approvals_SP.Step1') + self.save_restore() + self.do_next_exclusive_step( + 'Parallel_Approvals_SP.Supervisor_Approval') + self.save_restore() + self.do_next_exclusive_step('Approvals.Parallel_SP_Done') + + def testReadonlyWaiting(self): + + self.do_next_named_step('First_Approval_Wins.Manager_Approval') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Approvals.First_Approval_Wins_Done', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises(AssertionError, readonly.do_engine_steps) + self.assertRaises(AssertionError, readonly.refresh_waiting_tasks) + self.assertRaises(AssertionError, readonly.catch, MessageEventDefinition('Cheese')) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + + self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done') + + readonly = self.get_read_only_workflow() + self.assertEqual(2, len(readonly.get_ready_user_tasks())) + self.assertEqual( + ['Approvals.Manager_Approval__P_', + 'Approvals.Supervisor_Approval__P_'], + sorted(t.task_spec.name for t in readonly.get_ready_user_tasks())) + self.assertRaises( + AssertionError, 
readonly.get_ready_user_tasks()[0].complete) + + self.do_next_named_step('Approvals.Supervisor_Approval__P_') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Approvals.Manager_Approval__P_', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_named_step('Approvals.Manager_Approval__P_') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Approvals.Parallel_Approvals_Done', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done') + + readonly = self.get_read_only_workflow() + self.assertEqual(2, len(readonly.get_ready_user_tasks())) + self.assertEqual( + ['Parallel_Approvals_SP.Manager_Approval', + 'Parallel_Approvals_SP.Step1'], + sorted(t.task_spec.name for t in readonly.get_ready_user_tasks())) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Parallel_Approvals_SP.Step1', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_exclusive_step('Parallel_Approvals_SP.Step1') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Parallel_Approvals_SP.Supervisor_Approval', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_exclusive_step( + 'Parallel_Approvals_SP.Supervisor_Approval') + + readonly = self.get_read_only_workflow() + self.assertEqual(1, len(readonly.get_ready_user_tasks())) + self.assertEqual('Approvals.Parallel_SP_Done', + readonly.get_ready_user_tasks()[0].task_spec.name) + self.assertRaises( + AssertionError, readonly.get_ready_user_tasks()[0].complete) + self.do_next_exclusive_step('Approvals.Parallel_SP_Done') + + readonly = self.get_read_only_workflow() + self.assertEqual(0, len(readonly.get_ready_user_tasks())) + self.assertEqual(0, len(readonly.get_waiting_tasks())) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ApprovalsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BaseParallelTestCase.py b/tests/SpiffWorkflow/bpmn/BaseParallelTestCase.py new file mode 100644 index 000000000..80fc5ff7b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BaseParallelTestCase.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +from builtins import range +import unittest +import logging +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class BaseParallelTestCase(BpmnWorkflowTestCase): + + def _do_test(self, order, only_one_instance=True, save_restore=False): + + self.workflow.do_engine_steps() + for s in order: + choice = None + if isinstance(s, tuple): + s, choice = s + if s.startswith('!'): + logging.info("Checking that we cannot do '%s'", s[1:]) + self.assertRaises( + 
AssertionError, self.do_next_named_step, s[1:], choice=choice) + else: + if choice is not None: + logging.info( + "Doing step '%s' (with choice='%s')", s, choice) + else: + logging.info("Doing step '%s'", s) + # logging.debug(self.workflow.get_dump()) + self.do_next_named_step( + s, choice=choice, only_one_instance=only_one_instance) + self.workflow.do_engine_steps() + if save_restore: + # logging.debug("Before SaveRestore: \n%s" % + # self.workflow.get_dump()) + self.save_restore() + + self.workflow.do_engine_steps() + unfinished = self.workflow.get_tasks(TaskState.READY | TaskState.WAITING) + if unfinished: + logging.debug("Unfinished tasks: %s", unfinished) + logging.debug(self.workflow.get_dump()) + self.assertEqual(0, len(unfinished)) + diff --git a/tests/SpiffWorkflow/bpmn/BoxDeepCopyTest.py b/tests/SpiffWorkflow/bpmn/BoxDeepCopyTest.py new file mode 100644 index 000000000..9ebade906 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BoxDeepCopyTest.py @@ -0,0 +1,26 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + + +class BoxDeepCopyTest(unittest.TestCase): + + def test_deep_copy_of_box(self): + data = {"foods": { + "spam": {"delicious": False} + }, + "hamsters": ['your', 'mother'] + } + data = Box(data) + data2 = data.__deepcopy__() + self.assertEqual(data, data2) + data.foods.spam.delicious = True + data.hamsters = ['your', 'father'] + self.assertFalse(data2.foods.spam.delicious) + self.assertEqual(['your', 'mother'], data2.hamsters) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BoxDeepCopyTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py b/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py new file mode 100644 index 000000000..7a407cc7f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +from SpiffWorkflow.bpmn.specs.ExclusiveGateway import ExclusiveGateway +from SpiffWorkflow.bpmn.specs.UserTask import UserTask +from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser +from SpiffWorkflow.bpmn.parser.task_parsers import ExclusiveGatewayParser, UserTaskParser +from SpiffWorkflow.bpmn.parser.util import full_tag + +from SpiffWorkflow.bpmn.serializer.bpmn_converters import BpmnTaskSpecConverter + +# Many of our tests relied on the Packager to set the calledElement attribute on +# Call Activities. I've moved that code to a customized parser. 
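# The Signavio CallActivityParser imported below appears to take over the
# calledElement resolution that the old Packager performed, so the existing
# test diagrams with call activities keep resolving their called processes.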
+from SpiffWorkflow.signavio.parser import CallActivityParser +from SpiffWorkflow.bpmn.specs.SubWorkflowTask import CallActivity + +__author__ = 'matth' + +# This provides some extensions to the BPMN parser that make it easier to +# implement testcases + + +class TestUserTask(UserTask): + + def get_user_choices(self): + if not self.outputs: + return [] + assert len(self.outputs) == 1 + next_node = self.outputs[0] + if isinstance(next_node, ExclusiveGateway): + return next_node.get_outgoing_sequence_names() + return self.get_outgoing_sequence_names() + + def do_choice(self, task, choice): + task.set_data(choice=choice) + task.complete() + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic(wf_spec, s_state, TestUserTask) + +class TestExclusiveGatewayParser(ExclusiveGatewayParser): + + def parse_condition(self, sequence_flow_node): + cond = super().parse_condition(sequence_flow_node) + if cond is not None: + return cond + return "choice == '%s'" % sequence_flow_node.get('name', None) + +class TestUserTaskConverter(BpmnTaskSpecConverter): + + def __init__(self, data_converter=None): + super().__init__(TestUserTask, data_converter) + + def to_dict(self, spec): + dct = self.get_default_attributes(spec) + dct.update(self.get_bpmn_attributes(spec)) + return dct + + def from_dict(self, dct): + return self.task_spec_from_dict(dct) + + +class TestBpmnParser(BpmnParser): + OVERRIDE_PARSER_CLASSES = { + full_tag('userTask'): (UserTaskParser, TestUserTask), + full_tag('exclusiveGateway'): (TestExclusiveGatewayParser, ExclusiveGateway), + full_tag('callActivity'): (CallActivityParser, CallActivity) + } + diff --git a/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py b/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py new file mode 100644 index 000000000..a8b6ebd53 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py @@ -0,0 +1,118 @@ +import os +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from .BpmnLoaderForTests import TestBpmnParser + + +class BpmnSerializerTest(unittest.TestCase): + CORRELATE = BpmnSerializer + + def load_workflow_spec(self, filename, process_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = TestBpmnParser() + parser.add_bpmn_files_by_glob(f) + top_level_spec = parser.get_spec(process_name) + subprocesses = parser.get_subprocess_specs(process_name) + return top_level_spec, subprocesses + + def setUp(self): + super(BpmnSerializerTest, self).setUp() + self.serializer = BpmnSerializer() + self.spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact') + self.workflow = BpmnWorkflow(self.spec, subprocesses) + + def testDeserializeWorkflowSpec(self): + self.assertIsNotNone(self.spec) + + def testSerializeWorkflowSpec(self): + spec_serialized = self.serializer.serialize_workflow_spec(self.spec) + result = self.serializer.deserialize_workflow_spec(spec_serialized) + spec_serialized2 = self.serializer.serialize_workflow_spec(result) + self.assertEqual(spec_serialized, spec_serialized2) + + def testSerializeWorkflow(self): + json = self.serializer.serialize_workflow(self.workflow) + print(json) + + def testDeserializeWorkflow(self): + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeCallActivityChildren(self): + """Tested as a part of deserialize workflow.""" + pass + + def 
testSerializeTask(self): + json = self.serializer.serialize_workflow(self.workflow) + print(json) + + def testDeserializeTask(self): + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeActiveWorkflow(self): + self.workflow.do_engine_steps() + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeWithData(self): + self.workflow.data["test"] = "my_test" + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertEqual('my_test', wf2.get_data("test")) + + def testDeserializeWithDefaultScriptEngineClass(self): + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertIsNotNone(self.workflow.script_engine) + self.assertIsNotNone(wf2.script_engine) + self.assertEqual(self.workflow.script_engine.__class__, + wf2.script_engine.__class__) + + @unittest.skip("Deserialize does not persist the script engine, Fix me.") + def testDeserializeWithCustomScriptEngine(self): + class CustomScriptEngine(PythonScriptEngine): + pass + + self.workflow.script_engine = CustomScriptEngine() + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertEqual(self.workflow.script_engine.__class__, + wf2.script_engine.__class__) + + def testDeserializeWithDataOnTask(self): + self.workflow.do_engine_steps() + user_task = self.workflow.get_ready_user_tasks()[0] + user_task.data = {"test":"my_test"} + self._compare_with_deserialized_copy(self.workflow) + + def testLastTaskIsSetAndWorksThroughRestore(self): + self.workflow.do_engine_steps() + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertIsNotNone(self.workflow.last_task) + self.assertIsNotNone(wf2.last_task) + self._compare_workflows(self.workflow, wf2) + + def _compare_with_deserialized_copy(self, wf): + json = self.serializer.serialize_workflow(wf) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self._compare_workflows(wf, wf2) + + def _compare_workflows(self, w1, w2): + self.assertIsInstance(w1, BpmnWorkflow) + self.assertIsInstance(w2, BpmnWorkflow) + self.assertEqual(w1.data, w2.data) + self.assertEqual(w1.name, w2.name) + for task in w1.get_ready_user_tasks(): + w2_task = w2.get_task(task.id) + self.assertIsNotNone(w2_task) + self.assertEqual(task.data, w2_task.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BpmnSerializerTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BpmnWorkflowSerializerTest.py b/tests/SpiffWorkflow/bpmn/BpmnWorkflowSerializerTest.py new file mode 100644 index 000000000..d0d1a9479 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BpmnWorkflowSerializerTest.py @@ -0,0 +1,216 @@ +import os +import unittest +import json +from uuid import uuid4 + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser +from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer +from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnLoaderForTests import TestUserTaskConverter + + +class 
BpmnWorkflowSerializerTest(unittest.TestCase): + """Please note that the BpmnSerializer is Deprecated.""" + SERIALIZER_VERSION = "100.1.ANY" + + def load_workflow_spec(self, filename, process_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = BpmnParser() + parser.add_bpmn_files_by_glob(f) + top_level_spec = parser.get_spec(process_name) + subprocesses = parser.get_subprocess_specs(process_name) + return top_level_spec, subprocesses + + def setUp(self): + super(BpmnWorkflowSerializerTest, self).setUp() + wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([TestUserTaskConverter]) + self.serializer = BpmnWorkflowSerializer(wf_spec_converter, version=self.SERIALIZER_VERSION) + spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testSerializeWorkflowSpec(self): + spec_serialized = self.serializer.serialize_json(self.workflow) + result = self.serializer.deserialize_json(spec_serialized) + spec_serialized2 = self.serializer.serialize_json(result) + self.assertEqual(spec_serialized, spec_serialized2) + + def testSerializeWorkflowSpecWithGzip(self): + spec_serialized = self.serializer.serialize_json(self.workflow, use_gzip=True) + result = self.serializer.deserialize_json(spec_serialized, use_gzip=True) + spec_serialized2 = self.serializer.serialize_json(result, use_gzip=True) + self.assertEqual(spec_serialized, spec_serialized2) + + def testSerlializePerservesVersion(self): + spec_serialized = self.serializer.serialize_json(self.workflow) + version = self.serializer.get_version(spec_serialized) + self.assertEqual(version, self.SERIALIZER_VERSION) + + def testSerializeToOldSerializerThenNewSerializer(self): + old_serializer = BpmnSerializer() + old_json = old_serializer.serialize_workflow(self.workflow) + new_workflow = old_serializer.deserialize_workflow(old_json) + new_json = self.serializer.serialize_json(new_workflow) + new_workflow_2 = self.serializer.deserialize_json(new_json) + + def testSerializeWorkflow(self): + serialized = self.serializer.serialize_json(self.workflow) + json.loads(serialized) + + def testSerializeWorkflowCustomJSONEncoderDecoder(self): + class MyCls: + a = 1 + def to_dict(self): + return {'a': 1, 'my_type': 'mycls'} + + @classmethod + def from_dict(self, data): + return MyCls() + + class MyJsonEncoder(json.JSONEncoder): + def default(self, z): + if isinstance(z, MyCls): + return z.to_dict() + return super().default(z) + + class MyJsonDecoder(json.JSONDecoder): + classes = {'mycls': MyCls} + + def __init__(self, *args, **kwargs): + super().__init__(object_hook=self.object_hook, *args, **kwargs) + + def object_hook(self, z): + if 'my_type' in z and z['my_type'] in self.classes: + return self.classes[z['my_type']].from_dict(z) + + return z + + unserializable = MyCls() + + a_task_spec = self.workflow.spec.task_specs[list(self.workflow.spec.task_specs)[0]] + a_task = self.workflow.get_tasks_from_spec_name(a_task_spec.name)[0] + a_task.data['jsonTest'] = unserializable + + try: + self.assertRaises(TypeError, self.serializer.serialize_json, self.workflow) + wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([TestUserTaskConverter]) + custom_serializer = BpmnWorkflowSerializer(wf_spec_converter, version=self.SERIALIZER_VERSION,json_encoder_cls=MyJsonEncoder, json_decoder_cls=MyJsonDecoder) + serialized_workflow = custom_serializer.serialize_json(self.workflow) + finally: + a_task.data.pop('jsonTest',None) + + 
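        # The custom encoder should have stored MyCls as its dict form in the
        # serialized JSON; the custom decoder should rebuild a MyCls instance
        # when the workflow is deserialized below.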
serialized_task = [x for x in json.loads(serialized_workflow)['tasks'].values() if x['task_spec'] == a_task_spec.name][0] + self.assertEqual(serialized_task['data']['jsonTest'], {'a': 1, 'my_type': 'mycls'}) + + deserialized_workflow = custom_serializer.deserialize_json(serialized_workflow) + deserialized_task = deserialized_workflow.get_tasks_from_spec_name(a_task_spec.name)[0] + self.assertTrue(isinstance(deserialized_task.data['jsonTest'], MyCls)) + + def testDeserializeWorkflow(self): + self._compare_with_deserialized_copy(self.workflow) + + def testSerializeTask(self): + self.serializer.serialize_json(self.workflow) + + def testDeserializeTask(self): + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeActiveWorkflow(self): + self.workflow.do_engine_steps() + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeWithData(self): + self.workflow.data["test"] = "my_test" + json = self.serializer.serialize_json(self.workflow) + wf2 = self.serializer.deserialize_json(json) + self.assertEqual('my_test', wf2.get_data("test")) + + def testDeserializeWithDefaultScriptEngineClass(self): + json = self.serializer.serialize_json(self.workflow) + wf2 = self.serializer.deserialize_json(json) + self.assertIsNotNone(self.workflow.script_engine) + self.assertIsNotNone(wf2.script_engine) + self.assertEqual(self.workflow.script_engine.__class__, + wf2.script_engine.__class__) + + @unittest.skip("Deserialize does not persist the script engine, Fix me.") + def testDeserializeWithCustomScriptEngine(self): + class CustomScriptEngine(PythonScriptEngine): + pass + + self.workflow.script_engine = CustomScriptEngine() + dct = self.serializer.serialize_json(self.workflow) + wf2 = self.serializer.deserialize_json(dct) + self.assertEqual(self.workflow.script_engine.__class__, + wf2.script_engine.__class__) + + def testDeserializeWithDataOnTask(self): + self.workflow.do_engine_steps() + user_task = self.workflow.get_ready_user_tasks()[0] + user_task.data = {"test":"my_test"} + self._compare_with_deserialized_copy(self.workflow) + + def testSerializeIgnoresCallable(self): + self.workflow.do_engine_steps() + user_task = self.workflow.get_ready_user_tasks()[0] + def f(n): + return n + 1 + user_task.data = { 'f': f } + task_id = str(user_task.id) + dct = self.serializer.workflow_to_dict(self.workflow) + self.assertNotIn('f', dct['tasks'][task_id]['data']) + + def testLastTaskIsSetAndWorksThroughRestore(self): + self.workflow.do_engine_steps() + json = self.serializer.serialize_json(self.workflow) + wf2 = self.serializer.deserialize_json(json) + self.assertIsNotNone(self.workflow.last_task) + self.assertIsNotNone(wf2.last_task) + self._compare_workflows(self.workflow, wf2) + + def test_convert_1_0_to_1_1(self): + # The serialization used here comes from NestedSubprocessTest saved at line 25 with version 1.0 + fn = os.path.join(os.path.dirname(__file__), 'data', 'serialization', 'v1.0.json') + wf = self.serializer.deserialize_json(open(fn).read()) + # We should be able to finish the workflow from this point + ready_tasks = wf.get_tasks(TaskState.READY) + self.assertEqual('Action3', ready_tasks[0].task_spec.description) + ready_tasks[0].complete() + wf.do_engine_steps() + self.assertEqual(True, wf.is_completed()) + + def test_serialize_workflow_where_script_task_includes_function(self): + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + ready_tasks[0].complete() + self.workflow.do_engine_steps() + results = 
self.serializer.serialize_json(self.workflow) + assert self.workflow.is_completed() + assert 'y' in self.workflow.last_task.data + assert 'x' not in self.workflow.last_task.data + assert 'some_fun' not in self.workflow.last_task.data + + def _compare_with_deserialized_copy(self, wf): + json = self.serializer.serialize_json(wf) + wf2 = self.serializer.deserialize_json(json) + self._compare_workflows(wf, wf2) + + def _compare_workflows(self, w1, w2): + self.assertIsInstance(w1, BpmnWorkflow) + self.assertIsInstance(w2, BpmnWorkflow) + self.assertEqual(w1.data, w2.data) + self.assertEqual(w1.name, w2.name) + for task in w1.get_ready_user_tasks(): + w2_task = w2.get_task(task.id) + self.assertIsNotNone(w2_task) + self.assertEqual(task.data, w2_task.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BpmnWorkflowSerializerTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py b/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py new file mode 100644 index 000000000..cb788eb32 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- + +import json +import os +import unittest + +from SpiffWorkflow.task import TaskState + +from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer +from .BpmnLoaderForTests import TestUserTaskConverter, TestBpmnParser + +__author__ = 'matth' + + +wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([TestUserTaskConverter]) + +class BpmnWorkflowTestCase(unittest.TestCase): + + serializer = BpmnWorkflowSerializer(wf_spec_converter) + + def load_workflow_spec(self, filename, process_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = TestBpmnParser() + parser.add_bpmn_files_by_glob(f) + top_level_spec = parser.get_spec(process_name) + subprocesses = parser.get_subprocess_specs(process_name) + return top_level_spec, subprocesses + + def load_collaboration(self, filename, collaboration_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = TestBpmnParser() + parser.add_bpmn_files_by_glob(f) + return parser.get_collaboration(collaboration_name) + + def get_all_specs(self, filename): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = TestBpmnParser() + parser.add_bpmn_files_by_glob(f) + return parser.find_all_specs() + + def do_next_exclusive_step(self, step_name, with_save_load=False, set_attribs=None, choice=None): + if with_save_load: + self.save_restore_all() + + self.workflow.do_engine_steps() + tasks = self.workflow.get_tasks(TaskState.READY) + self._do_single_step(step_name, tasks, set_attribs, choice) + + def do_next_named_step(self, step_name, with_save_load=False, set_attribs=None, choice=None, only_one_instance=True): + if with_save_load: + self.save_restore() + + self.workflow.do_engine_steps() + step_name_path = step_name.split("|") + + def switch_workflow(p): + for task_id, sp in p.workflow._get_outermost_workflow().subprocesses.items(): + if p in sp.get_tasks(workflow=sp): + return p.workflow.get_task(task_id) + + def is_match(t): + if not (t.task_spec.name == step_name_path[-1] or t.task_spec.description == step_name_path[-1]): + return False + for parent_name in step_name_path[:-1]: + p = t.parent + found = False + while (p and p != p.parent): + if (p.task_spec.name == parent_name or p.task_spec.description == parent_name): + found = True + break + if p.parent is None and 
p.workflow != p.workflow.outer_workflow: + p = switch_workflow(p) + else: + p = p.parent + if not found: + return False + return True + + tasks = list( + [t for t in self.workflow.get_tasks(TaskState.READY) if is_match(t)]) + + self._do_single_step( + step_name_path[-1], tasks, set_attribs, choice, only_one_instance=only_one_instance) + + def assertTaskNotReady(self, step_name): + tasks = list([t for t in self.workflow.get_tasks(TaskState.READY) + if t.task_spec.name == step_name or t.task_spec.description == step_name]) + self.assertEqual([], tasks) + + def _do_single_step(self, step_name, tasks, set_attribs=None, choice=None, only_one_instance=True): + + if only_one_instance: + self.assertEqual( + len(tasks), 1, 'Did not find one task for \'%s\' (got %d)' % (step_name, len(tasks))) + else: + self.assertNotEqual( + len(tasks), 0, 'Did not find any tasks for \'%s\'' % (step_name)) + + self.assertTrue( + tasks[0].task_spec.name == step_name or tasks[ + 0].task_spec.description == step_name, + 'Expected step %s, got %s (%s)' % (step_name, tasks[0].task_spec.description, tasks[0].task_spec.name)) + if not set_attribs: + set_attribs = {} + + if choice: + set_attribs['choice'] = choice + + if set_attribs: + tasks[0].set_data(**set_attribs) + tasks[0].complete() + + def save_restore(self): + + before_state = self._get_workflow_state(do_steps=False) + before_dump = self.workflow.get_dump() + # Check that we can actully convert this to JSON + json_str = json.dumps(before_state) + after = self.serializer.workflow_from_dict(json.loads(json_str), read_only=False) + # Check that serializing and deserializing results in the same workflow + after_state = self.serializer.workflow_to_dict(after) + after_dump = after.get_dump() + self.maxDiff = None + self.assertEqual(before_dump, after_dump) + self.assertEqual(before_state, after_state) + self.workflow = after + + def restore(self, state): + self.workflow = self.serializer.workflow_from_dict(state, read_only=False) + + def get_read_only_workflow(self): + state = self._get_workflow_state() + return self.serializer.workflow_from_dict(state, read_only=True) + + def _get_workflow_state(self, do_steps=True): + if do_steps: + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + return self.serializer.workflow_to_dict(self.workflow) diff --git a/tests/SpiffWorkflow/bpmn/CallActivityEndEventTest.py b/tests/SpiffWorkflow/bpmn/CallActivityEndEventTest.py new file mode 100644 index 000000000..f3d1522da --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CallActivityEndEventTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class CallActivityTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('call_activity_*.bpmn', 'Process_8200379') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + + def testCallActivityHasSameScriptEngine(self): + self.runCallActivityWithCustomScript() + + def testCallActivityHasSameScriptEngineAfterSaveRestore(self): + self.runCallActivityWithCustomScript(save_restore=True) + + def runCallActivityWithCustomScript(self, save_restore=False): + class 
CustomScriptEngine(PythonScriptEngine): + pass + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses, + script_engine=CustomScriptEngine()) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + self.assertIsInstance(self.workflow.script_engine, CustomScriptEngine) + + if save_restore: + self.save_restore() + # We have to reset the script engine after deserialize. + self.workflow.script_engine = CustomScriptEngine() + + # Get the subworkflow + sub_task = self.workflow.get_tasks_from_spec_name('Sub_Bpmn_Task')[0] + sub_workflow = sub_task.workflow + self.assertNotEqual(sub_workflow, self.workflow) + self.assertIsInstance(self.workflow.script_engine, CustomScriptEngine) + self.assertEqual(sub_workflow.script_engine, self.workflow.script_engine) + + def test_call_activity_allows_removal_of_data(self): + # If a call activity alters the data - removing existing keys, that + # data should be removed in the final output as well. + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + self.assertNotIn('remove_this_var', self.workflow.last_task.data.keys()) + + def test_call_acitivity_errors_include_task_trace(self): + error_spec = self.subprocesses.get('ErroringBPMN') + error_spec, subprocesses = self.load_workflow_spec('call_activity_*.bpmn', 'ErroringBPMN') + with self.assertRaises(WorkflowTaskExecException) as context: + self.workflow = BpmnWorkflow(error_spec, subprocesses) + self.workflow.do_engine_steps() + self.assertEquals(2, len(context.exception.task_trace)) + self.assertRegexpMatches(context.exception.task_trace[0], 'Create Data \(.*?call_activity_call_activity.bpmn\)') + self.assertRegexpMatches(context.exception.task_trace[1], 'Get Data Call Activity \(.*?call_activity_with_error.bpmn\)') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CallActivityTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py b/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py new file mode 100644 index 000000000..e540f717e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + + + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class CallActivitySubProcessPropTest(BpmnWorkflowTestCase): + """ + Make sure that workflow.data propagates to the subworkflows + in a BPMN + """ + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('proptest-*.bpmn', 'TopLevel') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + self.assertTrue(self.workflow.is_completed()) + self.assertEqual(self.workflow.data['valA'],1) + self.assertEqual(self.workflow.data['valB'],1) + self.assertEqual(self.workflow.data['valC'],1) + self.assertEqual(self.workflow.data['valD'],1) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CallActivitySubProcessPropTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git 
a/tests/SpiffWorkflow/bpmn/ClashingName2Test.py b/tests/SpiffWorkflow/bpmn/ClashingName2Test.py new file mode 100644 index 000000000..f906c848c --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ClashingName2Test.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException +__author__ = 'kellym' + + + +class ClashingNameTest2(BpmnWorkflowTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + pass + + def loadWorkflow(self): + self.load_workflow_spec('Approvals_bad.bpmn', 'Approvals') + + def testRunThroughHappy(self): + # make sure we raise an exception + # when validating a workflow with multiple + # same IDs in the BPMN workspace + self.assertRaises(ValidationException,self.loadWorkflow) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ClashingNameTest2) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/CollaborationTest.py b/tests/SpiffWorkflow/bpmn/CollaborationTest.py new file mode 100644 index 000000000..e1361dd81 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CollaborationTest.py @@ -0,0 +1,125 @@ +from SpiffWorkflow.bpmn.specs.SubWorkflowTask import CallActivity +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.task import TaskState + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class CollaborationTest(BpmnWorkflowTestCase): + + def testCollaboration(self): + + spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration') + + # Only executable processes should be started + self.assertIn('process_buddy', subprocesses) + self.assertNotIn('random_person_process', subprocesses) + self.workflow = BpmnWorkflow(spec, subprocesses) + start = self.workflow.get_tasks_from_spec_name('Start')[0] + # Set up some data to be evaluated so that the workflow can proceed + start.data['lover_name'] = 'Peggy' + self.workflow.do_engine_steps() + + # Call activities should be created for executable processes and be reachable + buddy = self.workflow.get_tasks_from_spec_name('process_buddy')[0] + self.assertIsInstance(buddy.task_spec, CallActivity) + self.assertEqual(buddy.task_spec.spec, 'process_buddy') + self.assertEqual(buddy.state, TaskState.WAITING) + + def testBpmnMessage(self): + + spec, subprocesses = self.load_workflow_spec('collaboration.bpmn', 'process_buddy') + workflow = BpmnWorkflow(spec, subprocesses) + start = workflow.get_tasks_from_spec_name('Start')[0] + # Set up some data to be evaluated so that the workflow can proceed + start.data['lover_name'] = 'Peggy' + workflow.do_engine_steps() + # An external message should be created + messages = workflow.get_bpmn_messages() + self.assertEqual(len(messages), 1) + self.assertEqual(len(workflow.bpmn_messages), 0) + receive = workflow.get_tasks_from_spec_name('EventReceiveLetter')[0] + workflow.catch_bpmn_message('Love Letter Response', messages[0].payload, messages[0].correlations) + workflow.do_engine_steps() + # The external message created above should be caught + self.assertEqual(receive.state, TaskState.COMPLETED) + self.assertEqual(receive.data, messages[0].payload) + self.assertEqual(workflow.is_completed(), 
True) + + def testCorrelation(self): + + specs = self.get_all_specs('correlation.bpmn') + proc_1 = specs['proc_1'] + workflow = BpmnWorkflow(proc_1, specs) + workflow.do_engine_steps() + for idx, task in enumerate(workflow.get_ready_user_tasks()): + task.data['task_num'] = idx + task.complete() + workflow.do_engine_steps() + ready_tasks = workflow.get_ready_user_tasks() + waiting = workflow.get_tasks_from_spec_name('get_response') + # Two processes should have been started and two corresponding catch events should be waiting + self.assertEqual(len(ready_tasks), 2) + self.assertEqual(len(waiting), 2) + for task in waiting: + self.assertEqual(task.state, TaskState.WAITING) + # Now copy the task_num that was sent into a new variable + for task in ready_tasks: + task.data.update(init_id=task.data['task_num']) + task.complete() + workflow.do_engine_steps() + # If the messages were routed properly, the id should match + for task in workflow.get_tasks_from_spec_name('subprocess_end'): + self.assertEqual(task.data['task_num'], task.data['init_id']) + + def testTwoCorrelationKeys(self): + + specs = self.get_all_specs('correlation_two_conversations.bpmn') + proc_1 = specs['proc_1'] + workflow = BpmnWorkflow(proc_1, specs) + workflow.do_engine_steps() + for idx, task in enumerate(workflow.get_ready_user_tasks()): + task.data['task_num'] = idx + task.complete() + workflow.do_engine_steps() + + # Two processes should have been started and two corresponding catch events should be waiting + ready_tasks = workflow.get_ready_user_tasks() + waiting = workflow.get_tasks_from_spec_name('get_response_one') + self.assertEqual(len(ready_tasks), 2) + self.assertEqual(len(waiting), 2) + for task in waiting: + self.assertEqual(task.state, TaskState.WAITING) + # Now copy the task_num that was sent into a new variable + for task in ready_tasks: + task.data.update(init_id=task.data['task_num']) + task.complete() + workflow.do_engine_steps() + + # Complete dummy tasks + for task in workflow.get_ready_user_tasks(): + task.complete() + workflow.do_engine_steps() + + # Repeat for the other process, using a different mapped name + ready_tasks = workflow.get_ready_user_tasks() + waiting = workflow.get_tasks_from_spec_name('get_response_two') + self.assertEqual(len(ready_tasks), 2) + self.assertEqual(len(waiting), 2) + for task in ready_tasks: + task.data.update(subprocess=task.data['task_num']) + task.complete() + workflow.do_engine_steps() + + # If the messages were routed properly, the id should match + for task in workflow.get_tasks_from_spec_name('subprocess_end'): + self.assertEqual(task.data['task_num'], task.data['init_id']) + self.assertEqual(task.data['task_num'], task.data['subprocess']) + + def testSerialization(self): + + spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration') + self.workflow = BpmnWorkflow(spec, subprocesses) + start = self.workflow.get_tasks_from_spec_name('Start')[0] + start.data['lover_name'] = 'Peggy' + self.workflow.do_engine_steps() + self.save_restore() \ No newline at end of file diff --git a/tests/SpiffWorkflow/bpmn/CustomScriptTest.py b/tests/SpiffWorkflow/bpmn/CustomScriptTest.py new file mode 100644 index 000000000..12f69a2aa --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CustomScriptTest.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.exceptions 
import WorkflowTaskExecException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'McDonald, danfunk' + +def my_custom_function(txt): + return str(txt).upper() + +class CustomBpmnScriptEngine(PythonScriptEngine): + """This is a custom script processor that can be easily injected into Spiff Workflow. + It will execute python code read in from the bpmn. It will also make any scripts in the + scripts directory available for execution. """ + def __init__(self): + augment_methods = {'custom_function': my_custom_function} + super().__init__(scripting_additions=augment_methods) + + +class CustomInlineScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('custom_function_test*', 'top_workflow') + script_engine = CustomBpmnScriptEngine() + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=script_engine) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=False) + + def actual_test(self, save_restore): + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + data = self.workflow.last_task.data + self.assertEqual(data['c1'], 'HELLO') + self.assertEqual(data['c2'], 'GOODBYE') + self.assertEqual(data['c3'], 'ARRIVEDERCI') + + def test_overwrite_function_with_local_variable(self): + ready_task = self.workflow.get_tasks(TaskState.READY)[0] + ready_task.data = {'custom_function': "bill"} + with self.assertRaises(WorkflowTaskExecException) as e: + self.workflow.do_engine_steps() + self.assertTrue('' in str(e.exception)) + self.assertTrue('custom_function' in str(e.exception)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CustomInlineScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/DataObjectReferenceTest.py b/tests/SpiffWorkflow/bpmn/DataObjectReferenceTest.py new file mode 100644 index 000000000..0254c5df4 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/DataObjectReferenceTest.py @@ -0,0 +1,82 @@ +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.exceptions import WorkflowDataException + + +class DataObjectReferenceTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('data_object.bpmn', 'Process') + + def testDataObjectReferences(self): + self.actual_test(False) + + def testDataObjectSerialization(self): + self.actual_test(True) + + def testMissingDataInput(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + + # Add the data so that we can advance the workflow + ready_tasks = self.workflow.get_ready_user_tasks() + ready_tasks[0].data = { 'obj_1': 'hello' } + ready_tasks[0].complete() + + # Remove the data before advancing + ready_tasks = self.workflow.get_ready_user_tasks() + self.workflow.data.pop('obj_1') + with self.assertRaises(WorkflowDataException) as exc: + ready_tasks[0].complete() + self.assertEqual(exc.data_output.name, 'obj_1') + + def testMissingDataOutput(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + with self.assertRaises(WorkflowDataException) as exc: + ready_tasks[0].complete() + 
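        # The WorkflowDataException raised above should identify which data
        # object reference could not be resolved.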
self.assertEqual(exc.data_output.name, 'obj_1') + + def actual_test(self, save_restore): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + + # Set up the data + ready_tasks = self.workflow.get_ready_user_tasks() + ready_tasks[0].data = { 'obj_1': 'hello' } + ready_tasks[0].complete() + # After task completion, obj_1 should be copied out of the task into the workflow + self.assertNotIn('obj_1', ready_tasks[0].data) + self.assertIn('obj_1', self.workflow.data) + + if save_restore: + self.save_restore() + + # Set a value for obj_1 in the task data again + ready_tasks = self.workflow.get_ready_user_tasks() + ready_tasks[0].data = { 'obj_1': 'hello again' } + ready_tasks[0].complete() + + # Check to make sure we use the workflow value instead of the value we set + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertEqual(ready_tasks[0].data['obj_1'], 'hello') + # Modify the value in the task + ready_tasks[0].data = { 'obj_1': 'hello again' } + ready_tasks[0].complete() + # We did not set an output data reference so obj_1 should remain unchanged in the workflow data + # and be removed from the task data + self.assertNotIn('obj_1', ready_tasks[0].data) + self.assertEqual(self.workflow.data['obj_1'], 'hello') + + # Make sure data objects can be copied in and out of a subprocess + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertEqual(ready_tasks[0].data['obj_1'], 'hello') + ready_tasks[0].complete() + self.workflow.do_engine_steps() + sp = self.workflow.get_tasks_from_spec_name('subprocess')[0] + self.assertNotIn('obj_1', sp.data) diff --git a/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py new file mode 100644 index 000000000..8eaaff418 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + + + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ExclusiveGatewayIntoMultiInstanceTest(BpmnWorkflowTestCase): + """In the example BPMN Diagram we set x = 0, then we have an + exclusive gateway that should skip over a parallel multi-instance + class, so it should run straight through and complete without issue.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('exclusive_into_multi.bpmn','ExclusiveToMulti') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testSaveRestore(self): + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayIntoMultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ExclusiveGatewayNoDefaultTest.py b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayNoDefaultTest.py new file mode 100644 index 000000000..58fdb5d66 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayNoDefaultTest.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + + + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 
'..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException + +__author__ = 'essweine' + +class ExclusiveGatewayNoDefaultTest(BpmnWorkflowTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('exclusive_gateway_no_default.bpmn', 'NoDefaultGateway') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + first = self.workflow.get_tasks_from_spec_name('StartEvent_1')[0] + first.data = { 'x': 1 } + self.assertRaises(WorkflowException, self.workflow.do_engine_steps) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayNoDefaultTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ExculsiveGatewayNonDefaultPathIntoMultiTest.py b/tests/SpiffWorkflow/bpmn/ExculsiveGatewayNonDefaultPathIntoMultiTest.py new file mode 100644 index 000000000..424f72e9b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ExculsiveGatewayNonDefaultPathIntoMultiTest.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + + + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ExclusiveGatewayNonDefaultPathIntoMultiTest(BpmnWorkflowTestCase): + """In the example BPMN Diagram we require that "Yes" or "No" be specified + in a user task and check that a multiinstance can follow a non-default + path. + """ + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('exclusive_non_default_path_into_multi.bpmn','ExclusiveNonDefaultMulti') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def load_workflow1_spec(self): + return + + def testRunThroughHappy(self): + + + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. 
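        # Answering 'Yes' at DoStuff takes the non-default gateway path and
        # leads into the multi-instance task exercised in the loop below.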
+ task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("DoStuff", task.task_spec.name) + task.update_data({"morestuff": 'Yes'}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + self.assertEqual("GetMoreStuff", task.task_spec.name) + else: + self.assertEqual("GetMoreStuff_%d"%(i-1), task.task_spec.name) + + + task.update_data({"stuff.addstuff": "Stuff %d"%i}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayNonDefaultPathIntoMultiTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/FeelExpressionEngineTest.py b/tests/SpiffWorkflow/bpmn/FeelExpressionEngineTest.py new file mode 100644 index 000000000..6fe07dece --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/FeelExpressionEngineTest.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.FeelLikeScriptEngine import FeelLikeScriptEngine, FeelInterval +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +import datetime + +__author__ = 'matth' + + +class FeelExpressionTest(BpmnWorkflowTestCase): + + def setUp(self): + self.expressionEngine = FeelLikeScriptEngine() + + def testRunThroughExpressions(self): + tests = [("string length('abcd')", 4, {}), + ("contains('abcXYZdef','XYZ')", True, {}), + ("list contains(x,'b')", True, {'x': ['a', 'b', 'c']}), + ("list contains(x,'z')", False, {'x': ['a', 'b', 'c']}), + # ("list contains(['a','b','c'],'b')",True,{}), # fails due to parse error + ("all ([True,True,True])", True, {}), + ("all ([True,False,True])", False, {}), + ("any ([False,False,False])", False, {}), + ("any ([True,False,True])", True, {}), + ("PT3S", datetime.timedelta(seconds=3), {}), + ("d[item>1]",[2,3,4],{'d':[1,2,3,4]}), + ("d[x>=2].y",[2,3,4],{'d':[{'x':1,'y':1}, + {'x': 2, 'y': 2}, + {'x': 3, 'y': 3}, + {'x': 4, 'y': 4}, + ]}), + ("concatenate(a,b,c)", ['a', 'b', 'c'], {'a': ['a'], + 'b': ['b'], + 'c': ['c'], + }), + ("append(a,'c')", ['a', 'b', 'c'], {'a': ['a', 'b']}), + ("now()", FeelInterval(datetime.datetime.now() - datetime.timedelta(seconds=1), + datetime.datetime.now() + datetime.timedelta(seconds=1)), + {}), + ("day of week('2020-05-07')", 4, {}), + ("day of week(a)", 0, {'a': datetime.datetime(2020, 5, 3)}), + ("list contains(a.b,'x')", True, {'a': {'b': ['a', 'x']}}), # combo + ("list contains(a.b,'c')", False, {'a': {'b': ['a', 'x']}}), + ("list contains(a.keys(),'b')", True, {'a': {'b': ['a', 'x']}}), + ("list contains(a.keys(),'c')", False, {'a': {'b': ['a', 'x']}}), + ] + for test in tests: + self.assertEqual(self.expressionEngine._evaluate(test[0], test[2]), + test[1], "test --> %s <-- with variables ==> %s <==Fail!" 
% (test[0], str(test[2]))) + + def testRunThroughDMNExpression(self): + """ + Real world test + """ + data = { + "exclusive": [ + { + "ExclusiveSpaceAMComputingID": None + } + ] + } + x = self.expressionEngine._evaluate( + """sum([1 for x in exclusive if x.get('ExclusiveSpaceAMComputingID',None)==None])""", + data + ) + self.assertEqual(x, 1) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelExpressionTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/IOSpecTest.py b/tests/SpiffWorkflow/bpmn/IOSpecTest.py new file mode 100644 index 000000000..efa6441c9 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/IOSpecTest.py @@ -0,0 +1,88 @@ +from SpiffWorkflow.bpmn.exceptions import WorkflowDataException +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + + +class CallActivityDataTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('io_spec*.bpmn', 'parent') + + def testCallActivityWithIOSpec(self): + self.actual_test() + + def testCallActivityWithIOSpecSaveRestore(self): + self.actual_test(True) + + def testCallActivityMissingInput(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + set_data = self.workflow.spec.task_specs['Activity_0haob58'] + set_data.script = """in_1, unused = 1, True""" + + with self.assertRaises(WorkflowDataException) as exc: + self.advance_to_subprocess() + self.assertEqual(exc.var.name,'in_2') + + def testCallActivityMissingOutput(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + script_task = self.workflow.spec.task_specs['Activity_0haob58'] + script_task.script = """in_1, in_2, unused = 1, "hello world", True""" + + self.advance_to_subprocess() + task = self.workflow.get_tasks(TaskState.READY)[0] + transform_task = task.workflow.spec.task_specs['Activity_04d94ee'] + transform_task.script = """out_1, unused = in_1 * 2, False""" + + with self.assertRaises(WorkflowDataException) as exc: + self.complete_subprocess() + self.assertEqual(exc.var.name,'out_2') + + def actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + set_data = self.workflow.spec.task_specs['Activity_0haob58'] + set_data.script = """in_1, in_2, unused = 1, "hello world", True""" + + if save_restore: + self.save_restore() + + self.advance_to_subprocess() + # This will be the first task of the subprocess + task = self.workflow.get_tasks(TaskState.READY)[0] + + # These should be copied + self.assertIn('in_1', task.data) + self.assertIn('in_2', task.data) + # This should not + self.assertNotIn('unused', task.data) + + self.complete_subprocess() + task = self.workflow.get_tasks(TaskState.READY)[0] + # Originals should not change + self.assertEqual(task.data['in_1'], 1) + self.assertEqual(task.data['in_2'], "hello world") + self.assertEqual(task.data['unused'], True) + # New variables should be present + self.assertEqual(task.data['out_1'], 2) + self.assertEqual(task.data['out_2'], "HELLO WORLD") + + def advance_to_subprocess(self): + # Once we enter the subworkflow it becomes a waiting task + waiting = self.workflow.get_tasks(TaskState.WAITING) + while len(waiting) == 0: + next_task = self.workflow.get_tasks(TaskState.READY)[0] + next_task.complete() + waiting = self.workflow.get_tasks(TaskState.WAITING) + + def complete_subprocess(self): + # When 
we complete, the subworkflow task will move from WAITING to READY + waiting = self.workflow.get_tasks(TaskState.WAITING) + while len(waiting) > 0: + next_task = self.workflow.get_tasks(TaskState.READY)[0] + next_task.complete() + waiting = self.workflow.get_tasks(TaskState.WAITING) \ No newline at end of file diff --git a/tests/SpiffWorkflow/bpmn/InvalidProcessIDTest.py b/tests/SpiffWorkflow/bpmn/InvalidProcessIDTest.py new file mode 100644 index 000000000..5a6996409 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/InvalidProcessIDTest.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'essweine' + + +class InvalidProcessIDTest(BpmnWorkflowTestCase): + + def testInvalidWorkflowProcess(self): + self.assertRaisesRegex( + Exception, "The process '\w+' was not found*", + self.load_workflow_spec, "invalid_process*.bpmn", "topworkflow") + + def testInvalidCalledElement(self): + self.assertRaisesRegex( + ValidationException, "The process '\w+' was not found", + self.load_workflow_spec, "invalid_process*.bpmn", "top_workflow") + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InvalidProcessIDTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py b/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py new file mode 100644 index 000000000..ffb8f0242 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +import unittest + +import os + +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException +from SpiffWorkflow.signavio.parser.bpmn import SignavioBpmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class InvalidWorkflowsTest(BpmnWorkflowTestCase): + + def testDisconnectedBoundaryEvent(self): + + with self.assertRaises(ValidationException) as exc: + parser = SignavioBpmnParser() + filename = os.path.join(os.path.dirname(__file__), 'data', 'Invalid-Workflows/Disconnected-Boundary-Event.bpmn20.xml') + parser.add_bpmn_file(filename) + self.assertIn('Intermediate Catch Event has no incoming sequences', str(exc)) + self.assertIn('bpmn:intermediateCatchEvent (id:sid-84C7CE67-D0B6-486A-B097-486DA924FF9D)', str(exc)) + self.assertIn('Invalid-Workflows/Disconnected-Boundary-Event.bpmn20.xml', str(exc)) + + def testNoStartEvent(self): + try: + self.load_workflow_spec( + 'Invalid-Workflows/No-Start-Event.bpmn20.xml', 'No Start Event') + self.fail( + "self.load_workflow_spec('Invalid-Workflows/No-Start-Event.bpmn20.xml', 'No Start Event') should fail.") + except ValidationException as ex: + self.assertTrue('No start event found' in ('%r' % ex), + '\'No start event found\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('No-Start-Event.bpmn20.xml' in ('%r' % ex), + '\'No-Start-Event.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('process' in ('%r' % ex), + '\'process\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue( + 'sid-669ddebf-4196-41ee-8b04-bcc90bc5f983' in ('%r' % ex), + '\'sid-669ddebf-4196-41ee-8b04-bcc90bc5f983\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('No 
Start Event' in ('%r' % ex), + '\'No Start Event\' should be a substring of error message: \'%r\'' % ex) + + def testSubprocessNotFound(self): + + with self.assertRaises(ValidationException) as exc: + self.load_workflow_spec('Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml', 'Subprocess Not Found') + self.assertIn("The process 'Missing subprocess' was not found.", str(exc)) + self.assertIn("bpmn:callActivity (id:sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A)", str(exc)) + self.assertIn("Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml", str(exc)) + + def testUnsupportedTask(self): + try: + self.load_workflow_spec( + 'Invalid-Workflows/Unsupported-Task.bpmn20.xml', 'Unsupported Task') + self.fail( + "self.load_workflow_spec('Invalid-Workflows/Unsupported-Task.bpmn20.xml', 'Unsupported Task') should fail.") + except ValidationException as ex: + self.assertTrue( + 'There is no support implemented for this task type' in ( + '%r' % ex), + '\'There is no support implemented for this task type\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('Unsupported-Task.bpmn20.xml' in ('%r' % ex), + '\'Unsupported-Task.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('businessRuleTask' in ('%r' % ex), + '\'businessRuleTask\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue( + 'sid-75EEAB28-3B69-4282-B91A-0F3C97931834' in ('%r' % ex), + '\'sid-75EEAB28-3B69-4282-B91A-0F3C97931834\' should be a substring of error message: \'%r\'' % ex) + self.assertTrue('Business Rule Task' in ('%r' % ex), + '\'Business Rule Task\' should be a substring of error message: \'%r\'' % ex) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InvalidWorkflowsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/LoopTaskTest.py b/tests/SpiffWorkflow/bpmn/LoopTaskTest.py new file mode 100644 index 000000000..709750c64 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/LoopTaskTest.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class LoopTaskTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task with a loop cardinality of 5. 
+ It should repeat 5 times before termination.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('bpmnLoopTask.bpmn','LoopTaskTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + for i in range(5): + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + self.assertTrue(ready_tasks[0].task_spec.is_loop_task()) + self.assertFalse(self.workflow.is_completed()) + last_task = self.workflow.last_task + + self.do_next_exclusive_step('Activity_TestLoop') + + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + ready_tasks[0].terminate_loop() + self.do_next_exclusive_step('Activity_TestLoop') + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + + def testSaveRestore(self): + + for i in range(5): + self.save_restore() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + self.assertTrue(ready_tasks[0].task_spec.is_loop_task()) + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_TestLoop') + + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + ready_tasks[0].terminate_loop() + self.do_next_exclusive_step('Activity_TestLoop') + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LoopTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py new file mode 100644 index 000000000..305cb3df9 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceCondTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task set to be a parallel + multi-instance with a loop cardinality of 5. + It should repeat 5 times before termination, and it should + have a navigation list with 7 items in it - one for start, one for end, + and five items for the repeating section. 
""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('MultiInstanceParallelTaskCond.bpmn', 'MultiInstance') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def load_workflow1_spec(self): + return + + def testRunThroughHappy(self): + self.actualTest() + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + task.data['collection'] = {'a':{'a':'test'}, + 'b':{'b':'test'}} + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + for task in self.workflow.get_ready_user_tasks(): + self.assertFalse(self.workflow.is_completed()) + self.workflow.complete_task_from_id(task.id) + if save_restore: + self.save_restore() + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceCondTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py new file mode 100644 index 000000000..e13b943a2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task set to be a parallel + multi-instance with a loop cardinality of 5. + It should repeat 5 times before termination, and it should + have a navigation list with 7 items in it - one for start, one for end, + and five items for the repeating section. 
""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('MultiInstanceParallelTask.bpmn', 'MultiInstance') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def load_workflow1_spec(self): + return + + def testRunThroughHappy(self): + self.actualTest() + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + task.data['collection'] = [1,2,3,4,5] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + for task in self.workflow.get_ready_user_tasks(): + self.assertFalse(self.workflow.is_completed()) + self.workflow.complete_task_from_id(task.id) + if save_restore: + self.save_restore() + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py new file mode 100644 index 000000000..6054f99a4 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task with a loop cardinality of 5. + It should repeat 5 times before termination.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('bpmnMultiUserTask.bpmn','MultiInstance') + self.workflow = BpmnWorkflow(spec, subprocesses) + + + def testRunThroughHappy(self): + + for i in range(5): + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_Loop') + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testSaveRestore(self): + + for i in range(5): + self.save_restore() + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_Loop') + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py b/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py new file mode 100644 index 000000000..f7cef40be --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NITimerDurationTest(BpmnWorkflowTestCase): + """ + Non-Interrupting Timer boundary test + """ + def setUp(self): + spec, subprocesses = self.load_workflow_spec('timer-non-interrupt-boundary.bpmn', 'NonInterruptTimer') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def load_spec(self): + return 
+ + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self,save_restore = False): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['work_done'] = 'No' + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .25s + # we should terminate loop before that. + starttime = datetime.datetime.now() + while loopcount < 10: + ready_tasks = self.workflow.get_tasks(TaskState.READY) + if len(ready_tasks) > 1: + break + if save_restore: self.save_restore() + #self.assertEqual(1, len(self.workflow.get_tasks(Task.WAITING))) + time.sleep(0.1) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + duration = endtime-starttime + # appropriate time here is .5 seconds + # due to the .3 seconds that we loop and then + # the two conditions that we complete after the timer completes. + self.assertEqual(duration < datetime.timedelta(seconds=.5), True) + self.assertEqual(duration > datetime.timedelta(seconds=.2), True) + for task in ready_tasks: + if task.task_spec == 'GetReason': + task.data['delay_reason'] = 'Just Because' + else: + task.data['work_done'] = 'Yes' + self.workflow.complete_task_from_id(task.id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['experience'] = 'Great!' 
+ self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True) + self.assertEqual(self.workflow.last_task.data,{'work_done': 'Yes', 'experience': 'Great!'}) + print (self.workflow.last_task.data) + print(duration) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NITimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py b/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py new file mode 100644 index 000000000..4d4100258 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + + + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListExclusiveGatewayTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughly like this, a gateway + that leads to two different end points + + [Step 1] -> + -> 'False' -> [Alternate End] -> END A + -> 'True' -> [Step 2] -> END B + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ExclusiveGatewayMultipleEndNavigation.bpmn','ExclusiveGatewayMultipleEndNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_nav_list() + self.assertEqual(6, len(nav_list)) + + self.assertEqual("Step 1", nav_list[0]["description"]) + self.assertEqual("GatewayToEnd", nav_list[1]["description"]) + self.assertEqual("False", nav_list[2]["description"]) + self.assertEqual("Step End", nav_list[3]["description"]) + self.assertEqual("True", nav_list[4]["description"]) + self.assertEqual("Step 2", nav_list[5]["description"]) + + self.assertEqual(0, nav_list[0]["indent"]) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavListExclusiveGatewayTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NestedProcessesTest.py b/tests/SpiffWorkflow/bpmn/NestedProcessesTest.py new file mode 100644 index 000000000..2fe55b778 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NestedProcessesTest.py @@ -0,0 +1,36 @@ +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'neilc' + + +class NestedProcessesTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Nested*.bpmn20.xml', 'Nested Subprocesses') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.do_next_named_step('Action1') + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.do_next_named_step('Action2') + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.do_next_named_step('Action3') + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NestedProcessesTest) +if __name__ == '__main__': + 
unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelFromCamundaTest.py b/tests/SpiffWorkflow/bpmn/ParallelFromCamundaTest.py new file mode 100644 index 000000000..670c33d4d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelFromCamundaTest.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ParallelFromCamunda(BpmnWorkflowTestCase): + + # Should we move this to the Camunda package? Is this even testing anything Camunda related? + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel.camunda.bpmn20.xml', 'Process_1hb021r') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughParallelTaskFirst(self): + + # 1 first task + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.do_next_named_step('First Task') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + + # 3 parallel tasks + self.assertEqual(3, len(self.workflow.get_tasks(TaskState.READY))) + self.do_next_named_step('Parallel Task A') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task B') + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task C') + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + + # 1 last task + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.do_next_named_step('Last Task') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + + def testAllParallelDataMakesItIntoGatewayTask(self): + """It should be true that data collected across parallel tasks + is all available in the join task.""" + + self.do_next_named_step('First Task') + self.do_next_named_step('Parallel Task A', + set_attribs={"taskA": "taskA"}) + self.do_next_named_step('Parallel Task B', + set_attribs={"taskB": "taskB"}) + self.do_next_named_step('Parallel Task C', + set_attribs={"taskC": "taskC"}) + self.workflow.do_engine_steps() + self.do_next_named_step('Last Task') + self.assertEqual("taskA", self.workflow.last_task.data["taskA"]) + self.assertEqual("taskB", self.workflow.last_task.data["taskB"]) + self.assertEqual("taskC", self.workflow.last_task.data["taskC"]) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelFromCamunda) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelJoinLongInclusiveTest.py b/tests/SpiffWorkflow/bpmn/ParallelJoinLongInclusiveTest.py new file mode 100644 index 000000000..1105f473f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelJoinLongInclusiveTest.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelJoinLongInclusiveTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = 
self.load_workflow_spec('Test-Workflows/Parallel-Join-Long-Inclusive.bpmn20.xml', 'Parallel Join Long Inclusive') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughThread1FirstThenNo(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step( + 'Thread 1 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + for i in range(1, 13): + self.do_next_named_step('Thread 1 - Task %d' % i) + self.workflow.do_engine_steps() + + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_named_step( + 'Thread 2 - Choose', choice='No', with_save_load=True) + self.workflow.do_engine_steps() + self.do_next_named_step('Done', with_save_load=True) + self.workflow.do_engine_steps() + self.do_next_named_step('Thread 2 - No Task', with_save_load=True) + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testNoFirstThenThread1(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step( + 'Thread 2 - Choose', choice='No', with_save_load=True) + self.workflow.do_engine_steps() + + self.do_next_named_step( + 'Thread 1 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + for i in range(1, 13): + self.do_next_named_step('Thread 1 - Task %d' % i) + self.workflow.do_engine_steps() + + self.do_next_named_step('Done', with_save_load=True) + self.workflow.do_engine_steps() + + self.do_next_named_step('Thread 2 - No Task', with_save_load=True) + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelJoinLongInclusiveTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelJoinLongTest.py b/tests/SpiffWorkflow/bpmn/ParallelJoinLongTest.py new file mode 100644 index 000000000..2d3989b6f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelJoinLongTest.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelJoinLongTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel-Join-Long.bpmn20.xml', 'Parallel Join Long') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughAlternating(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step( + 'Thread 1 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + self.do_next_named_step( + 'Thread 2 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + + for i in range(1, 13): + self.do_next_named_step( + 'Thread 1 - Task %d' % i, with_save_load=True) + self.workflow.do_engine_steps() + self.do_next_named_step( + 'Thread 2 - Task %d' % i, with_save_load=True) + self.workflow.do_engine_steps() + + self.do_next_named_step('Done', with_save_load=True) + self.workflow.do_engine_steps() + + self.assertEqual( + 0, 
len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughThread1First(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step( + 'Thread 1 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + for i in range(1, 13): + self.do_next_named_step('Thread 1 - Task %d' % i) + self.workflow.do_engine_steps() + + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_named_step( + 'Thread 2 - Choose', choice='Yes', with_save_load=True) + self.workflow.do_engine_steps() + for i in range(1, 13): + self.do_next_named_step( + 'Thread 2 - Task %d' % i, with_save_load=True) + self.workflow.do_engine_steps() + + self.do_next_named_step('Done', with_save_load=True) + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelJoinLongTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelLoopingAfterJoinTest.py b/tests/SpiffWorkflow/bpmn/ParallelLoopingAfterJoinTest.py new file mode 100644 index 000000000..5a525e083 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelLoopingAfterJoinTest.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BaseParallelTestCase import BaseParallelTestCase + +__author__ = 'matth' + +class ParallelLoopingAfterJoinTest(BaseParallelTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec( + 'Test-Workflows/Parallel-Looping-After-Join.bpmn20.xml', + 'Parallel Looping After Join') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def test1(self): + self._do_test( + ['Go', '1', '2', '2A', '2B', '2 Done', ('Retry?', 'No'), 'Done'], save_restore=True) + + def test2(self): + self._do_test( + ['Go', '1', '2', '2A', '2B', '2 Done', ('Retry?', 'Yes'), 'Go', + '1', '2', '2A', '2B', '2 Done', ('Retry?', 'No'), 'Done'], save_restore=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelLoopingAfterJoinTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTest.py b/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTest.py new file mode 100644 index 000000000..0bdbe75d2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTest.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BaseParallelTestCase import BaseParallelTestCase + +__author__ = 'matth' + +class ParallelManyThreadsAtSamePointTest(BaseParallelTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec( + 'Test-Workflows/Parallel-Many-Threads-At-Same-Point.bpmn20.xml', + 'Parallel Many Threads At Same Point') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def test1(self): + self._do_test(['1', '2', '3', '4', 'Done', 'Done', 'Done', 'Done'], + only_one_instance=False, save_restore=True) + + def test2(self): + self._do_test(['1', 'Done', '2', 'Done', '3', 'Done', '4', 'Done'], + only_one_instance=False, save_restore=True) + + def test3(self): + self._do_test(['1', '2', 'Done', '3', '4', 'Done', 'Done', 'Done'], 
+ only_one_instance=False, save_restore=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelManyThreadsAtSamePointTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTestNested.py b/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTestNested.py new file mode 100644 index 000000000..23d5e9fc0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelManyThreadsAtSamePointTestNested.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +import unittest +import logging +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BaseParallelTestCase import BaseParallelTestCase + +__author__ = 'matth' + +class ParallelManyThreadsAtSamePointTestNested(BaseParallelTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec( + 'Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.bpmn20.xml', + 'Parallel Many Threads At Same Point Nested') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def test_depth_first(self): + instructions = [] + for split1 in ['SP 1', 'SP 2']: + for sp in ['A', 'B']: + for split2 in ['1', '2']: + for t in ['A', 'B']: + instructions.append(split1 + sp + "|" + split2 + t) + instructions.append(split1 + sp + "|" + 'Inner Done') + instructions.append("!" + split1 + sp + "|" + 'Inner Done') + if sp == 'A': + instructions.append("!Outer Done") + + instructions.append('Outer Done') + instructions.append("!Outer Done") + + logging.info('Doing test with instructions: %s', instructions) + self._do_test(instructions, only_one_instance=False, save_restore=True) + + def test_breadth_first(self): + instructions = [] + for t in ['A', 'B']: + for split2 in ['1', '2']: + for sp in ['A', 'B']: + for split1 in ['SP 1', 'SP 2']: + instructions.append(split1 + sp + "|" + split2 + t) + + for split1 in ['SP 1', 'SP 2']: + for sp in ['A', 'B']: + for split2 in ['1', '2']: + instructions += [split1 + sp + "|" + 'Inner Done'] + + for split1 in ['SP 1', 'SP 2']: + instructions += ['Outer Done'] + + logging.info('Doing test with instructions: %s', instructions) + self._do_test(instructions, only_one_instance=False, save_restore=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelManyThreadsAtSamePointTestNested) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsAndJoinsTest.py b/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsAndJoinsTest.py new file mode 100644 index 000000000..7dc3484bd --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsAndJoinsTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BaseParallelTestCase import BaseParallelTestCase + +__author__ = 'matth' + +class ParallelMultipleSplitsAndJoinsTest(BaseParallelTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec( + 'Test-Workflows/Parallel-Multiple-Splits-And-Joins.bpmn20.xml', + 'Parallel Multiple Splits And Joins') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def test1(self): + self._do_test(['1', '!Done', '2', '1A', '!Done', '2A', '1B', '2B', + '!Done', '1 Done', '!Done', '2 Done', 'Done'], save_restore=True) + + def test2(self): + self._do_test( + ['1', '!Done', '1A', '1B', '1 Done', '!Done', '2', '2A', '2B', '2 Done', 'Done'], 
save_restore=True) + + def test3(self): + self._do_test(['1', '2', '!Done', '1B', '2B', '!2 Done', '1A', + '!Done', '2A', '1 Done', '!Done', '2 Done', 'Done'], save_restore=True) + + def test4(self): + self._do_test( + ['1', '1B', '1A', '1 Done', '!Done', '2', '2B', '2A', '2 Done', 'Done'], save_restore=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelMultipleSplitsAndJoinsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsTest.py b/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsTest.py new file mode 100644 index 000000000..d779337e7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelMultipleSplitsTest.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelMultipleSplitsTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel-Multiple-Splits.bpmn20.xml', 'Parallel Multiple Splits') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughAlternating(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Do First') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 1 - Choose', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 2 - Choose', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 3 - Choose', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 1 - Yes Task') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 2 - Yes Task') + self.workflow.do_engine_steps() + self.do_next_named_step('SP 3 - Yes Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelMultipleSplitsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelOnePathEndsTest.py b/tests/SpiffWorkflow/bpmn/ParallelOnePathEndsTest.py new file mode 100644 index 000000000..6583f7ce1 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelOnePathEndsTest.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelOnePathEndsTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel-One-Path-Ends.bpmn20.xml', 'Parallel One Path Ends') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughParallelTaskFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Choice 1', choice='No') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + 
self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughChoiceFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='No') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughParallelTaskFirstYes(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelOnePathEndsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py b/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py new file mode 100644 index 000000000..67d3c90bd --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has 4 parallel workflows; this test + verifies that the parallel tasks have a natural order that follows + the visual layout of the diagram, rather than just the order in which + they were created. 
""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('ParallelOrder.bpmn','ParallelOrder') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.assertEquals(4, len(self.workflow.get_ready_user_tasks())) + tasks = self.workflow.get_ready_user_tasks() + self.assertEquals("Task 1", tasks[0].get_description()) + self.assertEquals("Task 2", tasks[1].get_description()) + self.assertEquals("Task 3", tasks[2].get_description()) + self.assertEquals("Task 4", tasks[3].get_description()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelThenExclusiveTest.py b/tests/SpiffWorkflow/bpmn/ParallelThenExclusiveTest.py new file mode 100644 index 000000000..bf1e7386b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelThenExclusiveTest.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelThenExclusiveTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel-Then-Exclusive.bpmn20.xml', 'Parallel Then Exclusive') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughParallelTaskFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughChoiceFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughChoiceThreadCompleteFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +class ParallelThenExclusiveNoInclusiveTest(ParallelThenExclusiveTest): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec( + 
'Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.bpmn20.xml', + 'Parallel Then Exclusive No Inclusive') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + +def suite(): + return unittest.TestLoader().loadTestsFromModule(sys.modules[__name__]) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelThroughSameTaskTest.py b/tests/SpiffWorkflow/bpmn/ParallelThroughSameTaskTest.py new file mode 100644 index 000000000..4768ddbae --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelThroughSameTaskTest.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + +class ParallelThroughSameTaskTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel-Through-Same-Task.bpmn20.xml', 'Parallel Through Same Task') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughFirstRepeatTaskFirst(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + # The inclusive gateway allows this to pass through (since there is a + # route to it on the same sequence flow) + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRepeatTasksReadyTogether(self): + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(2, len(ready_tasks)) + self.assertEqual( + 'Repeated Task', ready_tasks[0].task_spec.description) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + # The inclusive gateway allows us through here, because there is no route for the other thread + # that doesn't use the same sequence flow + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRepeatTasksReadyTogetherSaveRestore(self): + + self.save_restore() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='Yes') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Yes Task') + self.workflow.do_engine_steps() + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + ready_tasks = self.workflow.get_tasks(TaskState.READY) + 
self.assertEqual(2, len(ready_tasks)) + self.assertEqual( + 'Repeated Task', ready_tasks[0].task_spec.description) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + self.save_restore() + # The inclusive gateway allows us through here, because there is no route for the other thread + # that doesn't use the same sequence flow + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testNoRouteRepeatTaskFirst(self): + + self.save_restore() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + self.save_restore() + # The inclusive gateway allows this to pass through (since there is a + # route to it on the same sequence flow) + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Choice 1', choice='No') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('No Task') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testNoRouteNoTaskFirst(self): + + self.save_restore() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='No') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('No Task') + self.workflow.do_engine_steps() + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testNoRouteNoFirstThenRepeating(self): + + self.save_restore() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Choice 1', choice='No') + self.workflow.do_engine_steps() + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Repeated Task') + self.workflow.do_engine_steps() + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('No Task') + self.workflow.do_engine_steps() + self.save_restore() + self.do_next_named_step('Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelThroughSameTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelWithScriptTest.py b/tests/SpiffWorkflow/bpmn/ParallelWithScriptTest.py new file mode 100644 index 000000000..933d8a91d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelWithScriptTest.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + + + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from 
tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'leashys' + + +class ParallelWithScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('ParallelWithScript.bpmn', 'ParallelWithScript') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughParallel(self): + self.workflow.do_engine_steps() + # TODO: what to assert here? + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ParallelWithScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParserTest.py b/tests/SpiffWorkflow/bpmn/ParserTest.py new file mode 100644 index 000000000..5703273e8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParserTest.py @@ -0,0 +1,29 @@ +import unittest +import os + +from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser + + +class ParserTest(unittest.TestCase): + + def testIOSpecification(self): + + parser = BpmnParser() + bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'io_spec.bpmn') + parser.add_bpmn_file(bpmn_file) + spec = parser.get_spec('subprocess') + self.assertEqual(len(spec.data_inputs), 2) + self.assertEqual(len(spec.data_outputs), 2) + + def testDataReferences(self): + + parser = BpmnParser() + bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'data_object.bpmn') + parser.add_bpmn_file(bpmn_file) + spec = parser.get_spec("Process") + generate = spec.task_specs['generate_data'] + read = spec.task_specs['read_data'] + self.assertEqual(len(generate.data_output_associations), 1) + self.assertEqual(generate.data_output_associations[0].name, 'obj_1') + self.assertEqual(len(read.data_input_associations), 1) + self.assertEqual(read.data_input_associations[0].name, 'obj_1') diff --git a/tests/SpiffWorkflow/bpmn/ProcessDependencyTest.py b/tests/SpiffWorkflow/bpmn/ProcessDependencyTest.py new file mode 100644 index 000000000..414a8abbb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ProcessDependencyTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + + + +import sys +import os +import unittest + +from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from SpiffWorkflow.spiff.parser import SpiffBpmnParser +from tests.SpiffWorkflow.bpmn.BpmnLoaderForTests import TestBpmnParser + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'danfunk' + + +class ProcessDependencyTest(BpmnWorkflowTestCase): + """ + Assure we can determine all of the call activities and DMN references that + will be required by a parser, prior to calling its parse method. + + Because DMN references vary between Camunda and Spiff, need to test that + both methods will work. 
+ """ + + def testCamundaParser(self): + self.actual_test(CamundaParser()) + + def testSpiffParser(self): + self.actual_test(SpiffBpmnParser()) + + def actual_test(self, parser): + filename = 'call_activity_nested' + process_name = 'Level1' + base_dir = os.path.join(os.path.dirname(__file__), 'data', filename) + parser.add_bpmn_file(os.path.join(base_dir, 'call_activity_nested.bpmn')) + dependencies = parser.get_dependencies() + self.assertEqual(3, len(dependencies)) + process_deps = parser.get_process_dependencies() + self.assertEqual(2, len(process_deps)) + self.assertIn('Level2', process_deps) + self.assertIn('Level2b', process_deps) + dmn_deps = parser.get_dmn_dependencies() + self.assertEqual(1, len(dmn_deps)) + self.assertIn('Level2c', dmn_deps) + + # Add Level 2 file, and we should find a level 3 dependency as well. + parser.add_bpmn_file(os.path.join(base_dir, 'call_activity_level_2.bpmn')) + dependencies = parser.get_dependencies() + self.assertEqual(4, len(dependencies)) + self.assertIn('Level3', dependencies) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ProcessDependencyTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/PythonScriptEngineTest.py b/tests/SpiffWorkflow/bpmn/PythonScriptEngineTest.py new file mode 100644 index 000000000..d5c8ec32a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/PythonScriptEngineTest.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'danfunk' + + +class PythonScriptEngineTest(BpmnWorkflowTestCase): + + def setUp(self): + self.expressionEngine = PythonScriptEngine() + + # All this, just so we have a task object, not using anything in the Script. 
+ spec, subprocesses = self.load_workflow_spec('ScriptTest.bpmn', 'ScriptTest') + workflow = BpmnWorkflow(spec, subprocesses) + workflow.do_engine_steps() + self.task = workflow.last_task + + def testDateTimeExpressions(self): + """Basically, assure that we can use datime, dateutils, and pytz""" + script = """ +# Create Current Date as UTC +now_utc = datetime.datetime.now(datetime.timezone.utc) +# Create Current Date at EST +now_est = now_utc.astimezone(pytz.timezone('US/Eastern')) + +# Format a date from a date String in UTC +datestr = "2021-09-23 16:11:00 -0000" # 12 pm EST, 4pm UTC +dt = dateparser.parse(datestr) +localtime = dt.astimezone(pytz.timezone('US/Eastern')) +localtime_str = localtime.strftime("%Y-%m-%d %H:%M:%S") + """ + self.expressionEngine.execute(self.task, script) + self.assertEqual(self.task.data['now_utc'].utcoffset().days, 0) + self.assertEqual(self.task.data['now_est'].tzinfo.zone, "US/Eastern") + self.assertEqual(self.task.data['localtime_str'], "2021-09-23 12:11:00") + self.assertTrue(True) + + def testFunctionsAndGlobalsAreRemoved(self): + self.assertIn('testvar', self.task.data) + self.assertIn('testvar2', self.task.data) + self.assertIn('sample', self.task.data) + self.assertNotIn('my_function', self.task.data) + self.assertNotIn('datetime', self.task.data) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(PythonScriptEngineTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py b/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py new file mode 100644 index 000000000..81e5f330e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ResetSubProcessTest(BpmnWorkflowTestCase): + """Assure we can reset a token to a previous task when we have + a sub-workflow.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('resetworkflowA-*.bpmn', 'TopLevel') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def reload_save_restore(self): + + spec, subprocesses = self.load_workflow_spec('resetworkflowB-*.bpmn', 'TopLevel') + self.workflow = BpmnWorkflow(spec, subprocesses) + # Save and restore the workflow, without including the spec. + # When loading the spec, use a slightly different spec. 
+ self.workflow.do_engine_steps() + state = self.serializer.serialize_json(self.workflow) + self.workflow = self.serializer.deserialize_json(state) + self.workflow.spec = spec + self.workflow.subprocesses = subprocesses + + def testSaveRestore(self): + self.actualTest(True) + + def testResetToOuterWorkflowWhileInSubWorkflow(self): + + self.workflow.do_engine_steps() + top_level_task = self.workflow.get_ready_user_tasks()[0] + self.workflow.complete_task_from_id(top_level_task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.save_restore() + top_level_task = self.workflow.get_tasks_from_spec_name('Task1')[0] + top_level_task.reset_token({}, reset_data=True) + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(), 'Task1') + + + def actualTest(self, save_restore=False): + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'SubTask2') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks_from_spec_name('Task1')[0] + task.reset_token(self.workflow.last_task.data) + self.workflow.do_engine_steps() + self.reload_save_restore() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Task1') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Subtask2') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Subtask2A') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Task2') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetSubProcessTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/SameNameBugTest.py b/tests/SpiffWorkflow/bpmn/SameNameBugTest.py new file mode 100644 index 000000000..7ee0907f0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/SameNameBugTest.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'sartography' + + +class SameNameBugTest(BpmnWorkflowTestCase): + + """Should we bail out with a good error message, when two BPMN diagrams + that work with each other, have tasks with the same id?!?""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('same_id*.bpmn', 'same_id') + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=PythonScriptEngine()) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self,save_restore = False): + if save_restore: + self.save_restore() + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + 
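# A minimal sketch of the save/restore round trip that the testSaveRestore
# variants above exercise, assuming a serializer object like the one the test
# case keeps on self.serializer (it exposes serialize_json/deserialize_json,
# as used in ResetSubProcessTest.reload_save_restore). The helper name
# round_trip is an illustrative assumption, not part of SpiffWorkflow.
def round_trip(workflow, serializer):
    state = serializer.serialize_json(workflow)    # capture tasks, data and subprocesses as JSON
    return serializer.deserialize_json(state)      # rebuild an equivalent BpmnWorkflow

# Typical use inside a test would be: self.workflow = round_trip(self.workflow, self.serializer)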
+def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SameNameBugTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ScriptTest.py b/tests/SpiffWorkflow/bpmn/ScriptTest.py new file mode 100644 index 000000000..efe9f1c85 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ScriptTest.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class InlineScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('ScriptTest.bpmn', 'ScriptTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.workflow.do_engine_steps() + data = self.workflow.last_task.data + self.assertEqual(data,{'testvar': {'a': 1, 'b': 2, 'new': 'Test'}, + 'testvar2': [{'x': 1, 'y': 'a'}, + {'x': 2, 'y': 'b'}, + {'x': 3, 'y': 'c'}], + 'sample': ['b', 'c']}) + + def testNoDataPollution(self): + """Ran into an issue where data from one run of a workflow could + bleed into a separate execution. It will think a variable is there + when it should not be there""" + startTask = self.workflow.get_tasks(TaskState.READY)[0] + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + self.assertTrue("testvar" in self.workflow.last_task.data) + self.assertFalse("testvar" in startTask.data) + + # StartTask doesn't know about testvar, it happened earlier. + # calling an exec that references testvar, in the context of the + # start task should fail. 
+ with self.assertRaises(WorkflowTaskExecException): + result = self.workflow.script_engine.evaluate(startTask, 'testvar == True') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InlineScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ScriptTestBox.py b/tests/SpiffWorkflow/bpmn/ScriptTestBox.py new file mode 100644 index 000000000..0303afbd9 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ScriptTestBox.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + + + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class InlineScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('ScriptTestBox.bpmn', 'ScriptTest') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + data = self.workflow.last_task.data + self.assertEqual(data,{'testvar': {'a': 1, 'b': 2, 'new': 'Test'}, + 'testvar2': [{'x': 1, 'y': 'a'}, + {'x': 2, 'y': 'b'}, + {'x': 3, 'y': 'c'}], + 'sample': ['b', 'c'], 'end_event': None}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InlineScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ServiceTaskTest.py b/tests/SpiffWorkflow/bpmn/ServiceTaskTest.py new file mode 100644 index 000000000..290607081 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ServiceTaskTest.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import os +import sys +import unittest + +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class ServiceTaskTest(BpmnWorkflowTestCase): + + def setUp(self): + + spec, subprocesses = self.load_workflow_spec('service_task.bpmn', + 'service_task_example1') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.workflow.do_engine_steps() + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ServiceTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/SubWorkflowMultiTest.py b/tests/SpiffWorkflow/bpmn/SubWorkflowMultiTest.py new file mode 100644 index 000000000..83dfcef26 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/SubWorkflowMultiTest.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class SubWorkflowMultiTest(BpmnWorkflowTestCase): + + expected_data = { + 'a': {'name': 'Apple_edit', + 'new_info': 'Adding this!'}, + 'b': {'name': 'Bubble_edit', + 'new_info': 'Adding this!'}, + 'c': {'name': 'Crap, I should write better code_edit', + 'new_info': 'Adding this!'} + } + + def testSequential(self): + spec, subprocesses = self.load_workflow_spec('sub_workflow_multi.bpmn', 'ScriptTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + data = self.workflow.last_task.data 
+ self.assertEqual(data['my_collection'], self.expected_data) + + def testParallel(self): + spec, subprocesses= self.load_workflow_spec('sub_workflow_multi_parallel.bpmn', 'ScriptTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + data = self.workflow.last_task.data + self.assertEqual(data['my_collection'], self.expected_data) + + def testWrapped(self): + spec, subprocesses = self.load_workflow_spec('sub_within_sub_multi.bpmn', 'ScriptTest') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + data = self.workflow.last_task.data + self.assertEqual(self.expected_data, data['my_collection']) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SubWorkflowMultiTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/SwimLaneTest.py b/tests/SpiffWorkflow/bpmn/SwimLaneTest.py new file mode 100644 index 000000000..416382734 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/SwimLaneTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class SwimLaneTest(BpmnWorkflowTestCase): + """ + Test sample bpmn document to make sure the nav list + contains the correct swimlane in the 'lane' component + and make sure that our waiting tasks accept a lane parameter + and that it picks up the correct tasks. + """ + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('lanes.bpmn','lanes') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + + self.workflow.do_engine_steps() + + atasks = self.workflow.get_ready_user_tasks(lane="A") + btasks = self.workflow.get_ready_user_tasks(lane="B") + self.assertEqual(1, len(atasks)) + self.assertEqual(0, len(btasks)) + task = atasks[0] + self.assertEqual('Activity_A1', task.task_spec.name) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + atasks = self.workflow.get_ready_user_tasks(lane="A") + btasks = self.workflow.get_ready_user_tasks(lane="B") + self.assertEqual(0, len(atasks)) + self.assertEqual(1, len(btasks)) + + # Complete the gateway and the two tasks in B Lane + btasks[0].data = {'NeedClarification': False} + self.workflow.complete_task_from_id(btasks[0].id) + self.workflow.do_engine_steps() + btasks = self.workflow.get_ready_user_tasks(lane="B") + self.workflow.complete_task_from_id(btasks[0].id) + self.workflow.do_engine_steps() + + # Assert we are in lane C + tasks = self.workflow.get_ready_user_tasks() + self.assertEqual(1, len(tasks)) + self.assertEqual(tasks[0].task_spec.lane, "C") + + # Step into the sub-process, assure that is also in lane C + self.workflow.complete_task_from_id(tasks[0].id) + self.workflow.do_engine_steps() + tasks = self.workflow.get_ready_user_tasks() + self.assertEqual("SubProcessTask", tasks[0].task_spec.description) + self.assertEqual(tasks[0].task_spec.lane, "C") + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SwimLaneTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/TooManyLoopsTest.py b/tests/SpiffWorkflow/bpmn/TooManyLoopsTest.py new file mode 100644 index 000000000..a1018e0b6 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/TooManyLoopsTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +import unittest + +from 
SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'sartography' + + +class TooManyLoopsTest(BpmnWorkflowTestCase): + + """Looping back around many times would cause the tree of tasks to grow + with each loop; doing this 100 or 1,000 times would cause the system to + fail in various ways. This test assures that is no longer the case.""" + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False): + spec, subprocesses = self.load_workflow_spec('too_many_loops*.bpmn', 'loops') + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=PythonScriptEngine()) + counter = 0 + data = {} + while not self.workflow.is_completed(): + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + if (self.workflow.last_task.data != data): + data = self.workflow.last_task.data + counter += 1 # There is a 10 millisecond wait task. + if save_restore: + self.save_restore() + self.assertEqual(20, self.workflow.last_task.data['counter']) + + def test_with_sub_process(self): + # Found an issue where looping back would fail when it happened + # right after a sub-process. This test assures that stays fixed. + counter = 0 + spec, subprocesses = self.load_workflow_spec('too_many_loops_sub_process.bpmn', 'loops_sub') + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=PythonScriptEngine()) + data = {} + while not self.workflow.is_completed(): + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + if (self.workflow.last_task.data != data): + data = self.workflow.last_task.data + counter += 1 # There is a 10 millisecond wait task. +# self.save_restore() + self.assertEqual(20, self.workflow.last_task.data['counter']) + # One less, because we don't go back through once the first counter + # hits 20.
+ self.assertEqual(19, self.workflow.last_task.data['counter2']) + + def test_with_two_call_activities(self): + spec, subprocess = self.load_workflow_spec('sub_in_loop*.bpmn', 'main') + self.workflow = BpmnWorkflow(spec, subprocess) + self.workflow.do_engine_steps() + for loop in range(3): + ready = self.workflow.get_ready_user_tasks() + ready[0].data = { 'done': True if loop == 3 else False } + ready[0].complete() + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.save_restore() + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TooManyLoopsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/__init__.py b/tests/SpiffWorkflow/bpmn/__init__.py new file mode 100644 index 000000000..02764216f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + + + +__author__ = 'matth' diff --git a/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn b/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn new file mode 100644 index 000000000..152f7785d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn @@ -0,0 +1,403 @@ + + + + + + Person who takes the first action to start the process + + + + + + + + + + + Start1 + First_Approval_Wins + End1 + First_Approval_Wins_Done + Parallel_Approvals_Done + Parallel_SP + Parallel_SP_Done + + + + Supervisor_Approval__P_ + Gateway4 + Gateway5 + + + + Manager_Approval__P_ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Supervisor_Approval + Start2 + Supervisor_Approved + + + + Manager_Approval + Manager_Approved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Start3 + Supervisor_Approval + End2 + + + + Manager_Approval + + + + Step1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn b/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn new file mode 100644 index 000000000..5e33ada61 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn @@ -0,0 +1,403 @@ + + + + + + Person who takes the first action to start the process + + + + + + + + + + + Start1 + First_Approval_Wins + End1 + First_Approval_Wins_Done + Parallel_Approvals_Done + Parallel_SP + Parallel_SP_Done + + + + Supervisor_Approval__P_ + Gateway4 + Gateway5 + + + + Manager_Approval__P_ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Supervisor_Approval + Start2 + Supervisor_Approved + + + + Manager_Approval + Manager_Approved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Start3 + Supervisor_Approval + End2 + + + + Manager_Approval + + + + Step1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn new file mode 100644 index 000000000..87b743e07 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn @@ -0,0 +1,746 @@ + + + + + Flow_0kcrx5l + + + + Flow_0kcrx5l + Flow_1seuuie + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_12obxbo + Flow_1y4gjsg + + + Flow_02614fd + Flow_0c4tt8e + ro.chair = {} +ro.chair.uid = RO_Chair_CID +ro.chair.name_degree = RO_Chair_Name_Degree +ro.chair.title = RO_Chair_Title +ro.chair.sig_block = RO_Chair_Sig_Block + + + Flow_1seuuie + Flow_1ni06mz + Flow_1y9edqt + + + Flow_1y9edqt + Flow_1oriwwz + Flow_185jvp3 + + + Flow_185jvp3 + Flow_1dh8c45 + sch_enum = [] +if pi.E0.schoolAbbrv != "MD": + sch_enum_md = [ + { + "value": "MD", + "label": "Medicine" + }, + ] +else: + sch_enum_md = [] +if pi.E0.schoolAbbrv != "AS": + sch_enum_as = [ + { + "value": "AS", + "label": "Arts & Science" + }, + ] +else: + sch_enum_as = [] +if pi.E0.schoolAbbrv != "CU": + sch_enum_cu = [ + { + "value": "CU", + "label": "Education" + }, + ] +else: + sch_enum_cu = [] +if pi.E0.schoolAbbrv != "NR": + sch_enum_nr = [ + { + "value": "NR", + "label": "Nursing" + }, + ] +else: + sch_enum_nr = [] +sch_enum = sch_enum_md + sch_enum_as + sch_enum_cu + sch_enum_nr +del(sch_enum_md) +del(sch_enum_as) +del(sch_enum_cu) +del(sch_enum_nr) + + + + + + + + + + + + + + Flow_1dh8c45 + Flow_0mf9npl + + + Flow_1oriwwz + Flow_0nmpxmc + Flow_12obxbo + Flow_03s8gvx + Flow_0nzochy + Flow_0h955ao + + + Flow_1y4gjsg + Flow_0lnb8jw + Flow_1fqtd41 + Flow_0a626ba + + + Flow_0a626ba + Flow_0ssrpqx + if PIsPrimaryDepartmentSameAsRO.value == "diffSchool": + ro.schoolName = RO_StudySchool.label + ro.schoolAbbrv = RO_StudySchool.value + +if PIsPrimaryDepartmentSameAsRO.value != "yes": + if ro.schoolAbbrv == "MD": + ro.deptName = RO_StudyDeptMedicine.label + ro.deptAbbrv = RO_StudyDeptMedicine.value + elif ro.schoolAbbrv == "AS": + ro.deptName = RO_StudyDeptArtsSciences.label + ro.deptAbbrv = RO_StudyDeptArtsSciences.value + elif ro.schoolAbbrv == "CU": + ro.deptName = RO_StudyDeptEducation.label + ro.deptAbbrv = RO_StudyDeptEducation.value + else: + ro.deptName = "" + ro.deptAbbrv = "" + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0nzochy + Flow_0lnb8jw + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. 
+ + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0h955ao + Flow_1fqtd41 + + + Flow_0mf9npl + Flow_0nmpxmc + ro.schoolName = RO_StudySchool.label +ro.schoolAbbrv = RO_StudySchool.value + + + Flow_03s8gvx + Flow_0ssrpqx + Flow_0tnnt3b + + + ro.schoolAbbrv == "CU" + + + + + + + PIsPrimaryDepartmentSameAsRO.value != "yes" + + + + PIsPrimaryDepartmentSameAsRO.value == 'diffSchool' + + + + + + ro.schoolAbbrv not in ["MD", "AS", "CU"] + + + + ro.schoolAbbrv == "AS" + + + + + + + + Flow_1ni06mz + Flow_0tnnt3b + Flow_02614fd + + + temp + Flow_15xpsq8 + Flow_1g7q28p + + + Flow_0cqbu1f + Flow_1d4sb3d + Flow_12oux1f + Flow_0ygr7cu + + + The following Primary Coordinators were entered in Protocol Builder: +{%+ for key, value in pcs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_pcs %}, {% endif %}{% endfor %} +To Save the current settings for all Primary Coordinators, select Save All. + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + +### Please provide supplemental information for: + #### {{ pc.display_name }} + ##### Title: {{ pc.title }} + + ##### Department: {{ pc.department }} + ##### Affiliation: {{ pc.affiliation }} + + + + + + + Flow_12oux1f + Flow_1ik148z + + + + Flow_0c4tt8e + Flow_05g7d16 + Flow_13zasb1 + + + The PI is also the RO Chair + Flow_13zasb1 + Flow_0cqbu1f + + + Flow_0efu6u1 + Flow_0a3fjzp + Flow_0ljn2v6 + Flow_0pdoc38 + + + The following Sub-Investigators were entered in Protocol Builder: +{%+ for key, value in subs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_subs %}, {% endif %}{% endfor %} +To Save the current settings for all Sub-Investigators, select Save All. + + +Otherwise, edit each Sub-Investigator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ sub.display_name }} + ##### Title: {{ sub.title }} + + ##### Department: {{ sub.department }} + ##### Affiliation: {{ sub.affiliation }} + + + + + + + Flow_0ljn2v6 + Flow_07vu2b0 + + + + Flow_1ik148z + Flow_0ygr7cu + Flow_0a3fjzp + Flow_0rstqv5 + + + The following Additional Coordinators were entered in Protocol Builder: +{%+ for key, value in acs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_acs %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Coordinators, select Save All. + + + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ acs.display_name }} + ##### Title: {{ acs.title }} + + + ##### Department: {{ acs.department }} + ##### Affiliation: {{ acs.affiliation }} + Flow_0rstqv5 + Flow_0efu6u1 + + + + Flow_0pdoc38 + Flow_07vu2b0 + Flow_1g7q28p + Flow_0qti1ms + + + The following Additional Personnel were entered in Protocol Builder: +{%+ for key, value in aps.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_aps %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Personnel, select Save All. + + + + +Otherwise, edit each Additional Personnel as necessary and select the Save button for each. 
+ + + + +### Please provide supplemental information for: + #### {{ ap.display_name }} + ##### Title: {{ ap.title }} + + + ##### Department: {{ ap.department }} + ##### Affiliation: {{ ap.affiliation }} + + + + + + + Flow_0qti1ms + Flow_15xpsq8 + + + + ***Name & Degree:*** {{ RO_Chair_Name_Degree }} +***School:*** {{ RO_School }} +***Department:*** {{ RO_Department }} +***Title:*** {{ RO_Chair_Title }} +***Email:*** {{ RO_Chair_CID }} + + +{% if RO_Chair_CID != dc.uid %} + *Does not match the Department Chair specified in Protocol Builder, {{ dc.display_name }}* +{% endif %} + + + + + + + + + + Flow_05g7d16 + Flow_1d4sb3d + + + + + + + + + + + RO_Chair_CID == pi.uid + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn new file mode 100644 index 000000000..28c4a5538 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn @@ -0,0 +1,143 @@ + + + + + Flow_0kcrx5l + + + ##### Please confirm Primary Investigator entered in Protocol Builder is correct and if so, provide additional information: +### **{{ pi.display_name }}** +***Email:*** {{ pi.email_address }} + +**Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + +{% if is_me_pi %} +Since you are the person entering this information, you already have access and will receive all emails. 
+{% endif %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_147b9li + Flow_0xnj2rp + + + + + Flow_1dcsioh + Flow_147b9li + Flow_00prawo + + + tru + + + false + + + Flow_16qr5jf + + + + Flow_0kcrx5l + Flow_1dcsioh + + + No PI entered in PB + Flow_00prawo + Flow_16qr5jf + + + Flow_0xnj2rp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.bpmn20.xml new file mode 100644 index 000000000..228a66cf9 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.bpmn20.xml @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + + + + This not actually connected to the subprocess task - it's just on top + + + + + + + + + sid-7ED4D4F6-491F-4317-A37D-51C86F911524 + sid-137E71C6-FE26-418B-AFC0-1083027370F7 + sid-D99DD91F-8C51-4913-872A-DBBB5C7BE66C + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.signavio.xml new file mode 100644 index 000000000..6c8e6d360 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Disconnected-Boundary-Event.signavio.xml @@ -0,0 +1,3735 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Disconnected Boundary Event + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for + + + Message Non + + + Interrupt SP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + This not + actually + connected to + + + the subprocess + task - it's + just + + + on top + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for Disconnected Boundary + Event + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + + + In a Subprocess + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.bpmn20.xml new file mode 100644 index 000000000..cdce4e0cb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.bpmn20.xml @@ -0,0 +1,155 @@ + + + + + + + + + + + + + + + + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-361A64F9-EF4B-495B-AEF1-1FA282A62607 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + sid-EF7E5895-D344-4F95-87D1-981F64B1A3D2 + sid-DB365392-51BD-46A6-8521-AD17E140A6E8 + sid-B912FC0D-7824-41E6-BC28-4A8B6B0DF94F + + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + sid-0B688C65-99C8-407B-9D5A-2C670FE80BE8 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + + + + sid-5CB3BBEE-8865-4698-B10F-4D323DA96E4A + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + sid-5CB3BBEE-8865-4698-B10F-4D323DA96E4A + + + + + + sid-D420ADDD-33AD-43E1-A95A-CCE637A7C326 + + + + + + sid-D420ADDD-33AD-43E1-A95A-CCE637A7C326 + sid-0B688C65-99C8-407B-9D5A-2C670FE80BE8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.signavio.xml new file mode 100644 index 000000000..19fb488b5 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Multiple-Start-Events.signavio.xml @@ -0,0 +1,3373 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Multiple Start Events + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + for a + + + While + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Another thing + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.bpmn20.xml new file mode 100644 index 000000000..091ebe470 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.bpmn20.xml @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-361A64F9-EF4B-495B-AEF1-1FA282A62607 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.signavio.xml new file mode 100644 index 000000000..477f0074f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/No-Start-Event.signavio.xml @@ -0,0 +1,2404 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No Start Event + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + for a + + + While + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/README.txt b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/README.txt new file mode 100644 index 000000000..8e8379c38 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/README.txt @@ -0,0 +1,6 @@ +Please note that these files were created with Gemsbok. + +Please install and configure it, in order to edit them, rather than doing so by hand, as the .signavio.xml files +need to be kept in sync. + +It is here: https://github.com/matthewhampton/Gemsbok diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.bpmn20.xml new file mode 100644 index 000000000..5e78d2687 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.bpmn20.xml @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + + + + + + + + + sid-7ED4D4F6-491F-4317-A37D-51C86F911524 + sid-D99DD91F-8C51-4913-872A-DBBB5C7BE66C + sid-10515BFA-0CEC-4B8B-B3BE-E717DEBA6D89 + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + + + + + + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.signavio.xml new file mode 100644 index 000000000..87b91546b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Recursive-Subprocesses.signavio.xml @@ -0,0 +1,3428 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Recursive Subprocesses + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for + + + Recursive + + + Subprocesses + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for Recursive Subprocesses + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Recursive + + + Subprocesses + + + (callback!) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml new file mode 100644 index 000000000..478c7294a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml @@ -0,0 +1,181 @@ + + + + + + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + + + + + + + + + sid-7ED4D4F6-491F-4317-A37D-51C86F911524 + sid-137E71C6-FE26-418B-AFC0-1083027370F7 + sid-D99DD91F-8C51-4913-872A-DBBB5C7BE66C + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.signavio.xml new file mode 100644 index 000000000..ed0b8aef4 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Subprocess-Not-Found.signavio.xml @@ -0,0 +1,3565 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess Not Found + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for + + + Subprocess Not + + + Found + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for Subprocess Not Found + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + + + In a Subprocess + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.bpmn20.xml new file mode 100644 index 000000000..79410f7cd --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.bpmn20.xml @@ -0,0 +1,137 @@ + + + + + + + + + + + + + + + + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-361A64F9-EF4B-495B-AEF1-1FA282A62607 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + sid-22E8C43C-2108-49C8-B281-EF8BFDDE459F + sid-75EEAB28-3B69-4282-B91A-0F3C97931834 + + + + + + + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + sid-44DBB1DF-9836-4CB8-896D-1D2DEFE8CD20 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + 
sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + + + + sid-F1D58491-5B19-461B-8F3E-ABF544D3C488 + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + sid-F1D58491-5B19-461B-8F3E-ABF544D3C488 + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-44DBB1DF-9836-4CB8-896D-1D2DEFE8CD20 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.signavio.xml new file mode 100644 index 000000000..6270330fd --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Invalid-Workflows/Unsupported-Task.signavio.xml @@ -0,0 +1,3182 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Unsupported Task + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + for a + + + While + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Business Rule + + + Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn new file mode 100644 index 000000000..6b02f0961 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn @@ -0,0 +1,59 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + + + + Flow_0ds4mp0 + Flow_0ugjw69 + + collection + + + + Flow_0t6p1sb + Flow_0ds4mp0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn new file mode 100644 index 000000000..2945c381a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn @@ -0,0 +1,145 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + Flow_1oo4mpj + + + + Flow_0u92n7b 
+ Flow_0ugjw69 + + + + Flow_0t6p1sb + Flow_0ds4mp0 + + + + Flow_1sx7n9u + Flow_1oo4mpj + Flow_0u92n7b + + + len(collection.keys())==0 + + + + + Flow_0ds4mp0 + Flow_1dah8xt + Flow_0i1bv5g + + + + Flow_1dah8xt + Flow_0io0g18 + + + Flow_0io0g18 + Flow_0i1bv5g + Flow_1sx7n9u + + + + 1==1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn b/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn new file mode 100644 index 000000000..6a92338e8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn @@ -0,0 +1,1209 @@ + + + + + Flow_0kcrx5l + + + Flow_0kcrx5l + Flow_1dcsioh + current_user = ldap() +investigators = study_info('investigators') +# Primary Investigator +pi = investigators.get('PI', None) +is_cu_pi = False +if pi != None: + hasPI = True + study_data_set("PiUid",pi['uid']) + if pi.get('uid', None) != None: + pi_invalid_uid = False + if pi['uid'] == current_user['uid']: + is_cu_pi = True + else: + pi_invalid_uid = True +else: + hasPI = False + +# Department Chair +dc = investigators.get('DEPT_CH', None) +if dc != None: + if dc.get('uid', None) != None: + dc_invalid_uid = False + else: + dc_invalid_uid = True +else: + dc_invalid_uid = False + +# Primary Coordinators +pcs = {} +is_cu_pc = False +cnt_pcs_uid = 0 +for k in investigators.keys(): + if k in ['SC_I','SC_II','IRBC']: + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + pcs[k] = investigator + cnt_pcs_uid = cnt_pcs_uid + 1 + else: + is_cu_pc = True + is_cu_pc_role = investigator['label'] + else: + pcs[k] = investigator +cnt_pcs = len(pcs.keys()) +if cnt_pcs != cnt_pcs_uid: + pcs_invalid_uid = True +else: + pcs_invalid_uid = False +if cnt_pcs > 0: + del(k) + del(investigator) + +# Additional Coordinators +acs = {} +is_cu_ac = False +cnt_acs_uid = 0 +for k in investigators.keys(): + if k == 'AS_C': + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + acs[k] = investigator + cnt_acs_uid = cnt_acs_uid + 1 + else: + is_cu_ac = True + is_cu_ac_role = investigator['label'] + else: + acs[k] = investigator +cnt_acs = len(acs.keys()) +if cnt_pcs != cnt_pcs_uid: + acs_invalid_uid = True +else: + acs_invalid_uid = False +if cnt_acs > 0: + del(k) + del(investigator) + +# Sub-Investigatoers +subs = {} +is_cu_subs = False +cnt_subs_uid = 0 +for k in investigators.keys(): + if k[:2] == 'SI': + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + subs[k] = investigator + cnt_subs_uid = cnt_subs_uid + 1 + else: + is_cu_subs = True + else: + subs[k] = investigator +cnt_subs = len(subs.keys()) +if cnt_subs != cnt_subs_uid: + subs_invalid_uid = True +else: + subs_invalid_uid = False +if cnt_subs > 0: + del(k) + del(investigator) + +# Additional Personnel +aps = {} +is_cu_ap = False +cnt_aps_uid = 0 +for k in investigators.keys(): + if k in ['SCI','DC']: + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + aps[k] = investigator + cnt_aps_uid = cnt_aps_uid + 1 + else: + is_cu_ap = True + is_cu_ap_role = investigator['label'] + else: + aps[k] = investigator +cnt_aps = len(aps.keys()) +if cnt_aps != cnt_aps_uid: + aps_invalid_uid = 
True +else: + aps_invalid_uid = False +if cnt_aps > 0: + del(k) + del(investigator) +del(investigators) + + + temp + Flow_10zn0h1 + Flow_0kp47dz + + + ##### Please confirm Primary Investigator entered in Protocol Builder is correct and if so, provide additional information: +### **{{ pi.display_name }}** +***Email:*** {{ pi.email_address }} + +**Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + +{% if is_me_pi %} +Since you are the person entering this information, you already have access and will receive all emails. +{% endif %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_1kg5jot + Flow_1mplloa + + + + + Flow_1dcsioh + Flow_147b9li + Flow_00prawo + + + + not(hasPI) or (hasPI and pi_invalid_uid) + + + No PI entered in PB + Flow_00prawo + Flow_16qr5jf + + + Flow_0kpe12r + Flow_1ayisx2 + Flow_0xifvai + Flow_1oqem42 + + + + + The following Primary Coordinators were entered in Protocol Builder: +{%+ for key, value in pcs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_pcs %}, {% endif %}{% endfor %} +To Save the current settings for all Primary Coordinators, select Save All. + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + +### Please provide supplemental information for: + #### {{ pc.display_name }} + ##### Title: {{ pc.title }} + + ##### Department: {{ pc.department }} + ##### Affiliation: {{ pc.affiliation }} + + + + + + + Flow_0xifvai + Flow_1n0k4pd + + + + cnt_pcs == 0 + + + Flow_0tfprc8 + Flow_0tsdclr + Flow_1grahhv + LDAP_dept = pi.department +length_LDAP_dept = len(LDAP_dept) +pi.E0 = {} +if length_LDAP_dept > 0: + E0_start = LDAP_dept.find("E0:") + 3 + E0_slice = LDAP_dept[E0_start:length_LDAP_dept] + E0_first_hyphen = E0_slice.find("-") + E0_dept_start = E0_first_hyphen + 1 + pi.E0.schoolAbbrv = E0_slice[0:E0_first_hyphen] + isSpace = " " in E0_slice + if isSpace: + E0_first_space = E0_slice.find(" ") + E0_spec_start = E0_first_space + 1 + E0_spec_end = len(E0_slice) + pi.E0.deptAbbrv = E0_slice[E0_dept_start:E0_first_space] + pi.E0.specName = E0_slice[E0_spec_start:E0_spec_end] + else: + pi.E0.specName = "" +else: + pi.E0.schoolAbbrv = "Not in LDAP" + pi.E0.deptAbbrv = "Not in LDAP" + pi.E0.specName = "Not in LDAP" + + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0iuzu7j + Flow_0whqr3p + + + + Flow_070j5fg + Flow_0vi6thu + Flow_00yhlrq + + + + RO_Chair_CID == pi.uid + + + The PI is also the RO Chair + Flow_00yhlrq + Flow_0kpe12r + + + + Flow_12ss6u8 + Flow_0dt3pjw + Flow_05rqrlf + Flow_0jxzqw1 + + + + + cnt_subs == 0 + + + The following Sub-Investigators were entered in Protocol Builder: +{%+ for key, value in subs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_subs %}, {% endif %}{% endfor %} +To Save the current settings for all Sub-Investigators, select Save All. + + +Otherwise, edit each Sub-Investigator as necessary and select the Save button for each. 
+ + +### Please provide supplemental information for: + #### {{ sub.display_name }} + ##### Title: {{ sub.title }} + + ##### Department: {{ sub.department }} + ##### Affiliation: {{ sub.affiliation }} + + + + + + + Flow_05rqrlf + Flow_0ofpgml + + + + Please enter the Private Investigator in Protocol Builder. + Flow_16qr5jf + + + + + Flow_1grahhv + Flow_1kg5jot + pi.E0.schoolName = PI_E0_schoolName +pi.E0.deptName = PI_E0_deptName +pi_experience_key = "pi_experience_" + pi.user_id +pi.experience = user_data_get(pi_experience_key,"") +ro = {} +ro['chair'] = {} + + + + Flow_1oo0ijr + Flow_070j5fg + ro.chair = {} +ro.chair.uid = RO_Chair_CID +ro.chair.name_degree = RO_Chair_Name_Degree +ro.chair.title = RO_Chair_Title +ro.chair.sig_block = RO_Chair_Sig_Block + + + Flow_1n0k4pd + Flow_1oqem42 + Flow_1gtl2o3 + Flow_0dt3pjw + + + + + The following Additional Coordinators were entered in Protocol Builder: +{%+ for key, value in acs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_acs %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Coordinators, select Save All. + + + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ acs.display_name }} + ##### Title: {{ acs.title }} + + + ##### Department: {{ acs.department }} + ##### Affiliation: {{ acs.affiliation }} + Flow_1gtl2o3 + Flow_12ss6u8 + + + + cnt_acs == 0 + + + Flow_1va8c15 + Flow_1yd7kbi + Flow_0w4d2bz + + + Flow_1yd7kbi + Flow_13la8l3 + Flow_0ycdxbl + + + PIsPrimaryDepartmentSameAsRO.value != "yes" + + + + PIsPrimaryDepartmentSameAsRO.value == 'diffSchool' + + + Flow_0ycdxbl + Flow_1fj9iz0 + sch_enum = [] +if pi.E0.schoolAbbrv != "MD": + sch_enum_md = [ + { + "value": "MD", + "label": "Medicine" + }, + ] +else: + sch_enum_md = [] +if pi.E0.schoolAbbrv != "AS": + sch_enum_as = [ + { + "value": "AS", + "label": "Arts & Science" + }, + ] +else: + sch_enum_as = [] +if pi.E0.schoolAbbrv != "CU": + sch_enum_cu = [ + { + "value": "CU", + "label": "Education" + }, + ] +else: + sch_enum_cu = [] +if pi.E0.schoolAbbrv != "NR": + sch_enum_nr = [ + { + "value": "NR", + "label": "Nursing" + }, + ] +else: + sch_enum_nr = [] +sch_enum = sch_enum_md + sch_enum_as + sch_enum_cu + sch_enum_nr +del(sch_enum_md) +del(sch_enum_as) +del(sch_enum_cu) +del(sch_enum_nr) + + + + + + + + + + + + + + + Flow_1fj9iz0 + Flow_1yz8k2a + + + + + Flow_13la8l3 + Flow_0mdjaid + Flow_0fw4rck + Flow_1azfvtx + Flow_0giqf35 + Flow_0iuzu7j + + + ro.schoolAbbrv not in ["MD", "AS", "CU"] + + + Flow_0whqr3p + Flow_0zc01f9 + Flow_1vyg8ir + Flow_0m9peiz + + + + Flow_0m9peiz + Flow_1vv63qa + if PIsPrimaryDepartmentSameAsRO.value == "diffSchool": + ro.schoolName = RO_StudySchool.label + ro.schoolAbbrv = RO_StudySchool.value + +if PIsPrimaryDepartmentSameAsRO.value != "yes": + if ro.schoolAbbrv == "MD": + ro.deptName = RO_StudyDeptMedicine.label + ro.deptAbbrv = RO_StudyDeptMedicine.value + elif ro.schoolAbbrv == "AS": + ro.deptName = RO_StudyDeptArtsSciences.label + ro.deptAbbrv = RO_StudyDeptArtsSciences.value + elif ro.schoolAbbrv == "CU": + ro.deptName = RO_StudyDeptEducation.label + ro.deptAbbrv = RO_StudyDeptEducation.value + else: + ro.deptName = "" + ro.deptAbbrv = "" + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. 
+ + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_1azfvtx + Flow_0zc01f9 + + + Flow_1e0yt3v + Flow_0shnt6k + Flow_1va8c15 + ro = {} +ro['schoolName'] = PI_E0_schoolName +ro['schoolAbbrv'] = pi.E0.schoolAbbrv +ro['deptName'] = pi.E0.deptName +ro['deptAbbrv'] = pi.E0.deptAbbrv + + + + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0giqf35 + Flow_1vyg8ir + + + + + + + Flow_1yz8k2a + Flow_0mdjaid + ro.schoolName = RO_StudySchool.label +ro.schoolAbbrv = RO_StudySchool.value + + + + ro.schoolAbbrv == "AS" + + + ro.schoolAbbrv == "CU" + + + Flow_1vv63qa + Flow_0fw4rck + Flow_0vff9k5 + + + + Flow_0ofpgml + Flow_0jxzqw1 + Flow_0q56tn8 + Flow_0kp47dz + + + + + cnt_aps == 0 + + + The following Additional Personnel were entered in Protocol Builder: +{%+ for key, value in aps.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_aps %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Personnel, select Save All. + + + + +Otherwise, edit each Additional Personnel as necessary and select the Save button for each. + + + + +### Please provide supplemental information for: + #### {{ ap.display_name }} + ##### Title: {{ ap.title }} + + + ##### Department: {{ ap.department }} + ##### Affiliation: {{ ap.affiliation }} + + + + + + + Flow_0q56tn8 + Flow_10zn0h1 + + + + Flow_147b9li + Flow_0tfprc8 + Flow_0nz62mu + + + + dc_invalid_uid or pcs_invalid_uid or acs_invalid_uid or subs_invalid_uid or aps_invalid_uid + + + Select No if all displayed invalid Computing IDs do not need system access and/or receive emails. If they do, correct in Protocol Builder first and then select Yes. 
+ + +{% if dc_invalid_uid %} +Department Chair + {{ dc.error }} +{% endif %} +{% if pcs_invalid_uid %} +Primary Coordinators +{% for k, pc in pcs.items() %} + {% if pc.get('uid', None) == None: %} + {{ pc.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if acs_invalid_uid %} +Additional Coordinators +{% for k, ac in acs.items() %} + {% if ac.get('uid', None) == None: %} + {{ ac.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if subs_invalid_uid %} +Sub-Investigators +{% for k, sub in subs.items() %} + {% if sub.get('uid', None) == None: %} + {{ sub.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if aps_invalid_uid %} +Additional Personnnel +{% for k, ap in aps.items() %} + {% if ap.get('uid', None) == None: %} + {{ ap.error }} + {% endif %} +{% endfor %} +{% endif %} + + + + + + + + + + Flow_0nz62mu + Flow_16bkbuc + + + Flow_16bkbuc + Flow_0tsdclr + Flow_1mtwuyq + + + + not(FixInvalidUIDs) + + + ***Name & Degree:*** {{ RO_Chair_Name_Degree }} +***School:*** {{ RO_School }} +***Department:*** {{ RO_Department }} +***Title:*** {{ RO_Chair_Title }} +***Email:*** {{ RO_Chair_CID }} + + +{% if RO_Chair_CID != dc.uid %} + *Does not match the Department Chair specified in Protocol Builder, {{ dc.display_name }}* +{% endif %} + + + + + + + + + + Flow_0vi6thu + Flow_1ayisx2 + + + + Flow_07ur9cc + Flow_0shnt6k + user_data_set(pi_experience_key, pi.experience) + + + Flow_1mplloa + Flow_07ur9cc + Flow_1e0yt3v + + + pi.experience != user_data_get(pi_experience_key,"") + + + + + Flow_0vff9k5 + Flow_0w4d2bz + Flow_1oo0ijr + + + Flow_1mtwuyq + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn b/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn new file mode 100644 index 000000000..b37ac39fc --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn @@ -0,0 +1,138 @@ + + + + + Flow_0a440h9 + + + + Flow_0a440h9 + Flow_0fyg6vt + Flow_1hxdywg + Flow_1knmmur + Flow_1hchuue + + + Flow_0fyg6vt + Flow_0vkxjxc + + + + Flow_1hxdywg + Flow_1l55egz + + + Flow_1knmmur + Flow_1drgguy + + + Flow_1hchuue + Flow_0rxk9n3 + + + + + + + Flow_1l55egz + Flow_0vkxjxc + Flow_1drgguy + Flow_0rxk9n3 + Flow_0mckkuv + + + + + + Flow_0mckkuv + + + + These tasks were created in the opposite order in which they are displayed.  In the XML, Task4 happens first, then 3, 2, and 1. When Parsed, the order of these tasks should be 1,2,3 and 4. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ParallelWithScript.bpmn b/tests/SpiffWorkflow/bpmn/data/ParallelWithScript.bpmn new file mode 100644 index 000000000..a92df4add --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ParallelWithScript.bpmn @@ -0,0 +1,117 @@ + + + + + Flow_1swtnkk + + + + + + + + + + + Flow_1empxbr + Flow_1m1yz1x + # do nothing + + + Flow_04k0ue9 + + + + Flow_1swtnkk + Flow_1ukvcj0 + Flow_188f01l + Flow_1empxbr + + + Flow_0ykkbts + Flow_0lmf2gd + Flow_0954wrk + Flow_04k0ue9 + + + Flow_1ukvcj0 + Flow_0lmf2gd + + + Flow_188f01l + Flow_0ykkbts + + + Flow_1m1yz1x + Flow_0954wrk + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn b/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn new file mode 100644 index 000000000..20607466d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn @@ -0,0 +1,60 @@ + + + + + Flow_0dsbqk4 + + + + Flow_0dsbqk4 + Flow_1izwhjx + testvar = {'a':1,'b':2} +testvar2 = [{'x':1,'y':'a'}, + {'x':2,'y':'b'}, + {'x':3,'y':'c'}] + + + Flow_1rbktuo + + + + + Flow_1izwhjx + Flow_1rbktuo + def my_function() : + my_fun_var = 1 + +testvar['new'] = 'Test' +sample = [x['y'] for x in testvar2 if x['x'] > 1] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ScriptTestBox.bpmn b/tests/SpiffWorkflow/bpmn/data/ScriptTestBox.bpmn new file mode 100644 index 000000000..050c2e38e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ScriptTestBox.bpmn @@ -0,0 +1,56 @@ + + + + + Flow_0dsbqk4 + + + + Flow_0dsbqk4 + Flow_1izwhjx + testvar = {'a':1,'b':2} +testvar2 = [{'x':1,'y':'a'}, + {'x':2,'y':'b'}, + {'x':3,'y':'c'}] + + + Flow_1rbktuo + + + + + Flow_1izwhjx + Flow_1rbktuo + testvar.new = 'Test' +sample = [x.y for x in testvar2 if x.x > 1] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml new file mode 100644 index 000000000..85d39b530 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml @@ -0,0 +1,421 @@ + + + + + + + + + + + + + + + + + + + + + sid-A8FDFF7F-AAEC-4FE2-A261-DC46CCE8A925 + sid-32FA774A-C8E2-4ED8-92AB-262BB2309EDC + sid-A7A0FBEC-D736-45F1-A16F-45318E27874F + sid-C077D304-0ED5-47D8-B9EA-9D390CA0F86A + sid-CBBEEC71-BFE5-48BA-875D-2D83042C2491 + sid-2C3EA718-3768-4E1B-AB38-44001C090FEE + sid-2D13DB20-B41B-44C7-BC3B-ECE223C8B793 + sid-2ABC7FB8-98FF-4BE6-8A7F-941CBDEE3232 + + + + + + sid-09C66072-F204-4E32-A501-80D7BD2F45E7 + sid-3D825928-1108-4E19-907F-E6B9ADA5BB3B + sid-F2919669-F486-4C9A-B80E-6EDE1876962A + sid-CE3659E8-9ACC-4449-AD65-C5CF2DCB8054 + sid-07366683-F441-49D4-8A06-7C3D78CCEDE4 + sid-EC95E155-147C-4FD1-AFE5-5B803F655E3B + + + + + + + sid-16CDDB98-02B8-4DB8-8433-09A1F0170561 + + + Some documentation + + + + + + sid-16CDDB98-02B8-4DB8-8433-09A1F0170561 + sid-D43E1CF5-8963-434E-9F2D-64C5ABE362A7 + + + Some docs on a gateway + + + + sid-D43E1CF5-8963-434E-9F2D-64C5ABE362A7 + sid-880EE112-139E-4533-BB98-8A1E6D943A18 + sid-4B3460BF-1433-4961-BEA6-CD4766A5F509 + + + + + + sid-C06ACF4A-E241-4E40-B283-F35060801420 + sid-3FB96CF0-BCC8-427E-9142-9F7C72F07893 + 
sid-8CA0DD43-CC77-424E-B98D-BEBDBA7F8E85 + + + + + + + + sid-3FB96CF0-BCC8-427E-9142-9F7C72F07893 + sid-CA558A2D-1F6E-4BEB-B04F-6868529FCC24 + + + + + + sid-880EE112-139E-4533-BB98-8A1E6D943A18 + sid-6B67CBBC-E314-4DCF-B12F-968AEA30B05D + + + + + + + + + sid-4B3460BF-1433-4961-BEA6-CD4766A5F509 + sid-C06ACF4A-E241-4E40-B283-F35060801420 + + + + + + + + + sid-CA558A2D-1F6E-4BEB-B04F-6868529FCC24 + sid-6B67CBBC-E314-4DCF-B12F-968AEA30B05D + + + + + + + + sid-FFC6AFF7-1730-4FA8-BAE1-D5AE564FB8FF + sid-D40873A8-FA42-4FA8-BC6E-74B84D57C045 + + + + + + + + sid-2F70B74A-5D28-4D73-9C3B-540E7F9723F2 + sid-2DE53FE2-6F50-4EF5-9B11-E3733E2BD494 + + + + + + + sid-2DE53FE2-6F50-4EF5-9B11-E3733E2BD494 + + + + + + sid-D40873A8-FA42-4FA8-BC6E-74B84D57C045 + + + + sid-2F70B74A-5D28-4D73-9C3B-540E7F9723F2 + + finish_time + + + + + + + sid-8CA0DD43-CC77-424E-B98D-BEBDBA7F8E85 + sid-FFC6AFF7-1730-4FA8-BAE1-D5AE564FB8FF + + start_time + + + + + + + + + + + + + + + Some docs on a default sequence + + + Some docs on a sequence + + + + + + + + + sid-078306CD-6A3E-4B8E-9111-AD0717106A65 + sid-30A09A8A-4BC2-4303-A1C9-1C6EC7BCC039 + sid-7D103D68-E179-4138-9655-FC1ECFC7B897 + sid-42505FB2-8D9D-482D-8528-884BE441786D + sid-661CF7E6-4A5A-42CA-9C6A-6EE7D60DA7B4 + sid-CB483F12-E787-4180-AAB3-B4E74540158C + + + + + + + sid-6A80BDD6-95D5-4072-B6D1-3145C3308B16 + + + + + + + + sid-6A80BDD6-95D5-4072-B6D1-3145C3308B16 + sid-CEBA0A2D-CE09-41E9-B5C0-4741AF63CB25 + + + + + + + + sid-FECB9005-5026-4A3A-A795-7CFBDEA2D9A4 + sid-CEBA0A2D-CE09-41E9-B5C0-4741AF63CB25 + sid-17FC6A7F-4734-4A00-80DA-5C024783D1CF + + + + + + sid-17FC6A7F-4734-4A00-80DA-5C024783D1CF + sid-4023B856-5C8C-4F4B-89CF-00D224EADEDC + sid-5AE8BBED-8AD1-41A8-95DB-4897F73E7492 + + + + + + + + sid-5AE8BBED-8AD1-41A8-95DB-4897F73E7492 + sid-FECB9005-5026-4A3A-A795-7CFBDEA2D9A4 + + + + + + sid-4023B856-5C8C-4F4B-89CF-00D224EADEDC + + + + + + + + + + This task does not have a type on purpose. 
I assume if it is left off, then it is a user task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml new file mode 100644 index 000000000..a53e48590 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml @@ -0,0 +1,8126 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Action Management + + + + + + + + + + + + + + + + + + + + + undefined + + + Responsible Manager + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Review Action + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cancel Action + + + (if necessary) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cancelled + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Notify + + + Responsible + + + Person: New + + + Action + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Notify + + + Responsible + + + Person: Action + + + Cancelled + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + undefined + + + Responsible Person + + + + + + + + + + + + + + + + + + + + + + Do Work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Overdue + + + Escalation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Complete + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Finish Time + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Start Time + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Work + + + + + + + + + + + + + + + + + + + + + undefined + + + 
Responsible Person + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Start Work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Complete Work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Resume Work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This task does + not + + + have a type on + + + purpose. I + assume + + + if it is left + off, then + + + it is a user + task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + Put On Hold + + + + + + + + + + + + + + + + + + + + Resume + + + + + + + + + + + + + + + + + + + + Start + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cancel + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Cancel Action + + + + + + + + + + + + + + + + + + + + Approve + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test-Sub.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test-Sub.bpmn20.xml new file mode 100644 index 000000000..c3026c0c8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test-Sub.bpmn20.xml @@ -0,0 +1,117 @@ + + + + + SequenceFlow_1pdxvjo + + + SequenceFlow_0expyhf + SequenceFlow_07vs6qb + SequenceFlow_15hc88y + + + + SequenceFlow_07vs6qb + + + + should_escalate + + + SequenceFlow_15hc88y + + + + SequenceFlow_1pdxvjo + SequenceFlow_0expyhf + SequenceFlow_0rwyweg + + + 'should_escalate' in vars() or 'should_escalate' in globals() + + + + + SequenceFlow_0rwyweg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test.bpmn20.xml new file mode 100644 index 000000000..b53a7e1f7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/CallActivity-Escalation-Test.bpmn20.xml @@ -0,0 +1,692 @@ + + + + + SequenceFlow_16zp7m1 + + + + SequenceFlow_16zp7m1 + SequenceFlow_1kda3po + SequenceFlow_1s1w90v + SequenceFlow_14zh7re + SequenceFlow_0how4sz + SequenceFlow_09hcnsg + SequenceFlow_1svgn4k + + + SequenceFlow_0poxg37 + + + + SequenceFlow_04o4ekt + + + + SequenceFlow_1v1zuh1 + + + + SequenceFlow_1kda3po + 
SequenceFlow_1ah0nyw + + + + SequenceFlow_1s1w90v + SequenceFlow_1ra76av + + + SequenceFlow_00n7udr + + + + SequenceFlow_1smtf8a + + + + SequenceFlow_072gxkd + + + + SequenceFlow_0oox1qd + + + + SequenceFlow_0exwdi2 + + + + + SequenceFlow_14zh7re + SequenceFlow_1q5t764 + + + SequenceFlow_0ua7azh + + + SequenceFlow_0ty2h2e + + + SequenceFlow_1sq53x5 + + + + + + SequenceFlow_1qkwoqo + + + + SequenceFlow_0sqyooa + + + + + SequenceFlow_0how4sz + SequenceFlow_06tuy2y + + + SequenceFlow_09hcnsg + SequenceFlow_1oqcl5z + + + SequenceFlow_0i2nsbv + + + SequenceFlow_1ud7tfa + + + SequenceFlow_1sv1p07 + + + SequenceFlow_044gv9a + + + SequenceFlow_078mvp0 + + + + SequenceFlow_1lit4b3 + + + + + + + + + + SequenceFlow_1347w9t + + + SequenceFlow_0lih4rm + + + SequenceFlow_1svgn4k + SequenceFlow_1gibvy0 + + + + + + SequenceFlow_0cymhxo + + + + + SequenceFlow_0cymhxo + SequenceFlow_0lih4rm + a = 1 + + + + SequenceFlow_1gibvy0 + SequenceFlow_1347w9t + a = 1 + + + + SequenceFlow_04o4ekt + SequenceFlow_1v1zuh1 + a = 1 + + + + SequenceFlow_1ah0nyw + SequenceFlow_0poxg37 + a = 1 + + + + SequenceFlow_0oox1qd + SequenceFlow_072gxkd + a = 1 + + + + SequenceFlow_00n7udr + SequenceFlow_1smtf8a + a = 1 + + + + SequenceFlow_1ra76av + SequenceFlow_0exwdi2 + a = 1 + + + + SequenceFlow_1qkwoqo + SequenceFlow_1sq53x5 + a = 1 + + + + SequenceFlow_0sqyooa + SequenceFlow_0ty2h2e + a = 1 + + + + SequenceFlow_1q5t764 + SequenceFlow_0ua7azh + a = 1 + + + + SequenceFlow_078mvp0 + SequenceFlow_1ud7tfa + a = 1 + + + + SequenceFlow_06tuy2y + SequenceFlow_0i2nsbv + a = 1 + + + + SequenceFlow_1lit4b3 + SequenceFlow_044gv9a + a = 1 + + + + SequenceFlow_1oqcl5z + SequenceFlow_1sv1p07 + a = 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.bpmn20.xml new file mode 100644 index 000000000..faec45b3f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.bpmn20.xml @@ -0,0 +1,181 @@ + + + + + + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + 
sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + + + + + + + + + + + + sid-7ED4D4F6-491F-4317-A37D-51C86F911524 + sid-137E71C6-FE26-418B-AFC0-1083027370F7 + sid-D99DD91F-8C51-4913-872A-DBBB5C7BE66C + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.signavio.xml new file mode 100644 index 000000000..5b5e89409 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts-SP.signavio.xml @@ -0,0 +1,3566 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Interrupts SP + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for + + + Message + Interrupts + + + SP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Interrupt + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for Message Interrupts SP + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + + + In a Subprocess + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.bpmn20.xml new file mode 100644 index 000000000..443004f00 --- /dev/null +++ 
b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.bpmn20.xml @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-6FBBB56D-00CD-4C2B-9345-486986BB4992 + sid-B0D398F3-C33A-4113-B8BB-D7ABF6990C4A + sid-D3365C47-2FAE-4D17-98F4-E68B345E18CE + sid-067AF0E4-F644-4962-BC02-E767C63647D5 + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + + + + + + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + sid-A554E53F-CADB-4CC6-B11B-CB792EC5D4CB + + + + + + sid-768A24E1-8467-4009-B576-667053978B1F + sid-A554E53F-CADB-4CC6-B11B-CB792EC5D4CB + + + sid-768A24E1-8467-4009-B576-667053978B1F + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.signavio.xml new file mode 100644 index 000000000..3c1f39c5f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Interrupts.signavio.xml @@ -0,0 +1,2005 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Interrupts + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + That + + + Takes A Long + Time + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + Interrupt + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.bpmn20.xml new file mode 100644 index 000000000..b2c4e6caf --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.bpmn20.xml @@ -0,0 +1,181 @@ + + + + + + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A + sid-001BB515-2DC3-47C1-8122-3E853B3FDC34 + sid-4B320727-A78F-47E7-98CF-F589994A1C64 + sid-2BCA4F40-F885-4E59-9646-24E458BBC873 + sid-84C7CE67-D0B6-486A-B097-486DA924FF9D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + + + + + + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + sid-8ABF0F70-8A8A-49F2-8C3E-FC595AB764C6 + + + + + + sid-3204DA36-1068-4F48-99DD-1DBAC909A08B + sid-E0F8D63F-0B79-4CDF-9025-F3D40E581A2C + + + sid-2E189291-49E9-41CF-8865-D7B3D57D4464 + + + + + + + + + + + + + + + 
sid-7ED4D4F6-491F-4317-A37D-51C86F911524 + sid-137E71C6-FE26-418B-AFC0-1083027370F7 + sid-D99DD91F-8C51-4913-872A-DBBB5C7BE66C + + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + + + + + + sid-9D819E98-CD45-48A5-9F8D-B36047118934 + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + sid-04951D4B-FEAF-4D8E-924F-6B14AB63B83B + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.signavio.xml new file mode 100644 index 000000000..0a56bd2ae --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt-SP.signavio.xml @@ -0,0 +1,3576 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Non Interrupt SP + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for + + + Message Non + + + Interrupt SP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Outer End + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + SP Parallel + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Ack Subprocess + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Subprocess for Message Non Interrupt SP + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + + + In a Subprocess + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Inner End + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.bpmn20.xml new file mode 100644 index 000000000..13f9bc308 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.bpmn20.xml @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-6FBBB56D-00CD-4C2B-9345-486986BB4992 + 
sid-B0D398F3-C33A-4113-B8BB-D7ABF6990C4A + sid-D3365C47-2FAE-4D17-98F4-E68B345E18CE + sid-067AF0E4-F644-4962-BC02-E767C63647D5 + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + + + + + + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + sid-A554E53F-CADB-4CC6-B11B-CB792EC5D4CB + + + + + + sid-768A24E1-8467-4009-B576-667053978B1F + sid-A554E53F-CADB-4CC6-B11B-CB792EC5D4CB + + + sid-768A24E1-8467-4009-B576-667053978B1F + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.signavio.xml new file mode 100644 index 000000000..6bca2a5b0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Message-Non-Interrupt.signavio.xml @@ -0,0 +1,2005 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Message Non Interrupt + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Do Something + That + + + Takes A Long + Time + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Acknowledge + + + Non-Interrupt + + + Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + OK + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.bpmn20.xml new file mode 100644 index 000000000..8143cc6cf --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.bpmn20.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + sid-516421BF-6D56-4D23-87A5-6E8FC1E2636F + sid-B0D398F3-C33A-4113-B8BB-D7ABF6990C4A + sid-DF69AC93-16C3-405D-9FB1-9B2539519372 + + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + + + + + + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + + + + + + sid-F1DA0EEB-68EF-47E3-A6EA-5E2D25B6B34D + sid-334FDA52-5EF6-49DE-9839-E1263BA922FE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.signavio.xml new file mode 100644 index 000000000..f04f6ef58 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Messages.signavio.xml @@ -0,0 +1,814 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Messages + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Message 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml new file mode 100644 index 000000000..4c480fbd7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml @@ -0,0 +1,88 @@ + + + + + SequenceFlow_1svgzrn + + + # {{person.id}} +Please answer a few questions about this person. +loo + + + + + + SequenceFlow_066wkms + SequenceFlow_1qz7vsd + + 3 + + + + # Thank you for completeing the user information. + +{% for person in personnel %} +  * Person {{person.uid}} does {% if not person.dog_friendly %} NOT {% endif %} likes dogs. +{% endfor %} + SequenceFlow_1qz7vsd + + + + SequenceFlow_1svgzrn + SequenceFlow_066wkms + + + + + Loops through each person and asks if they like dogs. + + + + Collects the set of personnel from the Protocol Builder + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.bpmn20.xml new file mode 100644 index 000000000..8fb42e2be --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.bpmn20.xml @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A + sid-64E2EF25-F986-4834-8C3B-C3533746113E + sid-1B59DCD2-83A0-4687-B7BE-83625395572E + sid-BC014079-199F-4720-95CD-244B0ACB6DE1 + + + + + + + sid-27BA998B-1FEE-4CBA-86D5-8C5968F1478D + + + + + + sid-B8192BBF-7DB4-4AA1-8990-5017C30130A8 + + + + + + + + + sid-27BA998B-1FEE-4CBA-86D5-8C5968F1478D + sid-02F2C617-DFEE-44AE-AAED-145AF2E2D946 + + + + + + + sid-02F2C617-DFEE-44AE-AAED-145AF2E2D946 + sid-B8192BBF-7DB4-4AA1-8990-5017C30130A8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.signavio.xml new file mode 100644 index 000000000..cdab4ebf2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level1.signavio.xml @@ -0,0 +1,1614 @@ + + +First sublevel of nested subprocesses +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nested level 1 + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Action2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nested level 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Finish + + + + + + + + + + + + + + + + + + + + ToLevel2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.bpmn20.xml 
b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.bpmn20.xml new file mode 100644 index 000000000..8e9b079d3 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.bpmn20.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65 + sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7 + sid-2EDAD784-7F15-486C-B805-D26EE25F8087 + + + + + + + sid-862F0510-9CB8-4C7C-87CC-A9EA7E2D8758 + + + + + + + + + sid-862F0510-9CB8-4C7C-87CC-A9EA7E2D8758 + sid-80315FC5-BC67-4999-8CE2-3ACDC838E3E6 + + + + + + sid-80315FC5-BC67-4999-8CE2-3ACDC838E3E6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.signavio.xml new file mode 100644 index 000000000..0a5b40a49 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Sub-level2.signavio.xml @@ -0,0 +1,1150 @@ + + +2nd nested subprocess level +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nested level 2 + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Action3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Finish + + + + + + + + + + + + + + + + + + + + In + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.bpmn20.xml new file mode 100644 index 000000000..a3ca3c467 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.bpmn20.xml @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + sid-093DC600-6F99-40CE-988C-7AD87B792F90 + sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38 + sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B + sid-C014B4B9-889F-4EE9-9949-C89502C35CF0 + + + + + + + sid-E35CEC65-EA3C-4C5A-BC90-8C17016C24E5 + + + + + + sid-85D2E5A5-BD56-4650-B715-3B6E0BE33443 + + + + + + + + + sid-E35CEC65-EA3C-4C5A-BC90-8C17016C24E5 + sid-5BC5ECB5-884B-449A-AC67-B9B7ED296728 + + + + + + + sid-5BC5ECB5-884B-449A-AC67-B9B7ED296728 + sid-85D2E5A5-BD56-4650-B715-3B6E0BE33443 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.signavio.xml new file mode 100644 index 000000000..38f1507ac --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Nested-Subprocesses.signavio.xml @@ -0,0 +1,1576 @@ + + +Used to test multiple nestings of subprocesses +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nested Subprocesses + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Action1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nested level 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Finished + + + + + + + + + + + + + + + + + + + + Continue + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.bpmn20.xml new file mode 100644 index 000000000..80cf29724 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.bpmn20.xml @@ -0,0 +1,633 @@ + + + + + + + + + + + + + + + + sid-9CAB06E6-EDCF-4193-869A-FE8328E8CBFF + sid-F4CFA154-9281-4579-B117-0859A2BFF7E8 + sid-E489DED4-8C38-4841-80BC-E514353C1B8C + sid-B88338F7-5084-4532-9ABB-7387B1E5A664 + sid-35745597-A6C0-424B-884C-C5C23B60C942 + sid-0BC1E7F7-CBDA-4591-95B9-320FCBEF6114 + sid-45841FFD-3D92-4A18-9CE3-84DC5282F570 + sid-A8E18F57-FC41-401D-A397-9264C3E48293 + sid-E98E44A0-A273-4350-BA75-B37F2FCBA1DD + sid-29C1DD4B-9E3E-4686-892D-D47927F6DA08 + sid-107A993F-6302-4391-9BE2-068C9C7B693B + sid-71AA325A-4D02-46B4-8DC9-00C90BC5337C + sid-54BB293D-91B6-41B5-A5C4-423300D74D14 + sid-D8777102-7A64-42E6-A988-D0AE3049ABB0 + sid-E6A08072-E35C-4545-9C66-B74B615F34C2 + sid-BEC02819-27FE-4484-8FDA-08450F4DE618 + sid-6576AA43-43DF-4086-98C8-FD2B22F20EB0 + sid-08397892-678C-4706-A05F-8F6DAE9B5423 + sid-3500C16F-8037-4987-9022-8E30AB6B0590 + sid-A473B421-0981-49D8-BD5A-66832BD518EC + sid-F18EA1E5-B692-484C-AB84-2F422BF7868A + sid-D67A997E-C7CF-4581-8749-4F931D8737B5 + sid-399AE395-D46F-4A30-B875-E904970AF141 + sid-738EA50B-3EB5-464B-96B8-6CA5FC30ECBA + sid-5598F421-4AC5-4C12-9239-EFAC51C5F474 + sid-CF5677F8-747F-4E95-953E-4DAB186958F4 + sid-A73FF591-2A52-42DF-97DB-6CEEF8991283 + sid-69DF31CE-D587-4BA8-8BE6-72786108D8DF + sid-38B84B23-6757-4357-9AF5-A62A5C8AC1D3 + sid-732095A1-B07A-4B08-A46B-277C12901DED + sid-23391B60-C6A7-4C9E-9F95-43EA84ECFB74 + sid-9A40D0CD-3BD0-4A0D-A6B0-60FD60265247 + sid-B2E34105-96D5-4020-85D9-C569BA42D618 + sid-3D1455CF-6B1E-4EB1-81B2-D738110BB283 + sid-75EE4F61-E8E2-441B-8818-30E3BACF140B + sid-4864A824-7467-421A-A654-83EE83F7681C + sid-6938255D-3C1A-4B94-9E83-4D467E0DDB4B + + + + + + + sid-54E118FA-9A24-434C-9E65-36F9D01FB43D + + + + + + sid-54E118FA-9A24-434C-9E65-36F9D01FB43D + sid-7BFA5A55-E297-40FC-88A6-DF1DA809A12C + sid-C7247231-5152-424E-A240-B07B76E8F5EC + + + + + + sid-C609F3E0-2D09-469C-8750-3E3BA8C926BE + sid-0204722F-5A92-4236-BBF1-C66123E14E22 + + + + + + sid-0204722F-5A92-4236-BBF1-C66123E14E22 + sid-699ED598-1AB9-4A3B-9315-9C89578FB017 + + + + + + sid-699ED598-1AB9-4A3B-9315-9C89578FB017 + sid-85116AFA-E95A-4384-9695-361C1A6070C3 + + + + + + sid-85116AFA-E95A-4384-9695-361C1A6070C3 + sid-84756278-D67A-4E65-AD96-24325F08E2D1 + + + + + + sid-AE7CFA43-AC83-4F28-BCE3-AD7BE9CE6F27 + sid-C132728C-7DAF-468C-A807-90A34847071E + + + + + + sid-C132728C-7DAF-468C-A807-90A34847071E + sid-4A3A7E6E-F79B-4842-860C-407DB9227023 + + + + + + sid-4A3A7E6E-F79B-4842-860C-407DB9227023 + sid-B45563D3-2FBE-406D-93E4-85A2DD04B1A4 + + + + + + sid-D7D86B12-A88C-4072-9852-6DD62643556A + sid-AE7CFA43-AC83-4F28-BCE3-AD7BE9CE6F27 + + + + + + sid-369B410B-EA82-4896-91FD-23FFF759494A + sid-E47AA9C3-9EB7-4B07-BB17-086388AACE0D + + + + + + 
sid-84756278-D67A-4E65-AD96-24325F08E2D1 + sid-0C53B343-3753-4EED-A6FE-C1A7DFBF13BC + + + + + + sid-0C53B343-3753-4EED-A6FE-C1A7DFBF13BC + sid-9CA8DF1F-1622-4F6A-B9A6-761C60C29A11 + + + + + + sid-9CA8DF1F-1622-4F6A-B9A6-761C60C29A11 + sid-13838920-8EE4-45CB-8F01-29F13CA13819 + + + + + + sid-13838920-8EE4-45CB-8F01-29F13CA13819 + sid-FAA04C3A-F55B-4947-850D-5A180D43BD61 + + + + + + sid-FAA04C3A-F55B-4947-850D-5A180D43BD61 + sid-A19043EA-D140-48AE-99A1-4B1EA3DE0E51 + + + + + + sid-A19043EA-D140-48AE-99A1-4B1EA3DE0E51 + sid-2A94D2F0-DF4B-45B6-A30D-FFB9BDF6E9D9 + + + + + + sid-2A94D2F0-DF4B-45B6-A30D-FFB9BDF6E9D9 + sid-661F5F14-5B94-4977-9827-20654AE2719B + + + + + + sid-661F5F14-5B94-4977-9827-20654AE2719B + sid-C0DC27C3-19F9-4D3D-9D04-8869DAEDEF1E + + + + + + sid-B45563D3-2FBE-406D-93E4-85A2DD04B1A4 + sid-0E826E42-8FBC-4532-96EA-C82E7340CBA4 + + + + + + sid-0E826E42-8FBC-4532-96EA-C82E7340CBA4 + sid-BE9CBE97-0E09-4A37-BD98-65592D2F2E84 + + + + + + sid-BE9CBE97-0E09-4A37-BD98-65592D2F2E84 + sid-C96EBBBD-7DDA-4875-89AC-0F030E53C2B6 + + + + + + sid-C96EBBBD-7DDA-4875-89AC-0F030E53C2B6 + sid-8449C64C-CF1D-4601-ACAE-2CD61BE2D36C + + + + + + sid-8449C64C-CF1D-4601-ACAE-2CD61BE2D36C + sid-1DD0519A-72AD-4FB1-91D6-4D18F2DA1FC8 + + + + + + sid-1DD0519A-72AD-4FB1-91D6-4D18F2DA1FC8 + sid-42540C95-8E89-4B6F-B133-F677FA72C9FF + + + + + + sid-42540C95-8E89-4B6F-B133-F677FA72C9FF + sid-108A05A6-D07C-4DA9-AAC3-8075A721B44B + + + + + + sid-108A05A6-D07C-4DA9-AAC3-8075A721B44B + sid-A8886943-1369-43FB-BFC1-FF1FF974EB5D + + + + + + sid-7BFA5A55-E297-40FC-88A6-DF1DA809A12C + sid-7F5D9083-6201-43F9-BBEE-664E7310F4F2 + + + + + + sid-7F5D9083-6201-43F9-BBEE-664E7310F4F2 + sid-C609F3E0-2D09-469C-8750-3E3BA8C926BE + sid-C06DF3AB-4CE5-4123-8033-8AACDCDF4416 + + + + + + sid-C06DF3AB-4CE5-4123-8033-8AACDCDF4416 + sid-A0A2FCFF-E2BE-4FE6-A4D2-CCB3DCF68BFB + + + + + + sid-C7247231-5152-424E-A240-B07B76E8F5EC + sid-DC398932-1111-4CA2-AEB4-D460E0E06C6E + + + + + + sid-DC398932-1111-4CA2-AEB4-D460E0E06C6E + sid-D7D86B12-A88C-4072-9852-6DD62643556A + sid-16EB4D98-7F77-4046-8CDD-E07C796542FE + + + + + + sid-16EB4D98-7F77-4046-8CDD-E07C796542FE + sid-8E17C1AF-45C2-48C7-A794-1259E2ECA43D + + + + + + sid-A8886943-1369-43FB-BFC1-FF1FF974EB5D + sid-C0DC27C3-19F9-4D3D-9D04-8869DAEDEF1E + sid-369B410B-EA82-4896-91FD-23FFF759494A + + + + + + sid-8E17C1AF-45C2-48C7-A794-1259E2ECA43D + + + + + + sid-A0A2FCFF-E2BE-4FE6-A4D2-CCB3DCF68BFB + + + + + + sid-E47AA9C3-9EB7-4B07-BB17-086388AACE0D + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.signavio.xml new file mode 100644 index 000000000..7ed9ce015 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long-Inclusive.signavio.xml @@ -0,0 +1,18834 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Parallel Join Long Inclusive + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 10 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 11 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 1 - Task + + + 12 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 8 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 10 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Thread 2 - Task + + + 11 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+[end of the preceding Signavio diagram export: remaining shape labels "Thread 2 - Task 12", "Thread 1 - Choose", "Thread 1 - No Task", "Thread 2 - Choose", "Thread 2 - No Task" and their Yes/No edge labels]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.bpmn20.xml
new file mode 100644
index 000000000..a7afcb9c8
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.bpmn20.xml
@@ -0,0 +1,622 @@
+[BPMN 2.0 process definition for the Parallel-Join-Long test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) of its events, tasks and gateways remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.signavio.xml
new file mode 100644
index 000000000..c6c844526
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Join-Long.signavio.xml
@@ -0,0 +1,18676 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Join Long" with lane "Tester"; recoverable shape labels: "Thread 1 - Task 1" through "Thread 1 - Task 12", "Thread 2 - Task 1" through "Thread 2 - Task 12", "Thread 1 - Choose", "Thread 2 - Choose", "Thread 1 - No Task", "Thread 2 - No Task", "Done", and Yes/No edge labels]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.bpmn20.xml
new file mode 100644
index 000000000..150b28d10
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.bpmn20.xml
@@ -0,0 +1,291 @@
+[BPMN 2.0 process definition for the Parallel-Looping-After-Join test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.signavio.xml
new file mode 100644
index 000000000..8e692dd91
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Looping-After-Join.signavio.xml
@@ -0,0 +1,6534 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Looping After Join" with lane "Tester"; recoverable shape labels: "First Split", "1", "2", "Join of First Split", "Retry?", "Go", "Second Split", "2A", "2B", "Join of Second Split", "2 Done", "Done", and Yes/No edge labels]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.bpmn20.xml
new file mode 100644
index 000000000..8a4f9ab92
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.bpmn20.xml
@@ -0,0 +1,428 @@
+[BPMN 2.0 definitions for the Parallel-Many-Threads-At-Same-Point-Nested test workflow (outer process plus nested subprocess); only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.signavio.xml
new file mode 100644
index 000000000..668df8ebf
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.signavio.xml
@@ -0,0 +1,8272 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Many Threads At Same Point Nested" with lane "Tester"; recoverable shape labels: "Outer Split", "SP 1A", "SP 1B", "SP 2A", "SP 2B", "Outer Join 1", "Outer Join 2", "Outer Done", "Outer End", plus nested subprocess "SP" with "Inner split", "1A", "1B", "2A", "2B", "Inner Join 1", "Inner join 2", "Inner Done", "Inner End"]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.bpmn20.xml
new file mode 100644
index 000000000..39bfd7d72
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.bpmn20.xml
@@ -0,0 +1,183 @@
+[BPMN 2.0 process definition for the Parallel-Many-Threads-At-Same-Point test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.signavio.xml
new file mode 100644
index 000000000..3cea57c7f
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Many-Threads-At-Same-Point.signavio.xml
@@ -0,0 +1,4000 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Many Threads At Same Point" with lane "Tester"; recoverable shape labels: tasks "1", "2", "3", "4" and "Done"]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.bpmn20.xml
new file mode 100644
index 000000000..34818aa62
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.bpmn20.xml
@@ -0,0 +1,327 @@
+[BPMN 2.0 process definition for the Parallel-Multiple-Splits-And-Joins test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.signavio.xml
new file mode 100644
index 000000000..18fda73ad
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits-And-Joins.signavio.xml
@@ -0,0 +1,7277 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Multiple Splits And Joins" with lane "Tester"; recoverable shape labels: "1", "2", "1A", "1B", "2A", "2B", "1 Done", "2 Done" and "Done"]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.bpmn20.xml
new file mode 100644
index 000000000..2812292a9
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.bpmn20.xml
@@ -0,0 +1,571 @@
+[BPMN 2.0 definitions for the Parallel-Multiple-Splits test workflow (main process plus subprocess definitions); only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.signavio.xml
new file mode 100644
index 000000000..9c525db8b
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Multiple-Splits.signavio.xml
@@ -0,0 +1,11634 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Multiple Splits" with lane "Tester"; recoverable shape labels: "Do First", "Subprocess With a Choice 1", "Subprocess With a Choice 2", "Subprocess With a Choice 3", "Done", and subprocess diagrams with "SP 1 - Choose" / "SP 1 - Yes Task" / "SP 1 - No Task", "SP 2 - Choose" / "SP 2 - Yes Task" / "SP 2 - No Task", "SP 3 - Choose" / "SP 3 - Yes Task" / "SP 3 - No Task" plus Yes/No edge labels]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.bpmn20.xml
new file mode 100644
index 000000000..773ffc75e
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.bpmn20.xml
@@ -0,0 +1,192 @@
+[BPMN 2.0 process definition for the Parallel-One-Path-Ends test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.signavio.xml
new file mode 100644
index 000000000..bf33da092
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-One-Path-Ends.signavio.xml
@@ -0,0 +1,3793 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel One Path Ends" with lane "Tester"; recoverable shape labels: "Parallel Task", "Choice 1", "Yes Task", "Done" and Yes/No edge labels]
+]]>
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.bpmn20.xml
new file mode 100644
index 000000000..373bb7f23
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.bpmn20.xml
@@ -0,0 +1,219 @@
+[BPMN 2.0 process definition for the Parallel-Then-Exclusive-No-Inclusive test workflow; only the incoming/outgoing sequence-flow references (ids beginning with sid-) remain legible, not the element markup or the BPMNDI diagram section]
diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.signavio.xml
new file mode 100644
index 000000000..fd0583b3c
--- /dev/null
+++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.signavio.xml
@@ -0,0 +1,4497 @@
+[Signavio "BPMN 2.0" export (CDATA-wrapped) of process "Parallel Then Exclusive No Inclusive" with lane "Tester"; recoverable shape labels: "Parallel Task", "Choice 1", "Yes Task", "No Task", "Done" and Yes/No edge labels]
+]]>
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Choice 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Yes Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No + + + + + + + + + + + + + + + + + + + + Yes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.bpmn20.xml new file mode 100644 index 000000000..f915b4c3d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.bpmn20.xml @@ -0,0 +1,202 @@ + + + + + + + + + + + + + + + + sid-B33EE043-AB93-4343-A1D4-7B267E2DAFBE + sid-349F8C0C-45EA-489C-84DD-1D944F48D778 + sid-57463471-693A-42A2-9EC6-6460BEDECA86 + sid-CA089240-802A-4C32-9130-FB1A33DDCCC3 + sid-E2054FDD-0C20-4939-938D-2169B317FEE7 + sid-34AD79D9-BE0C-4F97-AC23-7A97D238A6E5 + sid-2A302E91-F89F-4913-8F55-5C3AC5FAE4D3 + sid-F3A979E3-F586-4807-8223-1FAB5A5647B0 + sid-51816945-79BF-47F9-BA3C-E95ABAE3D1DB + sid-EBB511F3-5AD5-4307-9B9B-85C17F8889D5 + + + + + + + sid-F3994F51-FE54-4910-A1F4-E5895AA1A612 + + + + + + sid-F3994F51-FE54-4910-A1F4-E5895AA1A612 + sid-7E15C71B-DE9E-4788-B140-A647C99FDC94 + sid-B6E22A74-A691-453A-A789-B9F8AF787D7C + + + + + + sid-7E15C71B-DE9E-4788-B140-A647C99FDC94 + sid-E3493781-6466-4AED-BAD2-63D115E14820 + + + + + + sid-B6E22A74-A691-453A-A789-B9F8AF787D7C + sid-CAEAD081-6E73-4C98-8656-C67DA18F5140 + + + + + + sid-CAEAD081-6E73-4C98-8656-C67DA18F5140 + sid-3742C960-71D0-4342-8064-AF1BB9EECB42 + sid-9C753C3D-F964-45B0-AF57-234F910529EF + + + + + + sid-9C753C3D-F964-45B0-AF57-234F910529EF + sid-A6DA25CE-636A-46B7-8005-759577956F09 + + + + + + sid-3742C960-71D0-4342-8064-AF1BB9EECB42 + sid-12F60C82-D18F-4747-B5B5-34FD40F2C8DE + + + + + + sid-0895E09C-077C-4D12-8C11-31F28CBC7740 + sid-40496205-24D7-494C-AB6B-CD42B8D606EF + + + + + + sid-40496205-24D7-494C-AB6B-CD42B8D606EF + + + + + + 
sid-E3493781-6466-4AED-BAD2-63D115E14820 + sid-12F60C82-D18F-4747-B5B5-34FD40F2C8DE + sid-A6DA25CE-636A-46B7-8005-759577956F09 + sid-0895E09C-077C-4D12-8C11-31F28CBC7740 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.signavio.xml new file mode 100644 index 000000000..31f702adb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Then-Exclusive.signavio.xml @@ -0,0 +1,4291 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Parallel Then Exclusive + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Parallel Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Choice 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Yes Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No + + + + + + + + + + + + + + + + + + + + Yes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.bpmn20.xml new file mode 100644 index 000000000..fed03996b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.bpmn20.xml @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + sid-B33EE043-AB93-4343-A1D4-7B267E2DAFBE + sid-349F8C0C-45EA-489C-84DD-1D944F48D778 + sid-57463471-693A-42A2-9EC6-6460BEDECA86 + 
sid-CA089240-802A-4C32-9130-FB1A33DDCCC3 + sid-E2054FDD-0C20-4939-938D-2169B317FEE7 + sid-34AD79D9-BE0C-4F97-AC23-7A97D238A6E5 + sid-2A302E91-F89F-4913-8F55-5C3AC5FAE4D3 + sid-F3A979E3-F586-4807-8223-1FAB5A5647B0 + sid-51816945-79BF-47F9-BA3C-E95ABAE3D1DB + sid-AF897BE2-CC07-4236-902B-DD6E1AB31842 + + + + + + + sid-F3994F51-FE54-4910-A1F4-E5895AA1A612 + + + + + + sid-F3994F51-FE54-4910-A1F4-E5895AA1A612 + sid-7E15C71B-DE9E-4788-B140-A647C99FDC94 + sid-B6E22A74-A691-453A-A789-B9F8AF787D7C + + + + + + sid-7E15C71B-DE9E-4788-B140-A647C99FDC94 + sid-A6DA25CE-636A-46B7-8005-759577956F09 + sid-E3493781-6466-4AED-BAD2-63D115E14820 + + + + + + sid-B6E22A74-A691-453A-A789-B9F8AF787D7C + sid-CAEAD081-6E73-4C98-8656-C67DA18F5140 + + + + + + sid-CAEAD081-6E73-4C98-8656-C67DA18F5140 + sid-3742C960-71D0-4342-8064-AF1BB9EECB42 + sid-9C753C3D-F964-45B0-AF57-234F910529EF + + + + + + sid-9C753C3D-F964-45B0-AF57-234F910529EF + sid-A6DA25CE-636A-46B7-8005-759577956F09 + + + + + + sid-3742C960-71D0-4342-8064-AF1BB9EECB42 + sid-12F60C82-D18F-4747-B5B5-34FD40F2C8DE + + + + + + sid-0895E09C-077C-4D12-8C11-31F28CBC7740 + sid-40496205-24D7-494C-AB6B-CD42B8D606EF + + + + + + sid-40496205-24D7-494C-AB6B-CD42B8D606EF + + + + + + sid-E3493781-6466-4AED-BAD2-63D115E14820 + sid-12F60C82-D18F-4747-B5B5-34FD40F2C8DE + sid-0895E09C-077C-4D12-8C11-31F28CBC7740 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.signavio.xml new file mode 100644 index 000000000..45a234a01 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel-Through-Same-Task.signavio.xml @@ -0,0 +1,4289 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Parallel Through Same Task + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Repeated Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Choice 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Yes Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No Task + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + No + + + + + + + + + + + + + + + + + + + + Yes + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +]]> + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml new file mode 100644 index 000000000..620cf25b8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml @@ -0,0 +1,158 @@ + + + + + SequenceFlow_1vv685e + + + + + SequenceFlow_160ihio + SequenceFlow_01s4u0j + SequenceFlow_1x5zcdu + SequenceFlow_1g26zbi + + + + + + + SequenceFlow_11uv01u + SequenceFlow_0z80s5o + SequenceFlow_0cquzxd + SequenceFlow_0uq97wv + + + + + + SequenceFlow_1a97zm5 + + + + + + + + + + + + + + SequenceFlow_1vv685e + SequenceFlow_160ihio + + + + + + + + SequenceFlow_01s4u0j + SequenceFlow_11uv01u + + + + + + + + SequenceFlow_1x5zcdu + SequenceFlow_0z80s5o + + + + + + + + SequenceFlow_1g26zbi + SequenceFlow_0cquzxd + + + + + + + + SequenceFlow_0uq97wv + SequenceFlow_1a97zm5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/README.txt b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/README.txt new file mode 100644 index 000000000..8e8379c38 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/README.txt @@ -0,0 +1,6 @@ +Please note that these files were created with Gemsbok. + +Please install and configure it, in order to edit them, rather than doing so by hand, as the .signavio.xml files +need to be kept in sync. 
+ +It is here: https://github.com/matthewhampton/Gemsbok diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.bpmn20.xml new file mode 100644 index 000000000..3b3678e20 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.bpmn20.xml @@ -0,0 +1,172 @@ + + + + + + + + + + + + + + + + sid-8FEAF795-94B2-440D-802F-A1E3D06B2E8D + sid-26E89636-8D64-4086-A71F-A225A5AA8F81 + sid-11DD8628-C125-439E-B207-AE898139650B + sid-6C039219-F210-4984-B366-11C77DB35229 + sid-8BB6CFC5-D636-47D8-9AF6-0E45FD2F3B39 + sid-F7545D0E-08BD-4415-968E-CEF57C5261A9 + sid-C621B8FC-0E3E-409C-A9D2-55188512C4CC + sid-F4C7A6CA-3D30-43FE-BDCE-17DFBE8C102E + + + + + + + sid-2E290042-66C2-4BB7-8627-7FF1EAF345DB + + + + + + sid-0258E597-1879-4A66-B2A0-686F559D123E + sid-8E7A167A-0410-43DF-AAC1-FAA54D122990 + + + + + + + sid-65C6AD2F-4BB1-4CCF-A4BD-652354666F4C + sid-473A1324-A78B-491D-B26F-E98A581DC913 + + + + + + sid-2E290042-66C2-4BB7-8627-7FF1EAF345DB + sid-49D8B0A1-1254-4545-B18C-5232C606A7E6 + + + + + + sid-49D8B0A1-1254-4545-B18C-5232C606A7E6 + sid-0258E597-1879-4A66-B2A0-686F559D123E + sid-5457E966-E8F8-4E6A-AEDE-B4A1399CFAC2 + + + + + + sid-5457E966-E8F8-4E6A-AEDE-B4A1399CFAC2 + sid-41485063-4276-4F8E-A55C-C057B30FA5F7 + + + + + + + sid-41485063-4276-4F8E-A55C-C057B30FA5F7 + sid-8E7A167A-0410-43DF-AAC1-FAA54D122990 + sid-473A1324-A78B-491D-B26F-E98A581DC913 + sid-9928A8F9-83F2-4C6B-A253-FC3D161BD33C + + + + + + sid-9928A8F9-83F2-4C6B-A253-FC3D161BD33C + sid-65C6AD2F-4BB1-4CCF-A4BD-652354666F4C + + + + + + + + + + + priority = 'Emergency' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.signavio.xml new file mode 100644 index 000000000..f83180b1b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Scripts.signavio.xml @@ -0,0 +1,3553 @@ + + + +BPMN 2.0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Scripts + + + + + + + + + + + + + + + + + + + + + undefined + + + Tester + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Permanent + + + Script + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Select Type Of + + + Change + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Temp Script + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Emergency? 
[new file: tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.bpmn20.xml (209 lines) — BPMN 2.0 process XML, markup not preserved; readable references: USER_INPUT_SELECT_TEST, GATEWAY_SELECTED_TEST, MESSAGES_SUB, SCRIPTS_SUB, MESSAGE_INTERRUPTS_SUB, MESSAGE_NON_INTERRUPT_SUB]
[new file: tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.signavio.xml (3570 lines) — Signavio export of the "Test Workflows" diagram, markup not preserved; readable labels: "Test Runner", "Select Test", "Messages", "Scripts", "Message Interrupts", "Message Non Interrupt", "Tests Done"]
[new file: tests/SpiffWorkflow/bpmn/data/Test-Workflows/Timer-Intermediate.bpmn20.xml (73 lines) — BPMN 2.0 process XML, markup not preserved; readable text: sid-* references and the timer expression due_time]
[new file: tests/SpiffWorkflow/bpmn/data/Test-Workflows/Timer-Intermediate.signavio.xml (818 lines) — Signavio export of the "Timer Intermediate" diagram, markup not preserved; readable labels: "Tester", "Due Time"]
[new file: tests/SpiffWorkflow/bpmn/data/boundary.bpmn (163 lines) — BPMN XML, markup not preserved; readable text: Flow_* references, the condition answer == 'Yes' and the timer duration PT0.03S]
[new file: tests/SpiffWorkflow/bpmn/data/boundary_timer_on_task.bpmn (77 lines) — BPMN XML, markup not preserved; readable text: the timer expression timedelta(milliseconds=2), the script lines timer_called = True and timer_called = False, and the documentation "Some docs"]
[new file: tests/SpiffWorkflow/bpmn/data/bpmnAntiLoopTask.bpmn (47 lines) — BPMN XML, markup not preserved; readable text: the instruction "Enter Name for member {{ Activity_TestLoop_CurrentVar }}" and a loop cardinality of 5]
[new file: tests/SpiffWorkflow/bpmn/data/bpmnLoopTask.bpmn (45 lines) — BPMN XML, markup not preserved; readable text: the instruction "Enter Name for member {{ Activity_TestLoop_CurrentVar }}"]
[new file: tests/SpiffWorkflow/bpmn/data/bpmnMultiUserTask.bpmn (49 lines) — BPMN XML, markup not preserved; readable text: a loop cardinality of 5]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_call_activity.bpmn (43 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Call Event <div><span>Hello {{my_var}}</span></div>" and the script lines my_var = 'World', my_other_var = 'Mike', del(remove_this_var)]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_end_event.bpmn (71 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Main Workflow Hello {{my_other_var}}" and the script lines print(pre_var), print(my_var), print(my_other_var), pre_var = 'some string', remove_this_var = 'something else']
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_nested/call_activity_level_2.bpmn (55 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Main Workflow Hello {{my_other_var}}"]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_nested/call_activity_level_2b.bpmn (42 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Main Workflow Hello {{my_other_var}}"]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_nested/call_activity_level_3.bpmn (43 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Main Workflow Hello {{my_other_var}}"]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_nested/call_activity_nested.bpmn (72 lines) — BPMN XML, markup not preserved; readable text: the documentation "# Main Workflow Hello {{my_other_var}}" and the reference "Level2c"]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_nested/level2c.dmn (20 lines) — DMN XML, markup not preserved; no readable text remains]
[new file: tests/SpiffWorkflow/bpmn/data/call_activity_with_error.bpmn (72 lines) — BPMN XML, markup not preserved; readable text: the script lines print(pre_var), print(my_var), print(my_other_var), pre_var = 'some string' and the comments "# There is no variable remove", "# remove_this_var = 'something else'"]
[new file: tests/SpiffWorkflow/bpmn/data/collaboration.bpmn (112 lines) — BPMN XML, markup not preserved; readable text: lover_name and from_name]
[new file: tests/SpiffWorkflow/bpmn/data/correlation.bpmn (179 lines) — BPMN XML, markup not preserved; readable text: process_id, task_num, init_id and a loop cardinality of 2]
[new file: tests/SpiffWorkflow/bpmn/data/correlation_two_conversations.bpmn (291 lines) — BPMN XML, markup not preserved; readable text: process_id, task_id, task_num, init_id, subprocess and a loop cardinality of 2]
[new file: tests/SpiffWorkflow/bpmn/data/custom_function_test.bpmn (93 lines) — BPMN XML, markup not preserved; readable text: the scripts c1 = custom_function('hello') and c2 = custom_function('goodbye')]
[new file: tests/SpiffWorkflow/bpmn/data/custom_function_test_call_activity.bpmn (39 lines) — BPMN XML, markup not preserved; readable text: the script c3 = custom_function('arrivederci')]
[new file: tests/SpiffWorkflow/bpmn/data/data_object.bpmn (152 lines) — BPMN XML, markup not preserved; readable text: Flow_*, DataObjectReference_* and Property_* references only]
[new file: tests/SpiffWorkflow/bpmn/data/exclusive_gateway_no_default.bpmn (92 lines) — BPMN XML, markup not preserved; readable text: the conditions x > 1 and x < 1]
[new file: tests/SpiffWorkflow/bpmn/data/exclusive_into_multi.bpmn (83 lines) — BPMN XML, markup not preserved; readable text: the script x = 0, the condition x != 0 and a loop cardinality of 1]
[new file: tests/SpiffWorkflow/bpmn/data/exclusive_non_default_path_into_multi.bpmn (97 lines) — BPMN XML, markup not preserved; readable text: the conditions morestuff == 'Yes' and morestuff == 'No' and a loop cardinality of 3]
[new file: tests/SpiffWorkflow/bpmn/data/invalid_cancel.bpmn (123 lines) — BPMN XML, markup not preserved; readable text: the script lines print('New Title'), title = 'New Title', [print(formdata) for _ in range(how_many)], printdata = formdata, and print('Hello'); printdata=''; test_message='']
[new file: tests/SpiffWorkflow/bpmn/data/invalid_process_sub.bpmn (39 lines) — BPMN XML, markup not preserved; readable text: the script print('complicated common task')]
[new file: tests/SpiffWorkflow/bpmn/data/invalid_process_top.bpmn (64 lines) — BPMN XML, markup not preserved; readable text: the scripts print('task1') and print('task2')]
[new file: tests/SpiffWorkflow/bpmn/data/io_spec.bpmn (76 lines) — BPMN XML, markup not preserved; readable text: Flow_* references only]
[new file: tests/SpiffWorkflow/bpmn/data/io_spec_parent.bpmn (54 lines) — BPMN XML, markup not preserved; readable text: Flow_* references only]
[new file: tests/SpiffWorkflow/bpmn/data/lanes.bpmn (207 lines) — BPMN XML, markup not preserved; readable text: lane node references (Activity_B1, Implement_Feature, Gateway_askQuestion, StartEvent_1, Activity_A1, Activity_A2, Activity_1uksrqx, Event_07pakcl, Activity_0i0rxuw) and the condition NeedClarification == 'Yes']
[new file: tests/SpiffWorkflow/bpmn/data/multipleEvents.bpmn (201 lines) — BPMN XML, markup not preserved; readable text: the script lines cancel="cancel_signal", cancel="cancel_event", cancel="none" and the documentation "<H1>Hello</H1>", "<H1>Good Bye</H1>", "<H1>Cancel Message</H1>"]
[new file: tests/SpiffWorkflow/bpmn/data/proptest-Sub.bpmn (40 lines) — BPMN XML, markup not preserved; readable text: the script lines valC=valB and valD=valA]
[new file: tests/SpiffWorkflow/bpmn/data/proptest-Top.bpmn (111 lines) — BPMN XML, markup not preserved; readable text: the script lines valA = 1 and valB = valA plus commented-out print statements]
[new file: tests/SpiffWorkflow/bpmn/data/random_fact.bpmn (133 lines) — BPMN XML, markup not preserved; readable text: the script "#! scripts.FactService" with def some_fun(): x = "what fun!" return x; y = some_fun(), the documentation "Here's some documentation", and the annotations "User sets the Fact.type to cat, norris, or buzzword" and "Makes an API call to get a fact of the required type."]
[new file: tests/SpiffWorkflow/bpmn/data/resetworkflowA-sublevel.bpmn (38 lines) — BPMN XML, markup not preserved; readable text: Flow_* references only]
[new file: tests/SpiffWorkflow/bpmn/data/resetworkflowA-toplevel.bpmn (76 lines) — BPMN XML, markup not preserved; readable text: the script "# Just need another task in here, to handle deep nesting." followed by x=1]
[new file: tests/SpiffWorkflow/bpmn/data/resetworkflowB-sublevel.bpmn (50 lines) — BPMN XML, markup not preserved; readable text: Flow_* references only]
[new file: tests/SpiffWorkflow/bpmn/data/resetworkflowB-toplevel.bpmn (76 lines) — BPMN XML, markup not preserved; readable text: the script "# Just need another task in here, to handle deep nesting." followed by x=1]
[new file: tests/SpiffWorkflow/bpmn/data/rrt.bpmn (336 lines) — BPMN XML, markup not preserved; the readable text begins with the following task documentation:]

### UNIVERSITY OF VIRGINIA RESEARCH
#### Research Ramp-up Plan

As we plan for the resumption of on-grounds research, PIs are required to develop a Research Ramp-up Plan. Please use the ramp-up guidance provided to lay out your plan(s) to manage operations while prioritizing physical distancing, staggered work shifts to reduce group size, remote work, and other exposure-reducing measures.

Plans must be submitted to the Office of Research by Monday, May ?? for consideration in the first round of approvals. Plans will then be reviewed on a rolling basis going forward.

Instructions for Submitting:

1. Add a Request for each lab space you manage in a building. If your lab spans multiple rooms or floors in a single building, one request will be required for that lab. If your lab spans multipe buildings, one request for each building will be required for that lab. The primary reason for this differentiation is that in addition to obtaining approval to restart operations, this information will also be used after start up to assist with any contact tracing that may be needed.

2. Select each Request added and step through each form presented, responding to all required and applicable fields. You may be presented with different questions if activities in each lab differ.

3. After all forms have been completed, you will be presented with the option to create your Research Recovery Plan in Word format. Download the document and review it. If you see any corrections that need to be made, return to the coresponding form and make the correction.

4. Once the generated Research Recovery Plan is finalize, use the web site to submit it to the Office of the Vice President for Research for review.

Please submit questions on the Research Support website.
+ SequenceFlow_05ja25w + SequenceFlow_0h50bp3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0h50bp3 + SequenceFlow_0bqu7pp + + + + + + ### {{ LabName }} +#### Lab details + + +Your response to these questions will determine if you do or do not provide additional information regarding each topic later. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0bqu7pp + Flow_0scfmzc + + + SequenceFlow_1qtrgbv + + + + Review plan, make changes if needed, continue of ready to submit. + Flow_1b6vbkk + Flow_1e2qi9s + + + + Flow_1e2qi9s + SequenceFlow_1qtrgbv + CompleteTemplate ResearchRecoveryPlan.docx RESEARCH_RECOVERY + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_0so3402 + SequenceFlow_1yi9lig + + + Flow_0scfmzc + Flow_0so3402 + Flow_0141rp3 + + + isAnimalUse == True + + + + + + + + + + + + + + Flow_1121pfu + SequenceFlow_1b4non2 + + + Flow_0141rp3 + SequenceFlow_1yi9lig + Flow_1121pfu + SequenceFlow_1wp5zmg + + + isGrantSupport == True + + + SequenceFlow_1b4non2 + SequenceFlow_1wp5zmg + Flow_1b6vbkk + + + + + isGrantSupport == False + + + + isAnimalUse == False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/same_id.bpmn b/tests/SpiffWorkflow/bpmn/data/same_id.bpmn new file mode 100644 index 000000000..df911921d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/same_id.bpmn @@ -0,0 +1,61 @@ + + + + + Flow_1fij5ow + + + Flow_1fij5ow + Flow_0gdswwp + + + + + Flow_0z5aj2a + + + + Flow_0gdswwp + Flow_0z5aj2a + + + This Task's id is "I_AM_TASK_1"  which is the same id of a task in workflow referenced in Task 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/same_id_sub.bpmn b/tests/SpiffWorkflow/bpmn/data/same_id_sub.bpmn new file mode 100644 index 000000000..74bc4147d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/same_id_sub.bpmn @@ -0,0 +1,49 @@ + + + + + Flow_1fij5ow + + + Flow_1fij5ow + Flow_0gdswwp + + + + + Flow_0gdswwp + + + This Task's id is "I_AM_TASK_1"  which is the same id of a task in workflow referenced in Task 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/serialization/v1.0.json b/tests/SpiffWorkflow/bpmn/data/serialization/v1.0.json new file mode 100644 index 000000000..f580929ad --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/serialization/v1.0.json @@ -0,0 +1,1045 @@ +{ + "serializer_version": "1.0", + "spec":{ + "name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "description":"Nested Subprocesses", + "file":"None:sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.bpmn", + "task_specs":{ + "Start":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_1", + "name":"Start", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[ + "sid-093DC600-6F99-40CE-988C-7AD87B792F90" + ], + "typename":"StartTask" + }, + "sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_2", + "name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38" + ], + 
"outputs":[ + "End" + ], + "typename":"_EndJoin" + }, + "End":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_3", + "name":"End", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin" + ], + "outputs":[], + "typename":"Simple" + }, + "sid-093DC600-6F99-40CE-988C-7AD87B792F90":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_4", + "name":"sid-093DC600-6F99-40CE-988C-7AD87B792F90", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":295.03104963838916, + "y":125.9281387248673 + }, + "lookahead":2, + "inputs":[ + "Start" + ], + "outputs":[ + "sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B":{ + "id":"sid-E35CEC65-EA3C-4C5A-BC90-8C17016C24E5", + "name":"", + "documentation":null, + "target_task_spec":"sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-E35CEC65-EA3C-4C5A-BC90-8C17016C24E5":{ + "id":"sid-E35CEC65-EA3C-4C5A-BC90-8C17016C24E5", + "name":"", + "documentation":null, + "target_task_spec":"sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"StartEvent", + "extensions":{} + }, + "sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_5", + "name":"sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B", + "description":"Action1", + "manual":false, + "internal":false, + "position":{ + "x":388.1987495241964, + "y":100.0482220899209 + }, + "lookahead":2, + "inputs":[ + "sid-093DC600-6F99-40CE-988C-7AD87B792F90" + ], + "outputs":[ + "sid-C014B4B9-889F-4EE9-9949-C89502C35CF0" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-C014B4B9-889F-4EE9-9949-C89502C35CF0":{ + "id":"sid-5BC5ECB5-884B-449A-AC67-B9B7ED296728", + "name":"Continue", + "documentation":null, + "target_task_spec":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-5BC5ECB5-884B-449A-AC67-B9B7ED296728":{ + "id":"sid-5BC5ECB5-884B-449A-AC67-B9B7ED296728", + "name":"Continue", + "documentation":null, + "target_task_spec":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "typename":"SequenceFlow" + } + }, + "typename":"TestUserTask", + "extensions":{} + }, + "sid-C014B4B9-889F-4EE9-9949-C89502C35CF0":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_6", + "name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "description":"Nested level 1", + "manual":false, + "internal":false, + "position":{ + "x":621.1179992387142, + "y":75.0 + }, + "lookahead":2, + "inputs":[ + "sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B" + ], + "outputs":[ + "sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38" + ], + "lane":null, + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38":{ + "id":"sid-85D2E5A5-BD56-4650-B715-3B6E0BE33443", + "name":"Finished", + "documentation":null, + "target_task_spec":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-85D2E5A5-BD56-4650-B715-3B6E0BE33443":{ + "id":"sid-85D2E5A5-BD56-4650-B715-3B6E0BE33443", + "name":"Finished", + "documentation":null, + 
"target_task_spec":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38", + "typename":"SequenceFlow" + } + }, + "spec":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7", + "sub_workflow":null, + "typename":"CallActivity", + "extensions":{} + }, + "sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_7", + "name":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":838.5092989722641, + "y":126.96333539026517 + }, + "lookahead":2, + "inputs":[ + "sid-C014B4B9-889F-4EE9-9949-C89502C35CF0" + ], + "outputs":[ + "sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin":{ + "id":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38.ToEndJoin":{ + "id":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"EndEvent", + "extensions":{} + }, + "Root":{ + "id":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963_8", + "name":"Root", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[], + "typename":"Simple" + } + }, + "typename":"BpmnProcessSpec" + }, + "data":{}, + "last_task":"7ff9e4ff-adbe-4c60-9b76-e7645a6e1563", + "success":true, + "tasks":{ + "32fcbafc-495a-4ba2-8b29-d65a416c45b9":{ + "id":"32fcbafc-495a-4ba2-8b29-d65a416c45b9", + "parent":null, + "children":[ + "e0dd5675-7477-40dd-a295-84457f6b5196" + ], + "last_state_change":1655750011.214358, + "state":32, + "task_spec":"Root", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + "e0dd5675-7477-40dd-a295-84457f6b5196":{ + "id":"e0dd5675-7477-40dd-a295-84457f6b5196", + "parent":"32fcbafc-495a-4ba2-8b29-d65a416c45b9", + "children":[ + "7d02cfc1-ecfe-4795-8697-433bec0083a7" + ], + "last_state_change":1655750011.221847, + "state":32, + "task_spec":"Start", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + "7d02cfc1-ecfe-4795-8697-433bec0083a7":{ + "id":"7d02cfc1-ecfe-4795-8697-433bec0083a7", + "parent":"e0dd5675-7477-40dd-a295-84457f6b5196", + "children":[ + "d3636b3c-2551-4027-a748-b6d28701d622" + ], + "last_state_change":1655750011.2265785, + "state":32, + "task_spec":"sid-093DC600-6F99-40CE-988C-7AD87B792F90", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{ + "event_fired":true + }, + "data":{} + }, + "d3636b3c-2551-4027-a748-b6d28701d622":{ + "id":"d3636b3c-2551-4027-a748-b6d28701d622", + "parent":"7d02cfc1-ecfe-4795-8697-433bec0083a7", + "children":[ + "7ff9e4ff-adbe-4c60-9b76-e7645a6e1563" + ], + "last_state_change":1655750011.231242, + "state":32, + "task_spec":"sid-FECD237F-6ABD-4A51-BB9C-B0C7D991202B", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + 
"7ff9e4ff-adbe-4c60-9b76-e7645a6e1563":{ + "id":"7ff9e4ff-adbe-4c60-9b76-e7645a6e1563", + "parent":"d3636b3c-2551-4027-a748-b6d28701d622", + "children":[ + "eaba8031-e898-4c7f-97a7-62ec00282748", + "dea6192c-686b-4382-a6cf-d5b3ca038a37" + ], + "last_state_change":1655750011.244295, + "state":32, + "task_spec":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + "eaba8031-e898-4c7f-97a7-62ec00282748":{ + "id":"eaba8031-e898-4c7f-97a7-62ec00282748", + "parent":"7ff9e4ff-adbe-4c60-9b76-e7645a6e1563", + "children":[ + "26ddc593-6c9a-46c5-abe6-37c4b3522350" + ], + "last_state_change":1655750011.2491412, + "state":32, + "task_spec":"Start", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "26ddc593-6c9a-46c5-abe6-37c4b3522350":{ + "id":"26ddc593-6c9a-46c5-abe6-37c4b3522350", + "parent":"eaba8031-e898-4c7f-97a7-62ec00282748", + "children":[ + "95912ad3-9951-4d4f-8591-f6e849f383bc" + ], + "last_state_change":1655750011.2535467, + "state":32, + "task_spec":"sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{ + "event_fired":true + }, + "data":{} + }, + "95912ad3-9951-4d4f-8591-f6e849f383bc":{ + "id":"95912ad3-9951-4d4f-8591-f6e849f383bc", + "parent":"26ddc593-6c9a-46c5-abe6-37c4b3522350", + "children":[ + "cc33a303-918a-4ad3-91c4-ef8b4e4fd127" + ], + "last_state_change":1655750034.2813075, + "state":32, + "task_spec":"sid-1B59DCD2-83A0-4687-B7BE-83625395572E", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "cc33a303-918a-4ad3-91c4-ef8b4e4fd127":{ + "id":"cc33a303-918a-4ad3-91c4-ef8b4e4fd127", + "parent":"95912ad3-9951-4d4f-8591-f6e849f383bc", + "children":[ + "ddda3f03-9fde-4b15-bba1-d053ce9adc88", + "54ac1086-be95-4f62-acd5-e4d52cfe758a" + ], + "last_state_change":1655750034.2987168, + "state":32, + "task_spec":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "ddda3f03-9fde-4b15-bba1-d053ce9adc88":{ + "id":"ddda3f03-9fde-4b15-bba1-d053ce9adc88", + "parent":"cc33a303-918a-4ad3-91c4-ef8b4e4fd127", + "children":[ + "88737152-d6ff-465b-92f1-50f19c45f64e" + ], + "last_state_change":1655750034.3050532, + "state":32, + "task_spec":"Start", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{}, + "data":{} + }, + "88737152-d6ff-465b-92f1-50f19c45f64e":{ + "id":"88737152-d6ff-465b-92f1-50f19c45f64e", + "parent":"ddda3f03-9fde-4b15-bba1-d053ce9adc88", + "children":[ + "2bc86015-d947-4572-a2fd-8649d18bd55a" + ], + "last_state_change":1655750034.310996, + "state":32, + "task_spec":"sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{ + "event_fired":true + }, + "data":{} + }, + "2bc86015-d947-4572-a2fd-8649d18bd55a":{ + "id":"2bc86015-d947-4572-a2fd-8649d18bd55a", + "parent":"88737152-d6ff-465b-92f1-50f19c45f64e", + "children":[ + "27ed00f2-962d-4208-8c37-34fa9146bebd" + ], + "last_state_change":1655750034.3138447, + "state":16, + "task_spec":"sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{}, + 
"data":{} + }, + "27ed00f2-962d-4208-8c37-34fa9146bebd":{ + "id":"27ed00f2-962d-4208-8c37-34fa9146bebd", + "parent":"2bc86015-d947-4572-a2fd-8649d18bd55a", + "children":[ + "ed38c843-c6e7-408f-8433-117a31e52981" + ], + "last_state_change":1655750034.2856145, + "state":4, + "task_spec":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{}, + "data":{} + }, + "ed38c843-c6e7-408f-8433-117a31e52981":{ + "id":"ed38c843-c6e7-408f-8433-117a31e52981", + "parent":"27ed00f2-962d-4208-8c37-34fa9146bebd", + "children":[ + "27c8287e-bce1-4b9e-afee-b637010bd837" + ], + "last_state_change":1655750034.28598, + "state":4, + "task_spec":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{}, + "data":{} + }, + "27c8287e-bce1-4b9e-afee-b637010bd837":{ + "id":"27c8287e-bce1-4b9e-afee-b637010bd837", + "parent":"ed38c843-c6e7-408f-8433-117a31e52981", + "children":[], + "last_state_change":1655750034.2863548, + "state":4, + "task_spec":"End", + "triggered":false, + "workflow_name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "internal_data":{}, + "data":{} + }, + "54ac1086-be95-4f62-acd5-e4d52cfe758a":{ + "id":"54ac1086-be95-4f62-acd5-e4d52cfe758a", + "parent":"cc33a303-918a-4ad3-91c4-ef8b4e4fd127", + "children":[ + "05ef5ac5-6579-4ac2-9b8f-09f2b5d98aca" + ], + "last_state_change":1655750011.235, + "state":4, + "task_spec":"sid-64E2EF25-F986-4834-8C3B-C3533746113E", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "05ef5ac5-6579-4ac2-9b8f-09f2b5d98aca":{ + "id":"05ef5ac5-6579-4ac2-9b8f-09f2b5d98aca", + "parent":"54ac1086-be95-4f62-acd5-e4d52cfe758a", + "children":[ + "4fa137ad-658d-4f21-8bd8-c591905dc059" + ], + "last_state_change":1655750011.235288, + "state":4, + "task_spec":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "4fa137ad-658d-4f21-8bd8-c591905dc059":{ + "id":"4fa137ad-658d-4f21-8bd8-c591905dc059", + "parent":"05ef5ac5-6579-4ac2-9b8f-09f2b5d98aca", + "children":[], + "last_state_change":1655750011.2356079, + "state":4, + "task_spec":"End", + "triggered":false, + "workflow_name":"sid-C014B4B9-889F-4EE9-9949-C89502C35CF0", + "internal_data":{}, + "data":{} + }, + "dea6192c-686b-4382-a6cf-d5b3ca038a37":{ + "id":"dea6192c-686b-4382-a6cf-d5b3ca038a37", + "parent":"7ff9e4ff-adbe-4c60-9b76-e7645a6e1563", + "children":[ + "bdae1429-d0da-494f-be6e-2cd476d9c5c8" + ], + "last_state_change":1655750011.2166302, + "state":4, + "task_spec":"sid-EF0FA50B-FE9B-4C86-9981-4F6B62387D38", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + "bdae1429-d0da-494f-be6e-2cd476d9c5c8":{ + "id":"bdae1429-d0da-494f-be6e-2cd476d9c5c8", + "parent":"dea6192c-686b-4382-a6cf-d5b3ca038a37", + "children":[ + "9dee00c2-f09a-4738-9e91-b84f7e127649" + ], + "last_state_change":1655750011.2169454, + "state":4, + "task_spec":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963.EndJoin", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + }, + "9dee00c2-f09a-4738-9e91-b84f7e127649":{ + "id":"9dee00c2-f09a-4738-9e91-b84f7e127649", + "parent":"bdae1429-d0da-494f-be6e-2cd476d9c5c8", + "children":[], + 
"last_state_change":1655750011.217286, + "state":4, + "task_spec":"End", + "triggered":false, + "workflow_name":"sid-a12cf1e5-86f4-4d69-9790-6a90342f5963", + "internal_data":{}, + "data":{} + } + }, + "root":"32fcbafc-495a-4ba2-8b29-d65a416c45b9", + "subprocess_specs":{ + "sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5":{ + "name":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5", + "description":"Nested level 2", + "file":"None:sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.bpmn", + "task_specs":{ + "Start":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_1", + "name":"Start", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[ + "sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65" + ], + "typename":"StartTask" + }, + "sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_2", + "name":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-2EDAD784-7F15-486C-B805-D26EE25F8087" + ], + "outputs":[ + "End" + ], + "typename":"_EndJoin" + }, + "End":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_3", + "name":"End", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin" + ], + "outputs":[], + "typename":"Simple" + }, + "sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_4", + "name":"sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":223.0, + "y":178.0 + }, + "lookahead":2, + "inputs":[ + "Start" + ], + "outputs":[ + "sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7":{ + "id":"sid-862F0510-9CB8-4C7C-87CC-A9EA7E2D8758", + "name":"In", + "documentation":null, + "target_task_spec":"sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-862F0510-9CB8-4C7C-87CC-A9EA7E2D8758":{ + "id":"sid-862F0510-9CB8-4C7C-87CC-A9EA7E2D8758", + "name":"In", + "documentation":null, + "target_task_spec":"sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"StartEvent", + "extensions":{} + }, + "sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_5", + "name":"sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7", + "description":"Action3", + "manual":false, + "internal":false, + "position":{ + "x":360.0, + "y":153.0 + }, + "lookahead":2, + "inputs":[ + "sid-D55DA431-BFBE-4EB9-9B86-918CD1792C65" + ], + "outputs":[ + "sid-2EDAD784-7F15-486C-B805-D26EE25F8087" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-2EDAD784-7F15-486C-B805-D26EE25F8087":{ + "id":"sid-80315FC5-BC67-4999-8CE2-3ACDC838E3E6", + "name":"Finish", + "documentation":null, + "target_task_spec":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-80315FC5-BC67-4999-8CE2-3ACDC838E3E6":{ + "id":"sid-80315FC5-BC67-4999-8CE2-3ACDC838E3E6", + "name":"Finish", + "documentation":null, + 
"target_task_spec":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087", + "typename":"SequenceFlow" + } + }, + "typename":"TestUserTask", + "extensions":{} + }, + "sid-2EDAD784-7F15-486C-B805-D26EE25F8087":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_6", + "name":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":578.0, + "y":170.0 + }, + "lookahead":2, + "inputs":[ + "sid-17C53A94-546D-4099-8A52-AAEE6AC3F6E7" + ], + "outputs":[ + "sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin":{ + "id":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-2EDAD784-7F15-486C-B805-D26EE25F8087.ToEndJoin":{ + "id":"sid-2EDAD784-7F15-486C-B805-D26EE25F8087.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5.EndJoin", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"EndEvent", + "extensions":{} + }, + "Root":{ + "id":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5_7", + "name":"Root", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[], + "typename":"Simple" + } + }, + "typename":"BpmnProcessSpec" + }, + "sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7":{ + "name":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7", + "description":"Nested level 1", + "file":"None:sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.bpmn", + "task_specs":{ + "Start":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_1", + "name":"Start", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[ + "sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A" + ], + "typename":"StartTask" + }, + "sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_2", + "name":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-64E2EF25-F986-4834-8C3B-C3533746113E" + ], + "outputs":[ + "End" + ], + "typename":"_EndJoin" + }, + "End":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_3", + "name":"End", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[ + "sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin" + ], + "outputs":[], + "typename":"Simple" + }, + "sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_4", + "name":"sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":261.0, + "y":119.0 + }, + "lookahead":2, + "inputs":[ + "Start" + ], + "outputs":[ + "sid-1B59DCD2-83A0-4687-B7BE-83625395572E" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-1B59DCD2-83A0-4687-B7BE-83625395572E":{ + "id":"sid-27BA998B-1FEE-4CBA-86D5-8C5968F1478D", + "name":"", + "documentation":null, + "target_task_spec":"sid-1B59DCD2-83A0-4687-B7BE-83625395572E", + 
"typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-27BA998B-1FEE-4CBA-86D5-8C5968F1478D":{ + "id":"sid-27BA998B-1FEE-4CBA-86D5-8C5968F1478D", + "name":"", + "documentation":null, + "target_task_spec":"sid-1B59DCD2-83A0-4687-B7BE-83625395572E", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"StartEvent", + "extensions":{} + }, + "sid-1B59DCD2-83A0-4687-B7BE-83625395572E":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_5", + "name":"sid-1B59DCD2-83A0-4687-B7BE-83625395572E", + "description":"Action2", + "manual":false, + "internal":false, + "position":{ + "x":345.0, + "y":75.0 + }, + "lookahead":2, + "inputs":[ + "sid-23EF7D0F-BC6E-45ED-A47D-22CEBCE0BE5A" + ], + "outputs":[ + "sid-BC014079-199F-4720-95CD-244B0ACB6DE1" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-BC014079-199F-4720-95CD-244B0ACB6DE1":{ + "id":"sid-02F2C617-DFEE-44AE-AAED-145AF2E2D946", + "name":"ToLevel2", + "documentation":null, + "target_task_spec":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-02F2C617-DFEE-44AE-AAED-145AF2E2D946":{ + "id":"sid-02F2C617-DFEE-44AE-AAED-145AF2E2D946", + "name":"ToLevel2", + "documentation":null, + "target_task_spec":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "typename":"SequenceFlow" + } + }, + "typename":"TestUserTask", + "extensions":{} + }, + "sid-BC014079-199F-4720-95CD-244B0ACB6DE1":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_6", + "name":"sid-BC014079-199F-4720-95CD-244B0ACB6DE1", + "description":"Nested level 2", + "manual":false, + "internal":false, + "position":{ + "x":525.0, + "y":90.0 + }, + "lookahead":2, + "inputs":[ + "sid-1B59DCD2-83A0-4687-B7BE-83625395572E" + ], + "outputs":[ + "sid-64E2EF25-F986-4834-8C3B-C3533746113E" + ], + "lane":null, + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-64E2EF25-F986-4834-8C3B-C3533746113E":{ + "id":"sid-B8192BBF-7DB4-4AA1-8990-5017C30130A8", + "name":"Finish", + "documentation":null, + "target_task_spec":"sid-64E2EF25-F986-4834-8C3B-C3533746113E", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-B8192BBF-7DB4-4AA1-8990-5017C30130A8":{ + "id":"sid-B8192BBF-7DB4-4AA1-8990-5017C30130A8", + "name":"Finish", + "documentation":null, + "target_task_spec":"sid-64E2EF25-F986-4834-8C3B-C3533746113E", + "typename":"SequenceFlow" + } + }, + "spec":"sid-65436787-c39d-47c9-b99e-b4d7bd01b8f5", + "sub_workflow":null, + "typename":"CallActivity", + "extensions":{} + }, + "sid-64E2EF25-F986-4834-8C3B-C3533746113E":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_7", + "name":"sid-64E2EF25-F986-4834-8C3B-C3533746113E", + "description":"Done", + "manual":false, + "internal":false, + "position":{ + "x":690.0, + "y":115.0 + }, + "lookahead":2, + "inputs":[ + "sid-BC014079-199F-4720-95CD-244B0ACB6DE1" + ], + "outputs":[ + "sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin" + ], + "lane":"Tester", + "documentation":null, + "loopTask":false, + "outgoing_sequence_flows":{ + "sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin":{ + "id":"sid-64E2EF25-F986-4834-8C3B-C3533746113E.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin", + "typename":"SequenceFlow" + } + }, + "outgoing_sequence_flows_by_id":{ + "sid-64E2EF25-F986-4834-8C3B-C3533746113E.ToEndJoin":{ + 
"id":"sid-64E2EF25-F986-4834-8C3B-C3533746113E.ToEndJoin", + "name":null, + "documentation":null, + "target_task_spec":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7.EndJoin", + "typename":"SequenceFlow" + } + }, + "event_definition":{ + "internal":false, + "external":false, + "typename":"NoneEventDefinition" + }, + "typename":"EndEvent", + "extensions":{} + }, + "Root":{ + "id":"sid-c7ccc144-a90c-4a5b-973d-a9d67bc9aae7_8", + "name":"Root", + "description":"", + "manual":false, + "internal":false, + "position":{ + "x":0, + "y":0 + }, + "lookahead":2, + "inputs":[], + "outputs":[], + "typename":"Simple" + } + }, + "typename":"BpmnProcessSpec" + } + }, + "subprocesses":{ + "cc33a303-918a-4ad3-91c4-ef8b4e4fd127":[ + "ddda3f03-9fde-4b15-bba1-d053ce9adc88", + "88737152-d6ff-465b-92f1-50f19c45f64e", + "2bc86015-d947-4572-a2fd-8649d18bd55a", + "27ed00f2-962d-4208-8c37-34fa9146bebd", + "ed38c843-c6e7-408f-8433-117a31e52981", + "27c8287e-bce1-4b9e-afee-b637010bd837" + ], + "7ff9e4ff-adbe-4c60-9b76-e7645a6e1563":[ + "eaba8031-e898-4c7f-97a7-62ec00282748", + "26ddc593-6c9a-46c5-abe6-37c4b3522350", + "95912ad3-9951-4d4f-8591-f6e849f383bc", + "cc33a303-918a-4ad3-91c4-ef8b4e4fd127", + "ddda3f03-9fde-4b15-bba1-d053ce9adc88", + "88737152-d6ff-465b-92f1-50f19c45f64e", + "2bc86015-d947-4572-a2fd-8649d18bd55a", + "27ed00f2-962d-4208-8c37-34fa9146bebd", + "ed38c843-c6e7-408f-8433-117a31e52981", + "27c8287e-bce1-4b9e-afee-b637010bd837", + "54ac1086-be95-4f62-acd5-e4d52cfe758a", + "05ef5ac5-6579-4ac2-9b8f-09f2b5d98aca", + "4fa137ad-658d-4f21-8bd8-c591905dc059" + ] + } +} diff --git a/tests/SpiffWorkflow/bpmn/data/service_task.bpmn b/tests/SpiffWorkflow/bpmn/data/service_task.bpmn new file mode 100644 index 000000000..fa24627f1 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/service_task.bpmn @@ -0,0 +1,39 @@ + + + + + Flow_0l9vzsi + + + + Flow_0l9vzsi + Flow_16rdnn7 + + + Flow_16rdnn7 + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_in_loop.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_in_loop.bpmn new file mode 100644 index 000000000..ec8720e0a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_in_loop.bpmn @@ -0,0 +1,88 @@ + + + + + Flow_0nlj5lh + + + Flow_0nlj5lh + Flow_16vai1a + Flow_1lkecht + + + + Flow_1lkecht + Flow_1vci114 + + + + Flow_0iui938 + Flow_0ew7zdi + Flow_16vai1a + + + Flow_0ew7zdi + + + done + + + + Flow_1vci114 + Flow_0iui938 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_in_loop_call_activity.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_in_loop_call_activity.bpmn new file mode 100644 index 000000000..2af69b3be --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_in_loop_call_activity.bpmn @@ -0,0 +1,38 @@ + + + + + Flow_1dbtwxp + + + + Flow_1t99mly + + + + Flow_1dbtwxp + Flow_1t99mly + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_within_sub_multi.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_within_sub_multi.bpmn new file mode 100644 index 000000000..95e29da0d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_within_sub_multi.bpmn @@ -0,0 +1,129 @@ + + + + + Flow_0dsbqk4 + + + + Flow_18e9qgr + + + Flow_1ona7kk + Flow_18e9qgr + + + Flow_05tjul5 + + + Flow_1pc1vib + + + Flow_1pc1vib + Flow_05tjul5 + + Flow_0hikak1 + + + Flow_0hikak1 + Flow_0oby5rd + my_var['new_info'] = "Adding this!" 
+my_var['name'] = my_var['name'] + "_edit" + + + + + Flow_0oby5rd + + + + + + + + + + Flow_0dsbqk4 + Flow_1ona7kk + my_collection = { + 'a':{'name':'Apple'}, + 'b':{'name':'Bubble'}, + 'c':{'name':'Crap, I should write better code'} +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi.bpmn new file mode 100644 index 000000000..1fb69933c --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi.bpmn @@ -0,0 +1,93 @@ + + + + + Flow_0dsbqk4 + + + + Flow_18e9qgr + + + Flow_1ona7kk + Flow_18e9qgr + + + Flow_14l2ton + Flow_06gypww + my_var['new_info'] = "Adding this!" +my_var['name'] = my_var['name'] + "_edit" + + + Flow_06gypww + + + Flow_14l2ton + + + + + + + + Flow_0dsbqk4 + Flow_1ona7kk + my_collection = { + 'a':{'name':'Apple'}, + 'b':{'name':'Bubble'}, + 'c':{'name':'Crap, I should write better code'} +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi1.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi1.bpmn new file mode 100644 index 000000000..478e18ffa --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi1.bpmn @@ -0,0 +1,59 @@ + + + + + Flow_0dsbqk4 + + + + Flow_1lbqsop + + + Flow_0n1o8w6 + Flow_1lbqsop + + 5 + done==True + + x = {'a':a} +if a==3: + done=True +a=x + + + + + Flow_0dsbqk4 + Flow_0n1o8w6 + done=False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi_parallel.bpmn b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi_parallel.bpmn new file mode 100644 index 000000000..b89d98f66 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/sub_workflow_multi_parallel.bpmn @@ -0,0 +1,93 @@ + + + + + Flow_0dsbqk4 + + + + Flow_18e9qgr + + + Flow_1ona7kk + Flow_18e9qgr + + + Flow_14l2ton + Flow_06gypww + my_var['new_info'] = "Adding this!" 
+my_var['name'] = my_var['name'] + "_edit" + + + Flow_06gypww + + + Flow_14l2ton + + + + + + + + Flow_0dsbqk4 + Flow_1ona7kk + my_collection = { + 'a':{'name':'Apple'}, + 'b':{'name':'Bubble'}, + 'c':{'name':'Crap, I should write better code'} +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer-cycle-start.bpmn b/tests/SpiffWorkflow/bpmn/data/timer-cycle-start.bpmn new file mode 100644 index 000000000..9d75c9c9e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer-cycle-start.bpmn @@ -0,0 +1,99 @@ + + + + + + + + + StartEvent + wait_timer + EndItAll + + + CycleStart + Refill_Coffee + CycleEnd + + + + Flow_1pahvlr + + + Flow_0jtfzsk + + (2,timedelta(seconds=0.1)) + + + + Flow_0jtfzsk + Flow_07sglzn + print('refill count = %d'%custom_function()) + + + Flow_07sglzn + + + Flow_1pahvlr + Flow_05ejbm4 + + timedelta(seconds=0.5) + + + + Flow_05ejbm4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer-cycle.bpmn b/tests/SpiffWorkflow/bpmn/data/timer-cycle.bpmn new file mode 100644 index 000000000..9252d2f54 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer-cycle.bpmn @@ -0,0 +1,84 @@ + + + + + Flow_1pahvlr + + + Flow_09d7dp2 + Flow_1pvkgnu + + + Flow_1pvkgnu + Flow_1ekgt3x + + + Flow_1ekgt3x + + + + + + + Flow_1pzc4jz + print('refill count = %d'%custom_function()) + + + + Flow_1pahvlr + Flow_09d7dp2 + refill_count = 0 + + + Flow_1pzc4jz + + (2,timedelta(seconds=0.01)) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer-date-start.bpmn b/tests/SpiffWorkflow/bpmn/data/timer-date-start.bpmn new file mode 100644 index 000000000..7bead0fa2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer-date-start.bpmn @@ -0,0 +1,72 @@ + + + + + Flow_1i73q45 + + + Flow_1i73q45 + Flow_00e79cz + futuredate = dateparser.parse('in 1 second') - timedelta(seconds=.95) +futuredate2 = dateparser.parse('September 1 2021 at 10am EDT') + + + + + Flow_00e79cz + Flow_1bdrcxy + + futuredate + + + + + Flow_1bdrcxy + Flow_0bjksyv + print('yay!') +completed = True + + + Flow_0bjksyv + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn b/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn new file mode 100644 index 000000000..25d750e27 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn @@ -0,0 +1,169 @@ + + + + + Flow_1hyztad + + + Flow_1hyztad + Flow_07l1pau + + Flow_1ls93l9 + + + + Flow_1ku6me6 + Flow_06jd2h7 + Flow_10bimyk + + + + + Flow_10bimyk + + + work_done == 'Yes' + + + + + + + + Flow_1ls93l9 + Flow_06jd2h7 + Flow_1ku6me6 + + + + Flow_03e1mfr + + timedelta(seconds=.2) + + + + + + + + + Flow_03e1mfr + Flow_0tlkkap + + + + + + + Flow_07l1pau + Flow_0tlkkap + Flow_0vper9q + + + + Flow_0or6odg + + + + + + + + + Flow_0vper9q + Flow_0or6odg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer.bpmn b/tests/SpiffWorkflow/bpmn/data/timer.bpmn new file mode 100644 index 000000000..fa26d75fb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer.bpmn @@ -0,0 +1,68 @@ + + + + + 
Flow_1pahvlr + + + Flow_1pahvlr + Flow_1pvkgnu + + + Flow_1pvkgnu + Flow_1elbn9u + + timedelta(seconds=.25) + + + + Flow_1elbn9u + Flow_1ekgt3x + + + Flow_1ekgt3x + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer_event_changes_last_task.bpmn b/tests/SpiffWorkflow/bpmn/data/timer_event_changes_last_task.bpmn new file mode 100644 index 000000000..8b6acb97d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer_event_changes_last_task.bpmn @@ -0,0 +1,77 @@ + + + + + Flow_164sojd + + + Flow_1m2vq4v + Flow_04tuv5z + + + + Flow_0ac4lx5 + + timedelta(milliseconds=2) + + + + + Flow_0ac4lx5 + timer_called = True + + + Some docs + Flow_04tuv5z + + + + + Flow_164sojd + Flow_1m2vq4v + timer_called = False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/too_many_loops.bpmn b/tests/SpiffWorkflow/bpmn/data/too_many_loops.bpmn new file mode 100644 index 000000000..8353d16d7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/too_many_loops.bpmn @@ -0,0 +1,125 @@ + + + + + Flow_1gb8wca + Flow_1d2usdq + counter = counter + 1 + + + + ### Results +Submission for Pre-Review was sent to the HSR-IRB on {{ sent_local_date_str }} at {{ sent_local_time_str }}. + +The HSR-IRB started the Pre-Review process on {{ end_local_date_str }} at {{ end_local_time_str }} and assigned {{ irb_info.IRB_ADMINISTRATIVE_REVIEWER }} as the reviewer. + +### Metrics + + +Days elapsed: {{days_delta }} + Flow_1tj9oz1 + + + + Flow_15jw6a4 + Flow_0op1a19 + Flow_1gb8wca + + timedelta(milliseconds=10) + + + + Flow_0mxlkif + Flow_1tj9oz1 + Flow_0op1a19 + + + counter >= 20 + + + counter < 20 + + + + Flow_0q7fkb7 + Flow_15jw6a4 + counter = 0 + + + + Flow_1d2usdq + Flow_0mxlkif + + + Flow_0q7fkb7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/too_many_loops_call_activity.bpmn b/tests/SpiffWorkflow/bpmn/data/too_many_loops_call_activity.bpmn new file mode 100644 index 000000000..ee7f0479d --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/too_many_loops_call_activity.bpmn @@ -0,0 +1,48 @@ + + + + + Flow_175n91v + + + Flow_175n91v + Flow_1d2usdq + counter2 = 1000 + + + + ### Results +Submission for Pre-Review was sent to the HSR-IRB on {{ sent_local_date_str }} at {{ sent_local_time_str }}. + +The HSR-IRB started the Pre-Review process on {{ end_local_date_str }} at {{ end_local_time_str }} and assigned {{ irb_info.IRB_ADMINISTRATIVE_REVIEWER }} as the reviewer. + +### Metrics + + +Days elapsed: {{days_delta }} + Flow_1d2usdq + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/too_many_loops_sub_process.bpmn b/tests/SpiffWorkflow/bpmn/data/too_many_loops_sub_process.bpmn new file mode 100644 index 000000000..2f6a8b53a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/too_many_loops_sub_process.bpmn @@ -0,0 +1,157 @@ + + + + + Flow_0q7fkb7 + + + + Flow_1gb8wca + Flow_1d2usdq + counter = counter + 1 + + + + ### Results +Submission for Pre-Review was sent to the HSR-IRB on {{ sent_local_date_str }} at {{ sent_local_time_str }}. + +The HSR-IRB started the Pre-Review process on {{ end_local_date_str }} at {{ end_local_time_str }} and assigned {{ irb_info.IRB_ADMINISTRATIVE_REVIEWER }} as the reviewer. 
+ +### Metrics + + +Days elapsed: {{days_delta }} + Flow_1tj9oz1 + + + + Flow_15jw6a4 + Flow_1ivr6d7 + Flow_1gb8wca + + timedelta(milliseconds=10) + + + + Flow_1d2usdq + Flow_1tj9oz1 + Flow_0op1a19 + + + counter >= 20 + + + counter < 20 + + + + Flow_0q7fkb7 + Flow_15jw6a4 + counter = 0 +counter2 = 0 +counter3 = 0 + + + + Flow_0op1a19 + Flow_1ivr6d7 + + Flow_1fcanuu + + + + Flow_1fcanuu + Flow_04le6u5 + counter2 += 1 + + + Flow_04le6u5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/transaction.bpmn b/tests/SpiffWorkflow/bpmn/data/transaction.bpmn new file mode 100644 index 000000000..2feecea46 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/transaction.bpmn @@ -0,0 +1,282 @@ + + + + + Flow_0ppdvks + + + Flow_0ppdvks + Flow_0bnlh0h + + Flow_039gvks + + + + + + + + Flow_039gvks + Flow_0cfipfp + + + + + + + + Data_Default + Flow_0oab4hv + + + Quantity_Default + + + + + + Flow_0oab4hv + Quantity_Default + Quantity_LT_Zero + Flow_1k10r1p + + + + quantity < 0 + + + Quantity_LT_Zero + + + + quantity == 0 + + + Flow_1k10r1p + + + + Flow_1j0zczv + + + + Flow_0cfipfp + Flow_1j0zczv + Data_Default + + + + value == "" + + + + + + Flow_1yafeny + + + + Flow_0bnlh0h + Flow_1yafeny + print(f"Value: {value} / Quantity {quantity}") + + + + Flow_0qzlrzx + reason = "Error 1" + + + Flow_0qzlrzx + + + + Flow_189pa5w + + + + + Flow_189pa5w + reason = "Error None" + + + Flow_12218kd + + + + + Flow_12218kd + reason = "Cancel" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/two_top_level_procs.bpmn b/tests/SpiffWorkflow/bpmn/data/two_top_level_procs.bpmn new file mode 100644 index 000000000..363ccc120 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/two_top_level_procs.bpmn @@ -0,0 +1,79 @@ + + + + + + + + + Flow_1fumg40 + + + + Flow_1sfcxwo + + + + Flow_1fumg40 + Flow_1sfcxwo + + + + + Flow_0ptjvq1 + + + + Flow_12xe6lg + + + + Flow_0ptjvq1 + Flow_12xe6lg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/events/ActionManagementTest.py b/tests/SpiffWorkflow/bpmn/events/ActionManagementTest.py new file mode 100644 index 000000000..699e50c25 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/ActionManagementTest.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ActionManagementTest(BpmnWorkflowTestCase): + START_TIME_DELTA=0.01 + FINISH_TIME_DELTA=0.02 + + def now_plus_seconds(self, seconds): + return datetime.datetime.now() + datetime.timedelta(seconds=seconds) + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/Action-Management.bpmn20.xml', 'Action Management') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + start_time = self.now_plus_seconds(self.START_TIME_DELTA) + finish_time = 
self.now_plus_seconds(self.FINISH_TIME_DELTA) + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.workflow.get_tasks(TaskState.READY)[0].set_data( + start_time=start_time, finish_time=finish_time) + + def testRunThroughHappy(self): + self.do_next_exclusive_step("Review Action", choice='Approve') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual('NEW ACTION', self.workflow.get_tasks( + TaskState.READY)[0].get_data('script_output')) + self.assertEqual('Cancel Action (if necessary)', + self.workflow.get_tasks(TaskState.READY)[0].task_spec.description) + + time.sleep(self.START_TIME_DELTA) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step("Start Work") + self.workflow.do_engine_steps() + + self.do_next_named_step("Complete Work", choice="Done") + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + + def testRunThroughOverdue(self): + self.do_next_exclusive_step("Review Action", choice='Approve') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual('Cancel Action (if necessary)', + self.workflow.get_tasks(TaskState.READY)[0].task_spec.description) + + time.sleep(self.START_TIME_DELTA) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step("Start Work") + self.workflow.do_engine_steps() + + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual('Finish Time', self.workflow.get_tasks( + TaskState.WAITING)[1].task_spec.description) + time.sleep(self.FINISH_TIME_DELTA) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertNotEqual( + 'Finish Time', self.workflow.get_tasks(TaskState.WAITING)[0].task_spec.description) + + overdue_escalation_task = [ + t for t in self.workflow.get_tasks() if t.task_spec.description == 'Overdue Escalation'] + self.assertEqual(1, len(overdue_escalation_task)) + overdue_escalation_task = overdue_escalation_task[0] + self.assertEqual(TaskState.COMPLETED, overdue_escalation_task.state) + self.assertEqual( + 'ACTION OVERDUE', overdue_escalation_task.get_data('script_output')) + + self.do_next_named_step("Complete Work", choice="Done") + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + + def testRunThroughCancel(self): + + self.do_next_exclusive_step("Review Action", choice='Cancel') + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + + def testRunThroughCancelAfterApproved(self): + self.do_next_exclusive_step("Review Action", choice='Approve') + self.workflow.do_engine_steps() + + self.do_next_named_step("Cancel Action (if necessary)") + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + self.assertEqual( + 'ACTION CANCELLED', self.workflow.get_data('script_output')) + + def testRunThroughCancelAfterWorkStarted(self): + 
self.do_next_exclusive_step("Review Action", choice='Approve') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + time.sleep(self.START_TIME_DELTA) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step("Start Work") + self.workflow.do_engine_steps() + + self.do_next_named_step("Cancel Action (if necessary)") + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + self.assertEqual( + 'ACTION CANCELLED', self.workflow.get_data('script_output')) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ActionManagementTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/CallActivityEscalationTest.py b/tests/SpiffWorkflow/bpmn/events/CallActivityEscalationTest.py new file mode 100644 index 000000000..1e19e43b2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/CallActivityEscalationTest.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kbogus@gmail.com' + + +def on_reached_cb(workflow, task, completed_set): + # In workflows that load a subworkflow, the newly loaded children + # will not have on_reached_cb() assigned. By using this function, we + # re-assign the function in every step, thus making sure that new + # children also call on_reached_cb(). 
+ for child in task.children: + track_task(child.task_spec, completed_set) + return True + + +def on_complete_cb(workflow, task, completed_set): + completed_set.add(task.task_spec.name) + return True + + +def track_task(task_spec, completed_set): + if task_spec.reached_event.is_connected(on_reached_cb): + task_spec.reached_event.disconnect(on_reached_cb) + task_spec.reached_event.connect(on_reached_cb, completed_set) + if task_spec.completed_event.is_connected(on_complete_cb): + task_spec.completed_event.disconnect(on_complete_cb) + task_spec.completed_event.connect(on_complete_cb, completed_set) + + +def track_workflow(wf_spec, completed_set): + for name in wf_spec.task_specs: + track_task(wf_spec.task_specs[name], completed_set) + + +class CallActivityEscalationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'CallActivity-Escalation-Test') + self.workflow = BpmnWorkflow(self.spec, subprocesses) + + def testShouldEscalate(self): + completed_set = set() + track_workflow(self.spec, completed_set) + for task in self.workflow.get_tasks(TaskState.READY): + task.set_data(should_escalate=True) + self.workflow.do_engine_steps() + self.save_restore() + self.workflow.complete_all() + self.assertEqual(True, self.workflow.is_completed()) + + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_escalated' in completed_set) + + self.assertEqual(True, 'EndEvent_specific1_interrupting_normal' not in completed_set) + self.assertEqual(True, 'EndEvent_specific1_interrupting_escalated' in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_escalated' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_missingvariable' not in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_interrupting_normal' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_escalated' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_missingvariable' not in completed_set) + + self.assertEqual(True, 'EndEvent_general_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_general_noninterrupting_escalated' in completed_set) + + self.assertEqual(True, 'EndEvent_general_interrupting_normal' not in completed_set) + self.assertEqual(True, 'EndEvent_general_interrupting_escalated' in completed_set) + + def testShouldNotEscalate(self): + completed_set = set() + track_workflow(self.spec, completed_set) + for task in self.workflow.get_tasks(TaskState.READY): + task.set_data(should_escalate=False) + self.workflow.do_engine_steps() + self.save_restore() + self.workflow.complete_all() + self.assertEqual(True, self.workflow.is_completed()) + + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_escalated' not in completed_set) + + self.assertEqual(True, 'EndEvent_specific1_interrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific1_interrupting_escalated' not in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_escalated' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_missingvariable' not 
in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_interrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_escalated' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_missingvariable' not in completed_set) + + self.assertEqual(True, 'EndEvent_general_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_general_noninterrupting_escalated' not in completed_set) + + self.assertEqual(True, 'EndEvent_general_interrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_general_interrupting_escalated' not in completed_set) + + def testMissingVariable(self): + completed_set = set() + track_workflow(self.spec, completed_set) + self.workflow.do_engine_steps() + self.save_restore() + self.workflow.complete_all() + self.assertEqual(True, self.workflow.is_completed()) + + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific1_noninterrupting_escalated' not in completed_set) + + self.assertEqual(True, 'EndEvent_specific1_interrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific1_interrupting_escalated' not in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_escalated' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_noninterrupting_missingvariable' in completed_set) + + self.assertEqual(True, 'EndEvent_specific2_interrupting_normal' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_escalated' not in completed_set) + self.assertEqual(True, 'EndEvent_specific2_interrupting_missingvariable' in completed_set) + + self.assertEqual(True, 'EndEvent_general_noninterrupting_normal' in completed_set) + self.assertEqual(True, 'EndEvent_general_noninterrupting_escalated' in completed_set) + + self.assertEqual(True, 'EndEvent_general_interrupting_normal' not in completed_set) + self.assertEqual(True, 'EndEvent_general_interrupting_escalated' in completed_set) + + +class CallActivityEscalationWithoutSaveRestoreTest(CallActivityEscalationTest): + def save_restore(self): + pass # disabling save_restore for this test case + + +def suite(): + loader = unittest.TestLoader() + return unittest.TestSuite([ + loader.loadTestsFromTestCase(cls) + for cls in [ + CallActivityEscalationTest, + CallActivityEscalationWithoutSaveRestoreTest, + ] + ]) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/CancelBoundaryEventTest.py b/tests/SpiffWorkflow/bpmn/events/CancelBoundaryEventTest.py new file mode 100644 index 000000000..81788ea3e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/CancelBoundaryEventTest.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + + + +import unittest +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'michaelc' + + +class CancelBoundaryTest(BpmnWorkflowTestCase): + + def testInvalidCancelEvent(self): + self.assertRaises(ValidationException, self.load_workflow_spec, 'invalid_cancel.bpmn', 'Process_1dagb7t') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CancelBoundaryTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git 
a/tests/SpiffWorkflow/bpmn/events/MessageInterruptsSpTest.py b/tests/SpiffWorkflow/bpmn/events/MessageInterruptsSpTest.py new file mode 100644 index 000000000..f66c3ee31 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MessageInterruptsSpTest.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events.event_definitions import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MessageInterruptsSpTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Message Interrupts SP') + + def testRunThroughHappySaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something In a Subprocess') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_exclusive_step('Ack Subprocess Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughInterruptSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_exclusive_step('Acknowledge SP Interrupt Message') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageInterruptsSpTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/MessageInterruptsTest.py b/tests/SpiffWorkflow/bpmn/events/MessageInterruptsTest.py new file mode 100644 index 000000000..55cfa8bf6 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MessageInterruptsTest.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events.event_definitions import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MessageInterruptsTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Test Workflows') + + def testRunThroughHappySaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + self.do_next_exclusive_step('Select Test', choice='Message Interrupts') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, 
len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something That Takes A Long Time') + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterruptSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + self.do_next_exclusive_step('Select Test', choice='Message Interrupts') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_exclusive_step('Acknowledge Interrupt Message') + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step('Select Test', choice='Message Interrupts') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something That Takes A Long Time') + + self.workflow.do_engine_steps() + self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterrupt(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step('Select Test', choice='Message Interrupts') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_exclusive_step('Acknowledge Interrupt Message') + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageInterruptsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptTest.py b/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptTest.py new file mode 100644 index 000000000..f7b278bb1 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptTest.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events.event_definitions import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + 
+__author__ = 'matth' + + +class MessageNonInterruptTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Test Workflows') + + def testRunThroughHappySaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something That Takes A Long Time') + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterruptSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Acknowledge Non-Interrupt Message') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Do Something That Takes A Long Time') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something That Takes A Long Time') + + self.workflow.do_engine_steps() + self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterrupt(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Acknowledge Non-Interrupt Message') + + 
self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_named_step('Do Something That Takes A Long Time') + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterruptOtherOrder(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Do Something That Takes A Long Time') + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Acknowledge Non-Interrupt Message') + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageInterruptOtherOrderSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + self.do_next_exclusive_step( + 'Select Test', choice='Message Non Interrupt') + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Do Something That Takes A Long Time') + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.do_next_named_step('Acknowledge Non-Interrupt Message') + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageNonInterruptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptsSpTest.py b/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptsSpTest.py new file mode 100644 index 000000000..d4192a23e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptsSpTest.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events.event_definitions import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MessageNonInterruptsSpTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Message Non Interrupt SP') + + def testRunThroughHappySaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, 
self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.do_next_exclusive_step('Do Something In a Subprocess') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_exclusive_step('Ack Subprocess Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + + self.do_next_named_step('Do Something In a Subprocess') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Ack Subprocess Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Acknowledge SP Parallel Message') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageOrder2SaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + self.do_next_named_step('Do Something In a Subprocess') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Acknowledge SP Parallel Message') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Ack Subprocess Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughMessageOrder3SaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.save_restore() + + self.workflow.do_engine_steps() + self.save_restore() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + + self.workflow.catch(MessageEventDefinition('Test Message')) + + self.do_next_named_step('Acknowledge SP Parallel Message') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Do Something In a Subprocess') + self.workflow.do_engine_steps() + self.save_restore() + + self.do_next_named_step('Ack Subprocess Done') + self.workflow.do_engine_steps() + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageNonInterruptsSpTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/MessagesTest.py b/tests/SpiffWorkflow/bpmn/events/MessagesTest.py new file mode 100644 index 
000000000..486e53d24 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MessagesTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.specs.events.event_definitions import MessageEventDefinition +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MessagesTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'Test Workflows') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step('Select Test', choice='Messages') + self.workflow.do_engine_steps() + self.assertEqual([], self.workflow.get_tasks(TaskState.READY)) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.workflow.catch(MessageEventDefinition('Wrong Message')) + self.assertEqual([], self.workflow.get_tasks(TaskState.READY)) + self.workflow.catch(MessageEventDefinition('Test Message')) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.assertEqual( + 'Test Message', self.workflow.get_tasks(TaskState.READY)[0].task_spec.description) + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + def testRunThroughSaveAndRestore(self): + + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.do_next_exclusive_step('Select Test', choice='Messages') + self.workflow.do_engine_steps() + + self.save_restore() + + self.assertEqual([], self.workflow.get_tasks(TaskState.READY)) + self.assertEqual(2, len(self.workflow.get_tasks(TaskState.WAITING))) + self.workflow.catch(MessageEventDefinition('Wrong Message')) + self.assertEqual([], self.workflow.get_tasks(TaskState.READY)) + self.workflow.catch(MessageEventDefinition('Test Message')) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.save_restore() + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessagesTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/MultipleEventsTest.py b/tests/SpiffWorkflow/bpmn/events/MultipleEventsTest.py new file mode 100644 index 000000000..b1e2a24d5 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/MultipleEventsTest.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +import unittest + +from SpiffWorkflow.bpmn.specs.events import CancelEventDefinition, SignalEventDefinition +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class MultipleEventsTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('multipleEvents.bpmn', 'SignalAndCancel') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual("hello", ready_tasks[0].get_name()) + + def test_cancel_does_nothing_if_no_one_is_listening(self,save_restore = False): + + # Send cancel notifications to the workflow + self.workflow.catch(SignalEventDefinition('cancel')) # 
generate a cancel signal. + self.workflow.catch(CancelEventDefinition()) + + # Nothing should have happened. + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual("hello", ready_tasks[0].get_name()) + + def test_cancel_works_with_signal(self,save_restore = False): + + task = self.workflow.get_tasks(TaskState.READY)[0] + + # Move to User Task 1 + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks(TaskState.READY)[0] + self.assertEqual('UserTaskOne', task.get_name()) + + # Send cancel notifications to the workflow + self.workflow.catch(SignalEventDefinition('cancel')) # generate a cancel signal. + self.workflow.catch(CancelEventDefinition()) + self.workflow.do_engine_steps() + + # The cancel event should have been called. + self.assertEqual("cancel_signal", self.workflow.last_task.data['cancel']) + + + def test_cancel_works_with_cancel_Event(self,save_restore = False): + + task = self.workflow.get_tasks(TaskState.READY)[0] + + # Move to User Task 2 + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks(TaskState.READY)[0] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks(TaskState.READY)[0] + self.assertEqual('UserTaskTwo', task.get_name()) + + # Send cancel notifications to the workflow + self.workflow.catch(SignalEventDefinition('cancel')) # generate a cancel signal. + self.workflow.catch(CancelEventDefinition()) + self.workflow.do_engine_steps() + + # The cancel event should have been called. + self.assertEqual("cancel_event", self.workflow.last_task.data['cancel']) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultipleEventsTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerCycleStartTest.py b/tests/SpiffWorkflow/bpmn/events/TimerCycleStartTest.py new file mode 100644 index 000000000..e4e8741c3 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerCycleStartTest.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +import unittest +import time + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + +counter = 0 +def my_custom_function(): + global counter + counter = counter+1 + return counter + +class CustomScriptEngine(PythonScriptEngine): + """This is a custom script processor that can be easily injected into Spiff Workflow. + It will execute python code read in from the bpmn. It will also make any scripts in the + scripts directory available for execution. 
""" + def __init__(self): + augment_methods = {'custom_function': my_custom_function} + super().__init__(scripting_additions=augment_methods) + + +class TimerCycleStartTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('timer-cycle-start.bpmn', 'timer') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=CustomScriptEngine()) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + global counter + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) # Start Event + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + # the data doesn't really propagate to the end as in a 'normal' workflow, so I call a + # custom function that records the number of times this got called so that + # we can keep track of how many times the triggered item gets called. + counter = 0 + + # We have a loop so we can continue to execute waiting tasks when + # timers expire. The test workflow has a wait timer that pauses long enough to + # allow the cycle to complete twice -- otherwise the first iteration through the + # cycle process causes the remaining tasks to be cancelled. + for loopcount in range(5): + if save_restore: + self.save_restore() + self.workflow.script_engine = CustomScriptEngine() + time.sleep(0.1) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(counter, 2) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerCycleStartTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerCycleTest.py b/tests/SpiffWorkflow/bpmn/events/TimerCycleTest.py new file mode 100644 index 000000000..8b9ad0f56 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerCycleTest.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +import unittest +import time + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + +counter = 0 +def my_custom_function(): + global counter + counter = counter+1 + return counter + +class CustomScriptEngine(PythonScriptEngine): + """This is a custom script processor that can be easily injected into Spiff Workflow. + It will execute python code read in from the bpmn. It will also make any scripts in the + scripts directory available for execution. 
""" + def __init__(self): + augment_methods = {'custom_function': my_custom_function} + super().__init__(scripting_additions=augment_methods) + + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('timer-cycle.bpmn', 'timer') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=CustomScriptEngine()) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + global counter + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) # Start Event + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) # GetCoffee + + # See comments in timer cycle test for more context + counter = 0 + for loopcount in range(5): + if save_restore: + self.save_restore() + self.workflow.script_engine = CustomScriptEngine() + time.sleep(0.01) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + + pass + #self.assertEqual(counter, 2) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerDateTest.py b/tests/SpiffWorkflow/bpmn/events/TimerDateTest.py new file mode 100644 index 000000000..a82698b72 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerDateTest.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time +import pytz + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class TimerDateTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('timer-date-start.bpmn', 'date_timer') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + global counter + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) # Start Event + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .05s + # we should terminate loop before that. 
+ starttime = datetime.datetime.now() + counter = 0 + while loopcount < 8: + if len(self.workflow.get_tasks(TaskState.READY)) >= 1: + break + if save_restore: + self.save_restore() + + + waiting_tasks = self.workflow.get_tasks(TaskState.WAITING) + time.sleep(0.01) + self.workflow.refresh_waiting_tasks() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + self.workflow.do_engine_steps() + tz = pytz.timezone('US/Eastern') + testdate = tz.localize(datetime.datetime.strptime('2021-09-01 10:00','%Y-%m-%d %H:%M')) + self.assertEqual(self.workflow.last_task.data['futuredate2'],testdate) + self.assertTrue('completed' in self.workflow.last_task.data) + self.assertTrue(self.workflow.last_task.data['completed']) + self.assertTrue((endtime-starttime) > datetime.timedelta(seconds=.02)) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDateTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryOnTaskTest.py b/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryOnTaskTest.py new file mode 100644 index 000000000..ec743b54b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryOnTaskTest.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time + +from SpiffWorkflow.bpmn.specs.events import EndEvent +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +__author__ = 'kellym' + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('boundary_timer_on_task.bpmn', 'test_timer') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self,save_restore = False): + # In the normal flow of things, the final end event should be the last task + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + end_events = [] + + for task in self.workflow.get_tasks(): + if isinstance(task.task_spec, EndEvent): + end_events.append(task) + self.assertEqual(1, len(end_events)) + + # In the event of a timer firing, the last task should STILL + # be the final end event. 
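+ # The second run below recreates the workflow, sleeps long enough for the boundary
+ # timer to fire (then refreshes the waiting tasks), and completes the user task
+ # afterwards; the workflow should still finish through the same single end event checked above.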
+ + starttime = datetime.datetime.now() + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + time.sleep(0.1) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + self.assertTrue(self.workflow.is_completed()) + end_events = [] + + for task in self.workflow.get_tasks(): + if isinstance(task.task_spec, EndEvent): + end_events.append(task) + self.assertEqual(1, len(end_events)) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryTest.py b/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryTest.py new file mode 100644 index 000000000..2297aa271 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryTest.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +import unittest +import time + +from SpiffWorkflow.bpmn.FeelLikeScriptEngine import FeelLikeScriptEngine +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +__author__ = 'kellym' + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('boundary.bpmn', 'boundary_event') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self,save_restore = False): + self.workflow.script_engine = FeelLikeScriptEngine() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['answer']='No' + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .03s + # we should terminate loop before that. + + while loopcount < 11: + ready_tasks = self.workflow.get_tasks(TaskState.READY) + if len(ready_tasks) < 1: + break + if save_restore: + self.save_restore() + self.workflow.script_engine = FeelLikeScriptEngine() + #self.assertEqual(1, len(self.workflow.get_tasks(Task.WAITING))) + time.sleep(0.01) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + loopcount = loopcount +1 + + # Assure that the loopcount is less than 10, and the timer interrupt fired, rather + # than allowing us to continue to loop the full 10 times. 
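+ # (The loop above is the usual polling idiom in these timer tests: sleep briefly,
+ # call refresh_waiting_tasks() so WAITING timer events are re-evaluated, then step
+ # the engine. A reusable helper along these lines is sketched here for illustration
+ # only; 'wait_for_ready' is hypothetical and not used by the tests.)
+ #
+ #     def wait_for_ready(workflow, retries=10, delay=0.01):
+ #         for _ in range(retries):
+ #             if workflow.get_tasks(TaskState.READY):
+ #                 return True
+ #             time.sleep(delay)
+ #             workflow.refresh_waiting_tasks()
+ #             workflow.do_engine_steps()
+ #         return False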
+ self.assertTrue(loopcount < 10) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerDurationTest.py b/tests/SpiffWorkflow/bpmn/events/TimerDurationTest.py new file mode 100644 index 000000000..24b27eeaa --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerDurationTest.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('timer.bpmn', 'timer') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .25s + # we should terminate loop before that. + starttime = datetime.datetime.now() + while loopcount < 10: + if len(self.workflow.get_tasks(TaskState.READY)) >= 1: + break + if save_restore: self.save_restore() + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + time.sleep(0.1) + self.workflow.refresh_waiting_tasks() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + duration = endtime-starttime + self.assertEqual(duration > datetime.timedelta(seconds=.2), True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TimerIntermediateTest.py b/tests/SpiffWorkflow/bpmn/events/TimerIntermediateTest.py new file mode 100644 index 000000000..6d8c256a2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TimerIntermediateTest.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +import unittest +import datetime +import time +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class TimerIntermediateTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('Test-Workflows/Timer-Intermediate.bpmn20.xml', 'Timer Intermediate') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + + due_time = datetime.datetime.now() + datetime.timedelta(seconds=0.01) + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + self.workflow.get_tasks(TaskState.READY)[0].set_data(due_time=due_time) + + self.workflow.do_engine_steps() + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + + time.sleep(0.02) + + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.WAITING))) + self.workflow.refresh_waiting_tasks() + 
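+ # Once the due_time set above has passed, refreshing should fire the intermediate
+ # timer event: nothing is left WAITING and its successor becomes READY, as asserted next.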
self.assertEqual(0, len(self.workflow.get_tasks(TaskState.WAITING))) + self.assertEqual(1, len(self.workflow.get_tasks(TaskState.READY))) + + self.workflow.do_engine_steps() + self.assertEqual( + 0, len(self.workflow.get_tasks(TaskState.READY | TaskState.WAITING))) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerIntermediateTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/TransactionSubprocssTest.py b/tests/SpiffWorkflow/bpmn/events/TransactionSubprocssTest.py new file mode 100644 index 000000000..1212d776c --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/events/TransactionSubprocssTest.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'michaelc' + + +class TransactionSubprocessTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('transaction.bpmn', 'Main_Process') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + self.workflow.do_engine_steps() + + def testNormalCompletion(self): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + ready_tasks[0].update_data({'value': 'asdf'}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + ready_tasks[0].update_data({'quantity': 2}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + self.assertIn('value', self.workflow.last_task.data) + + # Check that workflow and next task completed + subprocess = self.workflow.get_tasks_from_spec_name('Subprocess')[0] + self.assertEqual(subprocess.get_state(), TaskState.COMPLETED) + print_task = self.workflow.get_tasks_from_spec_name("Activity_Print_Data")[0] + self.assertEqual(print_task.get_state(), TaskState.COMPLETED) + + # Check that the boundary events were cancelled + cancel_task = self.workflow.get_tasks_from_spec_name("Catch_Cancel_Event")[0] + self.assertEqual(cancel_task.get_state(), TaskState.CANCELLED) + error_1_task = self.workflow.get_tasks_from_spec_name("Catch_Error_1")[0] + self.assertEqual(error_1_task.get_state(), TaskState.CANCELLED) + error_none_task = self.workflow.get_tasks_from_spec_name("Catch_Error_None")[0] + self.assertEqual(error_none_task.get_state(), TaskState.CANCELLED) + + + def testSubworkflowCancelEvent(self): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + + # If value == '', we cancel + ready_tasks[0].update_data({'value': ''}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + + # If the subprocess gets cancelled, verify that data set there does not persist + self.assertNotIn('value', self.workflow.last_task.data) + + # Check that we completed the Cancel Task + cancel_task = self.workflow.get_tasks_from_spec_name("Cancel_Action")[0] + self.assertEqual(cancel_task.get_state(), TaskState.COMPLETED) + + # And cancelled the remaining tasks + error_1_task = self.workflow.get_tasks_from_spec_name("Catch_Error_1")[0] + self.assertEqual(error_1_task.get_state(), TaskState.CANCELLED) + error_none_task = self.workflow.get_tasks_from_spec_name("Catch_Error_None")[0] + self.assertEqual(error_none_task.get_state(), TaskState.CANCELLED) + + # We should not have this task, as we followed the 'cancel branch' + print_task = self.workflow.get_tasks_from_spec_name("Activity_Print_Data") + 
self.assertEqual(len(print_task), 0) + + def testSubworkflowErrorCodeNone(self): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + ready_tasks[0].update_data({'value': 'asdf'}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + + # If quantity == 0, we throw an error with no error code + ready_tasks[0].update_data({'quantity': 0}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + + # We formerly checked that subprocess data does not persist, but I think it should persist + # A boundary event is just an alternate path out of a workflow, and we might need the context + # of the event in later steps + + # The cancel boundary event should be cancelled + cancel_task = self.workflow.get_tasks_from_spec_name("Catch_Cancel_Event")[0] + self.assertEqual(cancel_task.get_state(), TaskState.CANCELLED) + + # We should catch the None Error, but not Error 1 + error_none_task = self.workflow.get_tasks_from_spec_name("Catch_Error_None")[0] + self.assertEqual(error_none_task.get_state(), TaskState.COMPLETED) + error_1_task = self.workflow.get_tasks_from_spec_name("Catch_Error_1")[0] + self.assertEqual(error_1_task.get_state(), TaskState.CANCELLED) + + # Make sure this branch didn't get followed + print_task = self.workflow.get_tasks_from_spec_name("Activity_Print_Data") + self.assertEqual(len(print_task), 0) + + def testSubworkflowErrorCodeOne(self): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + ready_tasks[0].update_data({'value': 'asdf'}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + + # If quantity < 0, we throw 'Error 1' + ready_tasks[0].update_data({'quantity': -1}) + ready_tasks[0].complete() + self.workflow.do_engine_steps() + + # The cancel boundary event should be cancelled + # I've removed this check, see previous test for rationale + + # Both boundary events should complete + error_none_task = self.workflow.get_tasks_from_spec_name("Catch_Error_None")[0] + self.assertEqual(error_none_task.get_state(), TaskState.COMPLETED) + error_1_task = self.workflow.get_tasks_from_spec_name("Catch_Error_1")[0] + self.assertEqual(error_1_task.get_state(), TaskState.COMPLETED) + + print_task = self.workflow.get_tasks_from_spec_name("Activity_Print_Data") + self.assertEqual(len(print_task), 0) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TransactionSubprocessTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/events/__init__.py b/tests/SpiffWorkflow/bpmn/events/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/bpmn/serializer/__init__.py b/tests/SpiffWorkflow/bpmn/serializer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/bpmn/serializer/dictTest.py b/tests/SpiffWorkflow/bpmn/serializer/dictTest.py new file mode 100644 index 000000000..3556bee19 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/serializer/dictTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +from builtins import str +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..', '..')) + +import uuid +from SpiffWorkflow.bpmn.serializer.dict import BPMNDictionarySerializer +from tests.SpiffWorkflow.serializer.baseTest import SerializerTest +from SpiffWorkflow.workflow import Workflow + + +class 
BPMNDictionarySerializerTest(SerializerTest): + + def setUp(self): + super(BPMNDictionarySerializerTest, self).setUp() + self.serializer = BPMNDictionarySerializer() + self.return_type = dict + + def _compare_results(self, item1, item2, + exclude_dynamic=False, + exclude_items=None): + exclude_items = exclude_items if exclude_items is not None else [] + if exclude_dynamic: + if 'last_state_change' not in exclude_items: + exclude_items.append('last_state_change') + if 'last_task' not in exclude_items: + exclude_items.append('last_task') + if uuid.UUID not in exclude_items: + exclude_items.append(uuid.UUID) + if type(item1) in exclude_items: + return + + if isinstance(item1, dict): + self.assertIsInstance(item2, dict) + for key, value in list(item1.items()): + self.assertIn(key, item2) + if key in exclude_items: + continue + self._compare_results(value, item2[key], + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + for key in item2: + self.assertIn(key, item1) + + elif isinstance(item1, list): + msg = "item is not a list (is a " + str(type(item2)) + ")" + self.assertIsInstance(item2, list, msg) + msg = "list lengths differ: {} vs {}".format( + len(item1), len(item2)) + self.assertEqual(len(item1), len(item2), msg) + for i, listitem in enumerate(item1): + self._compare_results(listitem, item2[i], + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + + elif isinstance(item1, Workflow): + raise Exception("Item is a Workflow") + + else: + msg = "{}: types differ: {} vs {}".format( + str(item2), type(item1), type(item2)) + self.assertEqual(type(item1), type(item2), msg) + self.assertEqual(item1, item2) + + +def suite(): + return unittest.defaultTestLoader.loadTestsFromTestCase(BPMNDictionarySerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/serializer/jsonTest.py b/tests/SpiffWorkflow/bpmn/serializer/jsonTest.py new file mode 100644 index 000000000..89ee05428 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/serializer/jsonTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..', '..')) + +import json +from SpiffWorkflow.bpmn.serializer.json import BPMNJSONSerializer +from tests.SpiffWorkflow.serializer.dictTest import DictionarySerializerTest + + +class BPMNJSONSerializerTest(DictionarySerializerTest): + + def setUp(self): + super(BPMNJSONSerializerTest, self).setUp() + self.serializer = BPMNJSONSerializer() + self.return_type = str + + def _prepare_result(self, item): + return json.loads(item) + + def _compare_results(self, item1, item2, exclude_dynamic=False, + exclude_items=None): + if exclude_dynamic: + exclude_items = ['__uuid__'] + else: + exclude_items = [] + super(BPMNJSONSerializerTest, self)._compare_results(item1, item2, + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + + +def suite(): + return unittest.defaultTestLoader.loadTestsFromTestCase(BPMNJSONSerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/BaseTestCase.py b/tests/SpiffWorkflow/camunda/BaseTestCase.py new file mode 100644 index 000000000..6ea39d0ac --- /dev/null +++ b/tests/SpiffWorkflow/camunda/BaseTestCase.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +import os + +from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer +from SpiffWorkflow.camunda.parser.CamundaParser import 
CamundaParser +from SpiffWorkflow.camunda.serializer import UserTaskConverter, StartEventConverter, EndEventConverter, \ + IntermediateCatchEventConverter, IntermediateThrowEventConverter, BoundaryEventConverter + +from SpiffWorkflow.dmn.serializer import BusinessRuleTaskConverter + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + + +__author__ = 'danfunk' + +wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([ + UserTaskConverter, BusinessRuleTaskConverter, StartEventConverter, + EndEventConverter, BoundaryEventConverter, IntermediateCatchEventConverter, + IntermediateThrowEventConverter]) + +class BaseTestCase(BpmnWorkflowTestCase): + """ Provides some basic tools for loading up and parsing camunda BPMN files """ + + serializer = BpmnWorkflowSerializer(wf_spec_converter) + + def load_workflow_spec(self, filename, process_name, dmn_filename=None): + bpmn = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = CamundaParser() + parser.add_bpmn_files_by_glob(bpmn) + if dmn_filename is not None: + dmn = os.path.join(os.path.dirname(__file__), 'data', 'dmn', dmn_filename) + parser.add_dmn_files_by_glob(dmn) + top_level_spec = parser.get_spec(process_name) + subprocesses = parser.get_subprocess_specs(process_name) + return top_level_spec, subprocesses + + def reload_save_restore(self): + self.save_restore() diff --git a/tests/SpiffWorkflow/camunda/BusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/camunda/BusinessRuleTaskParserTest.py new file mode 100644 index 000000000..24a9b189e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/BusinessRuleTaskParserTest.py @@ -0,0 +1,55 @@ +import unittest +from unittest.mock import patch + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + + +class BusinessRuleTaskParserTest(BaseTestCase): + + def setUp(self): + self.spec, subprocesses = self.load_workflow_spec( + 'ExclusiveGatewayIfElseAndDecision.bpmn', + 'Process_1', + 'test_integer_decision.dmn') + self.workflow = BpmnWorkflow(self.spec) + + def testDmnHappy(self): + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + def testDmnSaveRestore(self): + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + @patch('SpiffWorkflow.dmn.engine.DMNEngine.DMNEngine.evaluate') + def testDmnExecHasAccessToTask(self, mock_engine): + """At one time, the Execute and Evaluate methods received a Task object + but the DMN evaluate method did not get a task object. 
While this is + an optional argument, it should always exist if executed in the context + of a BPMNWorkflow""" + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks_from_spec_name('TaskDecision')[0] + name, args, kwargs = mock_engine.mock_calls[0] + self.assertIn(task, args) + + def testDmnUsesSameScriptEngineAsBPMN(self): + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/CallActivityMessageTest.py b/tests/SpiffWorkflow/camunda/CallActivityMessageTest.py new file mode 100644 index 000000000..400f60117 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/CallActivityMessageTest.py @@ -0,0 +1,57 @@ +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from .BaseTestCase import BaseTestCase + +__author__ = 'essweine' + + +class CallActivityMessageTest(BaseTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('call_activity_with_message*.bpmn', 'Process_0xeaemr') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False): + steps = [('Activity_EnterPlan',{'plan_details':'Bad'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Better'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Best'}), + ('Activity_ApproveOrDeny', {'approved':'Yes'}), + ('Activity_EnablePlan',{'Done':'OK!'}) + ] + + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + waiting_tasks = self.workflow.get_tasks(TaskState.WAITING) + self.assertEqual(1, len(ready_tasks),'Expected to have one ready task') + self.assertEqual(2, len(waiting_tasks), 'Expected to have two waiting tasks') + + for step in steps: + current_task = ready_tasks[0] + self.assertEqual(current_task.task_spec.name,step[0]) + current_task.update_data(step[1]) + current_task.complete() + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + if save_restore: self.save_restore() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(self.workflow.is_completed(),True,'Expected the workflow to be complete at this point') + self.assertEqual(self.workflow.last_task.data,{'plan_details': 'Best', + 'Approved': 'Yes', + 'Done': 'OK!'}) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CallActivityMessageTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/CamundaParserTest.py b/tests/SpiffWorkflow/camunda/CamundaParserTest.py new file mode 100644 index 000000000..1b4534490 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/CamundaParserTest.py @@ -0,0 +1,24 @@ +from SpiffWorkflow.bpmn.parser.util import full_tag +from SpiffWorkflow.camunda.specs.UserTask import UserTask +from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser +from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser +from SpiffWorkflow.camunda.parser.business_rule_task import BusinessRuleTaskParser 
+from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask + +from .BaseTestCase import BaseTestCase + +class CamundaParserTest(BaseTestCase): + + def setUp(self): + self.parser = CamundaParser() + + def test_overrides(self): + + overrides = [ + ('userTask', UserTaskParser, UserTask), + ('businessRuleTask', BusinessRuleTaskParser, BusinessRuleTask), + ] + + for key, parser, spec in overrides: + self.assertIn(full_tag(key), self.parser.OVERRIDE_PARSER_CLASSES) + self.assertEqual((parser, spec), self.parser.OVERRIDE_PARSER_CLASSES.get(full_tag(key))) \ No newline at end of file diff --git a/tests/SpiffWorkflow/camunda/ClashingNameTest.py b/tests/SpiffWorkflow/camunda/ClashingNameTest.py new file mode 100644 index 000000000..b078ddf3e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ClashingNameTest.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ClashingNameTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_camunda_clash.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyReset(self): + self.actual_test(save_restore=False,reset_data=True,expected={'do_step':False,'C':'c'}) + + def testRunThroughSaveRestoreReset(self): + self.actual_test(save_restore=True,reset_data=True,expected={'do_step':False,'C':'c'}) + + def actual_test(self, save_restore=False, reset_data=False, expected=None): + + if expected is None: + expected = {'do_step': False, 'A': 'a', 'B': 'b', 'C': 'c'} + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': True}, + {'taskname': 'FormA', + 'formvar': 'A', + 'answer': 'a'}, + {'taskname': 'FormB', + 'formvar': 'B', + 'answer': 'b'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None: + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': False}, + {'taskname': 'FormC', + 'formvar': 'C', + 'answer': 'c'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'do_step':False,'A':'a','B':'b','C':'c'}, + self.workflow.last_task.data) + + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ClashingNameTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/DMNCustomScriptTest.py b/tests/SpiffWorkflow/camunda/DMNCustomScriptTest.py new file mode 100644 index 000000000..cde4662c0 --- /dev/null 
+++ b/tests/SpiffWorkflow/camunda/DMNCustomScriptTest.py @@ -0,0 +1,56 @@ +import unittest +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + + +def my_custom_function(txt): + return str(txt).upper() + + +class CustomScriptEngine(PythonScriptEngine): + + def __init__(self): + augment_methods = {'my_custom_function': my_custom_function} + super().__init__(scripting_additions=augment_methods) + + +class DMNCustomScriptTest(BaseTestCase): + + def setUp(self): + + self.spec, subprocesses = self.load_workflow_spec('CustomScript.bpmn', 'start', 'CustomScript.dmn') + self.workflow = BpmnWorkflow(self.spec, script_engine=CustomScriptEngine()) + + def testConstructor(self): + pass # this is accomplished through setup. + + def complete_manual_task(self): + manual_task = self.workflow.get_tasks_from_spec_name('manual_task')[0] + self.workflow.complete_task_from_id(manual_task.id) + self.workflow.do_engine_steps() + + def testDmnHappy(self): + self.workflow.do_engine_steps() + self.complete_manual_task() + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.last_task.data, + {'a': 'BILL', 'dmn_result': 'BILL'}) + + def testDmnSaveRestore(self): + self.save_restore() + self.workflow.script_engine = CustomScriptEngine() + self.workflow.do_engine_steps() + self.complete_manual_task() + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.last_task.data, + {'a': 'BILL', 'dmn_result': 'BILL'}) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DMNCustomScriptTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/DMNDictTest.py b/tests/SpiffWorkflow/camunda/DMNDictTest.py new file mode 100644 index 000000000..053614b49 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/DMNDictTest.py @@ -0,0 +1,41 @@ + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + +class DMNDictTest(BaseTestCase): + + def setUp(self): + self.spec, subprocesses = self.load_workflow_spec('dmndict.bpmn', 'start', 'dmndict.dmn') + self.workflow = BpmnWorkflow(self.spec) + self.expectedResult = {'inputvar': 1, 'pi': {'test': {'me': 'yup it worked'}, 'test2': {'other': 'yes'}}} + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + x = self.workflow.get_ready_user_tasks() + self.workflow.complete_task_from_id(x[0].id) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.last_task.data, self.expectedResult) + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.save_restore() + x = self.workflow.get_ready_user_tasks() + self.workflow.complete_task_from_id(x[0].id) + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.last_task.data, self.expectedResult) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DMNDictTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py b/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py new file mode 100644 index 000000000..96852cc2c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- + + +import unittest + +from SpiffWorkflow.bpmn.workflow import 
BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'matth' + +class DefaultGatewayPMITest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('default_gateway_pmi.bpmn', 'DefaultGateway') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("DoStuff", task.task_spec.name) + task.update_data({"morestuff": 'Yep'}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # Set the names of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + self.assertEqual("GetMoreStuff", task.task_spec.name) + else: + self.assertEqual("GetMoreStuff_%d"%(i-1), task.task_spec.name) + + + task.update_data({"stuff.addstuff": "Stuff %d"%i}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DefaultGatewayPMITest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py b/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py new file mode 100644 index 000000000..1b65ba575 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'matth' + + +class ExclusiveGatewayPMITest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('default_gateway_pmi.bpmn', 'DefaultGateway') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def testRunThroughHappyNo(self): + self.actual_test(False,'No') + + def testRunThroughSaveRestoreNo(self): + self.actual_test(True,'No') + + def actual_test(self, save_restore=False,response='Yes'): + + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("DoStuff", task.task_spec.name) + task.update_data({"morestuff": response}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # Set the names of the 3 family members. 
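+ # The multi-instance copies of GetMoreStuff are expected to be named GetMoreStuff,
+ # GetMoreStuff_0, GetMoreStuff_1, ... which is what the index check below relies on.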
+ if response == 'Yes': + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + self.assertEqual("GetMoreStuff", task.task_spec.name) + else: + self.assertEqual("GetMoreStuff_%d"%(i-1), task.task_spec.name) + + + task.update_data({"stuff.addstuff": "Stuff %d"%i}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayPMITest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ExternalMessageBoundaryEventTest.py b/tests/SpiffWorkflow/camunda/ExternalMessageBoundaryEventTest.py new file mode 100644 index 000000000..7dd2b8df0 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ExternalMessageBoundaryEventTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.camunda.specs.events.event_definitions import MessageEventDefinition +from .BaseTestCase import BaseTestCase + +__author__ = 'kellym' + + +class ExternalMessageBoundaryTest(BaseTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('external_message.bpmn', 'ExternalMessage') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks),'Expected to have only one ready task') + self.workflow.catch(MessageEventDefinition('Interrupt', payload='SomethingImportant', result_var='interrupt_var')) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(2,len(ready_tasks),'Expected to have two ready tasks') + + # here because the thread just dies and doesn't lead to a task, we expect the data + # to die with it. + # item 1 should be at 'Pause' + self.assertEqual('Pause',ready_tasks[1].task_spec.description) + self.assertEqual('SomethingImportant', ready_tasks[1].data['interrupt_var']) + self.assertEqual(True, ready_tasks[1].data['caughtinterrupt']) + self.assertEqual('Meaningless User Task',ready_tasks[0].task_spec.description) + self.assertEqual(False, ready_tasks[0].data['caughtinterrupt']) + ready_tasks[1].complete() + self.workflow.do_engine_steps() + # what I think is going on here is that when we hit the reset, it is updating the + # last_task and appending the data to whatever happened there, so it would make sense that + # we have the extra variables that happened in 'pause' + # if on the other hand, we went on from 'meaningless task' those variables would not get added. 
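+ # The 'reset' message thrown below carries a payload; when the boundary event
+ # catches it, the payload ends up in the task data under result_var ('reset_var'),
+ # which the assertions at the end of this test check.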
+ self.workflow.catch(MessageEventDefinition('reset', payload='SomethingDrastic', result_var='reset_var')) + ready_tasks = self.workflow.get_tasks(TaskState.READY) + # The user activity was cancelled and we should continue from the boundary event + self.assertEqual(1, len(ready_tasks),'Expected to have two ready tasks') + event = self.workflow.get_tasks_from_spec_name('Event_19detfv')[0] + event.complete() + self.assertEqual('SomethingDrastic', event.data['reset_var']) + self.assertEqual(False, event.data['caughtinterrupt']) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExternalMessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/FeelBusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/camunda/FeelBusinessRuleTaskParserTest.py new file mode 100644 index 000000000..36a4a45ed --- /dev/null +++ b/tests/SpiffWorkflow/camunda/FeelBusinessRuleTaskParserTest.py @@ -0,0 +1,43 @@ +import unittest + +from SpiffWorkflow.task import TaskState + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + +class FeelBusinessRuleTaskParserTest(BaseTestCase): + + def setUp(self): + self.spec, subprocesses = self.load_workflow_spec( + 'ExclusiveGatewayIfElseAndDecision.bpmn', 'Process_1', 'test_integer_decision_feel.dmn') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelBusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/InvalidBusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/camunda/InvalidBusinessRuleTaskParserTest.py new file mode 100644 index 000000000..3ca9f3d05 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/InvalidBusinessRuleTaskParserTest.py @@ -0,0 +1,35 @@ +import os +import unittest + +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + + +class BusinessRuleTaskParserTest(BaseTestCase): + + def setUp(self): + self.spec, subproceses = self.load_workflow_spec( + 'invalid/InvalidDecision.bpmn', 'Process_1', 'invalid_decision.dmn') + self.workflow = BpmnWorkflow(self.spec) + + def testDmnRaisesTaskErrors(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(TaskState.READY)[0].set_data(x=3) + try: + self.workflow.do_engine_steps() + self.assertTrue(False, "An error should have been raised.") + except WorkflowTaskExecException as we: + self.assertTrue(True, "An error was raised..") + self.assertEquals("InvalidDecisionTaskId", we.sender.name) + self.maxDiff = 1000 + self.assertEquals("Error 
evaluating expression spam= 1", str(we)) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MessageBoundaryEventTest.py b/tests/SpiffWorkflow/camunda/MessageBoundaryEventTest.py new file mode 100644 index 000000000..03606c233 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MessageBoundaryEventTest.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + + +import unittest +import time +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from .BaseTestCase import BaseTestCase + +__author__ = 'kellym' + + +class MessageBoundaryTest(BaseTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('MessageBoundary.bpmn', 'Process_1kjyavs') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + + def actual_test(self,save_restore = False): + steps = [('Activity_Interrupt', {'interrupt_task':'No'}), + ('Activity_Interrupt', {'interrupt_task': 'No'}), + ('Activity_Interrupt', {'interrupt_task': 'Yes'}), + ] + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(2, len(ready_tasks),'Expected to have two ready tasks') + for step in steps: + for task in ready_tasks: + if task.task_spec.name == step[0]: + task.update_data(step[1]) + + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + time.sleep(.01) + self.workflow.refresh_waiting_tasks() + if save_restore: self.save_restore() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + time.sleep(.01) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True,'Expected the workflow to be complete at this point') + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py b/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py new file mode 100644 index 000000000..3244f16d3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.exceptions import WorkflowException + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'matth' + + +class MultiInstanceArrayTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('multi_instance_array.bpmn', 'MultiInstanceArray') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + + def testRunThroughHappyList(self): + self.actual_test2(False) + + def testRunThroughSaveRestoreList(self): + self.actual_test2(True) + + def testRunThroughHappyDict(self): + self.actual_test_with_dict(False) + + def testRunThroughSaveRestoreDict(self): + self.actual_test_with_dict(True) + + def testGetTaskExtensions(self): + 
self.actual_test_for_extensions(False) + + + def actual_test(self, save_restore=False): + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':False, + 'mi_count':0, + 'mi_index':0}) + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family": {"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo, {'is_looping': False, + 'is_sequential_mi': True, + 'is_parallel_mi': False, + 'mi_count': 3, + 'mi_index': i+1}) + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + + + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + self.assertEqual({'1': {'FirstName': 'The Funk #0'}, + '2': {'FirstName': 'The Funk #1'}, + '3': {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + #### NB - start here + ### Data is not correctly getting to the next task upon complete of the last task + ### after do_engine_steps, the next task in the list should be the same as task.data + ### but it is not. + + ### invalid copy of data?? ## appears that parent is not hooked up correctly + + # Set the birthdays of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberBday"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data({"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'1': {'FirstName': 'The Funk #0', "Birthdate": "10/00/1985"}, + '2': {'FirstName': 'The Funk #1', "Birthdate": "10/01/1985"}, + '3': {'FirstName': 'The Funk #2', "Birthdate": "10/02/1985"}}, + self.workflow.last_task.data["Family"]["Members"]) + + + + + def actual_test2(self, save_restore=False): + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family":{"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. 
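+ # Sequential multi-instance: only one instance is ready at a time, and the
+ # first ready task keeps the spec name 'FamilyMemberTask' while later
+ # instances appear as 'FamilyMemberTask_0', 'FamilyMemberTask_1', and so on,
+ # which is the naming the assertions inside this loop rely on.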
+ for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + self.assertEqual({'1': {'FirstName': 'The Funk #0'}, + '2': {'FirstName': 'The Funk #1'}, + '3': {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + + + # Make sure that if we have a list as both input and output + # collection, that we raise an exception + + task = self.workflow.get_ready_user_tasks()[0] + task.data['Family']['Members'] = ['The Funk #0','The Funk #1','The Funk #2'] + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + with self.assertRaises(WorkflowException) as context: + self.workflow.complete_task_from_id(task.id) + + + def actual_test_with_dict(self, save_restore=False): + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family":{"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + self.assertEqual({'1': {'FirstName': 'The Funk #0'}, + '2': {'FirstName': 'The Funk #1'}, + '3': {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + + + + # Set the birthdays of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + # Modify so that the dict keys are alpha rather than int + task.data["Family"]["Members"] = { + "a": {'FirstName': 'The Funk #0'}, + "b": {'FirstName': 'The Funk #1'}, + "c": {'FirstName': 'The Funk #2'}} + if (i > 0): + self.assertEqual("FamilyMemberBday"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + self.workflow.complete_task_from_id(task.id) +# if save_restore: self.save_restore() + + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({"a": {'FirstName': 'The Funk #0', "Birthdate": "10/00/1985"}, + "b": {'FirstName': 'The Funk #1', "Birthdate": "10/01/1985"}, + "c": {'FirstName': 'The Funk #2', "Birthdate": "10/02/1985"}}, + self.workflow.last_task.data["Family"]["Members"]) + + + + def actual_test_for_extensions(self, save_restore=False): + + # Set initial array size to 3 in the first user form. 
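+ # This variant only needs the first ready task: it checks that the Camunda
+ # extension properties attached to Activity_FamSize in the diagram were
+ # parsed onto the task spec as a plain dict (Test1/Test2 in this fixture).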
+ task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + extensions = task.task_spec.extensions # assume bpmn + self.assertEqual(extensions,{'Test1':'Value1','Test2':'Value2'}) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceArrayTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceDMNTest.py b/tests/SpiffWorkflow/camunda/MultiInstanceDMNTest.py new file mode 100644 index 000000000..c381d5b6f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceDMNTest.py @@ -0,0 +1,47 @@ +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + +class MultiInstanceDMNTest(BaseTestCase): + + def setUp(self): + self.spec, subprocesses = self.load_workflow_spec( + 'DMNMultiInstance.bpmn', 'Process_1', 'test_integer_decision_multi.dmn') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D') + + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.save_restore() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.save_restore() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D') + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceDMNTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py b/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py new file mode 100644 index 000000000..34b3db468 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + + + + +import copy +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +__author__ = 'matth' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class MultiInstanceDeepDictTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + deep_dict = { + "StudyInfo": { + "investigators": { + "PI": { + "affiliation": "", + "department": "", + "display_name": "Daniel Harold Funk", + "email": "dhf8r@virginia.edu", + "given_name": "Daniel", + "sponsor_type": "Contractor", + "telephone_number": "", + "title": "", + "type_full": "Primary Investigator", + "user_id": "dhf8r" + }, + "DC": { + "type_full": "Department Contact", + "user_id": "John Smith" + } + } + } + } + + expected_result = copy.copy(deep_dict) + expected_result["StudyInfo"]["investigators"]["DC"]["email"] = "john.smith@gmail.com" + expected_result["StudyInfo"]["investigators"]["PI"]["email"] = "dan.funk@gmail.com" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/multi_instance_parallel_deep_data_edit.bpmn', + 'MultiInstance') + + def testRunThroughHappy(self): + 
self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # The initial task is a script task. Set the data there + # and move one. + task = self.workflow.get_ready_user_tasks()[0] + task.data = self.deep_dict + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':True, + 'mi_count':2, + 'mi_index':1}) + self.assertEqual("MultiInstanceTask", task.task_spec.name) + self.assertTrue("investigator" in task.data) + data = copy.copy(task.data) + data['investigator']['email'] = "john.smith@gmail.com" + task.update_data(data) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':True, + 'mi_count':2, + 'mi_index':2}) + self.assertEqual("MultiInstanceTask", task.task_spec.name) + self.assertTrue("investigator" in task.data) + data = copy.copy(task.data) + data['investigator']['email'] = "dan.funk@gmail.com" + task.update_data(data) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + task = self.workflow.last_task + self.assertEqual(self.expected_result, task.data) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceDeepDictTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py b/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py new file mode 100644 index 000000000..42a616f9e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- + +import unittest +import random + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'matth' + +debug = True + +class MultiInstanceParallelArrayTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('multi_instance_array_parallel.bpmn', 'MultiInstanceArray') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + first_task = self.workflow.task_tree + + # A previous task (in this case the root task) will set the data + # so it must be found later. + first_task.update_data({"FamilySize": 3}) + self.workflow.do_engine_steps() + if save_restore: self.reload_save_restore() + # Set initial array size to 3 in the first user form. 
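+ # FamilySize was already seeded on the root task above; the first user form
+ # sets it again. The name-collection loop below still runs sequentially (one
+ # ready task at a time), and only afterwards do the three birthday tasks
+ # become ready in parallel and get completed in random order.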
+ task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"FamilySize": 3}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.reload_save_restore() + self.workflow.do_engine_steps() + + # Set the names of the 3 family members. + for i in range(3): + + tasks = self.workflow.get_ready_user_tasks() + self.assertEqual(len(tasks),1) # still with sequential MI + task = tasks[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + tasks = self.workflow.get_ready_user_tasks() + + self.assertEqual(3,len(tasks)) + # Set the birthdays of the 3 family members. + for i in range(3): # emulate random Access + task = random.choice(tasks) + x = task.internal_data['runtimes'] -1 + self.assertEqual("FamilyMemberBday", task.task_spec.name[:16]) + self.assertEqual({"FirstName": "The Funk #%i" % x}, + task.data["CurrentFamilyMember"]) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/05/1985" + str(x)}}) + self.workflow.do_engine_steps() + self.workflow.complete_task_from_id(task.id) + # The data should still be available on the current task. + self.assertEqual({'FirstName': "The Funk #%i" % x, + 'Birthdate': '10/05/1985' + str(x)}, + self.workflow.get_task(task.id) + .data['CurrentFamilyMember']) + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + self.workflow.do_engine_steps() + + tasks = self.workflow.get_ready_user_tasks() + + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + + names = task.data['FamilyMembers'] + bdays = task.data['FamilyMemberBirthday'] + for x in list(names.keys()): + self.assertEqual(str(names[x]['FirstName'][-1]),str(bdays[x]['Birthdate'][-1])) + self.assertTrue(self.workflow.is_completed()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceParallelArrayTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/NIMessageBoundaryTest.py b/tests/SpiffWorkflow/camunda/NIMessageBoundaryTest.py new file mode 100644 index 000000000..a3c79fbba --- /dev/null +++ b/tests/SpiffWorkflow/camunda/NIMessageBoundaryTest.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + + +class NIMessageBoundaryTest(BaseTestCase): + """ + Non-Interrupting Timer boundary test + """ + def setUp(self): + spec, subprocesses = self.load_workflow_spec('noninterrupting-MessageBoundary.bpmn', 'MessageBoundary') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + # first we run through a couple of steps where we answer No to each + # question + answers = 
{'Activity_WorkLate':('flag_task','No'), + 'Activity_DoWork': ('work_done','No')} + for x in range(3): + ready_tasks = self.workflow.get_tasks(TaskState.READY) + for task in ready_tasks: + response = answers.get(task.task_spec.name,None) + self.assertEqual(response==None, + False, + 'We got a ready task that we did not expect - %s'%( + task.task_spec.name)) + task.data[response[0]] = response[1] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + # if we have a list of tasks - that list becomes invalid + # after we do a save restore, so I'm completing the list + # before doing the save restore. + if save_restore: + self.save_restore() + + + answers = {'Activity_WorkLate':('flag_task','Yes'), + 'Activity_DoWork': ('work_done','No'), + 'Activity_WorkLateReason':('work_late_reason','covid-19')} + for x in range(3): + ready_tasks = self.workflow.get_tasks(TaskState.READY) + for task in ready_tasks: + response = answers.get(task.task_spec.name,None) + self.assertEqual(response==None, + False, + 'We got a ready task that we did not expect - %s'%( + task.task_spec.name)) + task.data[response[0]] = response[1] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(len(ready_tasks),1) + task = ready_tasks[0] + self.assertEqual(task.task_spec.name,'Activity_DoWork') + task.data['work_done'] = 'Yes' + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(len(ready_tasks), 1) + task = ready_tasks[0] + self.assertEqual(task.task_spec.name, 'Activity_WorkCompleted') + task.data['work_completed'] = 'Lots of Stuff' + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True) + self.assertEqual(self.workflow.last_task.data,{'Event_InterruptBoundary_Response': 'Youre late!', + 'flag_task': 'Yes', + 'work_done': 'Yes', + 'work_completed': 'Lots of Stuff', + 'work_late_reason': 'covid-19'}) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NIMessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py b/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py new file mode 100644 index 000000000..392071811 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +import unittest + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +__author__ = 'kellym' + +class ResetTokenTestMIParallel(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_MIParallel.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False,reset_data=False): + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'task_data': {'do_step':'Yes'}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'x'}}}, + {'taskname': 'FormA', + 
'task_data': {'current': {'A' : 'y'}}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'z'}}} + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name[:len(step['taskname'])]) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertEqual({'current': {'A': 'y'}, + 'do_step': 'Yes', + 'output': {'1': {'A': 'x'}, '2': {'A': 'y'}, '3': {'A': 'z'}}}, + self.workflow.last_task.data) + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA', + 'task_data': {'current': {'A' : 'a1'}}}, + {'taskname': 'FormC', + 'task_data': {'C' : 'c'}}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'current': {'A': 'x'}, + 'do_step': 'Yes', + 'C': 'c', + 'output': {'1': {'A': 'a1'}, + '2': {'A': 'y'}, + '3': {'A': 'z'}}}, + self.workflow.last_task.data) + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestMIParallel) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenMITest.py b/tests/SpiffWorkflow/camunda/ResetTokenMITest.py new file mode 100644 index 000000000..5d6c2e27e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenMITest.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ResetTokenTestMI(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_MI.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False,reset_data=False): + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'task_data': {'do_step':'Yes'}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'x'}}}, + {'taskname': 'FormA_0', + 'task_data': {'current': {'A' : 'y'}}}, + {'taskname': 'FormA_1', + 'task_data': {'current': {'A' : 'z'}}} + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + + steps = [{'taskname': 'FormA', + 'task_data': {'current': {'A': 'a1'}}}, + {'taskname': 'FormA_0', + 'task_data': {'current': {'A': 'a2'}}}, + {'taskname': 'FormA_1', + 'task_data': {'current': {'A': 'a3'}}}, + {'taskname': 'FormC', + 'task_data': 
{'C': 'c'}} + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + self.assertEqual({'do_step': 'Yes', + 'output': {'1': {'A': 'a1'}, + '2': {'A': 'a2'}, + '3': {'A': 'a3'}}, + 'C': 'c'}, + self.workflow.last_task.data) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestMI) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py b/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py new file mode 100644 index 000000000..698eac7eb --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ResetTokenTestNestedParallel(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_nested_parallel.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyAlt(self): + self.actual_test2(save_restore=False) + + def testRunThroughSaveRestoreAlt(self): + self.actual_test2(save_restore=True) + + def actual_test(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. 
+ """ + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'xb3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'xc1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'xc2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'xc3'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormB2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.workflow.reset_task_from_id(firsttaskid) + self.workflow.do_engine_steps() + #NB - this won't test random access + steps = [{'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'b2'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + notworking = self.workflow.get_ready_user_tasks() + self.assertTrue(self.workflow.is_completed()) + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'xa2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'b2', + 'B3': 'xb3', + 'C1': 'xc1', + 'C2': 'xc2', + 'C3': 'xc3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. 
+ Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1','FormC2','FormC3']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestNestedParallel) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py b/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py new file mode 100644 index 000000000..a5cb0af0b --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ResetTokenTestParallelMatrix(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_parallel_matrix.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyAlt(self): + self.actual_test2(save_restore=False) + + def testRunThroughSaveRestoreAlt(self): + self.actual_test2(save_restore=True) + + + + def actual_test(self, save_restore=False,reset_data=False): + """ + Test a complicated 
parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. + """ + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'xb3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'xc1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'xc2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'xc3'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormB2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'b2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'xa2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'b2', + 'B3': 'b3', + 'C1': 'xc1', + 'C2': 'xc2', + 'C3': 'xc3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. 
+ Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'a3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestParallelMatrix) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py b/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py new file mode 100644 index 000000000..04eb7cf97 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ResetTokenParallelTaskCountTest(BaseTestCase): + """Assure that setting the token does not effect the overall task + count. Added this when we discovered that this was growing + exponentially in some cases..""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_parallel_simple.bpmn', 'token_trial_parallel_simple') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False): + total = 10 # I would expect there to be 9 tasks, but we get 10. 
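+ # The test pins this total and asserts that it stays constant after taking
+ # the skip branch and again after resetting the token back to the first user
+ # task; resetting should not duplicate tasks in the tree.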
+ + # Set the workflow in motion, and assure we have the right + # number of tasks + + self.workflow.do_engine_steps() + self.assertEquals(total, len(self.workflow.get_tasks())) + + # Tell the exclusive gateway to skip the parallel tasks section. + # We should still have the same number of tasks. + data = {'skipParallel': True} + task = self.workflow.get_ready_user_tasks()[0] + task.data = data + self.workflow.complete_task_from_id(task.id) + self.assertEquals(total, len(self.workflow.get_tasks())) + + # Reset the token to the first user task. + # We should still have the same number of tasks. + self.workflow.task_tree.dump() + task.reset_token({}, reset_data=True) + print('=-----') + self.workflow.task_tree.dump() + self.assertEquals(total, len(self.workflow.get_tasks())) + self.assertEquals(1, len(self.workflow.get_ready_user_tasks())) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenParallelTaskCountTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py b/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py new file mode 100644 index 000000000..f9236c88f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py @@ -0,0 +1,196 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + +class ResetTokenTestSubProcess(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial_subprocess.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False): + """ + Test a complicated parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. 
+ """ + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA1': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'a1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'a1', + 'A2': 'a2', + 'A3': 'a3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. 
+ Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'a3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestSubProcess) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenTest.py b/tests/SpiffWorkflow/camunda/ResetTokenTest.py new file mode 100644 index 000000000..555e2068c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenTest.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + + +class ResetTokenTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('token_trial.bpmn', 'token') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyReset(self): + self.actual_test(save_restore=False,reset_data=True,expected={'do_step':False,'C':'c'}) + + def testRunThroughSaveRestoreReset(self): + self.actual_test(save_restore=True,reset_data=True,expected={'do_step':False,'C':'c'}) + + + + def 
actual_test(self, save_restore=False, reset_data=False, expected=None): + + if expected is None: + expected = {'do_step': False, 'A': 'a', 'B': 'b', 'C': 'c'} + + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': True}, + {'taskname': 'FormA', + 'formvar': 'A', + 'answer': 'a'}, + {'taskname': 'FormB', + 'formvar': 'B', + 'answer': 'b'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None: + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': False}, + {'taskname': 'FormC', + 'formvar': 'C', + 'answer': 'c'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'do_step':False,'A':'a','B':'b','C':'c'}, + self.workflow.last_task.data) + + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/StartMessageEventTest.py b/tests/SpiffWorkflow/camunda/StartMessageEventTest.py new file mode 100644 index 000000000..a96d2e422 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/StartMessageEventTest.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- + +import unittest + +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from .BaseTestCase import BaseTestCase +__author__ = 'kellym' + + +class StartMessageTest(BaseTestCase): + + def setUp(self): + self.spec, self.subprocesses = self.load_workflow_spec('message_test.bpmn', 'ThrowCatch') + self.workflow = BpmnWorkflow(self.spec, self.subprocesses) + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self,save_restore = False): + steps = [('Activity_EnterPlan',{'plan_details':'Bad'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Better'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Best'}), + ('Activity_ApproveOrDeny', {'approved':'Yes'}), + ('Activity_EnablePlan',{'Done':'OK!'})] + self.workflow.do_engine_steps() # get around start task + ready_tasks = self.workflow.get_tasks(TaskState.READY) + waiting_tasks = self.workflow.get_tasks(TaskState.WAITING) + self.assertEqual(1, len(ready_tasks),'Expected to have one ready task') + self.assertEqual(1, len(waiting_tasks), 'Expected to have one waiting task') + + for step in steps: + current_task = ready_tasks[0] + self.assertEqual(current_task.task_spec.name,step[0]) + current_task.update_data(step[1]) + current_task.complete() + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + if save_restore: + self.save_restore() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + + 
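+ # Once the throw/catch loop above has walked through the plan/approve steps
+ # and enabled the plan, the workflow should be complete and last_task.data
+ # should carry the final plan details, the approval result and the 'Done'
+ # flag, which the assertions below verify.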
self.assertEqual(self.workflow.is_completed(),True,'Expected the workflow to be complete at this point') + self.assertEqual(self.workflow.last_task.data, + { + 'plan_details': 'Best', + 'ApprovalResult': 'Yes', + 'Done': 'OK!' + }) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StartMessageTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/SubWorkflowTest.py b/tests/SpiffWorkflow/camunda/SubWorkflowTest.py new file mode 100644 index 000000000..75331022e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/SubWorkflowTest.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + +__author__ = 'kellym' + + +class SubWorkflowTest(BaseTestCase): + """The tests a somewhat complex subworkflow and verifies that it does + what we expect""" + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('subWorkflowComplex.bpmn', 'SubWorkflow') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + self.answers = ['A','A1','A2','B'] + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + # Set initial array size to 3 in the first user form. + for answer in self.answers: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_"+answer, task.task_spec.name) + task.update_data({"Field"+answer: answer}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertEqual(self.workflow.last_task.data,{'FieldA': 'A', + 'FieldA1': 'A1', + 'FieldA2': 'A2', + 'FieldB': 'B'}) + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SubWorkflowTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/__init__.py b/tests/SpiffWorkflow/camunda/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/camunda/data/CustomScript.bpmn b/tests/SpiffWorkflow/camunda/data/CustomScript.bpmn new file mode 100644 index 000000000..586ff2a58 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/CustomScript.bpmn @@ -0,0 +1,63 @@ + + + + + Flow_0k348ph + + + + Flow_0k348ph + Flow_03rcoxc + a = my_custom_function("bill") + + + Flow_02v0zk5 + Flow_0pvahf7 + + + + Flow_0pvahf7 + + + + + Flow_03rcoxc + Flow_02v0zk5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/DMNMultiInstance.bpmn b/tests/SpiffWorkflow/camunda/data/DMNMultiInstance.bpmn new file mode 100644 index 000000000..ddf2c44e5 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/DMNMultiInstance.bpmn @@ -0,0 +1,105 @@ + + + + + Flow_1b29lxw + + + Flow_0fusz9y + + + + Flow_0z7tfh1 + SequenceFlow_06fnqj2 + + + + + + Flow_066d5e1 + Flow_0fusz9y + print('EndScript') +print(stuff) + + + + This is a test +of documentation + Flow_1b29lxw + Flow_09ciw49 + stuff={'A': {'x': 3}, + 'B': {'x': 4}, + 'C': {'x': 5}, + 'D': {'x': 6}, + 'E': {'x': 7}} + + + + Flow_09ciw49 + Flow_0z7tfh1 + + + + SequenceFlow_06fnqj2 + Flow_066d5e1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/tests/SpiffWorkflow/camunda/data/ExclusiveGatewayIfElseAndDecision.bpmn b/tests/SpiffWorkflow/camunda/data/ExclusiveGatewayIfElseAndDecision.bpmn new file mode 100644 index 000000000..1d72c93d8 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/ExclusiveGatewayIfElseAndDecision.bpmn @@ -0,0 +1,112 @@ + + + + + SequenceFlow_0b7whlk + + + SequenceFlow_15emspo + + + SequenceFlow_0b7whlk + SequenceFlow_15emspo + SequenceFlow_030p6mf + SequenceFlow_14jk7cm + + + SequenceFlow_030p6mf + + + + x==1 + + + x==2 + + + SequenceFlow_06fnqj2 + + + + + SequenceFlow_14jk7cm + SequenceFlow_06fnqj2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/MessageBoundary.bpmn b/tests/SpiffWorkflow/camunda/data/MessageBoundary.bpmn new file mode 100644 index 000000000..5966e0fdb --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/MessageBoundary.bpmn @@ -0,0 +1,243 @@ + + + + + + + + + Event_0d3xq5q + Activity_Interrupt + Gateway_0ncff13 + Event_0l8sadb + Event_0g8w85g + + + Event_12moz8m + Event_0j702hl + Activity_1m4766l + Event_InterruptBoundary + + + + Flow_0bvln2b + + + + + + + + + Flow_0bvln2b + Flow_1t2ocwk + Flow_1ya6ran + + + Flow_1ya6ran + Flow_0saykw5 + Flow_1t2ocwk + + + + interrupt_task == 'Yes' + + + Flow_0saykw5 + Flow_0lekhj5 + + + + Flow_0lekhj5 + + + + + Flow_1gd7a2h + + + + + + Flow_0o0l113 + Flow_093roev + + + Flow_1gd7a2h + Flow_093roev + + Flow_1gs89vo + + + Flow_1gs89vo + Flow_0wuxluk + Flow_11u0pgk + + + + Flow_11u0pgk + Flow_1rqk2v9 + + timedelta(seconds=.01) + + + + + Flow_1rqk2v9 + Flow_18d90uu + Flow_0wuxluk + + + Flow_18d90uu + + + False + + + + + + Flow_0o0l113 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/call_activity_with_message.bpmn b/tests/SpiffWorkflow/camunda/data/call_activity_with_message.bpmn new file mode 100644 index 000000000..60d6107e5 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/call_activity_with_message.bpmn @@ -0,0 +1,110 @@ + + + + + Flow_0za7l4k + + + + + + + + + Flow_0za7l4k + Flow_No + Flow_1watnli + + + + Flow_17adrta + Flow_00cg7b6 + + + + + Flow_00cg7b6 + Flow_No + Flow_Yes + + + + + Approved == "Yes" + + + Flow_Yes + + + + Flow_1watnli + Flow_17adrta + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/call_activity_with_message_parent.bpmn b/tests/SpiffWorkflow/camunda/data/call_activity_with_message_parent.bpmn new file mode 100644 index 000000000..7737ec0c4 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/call_activity_with_message_parent.bpmn @@ -0,0 +1,138 @@ + + + + + + + + + Activity_ApproveOrDeny + Event_Send_Approval_Response + Event_EndEvent1 + Event_Get_Approval_Request + + + Event_NormalStart + Activity_EnablePlan + Event_EndEvent2 + Activity_RequestApproval + + + + + + + + + Flow_1pfh6m0 + Flow_1ndkq1b + + + Flow_1ndkq1b + Flow_1qnods2 + + + + Flow_1qnods2 + + + + + + Flow_087wt6r + + + + + + + + + + Flow_01vvra7 + Flow_1afin8v + + + + Flow_1afin8v + + + + Flow_1pfh6m0 + + + + Flow_087wt6r + Flow_01vvra7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/common_workflow.bpmn b/tests/SpiffWorkflow/camunda/data/common_workflow.bpmn new file mode 100644 index 000000000..3902b9b4a --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/common_workflow.bpmn @@ -0,0 +1,89 @@ + + + + + Flow_0xpz6la + Flow_03yam6h + my_custom_function('test 1 from common workflow') + + + Flow_1jz376x + + + + + + + + + Flow_03yam6h + Flow_0pc6yx9 + + + + Flow_0pc6yx9 + Flow_16t7ue6 + my_custom_function('test 2 from common workflow') + + + + + + + + + + + + + Flow_16t7ue6 + Flow_1jz376x + + + Flow_0xpz6la + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn b/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn new file mode 100644 index 000000000..d98baf5d8 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn @@ -0,0 +1,89 @@ + + + + + Flow_1wis1un + + + + + + + + Flow_1wis1un + Flow_144jxvd + + + + Flow_144jxvd + Flow_1riszc2 + Flow_0xdvee4 + + + + + Flow_13ncefd + Flow_0xdvee4 + + + + + + + + + Flow_1riszc2 + Flow_13ncefd + + 3 + + + + morestuff == 'No' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/CustomScript.dmn b/tests/SpiffWorkflow/camunda/data/dmn/CustomScript.dmn new file mode 100644 index 000000000..514ea4071 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/CustomScript.dmn @@ -0,0 +1,44 @@ + + + + + + + my_custom_function('bill') + + + + + + 'BILL' + + + my_custom_function('bill') + + + + + my_custom_function('jane') + + + my_custom_function('jane') + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/dmndict.dmn b/tests/SpiffWorkflow/camunda/data/dmn/dmndict.dmn new file mode 100644 index 000000000..2670ed14c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/dmndict.dmn @@ -0,0 +1,61 @@ + + + + + + + inputvar + + + + + + + + + + + + 1 + + + + + + 'yup it worked' + + + "yes" + + + + + 2 + + + + + + 'didnt expect this' + + + "No" + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/invalid_decision.dmn b/tests/SpiffWorkflow/camunda/data/dmn/invalid_decision.dmn new file mode 100644 index 000000000..cbafd50c1 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/invalid_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + spam + + + + + This is complletely wrong. + + mGender Description + = 1 + + + "wrong" + + + + so is this. + + >= 100 + + + "My cat's breath smells like cat food." 
+ + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision.dmn b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision.dmn new file mode 100644 index 000000000..8f85db1b1 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision.dmn @@ -0,0 +1,49 @@ + + + + + + + + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_feel.dmn b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_feel.dmn new file mode 100644 index 000000000..8f85db1b1 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_feel.dmn @@ -0,0 +1,49 @@ + + + + + + + + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_multi.dmn b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_multi.dmn new file mode 100644 index 000000000..7565b4c0e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmn/test_integer_decision_multi.dmn @@ -0,0 +1,49 @@ + + + + + + + item.x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/dmndict.bpmn b/tests/SpiffWorkflow/camunda/data/dmndict.bpmn new file mode 100644 index 000000000..4778f57b3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/dmndict.bpmn @@ -0,0 +1,64 @@ + + + + + Flow_0k348ph + + + + Flow_132mhgo + Flow_03rcoxc + pi = {'test':{'me':'stupid var'}} +inputvar = 1 + + + Flow_03rcoxc + Flow_0pvahf7 + + + + Flow_0pvahf7 + + + + + Flow_0k348ph + Flow_132mhgo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn b/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn new file mode 100644 index 000000000..4acb9f8e3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn @@ -0,0 +1,94 @@ + + + + + Flow_1wis1un + + + + + + + + Flow_1wis1un + Flow_144jxvd + + + + Flow_144jxvd + Flow_1riszc2 + Flow_0xdvee4 + + + + morestuff == 'Yes' + + + Flow_13ncefd + Flow_0xdvee4 + + + + + + + + + Flow_1riszc2 + Flow_13ncefd + + 3 + + + + morestuff == 'No' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/external_message.bpmn b/tests/SpiffWorkflow/camunda/data/external_message.bpmn new file mode 100644 index 000000000..ce3a8f5a0 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/external_message.bpmn @@ -0,0 +1,109 @@ + + + + + Flow_1a0tyih + + + + + + + + Flow_081mykh + Flow_08pe1c9 + + + + Flow_18jn2xj + + + + Flow_18jn2xj + Flow_0q62iou + caughtinterrupt = True + + + + Flow_1pv9l9r + + + + + Flow_1a0tyih + Flow_1pv9l9r + Flow_081mykh + caughtinterrupt = False + + + + + + + + + Flow_0q62iou + + + + Flow_08pe1c9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/invalid/InvalidDecision.bpmn b/tests/SpiffWorkflow/camunda/data/invalid/InvalidDecision.bpmn new file mode 100644 index 000000000..56b4c1c1e --- /dev/null +++ 
b/tests/SpiffWorkflow/camunda/data/invalid/InvalidDecision.bpmn @@ -0,0 +1,112 @@ + + + + + SequenceFlow_0b7whlk + + + SequenceFlow_06fnqj2 + + + + SequenceFlow_14jk7cm + SequenceFlow_06fnqj2 + + + SequenceFlow_15emspo + + + SequenceFlow_030p6mf + + + SequenceFlow_0b7whlk + SequenceFlow_14jk7cm + SequenceFlow_15emspo + SequenceFlow_030p6mf + + + + x==1 + + + + x==2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/message_test.bpmn b/tests/SpiffWorkflow/camunda/data/message_test.bpmn new file mode 100644 index 000000000..2f7565067 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/message_test.bpmn @@ -0,0 +1,210 @@ + + + + + + + + + Activity_ApproveOrDeny + SendDecision + Event_EndEvent2 + StartApprovalProcess + + + Event_NormalStart + SendApprovalRequest + GetDecision + Gateway_Approved + Activity_EnterPlan + Activity_EnablePlan + Event_EndEvent1 + + + + Flow_060cfic + + + Flow_1nsjil4 + Flow_1t3bhky + + + + Flow_1t3bhky + Flow_1jdfc06 + + + + Flow_1jdfc06 + Flow_0jqxt85 + Flow_1tfirpy + + + + + + + ApprovalResult=="Yes" + + + + + + + + + + + Flow_1ym5g7r + Flow_0m1dzpq + + + + + + + + + Flow_1tfirpy + Flow_060cfic + Flow_1nsjil4 + + + + Flow_0m1dzpq + Flow_0abuvsx + + + + Flow_0abuvsx + + + Flow_1ym5g7r + + + + + + + + + Flow_0jqxt85 + Flow_1ync7ek + + + Flow_1ync7ek + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn new file mode 100644 index 000000000..f37b7e525 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn @@ -0,0 +1,99 @@ + + + + + Flow_0bplvtg + + + Please enter family size: + + + + + + + + + + + + + + + + + + + + Flow_0bplvtg + Flow_0zpm0rc + + + + Please enter information for family member {{ FamilyMember }}: + + + + + + Flow_0zpm0rc + Flow_0659lqh + + Family.Size + + + + + Enter Birthday for {{ CurrentFamilyMember['FamilyMember.FormField_FirstName'] }} + + + + + + Flow_0659lqh + Flow_0ncqf54 + + + + + + XXX + Flow_0ncqf54 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn new file mode 100644 index 000000000..fbcd2cf1d --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn @@ -0,0 +1,99 @@ + + + + + Flow_0bplvtg + + + Please enter family size: + + + + + + + + + + + + + + + + Flow_0bplvtg + Flow_0zpm0rc + + + + Please enter information for family member {{ FamilyMember }}: + + + + + + Flow_0zpm0rc + Flow_0659lqh + + FamilySize + + + + + Enter Birthday for {{ CurrentFamilyMember['FamilyMember.FormField_FirstName'] }} + + + + + + Flow_0659lqh + Flow_0ncqf54 + + FamilyMembers + + + + + + XXX + Flow_0ncqf54 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn new file mode 100644 index 000000000..7ce994be9 --- /dev/null +++ 
b/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn @@ -0,0 +1,66 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + + + + # Please provide addtional information about: +## Investigator ID: {{investigator.user_id}} +## Role: {{investigator.type_full}} + + + + + + SequenceFlow_1p568pp + Flow_0ugjw69 + + + + + Imagine a script task here that loads a complex data set. + Flow_0t6p1sb + SequenceFlow_1p568pp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/no_form.bpmn b/tests/SpiffWorkflow/camunda/data/no_form.bpmn new file mode 100644 index 000000000..20dfc7f5f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/no_form.bpmn @@ -0,0 +1,54 @@ + + + + + SequenceFlow_0ik56h0 + + + + + + + + + SequenceFlow_0ik56h0 + SequenceFlow_1de4q40 + + + SequenceFlow_1de4q40 + + + + This is a user task with no form in it, It should not error + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/noninterrupting-MessageBoundary.bpmn b/tests/SpiffWorkflow/camunda/data/noninterrupting-MessageBoundary.bpmn new file mode 100644 index 000000000..3ea3f311f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/noninterrupting-MessageBoundary.bpmn @@ -0,0 +1,287 @@ + + + + + + + + + Event_12moz8m + Gateway_0mlgg9d + Activity_WorkLateReason + Event_1w6cide + Activity_1m4766l + Event_InterruptBoundary + + + Event_0d3xq5q + Activity_WorkLate + Gateway_0ncff13 + Event_0g8w85g + Event_0l8sadb + + + + Flow_0bvln2b + + + + + + + + Flow_0bvln2b + Flow_1t2ocwk + Flow_1ya6ran + + + Flow_1ya6ran + Flow_0saykw5 + Flow_1t2ocwk + + + Flow_0saykw5 + Flow_0lekhj5 + + + + Flow_0lekhj5 + + + Flow_1gd7a2h + + + Flow_1g8u810 + Flow_1firdqj + Flow_10gq9an + + + + + + + + Flow_0o0l113 + Flow_1g8u810 + + + Flow_10gq9an + + + Flow_1gd7a2h + Flow_1firdqj + + Flow_1gs89vo + + + + + Flow_11u0pgk + Flow_18d90uu + Flow_0wuxluk + + + Flow_1x6ji2h + + + work_done == 'Yes' + + + + + + + + + Flow_1gs89vo + Flow_0wuxluk + Flow_11u0pgk + + + + + + + + + Flow_18d90uu + Flow_1x6ji2h + + + + Flow_0o0l113 + + + + + + + + flag_task == 'Yes' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/random_fact.bpmn b/tests/SpiffWorkflow/camunda/data/random_fact.bpmn new file mode 100644 index 000000000..db4eec0b1 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/random_fact.bpmn @@ -0,0 +1,96 @@ + + + + + SequenceFlow_0ik56h0 + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0ik56h0 + SequenceFlow_1wl4cli + + + + + + + + SequenceFlow_1wl4cli + Flow_1wgkcv6 + scripts.FactService + + + + Flow_1wgkcv6 + + + + User sets the Fact.type to cat, norris, or buzzword + + + + Makes an API  call to get a fact of the required type. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/random_fact.svg b/tests/SpiffWorkflow/camunda/data/random_fact.svg new file mode 100644 index 000000000..3078ea0e3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/random_fact.svg @@ -0,0 +1,4 @@ + + + +Set TypeDisplay FactUser sets the Fact.type to cat,norris, or buzzwordMakes an API call to get a factof the required type. 
\ No newline at end of file diff --git a/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn b/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn new file mode 100644 index 000000000..5f8651526 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn @@ -0,0 +1,162 @@ + + + + + Flow_1 + + + Flow_1 + Flow_4 + + Flow_2 + + + + Enter Form1 + + + + + + Flow_2 + Flow_0j8meqp + + + + + Enter Form A1 + + + + + + Flow_1jbvpss + Flow_1w00bbg + + + + Enter Form A2 + + + + + + Flow_0vl1ixa + Flow_0kzcljc + + + + + Flow_1cnvx4h + + + + Flow_0j8meqp + Flow_1jbvpss + Flow_0vl1ixa + + + Flow_1w00bbg + Flow_0kzcljc + Flow_1cnvx4h + + + + + + Enter form 2 + + + + + + Flow_4 + Flow_5 + + + Flow_5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial.bpmn new file mode 100644 index 000000000..89cc6dc0b --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial.bpmn @@ -0,0 +1,144 @@ + + + + + Flow_03vnrmv + + + Flow_0g2wjhu + Flow_0ya87hl + Flow_1qgke9w + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_13qpm6f + + + + Flow_13qpm6f + Flow_04bpvfu + Flow_0g2wjhu + + + Yes + + + do_step == True + + + do_step== False + + + + + + FormA + + + + + + Flow_04bpvfu + Flow_0ahlz50 + + + FormB + + + + + + Flow_0ahlz50 + Flow_0ya87hl + + + FormC + + + + + + Flow_1qgke9w + Flow_039y4lk + + + + Flow_039y4lk + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn new file mode 100644 index 000000000..368cd4f08 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn @@ -0,0 +1,83 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormC + + + + + + Flow_0ztfesh + Flow_039y4lk + + + + Flow_039y4lk + + + + MI item + + + + + + Flow_10pdq2v + Flow_0ztfesh + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn new file mode 100644 index 000000000..3e2f1b19b --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn @@ -0,0 +1,83 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormC + + + + + + Flow_0ztfesh + Flow_039y4lk + + + + Flow_039y4lk + + + + MI item + + + + + + Flow_10pdq2v + Flow_0ztfesh + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn new file mode 100644 index 000000000..ee11331ba --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn @@ -0,0 +1,142 @@ + + + + + Flow_03vnrmv + + + Flow_0g2wjhu + Flow_0ya87hl + Flow_1qgke9w + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_13qpm6f + + + + Flow_13qpm6f + Flow_04bpvfu + Flow_0g2wjhu + + + Yes + do_step == True + + + do_step== False + + + + + + FormA + + + + + + Flow_04bpvfu + Flow_0ahlz50 + + + FormB + + + + + + Flow_0ahlz50 + Flow_0ya87hl + + + FormC + + + + + + Flow_1qgke9w + Flow_039y4lk + + + + Flow_039y4lk + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn new file mode 100644 index 000000000..1ff66eaa9 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn @@ -0,0 +1,402 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormD + + + + + + Flow_08nd97v + Flow_039y4lk + + + + Flow_039y4lk + + + + + + Flow_10pdq2v + Flow_0rg1whs + Flow_0pyul3k + Flow_1l934p1 + + + Flow_093ce35 + Flow_0c4mntn + Flow_0knduft + Flow_08nd97v + + + FormA1 + + + + + + Flow_1uw6r98 + Flow_0n71r7a + + + FormA2 + + + + + + Flow_1rihpzh + Flow_0p30bun + + + FormA3 + + + + + + Flow_0098ozb + Flow_04lzszv + + + FormB1 + + + + + + Flow_1tpkm1k + Flow_0zndavy + + + FormB2 + + + + + + Flow_0oz5j4d + Flow_1u9tezs + + + FormB3 + + + + + + Flow_11diihw + Flow_1xgsff0 + + + FormC1 + + + + + + Flow_04yup8h + Flow_0v6ozza + + + FormC2 + + + + + + Flow_1ay413y + Flow_1d1kroa + + + FormC3 + + + + + + Flow_0etaqvr + Flow_0kahsqi + + + Flow_0rg1whs + Flow_1uw6r98 + Flow_1rihpzh + Flow_0098ozb + + + Flow_0n71r7a + Flow_0p30bun + Flow_04lzszv + Flow_0c4mntn + + + + + + + + + + Flow_0pyul3k + Flow_1tpkm1k + Flow_0oz5j4d + Flow_11diihw + + + Flow_0zndavy + Flow_1u9tezs + Flow_1xgsff0 + Flow_093ce35 + + + + + + + + + + + + Flow_1l934p1 + Flow_04yup8h + Flow_1ay413y + Flow_0etaqvr + + + Flow_0v6ozza + Flow_1d1kroa + Flow_0kahsqi + Flow_0knduft + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn new file mode 100644 index 000000000..276897834 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn @@ -0,0 +1,272 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormD + + + + + + Flow_08nd97v + Flow_039y4lk + + + + Flow_039y4lk + + + + + + Flow_10pdq2v + Flow_0fme0gm + Flow_0cmqr9j + Flow_0dv8nod + + + Flow_1pf4svr + Flow_0y1tqui + Flow_1h5wu4u + Flow_08nd97v + + + + + FormA1 + + + + + + Flow_0fme0gm + Flow_05w8299 + + + FormA2 + + + + + + Flow_05w8299 + Flow_16gvr7i + + + + FormA3 + + + + + + Flow_16gvr7i + Flow_1pf4svr + + + + FormB1 + + + + + + Flow_0cmqr9j + Flow_0ae6rzq + + + FormB2 + + + + + + Flow_0ae6rzq + Flow_0643kan + + + FormB3 + + + + + + Flow_0643kan + Flow_0y1tqui + + + + + + + FormC1 + + + + + + Flow_0dv8nod + Flow_0mtw6yv + + + FormC2 + + + + + + Flow_0mtw6yv + Flow_0rqbd7e + + + FormC3 + + + + + + Flow_0rqbd7e + Flow_1h5wu4u + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn new file mode 100644 index 000000000..5c3658575 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn @@ -0,0 +1,216 @@ + + + + + Flow_1w2tcdp + + + Flow_1vtdwmy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_1242uxm + SequenceFlow_09c4dnr + + + #### Please plan to ensure adequate supplies for staff cleaning before and after patients + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_1ylxjys + SequenceFlow_0rwnquq + + + Flow_0f6q83k + SequenceFlow_1242uxm + SequenceFlow_1ylxjys + + + + + SequenceFlow_09c4dnr + SequenceFlow_0rwnquq + SequenceFlow_00fpfhi + + + + + + SequenceFlow_00fpfhi + Flow_0wycgzo + Flow_1vtdwmy + + + Flow_00zjlx7 + Flow_0f6q83k + Flow_0wycgzo + + + skipParallel == False + + + skipParallel == True + + + + + Flow_1w2tcdp + Flow_00zjlx7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn new file mode 100644 index 000000000..20610a94c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn @@ -0,0 +1,151 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_13362mb + + + FormD + + + + + + Flow_0e9x16w + Flow_039y4lk + + + + Flow_039y4lk + + + + Flow_13362mb + Flow_0e9x16w + + Flow_1sy7h5y + + + FormA1 + + + + + + Flow_1sy7h5y + Flow_0f89gdk + + + FormA2 + + + + + + Flow_0f89gdk + Flow_0nudpra + + + FormA3 + + + + + + Flow_0nudpra + Flow_0zucva0 + + + + + + Flow_0zucva0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/top_workflow.bpmn b/tests/SpiffWorkflow/camunda/data/top_workflow.bpmn new file mode 100644 index 000000000..c36573741 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/top_workflow.bpmn @@ -0,0 +1,64 @@ + + + + + Flow_1xegt6f + + + + + Flow_0qc6vpv + + + + + Flow_1xegt6f + Flow_11qyfqv + my_custom_function('test 1 from top workflow') + + + Flow_11qyfqv + Flow_0hntmrc + + + Flow_0hntmrc + Flow_0qc6vpv + my_custom_function('test 2 from top workflow') + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/serializer/CamundaExtensionsTest.py b/tests/SpiffWorkflow/camunda/serializer/CamundaExtensionsTest.py new file mode 100644 index 000000000..902a911e6 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/serializer/CamundaExtensionsTest.py @@ -0,0 +1,33 @@ +import unittest + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class CamundaExtensionsTest(BaseTestCase): + + def setUp(self): + spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact') + self.workflow = BpmnWorkflow(spec, subprocesses) + + def testExtensionsAreSerialized(self): + self.assertMyExtension() + self.save_restore() + self.assertMyExtension() + + def assertMyExtension(self): + """Assure that we have a very specific extension on specific task.""" + task = self.workflow.get_task_spec_from_name("Task_User_Select_Type") + self.assertIsNotNone(task) + self.assertTrue(hasattr(task, 'extensions')) + self.assertTrue("my_extension" in task.extensions) + self.assertEqual(task.extensions["my_extension"], 'my very own extension') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CamundaExtensionsTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py b/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py new file mode 100644 index 000000000..29760e9db --- /dev/null +++ b/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py @@ -0,0 +1,28 @@ +import unittest + +from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser +from SpiffWorkflow.camunda.specs.UserTask import UserTask +from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser + + +class CamundaParserTest(unittest.TestCase): + CORRELATE = CamundaParser + + def setUp(self): + self.parser = CamundaParser() + + def test_overrides(self): + expected_key = "{http://www.omg.org/spec/BPMN/20100524/MODEL}userTask" + self.assertIn(expected_key, + self.parser.OVERRIDE_PARSER_CLASSES) + + self.assertEqual((UserTaskParser, UserTask), + self.parser.OVERRIDE_PARSER_CLASSES.get(expected_key)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CamundaParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git 
a/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py b/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py
new file mode 100644
index 000000000..40d4a3aa0
--- /dev/null
+++ b/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py
@@ -0,0 +1,49 @@
+import unittest
+
+from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser
+from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
+
+
+class UserTaskParserTest(BaseTestCase):
+    CORRELATE = UserTaskParser
+
+    def setUp(self):
+        self.spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact')
+
+    def testGetForm(self):
+        form = self.spec.task_specs['Task_User_Select_Type'].form
+        self.assertIsNotNone(form)
+
+    def testGetEnumField(self):
+        form = self.spec.task_specs['Task_User_Select_Type'].form
+        self.assertEquals("Fact", form.key)
+        self.assertEquals(1, len(form.fields))
+        self.assertEquals("type", form.fields[0].id)
+        self.assertEquals(3, len(form.fields[0].options))
+
+    def testGetFieldProperties(self):
+        form = self.spec.task_specs['Task_User_Select_Type'].form
+        self.assertEquals(1, len(form.fields[0].properties))
+        self.assertEquals('description', form.fields[0].properties[0].id)
+        self.assertEquals('Choose from the list of available types of random facts', form.fields[0].properties[0].value)
+
+    def testGetFieldValidation(self):
+        form = self.spec.task_specs['Task_User_Select_Type'].form
+        self.assertEquals(1, len(form.fields[0].validation))
+        self.assertEquals('maxlength', form.fields[0].validation[0].name)
+        self.assertEquals('25', form.fields[0].validation[0].config)
+
+    def testNoFormDoesNotBombOut(self):
+        self.load_workflow_spec('no_form.bpmn', 'no_form')
+        self.assertTrue(True)  # You can load a user task that has no form and you can still get here.
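The parser tests above exercise how the Camunda layer attaches a parsed form to a user task spec. A minimal usage sketch follows; it is illustrative only and not part of this commit. The fixture path, the process id 'random_fact', and the task id 'Task_User_Select_Type' are taken from the tests above, while add_bpmn_file(), get_spec(), do_engine_steps() and get_ready_user_tasks() are assumed to be the library's usual entry points rather than shown in this diff.

from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser

# Parse the fixture and pull out the process spec (path and id assumed from the tests above).
parser = CamundaParser()
parser.add_bpmn_file('tests/SpiffWorkflow/camunda/data/random_fact.bpmn')
spec = parser.get_spec('random_fact')

# Read the parsed Camunda form the same way the assertions above do.
form = spec.task_specs['Task_User_Select_Type'].form
print(form.key)                                 # "Fact"
for field in form.fields:
    print(field.id, len(field.options))         # the single field is an enum with three options
    for prop in field.properties:
        print(' ', prop.id, prop.value)         # e.g. the 'description' property
    for rule in field.validation:
        print(' ', rule.name, rule.config)      # e.g. maxlength / 25

# Run the workflow until it waits on that user task.
workflow = BpmnWorkflow(spec)
workflow.do_engine_steps()
ready = workflow.get_ready_user_tasks()
print([task.task_spec.name for task in ready])  # expected: ['Task_User_Select_Type']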
+
+    def testCreateTask(self):
+        pass
+
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(UserTaskParserTest)
+
+
+if __name__ == '__main__':
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/camunda/serializer/__init__.py b/tests/SpiffWorkflow/camunda/serializer/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py b/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py
new file mode 100644
index 000000000..2d3c16d7f
--- /dev/null
+++ b/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py
@@ -0,0 +1,152 @@
+import json
+import unittest
+
+from SpiffWorkflow.camunda.specs.UserTask import FormField, UserTask, Form, \
+    EnumFormField
+from SpiffWorkflow.specs import WorkflowSpec, TaskSpec
+
+
+class UserTaskSpecTest(unittest.TestCase):
+    CORRELATE = UserTask
+
+    def create_instance(self):
+        if 'testtask' in self.wf_spec.task_specs:
+            del self.wf_spec.task_specs['testtask']
+        task_spec = TaskSpec(self.wf_spec, 'testtask', description='foo')
+        self.form = Form()
+        return UserTask(self.wf_spec, 'userTask', self.form)
+
+    def setUp(self):
+        self.wf_spec = WorkflowSpec()
+        self.user_spec = self.create_instance()
+
+    def testConstructor(self):
+        self.assertEquals(self.user_spec.name, 'userTask')
+        self.assertEqual(self.user_spec.data, {})
+        self.assertEqual(self.user_spec.defines, {})
+        self.assertEqual(self.user_spec.pre_assign, [])
+        self.assertEqual(self.user_spec.post_assign, [])
+        self.assertEqual(self.user_spec.locks, [])
+
+    def test_set_form(self):
+        self.assertEqual(self.form, self.user_spec.form)
+
+    def testSerialize(self):
+        pass
+
+    def test_text_field(self):
+        form_field = FormField(form_type="text")
+        form_field.id = "1234"
+        self.form.add_field(form_field)
+        self.assertEqual(form_field, self.user_spec.form.fields[0])
+
+    def test_enum_field(self):
+        enum_field = EnumFormField()
+        enum_field.label = "Which kind of fool are you"
+        enum_field.add_option('old fool', 'This is old, therefor it is good.')
+        enum_field.add_option('new fool',
+                              'This is new, therefor it is better.')
+        self.form.add_field(enum_field)
+        self.assertEqual(enum_field, self.user_spec.form.fields[-1])
+
+    def test_properties(self):
+        form_field = FormField(form_type="text")
+        self.assertFalse(form_field.has_property("wilma"))
+        form_field.add_property("wilma", "flintstone")
+        self.assertTrue(form_field.has_property("wilma"))
+        self.assertEquals("flintstone", form_field.get_property("wilma"))
+
+    def test_validations(self):
+        form_field = FormField(form_type="text")
+        self.assertFalse(form_field.has_validation("barney"))
+        form_field.add_validation("barney", "rubble")
+        self.assertTrue(form_field.has_validation("barney"))
+        self.assertEquals("rubble", form_field.get_validation("barney"))
+
+    def testIsEngineTask(self):
+        self.assertFalse(self.user_spec.is_engine_task())
+
+    def test_convert_to_dict(self):
+        form = Form()
+
+        field1 = FormField(form_type="text")
+        field1.id = "quest"
+        field1.label = "What is your quest?"
+        field1.default_value = "I seek the grail!"
+
+        field2 = EnumFormField()
+        field2.id = "color"
+        field2.label = "What is your favorite color?"
+ field2.add_option("red", "Red") + field2.add_option("orange", "Green") + field2.add_option("yellow", "Yellow") + field2.add_option("green", "Green") + field2.add_option("blue", "Blue") + field2.add_option("indigo", "Indigo") + field2.add_option("violet", "Violet") + field2.add_option("other", "Other") + field2.add_property("description", "You know what to do.") + field2.add_validation("maxlength", "25") + + form.key = "formKey" + form.add_field(field1) + form.add_field(field2) + + def JsonableHandler(Obj): + if hasattr(Obj, 'jsonable'): + return Obj.jsonable() + else: + raise 'Object of type %s with value of %s is not JSON serializable' % ( + type(Obj), repr(Obj)) + + json_form = json.dumps(form, default=JsonableHandler) + actual = json.loads(json_form) + + expected = { + "fields": [ + { + "default_value": "I seek the grail!", + "label": "What is your quest?", + "id": "quest", + "properties": [], + "type": "text", + "validation": [], + }, + { + "default_value": "", + "id": "color", + "label": "What is your favorite color?", + "options": [ + {"id": "red", "name": "Red"}, + {"id": "orange", "name": "Green"}, + {"id": "yellow", "name": "Yellow"}, + {"id": "green", "name": "Green"}, + {"id": "blue", "name": "Blue"}, + {"id": "indigo", "name": "Indigo"}, + {"id": "violet", "name": "Violet"}, + {"id": "other", "name": "Other"}, + ], + "properties": [ + {"id": "description", "value": "You know what to do."}, + ], + "type": "enum", + "validation": [ + {"name": "maxlength", "config": "25"}, + ], + } + ], + "key": "formKey", + } + + expected_parsed = json.loads(json.dumps(expected)) + + self.maxDiff = None + self.assertDictEqual(actual, expected_parsed) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(UserTaskSpecTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/specs/__init__.py b/tests/SpiffWorkflow/camunda/specs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/data/__init__.py b/tests/SpiffWorkflow/data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/data/empty1.xml b/tests/SpiffWorkflow/data/empty1.xml new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/data/empty2.xml b/tests/SpiffWorkflow/data/empty2.xml new file mode 100644 index 000000000..4adc209d8 --- /dev/null +++ b/tests/SpiffWorkflow/data/empty2.xml @@ -0,0 +1 @@ + diff --git a/tests/SpiffWorkflow/data/spiff/__init__.py b/tests/SpiffWorkflow/data/spiff/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.path new file mode 100644 index 000000000..d94b3e03d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.path @@ -0,0 +1,10 @@ +Start + first + task_f1 + task_f2 + task_f3 + excl_choice_1 + join + End + task_g2 + foo diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.xml new file mode 100644 index 000000000..096fb7fe1 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/acyclic_synchronizing_merge.xml @@ -0,0 +1,49 @@ + + + Pattern 37 (Acyclic Synchronizing Merge) + + + first + + + + + task_f1 + task_f2 + task_f3 + + + + + join + + + join + + + excl_choice_1 + + + + + task_g1 + + + task_g2 
+ + + + + join + + + + foo + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.path b/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.path new file mode 100644 index 000000000..3009ea82e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.path @@ -0,0 +1,10 @@ +Start + first + excl_choice_1 + go_to_repetition + return_to_first + first + excl_choice_1 + task_c1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.xml b/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.xml new file mode 100644 index 000000000..7c4e4e994 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/arbitrary_cycles.xml @@ -0,0 +1,38 @@ + + + Pattern 10 (Arbitrary Cycles) + + + + + first + + + + + excl_choice_1 + + + + + task_c1 + + + go_to_repetition + + + + + + + first + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.path b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.path new file mode 100644 index 000000000..aa1fa1eb1 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.path @@ -0,0 +1,14 @@ +Start + first + task_f1 + struct_discriminator_1 + excl_choice_1 + return_to_first + first + task_f1 + struct_discriminator_1 + excl_choice_1 + last + End + task_f2 + task_f3 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.xml b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.xml new file mode 100644 index 000000000..d011b9c09 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_discriminator.xml @@ -0,0 +1,45 @@ + + + Pattern 28 (Blocking Discriminator) + + + first + + + + + task_f1 + task_f2 + task_f3 + + + + + struct_discriminator_1 + + + struct_discriminator_1 + + + struct_discriminator_1 + + + + + excl_choice_1 + + + + + last + + + first + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.path b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.path new file mode 100644 index 000000000..ac1c7c858 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.path @@ -0,0 +1,15 @@ +Start + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + excl_choice_1 + return_to_multi_choice_1 + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + excl_choice_1 + last + End + task_e4 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.xml b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.xml new file mode 100644 index 000000000..a8706bf9e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/blocking_partial_join.xml @@ -0,0 +1,61 @@ + + + Pattern 31 (Blocking Partial Join) + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + task_e4 + + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + excl_choice_1 + + + + + last + + + multi_choice_1 + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.path new file mode 100644 index 000000000..8fdb95b6d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.path @@ -0,0 +1,4 @@ +Start + one1 + one2 + cancel diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.xml 
b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.xml new file mode 100644 index 000000000..d3952fe91 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_case.xml @@ -0,0 +1,41 @@ + + + Pattern 20 (Cancel Job) + + + + one1 + two1 + + + + + one2 + + + cancel + + + + + + + two2a + two2b + + + two3 + + + two3 + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.path new file mode 100644 index 000000000..5c6c45de9 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.path @@ -0,0 +1,14 @@ +Start + first + add_instance_1 + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + task_g1 + task_g2 + cancel_multi_instance_1 + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.xml new file mode 100644 index 000000000..c39533e78 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_multi_instance_task.xml @@ -0,0 +1,59 @@ + + + Pattern 15 (Multiple Instances without a priori Run-Time Knowledge) + + + first + + + + + add_instance_1 + multi_instance_1 + cancel_multi_instance_1 + + + + + join + + + + + + + task_g1 + task_g2 + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + join + + + + + + multi_instance_1 + join + + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.path new file mode 100644 index 000000000..28d132ff4 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.path @@ -0,0 +1,9 @@ +Start + one1 + one2 + cancel + last + End + two1 + two2b + two3 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.xml new file mode 100644 index 000000000..3d408cb42 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_region.xml @@ -0,0 +1,48 @@ + + + Pattern 19 (Cancel Task) + + + + one1 + two1 + + + + + one2 + + + cancel + + + two2a + two4 + last + + + + + + two2a + two2b + + + two3 + + + two3 + + + two4 + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.path new file mode 100644 index 000000000..981bb91de --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.path @@ -0,0 +1,11 @@ +Start + one1 + one2 + cancel + last + End + two1 + two2b + two3 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.xml new file mode 100644 index 000000000..28c5278c8 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancel_task.xml @@ -0,0 +1,44 @@ + + + Pattern 25 (Cancel Region) + + + + one1 + two1 + + + + + one2 + + + cancel + + + two2a + last + + + + + + two2a + two2b + + + two3 + + + two3 + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.path new file mode 100644 index 000000000..a56b255eb --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.path @@ -0,0 +1,12 @@ +Start + first + task_f1 + struct_discriminator_1 + 
excl_choice_1 + return_to_first + first + task_f1 + struct_discriminator_1 + excl_choice_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.xml new file mode 100644 index 000000000..cb6c23372 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_discriminator.xml @@ -0,0 +1,45 @@ + + + Pattern 29 (Cancelling Discriminator) + + + + first + + + + task_f1 + task_f2 + task_f3 + + + + + struct_discriminator_1 + + + struct_discriminator_1 + + + struct_discriminator_1 + + + + + excl_choice_1 + + + + + last + + + first + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.path new file mode 100644 index 000000000..74fd5b2fb --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.path @@ -0,0 +1,13 @@ +Start + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + excl_choice_1 + return_to_multi_choice_1 + multi_choice_1 + task_e1 + struct_synch_merge_1 + excl_choice_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.xml new file mode 100644 index 000000000..55bdb5015 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join.xml @@ -0,0 +1,61 @@ + + + Pattern 32 (Cancelling Partial Join) + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + task_e4 + + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + excl_choice_1 + + + + + last + + + multi_choice_1 + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.path b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.path new file mode 100644 index 000000000..076e8d8e6 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.path @@ -0,0 +1,11 @@ +Start + add_instance_1 + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + join_1 + join_2 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.xml b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.xml new file mode 100644 index 000000000..f9e0df71a --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/cancelling_partial_join_for_multi_instance.xml @@ -0,0 +1,47 @@ + + + Pattern 35 (Cancelling Partial Join for Multiple Instances) + + + + add_instance_1 + multi_instance_1 + + + + + + task_g1 + task_g2 + + + + + join_1 + + + join_1 + + + + + join_2 + + + + + + join_2 + + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.path b/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.path new file mode 100644 index 000000000..04daa7da1 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.path @@ -0,0 +1,10 @@ +Start + multi_instance_1 + task_g1 + task_g1 + task_g1 + trigger_join + join_1 + join_2 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.xml 
b/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.xml new file mode 100644 index 000000000..f0ddbcf33 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/complete_multiple_instance_activity.xml @@ -0,0 +1,38 @@ + + + Pattern 27 (Complete Multiple Instance Task) + + + + multi_instance_1 + trigger_join + + + + + + task_g1 + + + + join_1 + + + + join_2 + + + + + + join_2 + + + + + last + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.path b/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.path new file mode 100644 index 000000000..38d31ccb5 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.path @@ -0,0 +1,18 @@ +Start + one_1 + one_2 + one_3 + one_4 + two_2 + two_3 + two_4 + last + End + last + End + two_1 + two_2 + two_3 + two_4 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.xml b/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.xml new file mode 100644 index 000000000..cb3e93b88 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/critical_section.xml @@ -0,0 +1,50 @@ + + + Pattern 39 (Critical Section) + + + + one_1 + two_1 + + + + + lock_one + one_2 + + + lock_two + one_3 + + + lock_three + two_2 + one_4 + + + lock_four + last + + + + + + two_2 + + + two_3 + + + two_4 + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.path b/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.path new file mode 100644 index 000000000..8ed93edb4 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.path @@ -0,0 +1,10 @@ +Start + make_choice + last + End + first + deferred_choice_1 + task_e2 + task_e4 + join_1 + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.xml b/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.xml new file mode 100644 index 000000000..2a6334fd6 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/deferred_choice.xml @@ -0,0 +1,76 @@ + + + Pattern 16 (Deferred Choice) + + + + make_choice + first + + + + + deferred_choice_1 + + + + + task_e1 + task_e2 + + + task_e3 + + + + task_e4 + + + + task_e5 + + + + task_e6 + + + + + join_1 + + + join_1 + + + join_1 + + + join_1 + + + join_1 + + + join_1 + + + + end + + + + + + task_e2 + task_e4 + task_e6 + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.path b/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.path new file mode 100644 index 000000000..c870bba11 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.path @@ -0,0 +1,13 @@ +Start + add_instance_1 + multi_instance_1 + task_g1 + task_g2 + join_1 + join_2 + last + End + task_g1 + task_g2 + task_g1 + task_g2 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.xml b/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.xml new file mode 100644 index 000000000..2cab3c6cb --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/dynamic_partial_join_for_multi_instance.xml @@ -0,0 +1,47 @@ + + + Pattern 36 (Dynamic Partial Join for Multiple Instances) + + + + add_instance_1 + multi_instance_1 + + + + + + task_g1 + task_g2 + + + + + join_1 + + + join_1 + + + + + join_2 + + + + + + join_2 + + + + + + last + + + + + end + + diff --git 
a/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.path b/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.path new file mode 100644 index 000000000..0d8e14e13 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.path @@ -0,0 +1,8 @@ +Start + first + excl_choice_1 + task_c1 + excl_choice_2 + task_d2 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.xml b/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.xml new file mode 100644 index 000000000..78e4eb2ab --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/exclusive_choice.xml @@ -0,0 +1,67 @@ + + + Pattern 4 (Exclusive Choice) + + + + first + + + + + excl_choice_1 + + + + + task_c1 + + + task_c2 + + + + task_c3 + + + + + + excl_choice_2 + + + excl_choice_2 + + + excl_choice_2 + + + + + task_d1 + + + task_d2 + + + + task_d3 + + + + + + last + + + last + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.path b/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.path new file mode 100644 index 000000000..8fdb95b6d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.path @@ -0,0 +1,4 @@ +Start + one1 + one2 + cancel diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.xml b/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.xml new file mode 100644 index 000000000..f9af8706c --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/explicit_termination.xml @@ -0,0 +1,41 @@ + + + Pattern 43 (Explicit Termination) + + + + one1 + two1 + + + + + one2 + + + cancel + + + + + + + two2a + two2b + + + two3 + + + two3 + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.path new file mode 100644 index 000000000..4c5758087 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.path @@ -0,0 +1,21 @@ +Start + first + task_a1 + task_b1 + task_c1 + loop_back_to_c1_once + join + End + return_to_task_c1 + task_c1 + loop_back_to_c1_once + task_c2 + go_to_stub + stub_1 + loop_back_to_stub_1_once + return_to_stub_1 + stub_1 + loop_back_to_stub_1_once + go_to_stub_3 + stub_3 + foo diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.xml new file mode 100644 index 000000000..908781436 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/general_synchronizing_merge.xml @@ -0,0 +1,89 @@ + + + Pattern 37 (Acyclic Synchronizing Merge) + + + first + + + + + task_a1 + task_b1 + task_c1 + + + + + join + + + join + + + loop_back_to_c1_once + + + + + task_c1 + + + task_c2 + + + + + go_to_stub + + + + + task_g1 + + + stub_1 + + + + + join + + + + loop_back_to_stub_1_once + + + + + stub_1 + + + go_to_stub_3 + + + + + + stub_2 + + + stub_3 + + + + + foo + + + + foo + + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.path b/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.path new file mode 100644 index 000000000..7fbb5ffff --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.path @@ -0,0 +1,22 @@ +Start + first + task_e1 + task_f1 + task_e2 + task_f2 + task_e3 + task_f3 + struct_synch_merge_1 + excl_choice_1 + return_to_first + first + 
task_e1 + task_f1 + task_e2 + task_f2 + task_e3 + task_f3 + struct_synch_merge_1 + excl_choice_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.xml b/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.xml new file mode 100644 index 000000000..22b879d4d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/generalized_and_join.xml @@ -0,0 +1,61 @@ + + + Pattern 33 (Generalized AND-Join) + + + first + + + + + + task_e1 + task_e2 + task_e3 + + + + + task_f1 + + + task_f2 + + + task_f3 + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + excl_choice_1 + + + + + last + + + first + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.path b/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.path new file mode 100644 index 000000000..399ad2b4a --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.path @@ -0,0 +1,5 @@ +Start + first + task_f1 + task_f2 + task_f3 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.xml b/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.xml new file mode 100644 index 000000000..c887a1cf3 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/implicit_termination.xml @@ -0,0 +1,21 @@ + + + Pattern 11 (Implicit Termination) + + + + first + + + + task_f1 + task_f2 + task_f3 + + + + + + + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.path b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.path new file mode 100644 index 000000000..5f0d47b66 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.path @@ -0,0 +1,7 @@ +Start + one1 + two1 + two2 + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.xml b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.xml new file mode 100644 index 000000000..6ff3f7f1e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_parallel_routing.xml @@ -0,0 +1,38 @@ + + + Pattern 17 (Interleaved Parallel Routing) + + + + one1 + two1 + + + + + one_task_at_a_time + join + + + + + + one_task_at_a_time + two2 + + + one_task_at_a_time + join + + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.path b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.path new file mode 100644 index 000000000..6a42a92cd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.path @@ -0,0 +1,9 @@ +Start + first + one1 + two1 + three1 + four1 + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.xml b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.xml new file mode 100644 index 000000000..7cf4a99dd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/interleaved_routing.xml @@ -0,0 +1,47 @@ + + + Pattern 40 (Interleaved Routing) + + + + first + + + + + one_task_at_a_time + one1 + two1 + three1 + four1 + + + + + one_task_at_a_time + join + + + one_task_at_a_time + join + + + one_task_at_a_time + join + + + one_task_at_a_time + join + + + + + one_task_at_a_time + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/milestone.path b/tests/SpiffWorkflow/data/spiff/control-flow/milestone.path new file mode 100644 index 000000000..b0e4a2edf 
--- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/milestone.path @@ -0,0 +1,10 @@ +Start + task_e1 + task_f1 + task_f2 + task_e2 + task_e3 + last + End + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/milestone.xml b/tests/SpiffWorkflow/data/spiff/control-flow/milestone.xml new file mode 100644 index 000000000..95534fd56 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/milestone.xml @@ -0,0 +1,31 @@ + + + Pattern 18 (Milestone) + + + task_e1 + task_f1 + + + + task_e2 + + + task_e3 + + + last + + + + task_f2 + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.path new file mode 100644 index 000000000..aaff59dcd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.path @@ -0,0 +1,8 @@ +Start + multi_choice_1 + task_e1 + last + End + task_e3 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.xml new file mode 100644 index 000000000..9c6c004ae --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_choice.xml @@ -0,0 +1,40 @@ + + + Pattern 6 (Multi-Choice) + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + + + last + + + last + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.path new file mode 100644 index 000000000..9dcfb3b4a --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.path @@ -0,0 +1,9 @@ +Start + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + struct_synch_merge_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.xml new file mode 100644 index 000000000..187c45312 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_design_time_knowledge.xml @@ -0,0 +1,32 @@ + + + Pattern 13 (Multiple Instances with a priori Design-Time Knowledge) + + + + multi_instance_1 + + + + + task_g1 + + + + + task_g2 + + + struct_synch_merge_1 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.path new file mode 100644 index 000000000..9dcfb3b4a --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.path @@ -0,0 +1,9 @@ +Start + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + struct_synch_merge_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.xml new file mode 100644 index 000000000..81a1cc1a7 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_with_a_priori_run_time_knowledge.xml @@ -0,0 +1,34 @@ + + + Pattern 14 (Multiple Instances with a priori Run-Time Knowledge) + + + multi_instance_1 + + + + + task_g1 + + + + + task_g2 + + + struct_synch_merge_1 + + + + + last + + + + + end + 
+ diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.path new file mode 100644 index 000000000..e5354f1cd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.path @@ -0,0 +1,13 @@ +Start + add_instance_1 + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + task_g1 + task_g2 + struct_synch_merge_1 + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.xml new file mode 100644 index 000000000..5ee7761df --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_a_priori.xml @@ -0,0 +1,47 @@ + + + Pattern 15 (Multiple Instances without a priori Run-Time Knowledge) + + + + add_instance_1 + multi_instance_1 + + + + + join + + + + + + + task_g1 + task_g2 + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + join + + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.path new file mode 100644 index 000000000..258ce1841 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.path @@ -0,0 +1,20 @@ +Start + multi_instance_1 + task_g1 + last + End + task_g2 + last + End + task_g1 + last + End + task_g2 + last + End + task_g1 + last + End + task_g2 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.xml new file mode 100644 index 000000000..0052dd33b --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_instance_without_synch.xml @@ -0,0 +1,27 @@ + + + Pattern 12 (Multiple Instances without Synchronization) + + + multi_instance_1 + + + + + task_g1 + task_g2 + + + + + last + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.path new file mode 100644 index 000000000..a26e33b5e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.path @@ -0,0 +1,10 @@ +Start + multi_choice_1 + task_e1 + merge + last + End + task_e3 + merge + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.xml new file mode 100644 index 000000000..9b50f57dd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/multi_merge.xml @@ -0,0 +1,46 @@ + + + Pattern 8 (Mutli-Merge) + + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + + + merge + + + merge + + + merge + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.path b/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.path new file mode 100644 index 000000000..e288c161d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.path @@ -0,0 +1,11 @@ +Start + first + task_f1 + last + End + task_f2 + last + End + task_f3 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.xml b/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.xml new file mode 100644 index 000000000..045265842 --- /dev/null +++ 
b/tests/SpiffWorkflow/data/spiff/control-flow/parallel_split.xml @@ -0,0 +1,30 @@ + + + Pattern 2 (Parallel Split) + + + + first + + + + task_f1 + task_f2 + task_f3 + + + + last + + + last + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.path b/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.path new file mode 100644 index 000000000..5272df1c2 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.path @@ -0,0 +1,22 @@ +Start + trigger_before_1 + trigger_before_2 + last + End + add_instance_1 + last + End + multi_instance_1 + task_g1 + last + End + task_g1 + last + End + trigger_after_1 + add_instance_1 + task_g1 + last + End + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.xml b/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.xml new file mode 100644 index 000000000..f37a07ba0 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/persistent_trigger.xml @@ -0,0 +1,48 @@ + + + Pattern 15 (Multiple Instances without a priori Run-Time Knowledge) + + + + trigger_before_1 + add_instance_1 + multi_instance_1 + trigger_after_1 + + + + + trigger_before_2 + + + last + + + + + + last + + + + + + task_g1 + + + + last + + + + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/recursion.path b/tests/SpiffWorkflow/data/spiff/control-flow/recursion.path new file mode 100644 index 000000000..4b406b8f8 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/recursion.path @@ -0,0 +1,11 @@ +Start + first + excl_choice_1 + sub_workflow_1 + Start + first + excl_choice_1 + last + End + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/recursion.xml b/tests/SpiffWorkflow/data/spiff/control-flow/recursion.xml new file mode 100644 index 000000000..6440145d1 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/recursion.xml @@ -0,0 +1,36 @@ + + + Pattern 22 (Recursion) + + + first + + + + excl_choice_1 + + + + + last + + + sub_workflow_1 + + + + + + + + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/sequence.path b/tests/SpiffWorkflow/data/spiff/control-flow/sequence.path new file mode 100644 index 000000000..e5c7316be --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/sequence.path @@ -0,0 +1,5 @@ +Start + first + second + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/sequence.xml b/tests/SpiffWorkflow/data/spiff/control-flow/sequence.xml new file mode 100644 index 000000000..0b9b58750 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/sequence.xml @@ -0,0 +1,20 @@ + + + Pattern 1 (Sequence) + + + first + + + + second + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.path new file mode 100644 index 000000000..6a9d94d25 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.path @@ -0,0 +1,7 @@ +Start + first + excl_choice_1 + task_c1 + task_d1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.xml new file mode 100644 index 000000000..4464f4293 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/simple_merge.xml @@ -0,0 +1,41 @@ + + + Pattern 5 (Simple Merge) + + + + first + + + + + excl_choice_1 + + + + + task_c1 + + + task_c2 + + + + + + task_d1 + + + 
task_d1 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.path b/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.path new file mode 100644 index 000000000..6037a0183 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.path @@ -0,0 +1,13 @@ +Start + add_instance_1 + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + join_1 + join_2 + last + End + task_g1 + task_g2 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.xml b/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.xml new file mode 100644 index 000000000..15fc1f910 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/static_partial_join_for_multi_instance.xml @@ -0,0 +1,47 @@ + + + Pattern 34 (Static Partial Join for Multiple Instances) + + + + add_instance_1 + multi_instance_1 + + + + + + task_g1 + task_g2 + + + + + join_1 + + + join_1 + + + + + join_2 + + + + + + join_2 + + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.path b/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.path new file mode 100644 index 000000000..ac4192e7c --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.path @@ -0,0 +1,7 @@ +Start + multi_choice_1 + task_a1 + discriminator + last + End + task_a3 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.xml b/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.xml new file mode 100644 index 000000000..7f84b34d3 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_discriminator.xml @@ -0,0 +1,45 @@ + + + Pattern 9 (Structured Discriminator) + + + multi_choice_1 + + + + + + + task_a1 + + + + task_a2 + + + + task_a3 + + + + + + discriminator + + + discriminator + + + discriminator + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.path b/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.path new file mode 100644 index 000000000..02ad65cc4 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.path @@ -0,0 +1,8 @@ +Start + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + last + End + task_e4 diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.xml b/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.xml new file mode 100644 index 000000000..1de54ff21 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_partial_join.xml @@ -0,0 +1,52 @@ + + + Pattern 30 (Structured Partial Join) + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + task_e4 + + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.path new file mode 100644 index 000000000..4fdd836cf --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.path @@ -0,0 +1,9 @@ +Start + multi_choice_1 + task_e1 + task_e3 + task_g3a + task_g3b + struct_synch_merge_1 + last + End diff --git 
a/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.xml new file mode 100644 index 000000000..ff788fde2 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/structured_synchronizing_merge.xml @@ -0,0 +1,53 @@ + + + Pattern 7 (Structured Synchronizing Merge) + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + task_g3a + task_g3b + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.path b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.path new file mode 100644 index 000000000..e3a1fde8e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.path @@ -0,0 +1,11 @@ +Start + first + sub_workflow_1 + Start + first + last + End + second + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.xml b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.xml new file mode 100644 index 000000000..87e3f85ec --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join.xml @@ -0,0 +1,37 @@ + + + Pattern 10 (Block Task to Sub-Workflow Decomposition) + + + + first + + + + sub_workflow_1 + second + + + + join + + + + + + + + + + + join + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join_inner.xml b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join_inner.xml new file mode 100644 index 000000000..835a836f9 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/subworkflow_to_join_inner.xml @@ -0,0 +1,17 @@ + + + Pattern 10 (Block Task to Sub-Workflow Decomposition) - Sub-Workflow + + + first + + + + last + + + + end + + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.path b/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.path new file mode 100644 index 000000000..a20827006 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.path @@ -0,0 +1,9 @@ +Start + first + task_f1 + task_f2 + task_f3 + task_g3a + task_g3b + join + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.xml b/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.xml new file mode 100644 index 000000000..239cc892e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/synchronization.xml @@ -0,0 +1,39 @@ + + + Pattern 3 (Synchronization) + + + + first + + + + task_f1 + task_f2 + task_f3 + + + + + join + + + join + + + task_g3a + task_g3b + + + + join + + + join + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.path b/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.path new file mode 100644 index 000000000..e80f960b7 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.path @@ -0,0 +1,23 @@ +Start + thread_split_1 + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + thread_merge_1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.xml b/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.xml new file mode 100644 index 000000000..fce686cad --- /dev/null +++ 
b/tests/SpiffWorkflow/data/spiff/control-flow/thread_merge.xml @@ -0,0 +1,46 @@ + + + Pattern 41 (Thread Merge) + + + + thread_split_1 + + + + + task_f1 + task_g1 + + + + + task_f2 + + + join + + + + + task_g2 + + + join + + + + + thread_merge_1 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.path b/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.path new file mode 100644 index 000000000..c3a3dea65 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.path @@ -0,0 +1,26 @@ +Start + thread_split_1 + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + last + End + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + last + End + ThreadStart + task_f1 + task_f2 + task_g1 + task_g2 + join + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.xml b/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.xml new file mode 100644 index 000000000..fbe8f45ac --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/thread_split.xml @@ -0,0 +1,41 @@ + + + Pattern 41 (Thread Merge) + + + + thread_split_1 + + + + + task_f1 + task_g1 + + + + + task_f2 + + + join + + + + + task_g2 + + + join + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.path b/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.path new file mode 100644 index 000000000..09f82df11 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.path @@ -0,0 +1,12 @@ +Start + add_instance_1 + add_instance_2 + last + End + multi_instance_1 + task_g1 + last + End + task_g1 + last + End diff --git a/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.xml b/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.xml new file mode 100644 index 000000000..c66660847 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/control-flow/transient_trigger.xml @@ -0,0 +1,34 @@ + + + Pattern 15 (Multiple Instances without a priori Run-Time Knowledge) + + + + add_instance_1 + multi_instance_1 + + + + + add_instance_2 + + + last + + + + + + task_g1 + + + + last + + + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/block_data.xml b/tests/SpiffWorkflow/data/spiff/data/block_data.xml new file mode 100644 index 000000000..265b5c75e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_data.xml @@ -0,0 +1,27 @@ + + + Pattern 2 (Block Data) + + + + first + + + + sub_workflow_1 + + + + + + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/block_data.xml.data b/tests/SpiffWorkflow/data/spiff/data/block_data.xml.data new file mode 100644 index 000000000..3e157d00e --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_data.xml.data @@ -0,0 +1,9 @@ +Start: data_1=outer/ +first: data_1=outer/ +sub_workflow_1: data_1=outer/ +Start: data_1=inner/ +first: data_1=inner/ +last: data_1=inner/ +End: data_1=inner/ +last: data_1=outer/ +End: data_1=outer/ diff --git a/tests/SpiffWorkflow/data/spiff/data/block_data_inner.xml b/tests/SpiffWorkflow/data/spiff/data/block_data_inner.xml new file mode 100644 index 000000000..9d0ddda05 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_data_inner.xml @@ -0,0 +1,18 @@ + + + Pattern 2 (Block Data) - Sub-Workflow + + + + first + + + + last + + + + end + + + diff --git a/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml new file mode 100644 index 
000000000..62cfaf7b6 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml @@ -0,0 +1,28 @@ + + + Pattern 10 (Block Task to Sub-Workflow Decomposition) + + + + first + + + + sub_workflow_1 + + + + + + + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml.data b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml.data new file mode 100644 index 000000000..97ea252af --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow.xml.data @@ -0,0 +1,9 @@ +Start: data_1=outer/ +first: data_1=outer/ +sub_workflow_1: data_1=outer/ +Start: sub_workflow_data=outer/ +first: sub_workflow_data=outer/ +last: sub_workflow_data=outer/ +End: sub_workflow_data=outer/ +last: data_1=outer/ +End: data_1=outer/ diff --git a/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow_inner.xml b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow_inner.xml new file mode 100644 index 000000000..835a836f9 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/block_to_subworkflow_inner.xml @@ -0,0 +1,17 @@ + + + Pattern 10 (Block Task to Sub-Workflow Decomposition) - Sub-Workflow + + + first + + + + last + + + + end + + + diff --git a/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml new file mode 100644 index 000000000..52c7d5028 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml @@ -0,0 +1,27 @@ + + + Pattern 11 (Sub-Workflow Decomposition to Block Task) + + + first + + + + sub_workflow_1 + + + + + + + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml.data b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml.data new file mode 100644 index 000000000..b8ee56205 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block.xml.data @@ -0,0 +1,9 @@ +Start: / +first: / +sub_workflow_1: / +Start: sub_workflow_data=inner/ +first: sub_workflow_data=inner/ +last: sub_workflow_data=inner/ +End: sub_workflow_data=inner/ +last: data_1=inner/ +End: data_1=inner/ diff --git a/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block_inner.xml b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block_inner.xml new file mode 100644 index 000000000..7ebecf8f1 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/subworkflow_to_block_inner.xml @@ -0,0 +1,17 @@ + + + Pattern 11 (Sub-Workflow Decomposition to Block Task) - Sub-Workflow + + + + first + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/task_data.xml b/tests/SpiffWorkflow/data/spiff/data/task_data.xml new file mode 100644 index 000000000..b78d2af5d --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/task_data.xml @@ -0,0 +1,18 @@ + + + Pattern 1 (Task Data) + + + + first + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/task_data.xml.data b/tests/SpiffWorkflow/data/spiff/data/task_data.xml.data new file mode 100644 index 000000000..e763ac785 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/task_data.xml.data @@ -0,0 +1,4 @@ +Start: data_attribute=at_start/ +first: data_attribute=at_start/data_attribute=at_first +last: data_attribute=at_start/ +End: data_attribute=at_start/ diff --git a/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml b/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml new file mode 100644 index 000000000..663545ea3 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml @@ -0,0 
+1,18 @@ + + + Pattern 9 (Task To Task) + + + + first + + + + + last + + + + end + + diff --git a/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml.data b/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml.data new file mode 100644 index 000000000..31a383b14 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/data/task_to_task.xml.data @@ -0,0 +1,4 @@ +Start: data_attribute_1=at_start/ +first: data_attribute_1=at_start;data_attribute_2=at_first/ +last: data_attribute_1=at_start;data_attribute_2=at_first/ +End: data_attribute_1=at_start;data_attribute_2=at_first/ diff --git a/tests/SpiffWorkflow/data/spiff/resource/.gitignore b/tests/SpiffWorkflow/data/spiff/resource/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/data/spiff/workflow1.path b/tests/SpiffWorkflow/data/spiff/workflow1.path new file mode 100644 index 000000000..0b5bc042b --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/workflow1.path @@ -0,0 +1,41 @@ +Start + task_a1 + task_a2 + task_b1 + task_b2 + synch_1 + excl_choice_1 + task_c1 + excl_choice_2 + task_d3 + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + task_f1 + struct_discriminator_1 + excl_choice_3 + return_to_excl_choice_1 + excl_choice_1 + task_c1 + excl_choice_2 + task_d3 + multi_choice_1 + task_e1 + task_e3 + struct_synch_merge_1 + task_f1 + struct_discriminator_1 + excl_choice_3 + multi_instance_1 + task_g1 + task_g2 + task_g1 + task_g2 + task_g1 + task_g2 + struct_synch_merge_2 + last + End + task_f2 + task_f3 diff --git a/tests/SpiffWorkflow/data/spiff/workflow1.py b/tests/SpiffWorkflow/data/spiff/workflow1.py new file mode 100644 index 000000000..fba204bd6 --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/workflow1.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- + +from SpiffWorkflow.specs import ExclusiveChoice, Join, MultiChoice, MultiInstance, Simple, WorkflowSpec +from SpiffWorkflow.operators import Attrib, Equal, NotEqual + + +class TestWorkflowSpec(WorkflowSpec): + + def __init__(self): + WorkflowSpec.__init__(self) + # Build one branch. + a1 = Simple(self, 'task_a1') + self.start.connect(a1) + + a2 = Simple(self, 'task_a2') + a1.connect(a2) + + # Build another branch. + b1 = Simple(self, 'task_b1') + self.start.connect(b1) + + b2 = Simple(self, 'task_b2') + b1.connect(b2) + + # Merge both branches (synchronized). + synch_1 = Join(self, 'synch_1') + a2.connect(synch_1) + b2.connect(synch_1) + + # If-condition that does not match. + excl_choice_1 = ExclusiveChoice(self, 'excl_choice_1') + synch_1.connect(excl_choice_1) + + c1 = Simple(self, 'task_c1') + excl_choice_1.connect(c1) + + c2 = Simple(self, 'task_c2') + cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute2')) + excl_choice_1.connect_if(cond, c2) + + c3 = Simple(self, 'task_c3') + excl_choice_1.connect_if(cond, c3) + + # If-condition that matches. + excl_choice_2 = ExclusiveChoice(self, 'excl_choice_2') + c1.connect(excl_choice_2) + c2.connect(excl_choice_2) + c3.connect(excl_choice_2) + + d1 = Simple(self, 'task_d1') + excl_choice_2.connect(d1) + + d2 = Simple(self, 'task_d2') + excl_choice_2.connect_if(cond, d2) + + d3 = Simple(self, 'task_d3') + cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute1')) + excl_choice_2.connect_if(cond, d3) + + # If-condition that does not match. 
+ multichoice = MultiChoice(self, 'multi_choice_1') + d1.connect(multichoice) + d2.connect(multichoice) + d3.connect(multichoice) + + e1 = Simple(self, 'task_e1') + multichoice.connect_if(cond, e1) + + e2 = Simple(self, 'task_e2') + cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute2')) + multichoice.connect_if(cond, e2) + + e3 = Simple(self, 'task_e3') + cond = Equal(Attrib('test_attribute2'), Attrib('test_attribute2')) + multichoice.connect_if(cond, e3) + + # StructuredSynchronizingMerge + syncmerge = Join(self, 'struct_synch_merge_1', 'multi_choice_1') + e1.connect(syncmerge) + e2.connect(syncmerge) + e3.connect(syncmerge) + + # Implicit parallel split. + f1 = Simple(self, 'task_f1') + syncmerge.connect(f1) + + f2 = Simple(self, 'task_f2') + syncmerge.connect(f2) + + f3 = Simple(self, 'task_f3') + syncmerge.connect(f3) + + # Discriminator + discrim_1 = Join(self, + 'struct_discriminator_1', + 'struct_synch_merge_1', + threshold=1) + f1.connect(discrim_1) + f2.connect(discrim_1) + f3.connect(discrim_1) + + # Loop back to the first exclusive choice. + excl_choice_3 = ExclusiveChoice(self, 'excl_choice_3') + discrim_1.connect(excl_choice_3) + cond = NotEqual(Attrib('excl_choice_3_reached'), Attrib('two')) + excl_choice_3.connect_if(cond, excl_choice_1) + + # Split into 3 branches, and implicitly split twice in addition. + multi_instance_1 = MultiInstance(self, 'multi_instance_1', times=3) + excl_choice_3.connect(multi_instance_1) + + # Parallel tasks. + g1 = Simple(self, 'task_g1') + g2 = Simple(self, 'task_g2') + multi_instance_1.connect(g1) + multi_instance_1.connect(g2) + + # StructuredSynchronizingMerge + syncmerge2 = Join(self, 'struct_synch_merge_2', 'multi_instance_1') + g1.connect(syncmerge2) + g2.connect(syncmerge2) + + # Add a final task. + last = Simple(self, 'last') + syncmerge2.connect(last) + + # Add another final task :-). + end = Simple(self, 'End') + last.connect(end) diff --git a/tests/SpiffWorkflow/data/spiff/workflow1.xml b/tests/SpiffWorkflow/data/spiff/workflow1.xml new file mode 100644 index 000000000..d33f71dbd --- /dev/null +++ b/tests/SpiffWorkflow/data/spiff/workflow1.xml @@ -0,0 +1,168 @@ + + + + A test workflow that contains all possible tasks. 
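The workflow1.xml content that follows describes the same spec that TestWorkflowSpec in workflow1.py builds programmatically above. A minimal sketch of instantiating and running that programmatic spec (illustrative only, not one of the files in this diff), assuming the legacy core API exposed by this revision of SpiffWorkflow (Workflow, complete_all, get_tasks); exact names may differ between versions:

from SpiffWorkflow import Workflow

from tests.SpiffWorkflow.data.spiff.workflow1 import TestWorkflowSpec

spec = TestWorkflowSpec()      # the spec assembled task by task above
workflow = Workflow(spec)
workflow.complete_all()        # drive every READY task to completion

# Task-spec names present in the resulting task tree; the expected
# completion trace for the canonical test run lives in workflow1.path.
print([t.task_spec.name for t in workflow.get_tasks()])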
+ + + + + task_a1 + task_b1 + + + + + task_a2 + + + synch_1 + + + + + + task_b2 + + + synch_1 + + + + + + excl_choice_1 + + + + + task_c1 + + + task_c2 + + + + task_c3 + + + + + + excl_choice_2 + + + excl_choice_2 + + + excl_choice_2 + + + + + task_d1 + + + task_d2 + + + + task_d3 + + + + + + multi_choice_1 + + + multi_choice_1 + + + multi_choice_1 + + + + + + + task_e1 + + + + task_e2 + + + + task_e3 + + + + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + struct_synch_merge_1 + + + + + task_f1 + task_f2 + task_f3 + + + + + struct_discriminator_1 + + + struct_discriminator_1 + + + struct_discriminator_1 + + + + + excl_choice_3 + + + + + multi_instance_1 + + + excl_choice_1 + + + + + + task_g1 + task_g2 + + + + + struct_synch_merge_2 + + + struct_synch_merge_2 + + + + + last + + + + + end + + diff --git a/tests/SpiffWorkflow/dmn/DecisionRunner.py b/tests/SpiffWorkflow/dmn/DecisionRunner.py new file mode 100644 index 000000000..0c266bfd4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DecisionRunner.py @@ -0,0 +1,51 @@ +import os + +from lxml import etree + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box +from SpiffWorkflow.dmn.engine.DMNEngine import DMNEngine +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser + + +class Workflow: + def __init__(self, script_engine): + self.script_engine = script_engine + self.outer_workflow = self + self.spec = Box({'file': 'my_mock_file'}) + +class TaskSpec: + def __init__(self): + self.name = "MockTestSpec" + self.description = "Mock Test Spec" + +class Task: + def __init__(self, script_engine, data): + self.data = data + self.workflow = Workflow(script_engine) + self.task_spec = TaskSpec() + +class DecisionRunner: + + def __init__(self, script_engine, filename, path=''): + self.script_engine = script_engine + fn = os.path.join(os.path.dirname(__file__), path, 'data', filename) + + with open(fn) as fh: + node = etree.parse(fh) + + self.dmnParser = DMNParser(None, node.getroot()) + self.dmnParser.parse() + + decision = self.dmnParser.decision + assert len(decision.decisionTables) == 1, \ + 'Exactly one decision table should exist! 
(%s)' \ + % (len(decision.decisionTables)) + + self.dmnEngine = DMNEngine(decision.decisionTables[0]) + + def decide(self, context): + + if not isinstance(context, dict): + context = {'input': context} + task = Task(self.script_engine, context) + return self.dmnEngine.decide(task) diff --git a/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py b/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py new file mode 100644 index 000000000..8ff40003b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py @@ -0,0 +1,24 @@ +import os +import unittest + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DmnVersionTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoad(self): + dmn = os.path.join(os.path.dirname(__file__), 'data', + 'dmn_version_20191111_test.dmn') + self.assertIsNone(self.parser.add_dmn_file(dmn)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py b/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py new file mode 100644 index 000000000..8ff40003b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py @@ -0,0 +1,24 @@ +import os +import unittest + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DmnVersionTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoad(self): + dmn = os.path.join(os.path.dirname(__file__), 'data', + 'dmn_version_20191111_test.dmn') + self.assertIsNone(self.parser.add_dmn_file(dmn)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py b/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py new file mode 100644 index 000000000..a645aa93d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py @@ -0,0 +1,43 @@ +import unittest +import os + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser + + +class DmnVersionTest(unittest.TestCase): + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoadV1_2_supported(self): + self._assert_parse_all_pass('v1_2_supported') + + def testLoadV1_2_unsupported(self): + self._assert_parse_all_fail('v1_2_unsupported') + + def testLoadV1_3_supported(self): + self._assert_parse_all_pass('v1_3_supported') + + def testLoadV1_3_unsupported(self): + self._assert_parse_all_fail('v1_3_unsupported') + + def _assert_parse_all_pass(self, dir_path): + dirname = os.path.join(os.path.dirname(__file__), 'data', 'dmn_version_test', dir_path) + self.parser.add_dmn_files_by_glob(f'{dirname}/*.dmn') + for parser in self.parser.dmn_parsers.values(): + parser.parse() + self.assertIsNotNone(parser.get_id()) + self.assertIsNotNone(parser.get_name()) + + def _assert_parse_all_fail(self, dir_path): + dirname = os.path.join(os.path.dirname(__file__), 'data', 'dmn_version_test', dir_path) + with self.assertRaises(IndexError): + self.parser.add_dmn_files_by_glob(f'{dirname}/*.dmn') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + 
unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/__init__.py b/tests/SpiffWorkflow/dmn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn new file mode 100644 index 000000000..c3ecf312e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn new file mode 100644 index 000000000..44a946465 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn new file mode 100644 index 000000000..6bcee64f5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-decision-with-listed-input-data.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-decision-with-listed-input-data.dmn new file mode 100644 index 000000000..1882e06d7 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-decision-with-listed-input-data.dmn @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-shape-with-label-text.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-shape-with-label-text.dmn new file mode 100644 index 000000000..f4aef81fe --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_supported/diagram-interchange-shape-with-label-text.dmn @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example-financial.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example-financial.dmn new file mode 100644 index 000000000..ea71d782b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example-financial.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + + (Amount *Rate/12) / (1 - (1 + Rate/12)**-Term) + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example.dmn new file mode 100644 index 000000000..6acdb378d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/chapter-11-example.dmn @@ -0,0 +1,2521 @@ + + + + + + feel:string + + "DECLINE","BUREAU","THROUGH" + + + + 
feel:string + + "INELIGIBLE","ELIGIBLE" + + + + feel:string + + "FULL","MINI","NONE" + + + + feel:string + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + feel:number + + + feel:string + + "S","M" + + + + feel:string + + "EMPLOYED","SELF-EMPLOYED","STUDENT","UNEMPLOYED" + + + + feel:boolean + + + + feel:number + + + feel:number + + + feel:number + + + + + + feel:boolean + + + feel:number + + [0..999], null + + + + + feel:string + + "DECLINE","REFER","ACCEPT" + + + + + feel:string + + "STANDARD LOAN","SPECIAL LOAN" + + + + feel:number + + + feel:number + + + feel:number + + + + + + + + + + + + + + + + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Bureau call type&nbsp;</span></strong><span lang="JA">decision logic&nbsp;invokes the Bureau call type&nbsp;</span>table, passing the output of the Pre-bureau risk category decision as the Pre-Bureau Risk Category parameter.</span></p> + + + + + + + + + + Bureau call type table + + + + + Pre-bureau risk category + + + + + + <p><span style="font-size: 10pt; font-family: arial, helvetica, sans-serif;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Strategy&nbsp;</span></strong><span lang="JA">decision logic&nbsp;defines a complete, unique-hit decision table deriving Strategy from&nbsp;</span>Eligibility and Bureau call type.</span></p> + + + + + + + + + + + Eligibility + + + "INELIGIBLE","ELIGIBLE" + + + + + Bureau call type + + + "FULL","MINI","NONE" + + + + + "DECLINE","BUREAU","THROUGH" + + + + + "INELIGIBLE" + + + - + + + "DECLINE" + + + + + "ELIGIBLE" + + + "FULL", "MINI" + + + "BUREAU" + + + + + "ELIGIBLE" + + + "NONE" + + + "THROUGH" + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Eligibility&nbsp;</span></strong><span lang="JA">decision logic invokes the Eligibility rules business&nbsp;</span>knowledge model, passing Applicant data.Age as the Age parameter, the output of the Pre-bureau risk category decision as the Pre-Bureau Risk Category parameter, and the output of the Pre-bureau affordability decision as the Pre-Bureau Affordability parameter.</span></p> + + + + + + + + + + + + + + + + Eligibility rules + + + + + Applicant data.Age + + + + + + Pre-bureau risk category + + + + + + Pre-bureau affordability + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Eligibility rules&nbsp;</span></strong><span lang="JA">decision logic&nbsp;defines a complete, priority-ordered single hit decision table&nbsp;</span>deriving Eligibility from Pre-Bureau Risk Category, Pre-Bureau Affordability and Age.</span></p> + + + + + + + + + Pre-Bureau Risk Category + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + Pre-Bureau Affordability + + + + + Age + + + + + "INELIGIBLE","ELIGIBLE" + + + + + "DECLINE" + + + - + + + - + + + "INELIGIBLE" + + + + + - + + + false + + + - + + + "INELIGIBLE" + + + + + - + + + - + + + < 18 + + + "INELIGIBLE" + + + + + - + + + - + + + - + + + "ELIGIBLE" + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Routing Rules&nbsp;</span></strong><span lang="JA">decision logic defines a complete, priority-ordered single hit decision table&nbsp;</span>deriving Routing from Post-Bureau Risk Category, Post-Bureau Affordability, 
Bankrupt and Credit Score.</span></p> + + + + + + + + + + Post-bureau risk category + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + Post-bureau affordability + + + + + Bankrupt + + + + + Credit score + + + null, [0..999] + + + + + "DECLINE","REFER","ACCEPT" + + + + + - + + + false + + + - + + + - + + + "DECLINE" + + + + + - + + + - + + + true + + + - + + + "DECLINE" + + + + + "HIGH" + + + - + + + - + + + - + + + "REFER" + + + + + - + + + - + + + - + + + < 580 + + + "REFER" + + + + + - + + + - + + + - + + + - + + + "ACCEPT" + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Routing&nbsp;</span></strong><span lang="JA">decision logic invokes the Routing rules business&nbsp;</span>knowledge model, passing Bureau data . Bankrupt as the Bankrupt parameter, Bureau data . CreditScore as the Credit Score parameter, the output of the Post-bureau risk category decision as the Post-Bureau Risk Category parameter, and the output of the Post-bureau affordability decision as the Post-Bureau Affordability parameter. Note that if Bureau data is null (due to the THROUGH strategy bypassing the Collect bureau data task) the Bankrupt and Credit Score parameters will be null.</span></p> + + + + + + + + + + + + + + + + Routing rules + + + + + Bureau data.Bankrupt + + + + + + Bureau data.CreditScore + + + + + + Post-bureau risk category + + + + + + Post-bureau affordability + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Bureau call type table&nbsp;</span></strong><span lang="JA">decision logic defines a complete, unique-hit decision table deriving&nbsp;</span>Bureau Call Type from Pre-Bureau Risk Category.</span></p> + + + + + + + Pre-Bureau Risk Category + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + "FULL","MINI","NONE" + + + + + "HIGH", "MEDIUM" + + + "FULL" + + + + + "LOW" + + + "MINI" + + + + + "VERY LOW", "DECLINE" + + + "NONE" + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"></span><span lang="JA">The&nbsp;</span><strong><span lang="JA">Credit contingency factor table&nbsp;</span></strong><span lang="JA"><span style="font-size: 10pt; font-family: arial, helvetica, sans-serif;">decision</span> logic defines a complete, unique-hit decision table&nbsp;</span>deriving Credit contingency factor from Risk Category.</p> +<p>&nbsp;</p> + + + + + + + Risk Category + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + + "HIGH", "DECLINE" + + + 0.6 + + + + + "MEDIUM" + + + 0.7 + + + + + "LOW", "VERY LOW" + + + 0.8 + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Affordability calculation&nbsp;</span></strong><span lang="JA">decision logic defines a boxed function deriving Affordability from&nbsp;</span>Monthly Income, Monthly Repayments, Monthly Expenses and Required Monthly Installment. 
One step in this calculation derives Credit contingency factor by invoking the Credit contingency factor table business</span></p> + + + + + + + + + + + + Monthly Income - (Monthly Repayments + Monthly Expenses) + + + + + + + Credit contingency factor table + + + + + Risk Category + + + + + + + + if Disposable Income * Credit Contingency Factor > Required Monthly Installment +then true +else false + + + + + Affordability + + + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Pre-bureau affordability&nbsp;</span></strong><span lang="JA">decision logic&nbsp;invokes the&nbsp;</span>Affordability calculation business knowledge model, passing Applicant data.Monthly.Income as the Monthly Income parameter, Applicant data.Monthly.Repayments as the Monthly Repayments parameter, Applicant data.Monthly.Expenses as the Monthly Expenses parameter, the output of the Pre-bureau risk category decision as the Risk Category parameter, and the output of the Required monthly installment decision as the Required Monthly Installment parameter.</span></p> + + + + + + + + + + + + + + + + Affordability calculation + + + + + Applicant data.Monthly.Income + + + + + + Applicant data.Monthly.Repayments + + + + + + Applicant data.Monthly.Expenses + + + + + + Pre-bureau risk category + + + + + + Required monthly installment + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Post-bureau affordability&nbsp;</span></strong><span lang="JA">decision logic invokes the&nbsp;</span>Affordability calculation business knowledge model, passing Applicant data.Monthly.Income as the Monthly Income parameter, Applicant data.Monthly.Repayments as the Monthly Repayments parameter, Applicant data.Monthly.Expenses as the Monthly Expenses parameter, the output of the Post-bureau risk category decision as the Risk Category parameter, and the output of the Required monthly installment decision as the Required Monthly Installment parameter.</span></p> + + + + + + + + + + + + + + + + Affordability calculation + + + + + Applicant data.Monthly.Income + + + + + + Applicant data.Monthly.Repayments + + + + + + Applicant data.Monthly.Expenses + + + + + + Post-bureau risk category + + + + + + Required monthly installment + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Post-bureau risk category&nbsp;</span></strong><span lang="JA">decision logic invokes the Post-bureau&nbsp;</span>risk category business knowledge model, passing Applicant data.ExistingCustomer as the Existing Customer parameter, Bureau data.CreditScore as the Credit Score parameter, and the output of the Application risk score decision as the Application Risk Score parameter. 
Note that if Bureau data is null (due to the THROUGH strategy bypassing the Collect bureau data task) the Credit Score parameter will be null.</span></p> + + + + + + + + + + + + + + + + Post-bureau risk category table + + + + + Applicant data.ExistingCustomer + + + + + + Bureau data.CreditScore + + + + + + Application risk score + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Post-bureau risk category table&nbsp;</span></strong><span lang="JA">decision logic defines a complete, unique-hit decision table&nbsp;</span>deriving Post-Bureau Risk Category from Existing Customer, Application Risk Score and Credit Score.</span></p> + + + + + + + + + Existing Customer + + + + + Application Risk Score + + + + + Credit Score + + + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + false + + + < 120 + + + < 590 + + + "HIGH" + + + + + false + + + < 120 + + + [590..610] + + + "MEDIUM" + + + + + false + + + < 120 + + + > 610 + + + "LOW" + + + + + false + + + [120..130] + + + < 600 + + + "HIGH" + + + + + false + + + [120..130] + + + [600..625] + + + "MEDIUM" + + + + + false + + + [120..130] + + + > 625 + + + "LOW" + + + + + false + + + > 130 + + + - + + + "VERY LOW" + + + + + true + + + <= 100 + + + < 580 + + + "HIGH" + + + + + true + + + <= 100 + + + [580..600] + + + "MEDIUM" + + + + + true + + + <= 100 + + + > 600 + + + "LOW" + + + + + true + + + > 100 + + + < 590 + + + "HIGH" + + + + + true + + + > 100 + + + [590..615] + + + "MEDIUM" + + + + + true + + + > 100 + + + > 615 + + + "LOW" + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Pre-Bureau Risk Category&nbsp;</span></strong><span lang="JA">decision logic&nbsp;invokes the Pre-bureau&nbsp;</span>risk category table business knowledge model, passing Applicant data.ExistingCustomer as the Existing Customer parameter and the output of the Application risk score decision as the Application Risk Score parameter.</span></p> + + + + + + + + + + + + + Pre-bureau risk category table + + + + + Applicant data.ExistingCustomer + + + + + + Application risk score + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Pre-bureau risk category table&nbsp;</span></strong><span lang="JA">decision logic defines a complete, unique-hit decision table&nbsp;</span>deriving Pre-bureau risk category from Existing Customer and Application Risk Score.</span></p> + + + + + + + + Existing Customer + + + + + Application Risk Score + + + + + "DECLINE","HIGH","MEDIUM","LOW","VERY LOW" + + + + + false + + + < 100 + + + "HIGH" + + + + + false + + + [100..120) + + + "MEDIUM" + + + + + false + + + [120..130] + + + "LOW" + + + + + false + + + > 130 + + + "VERY LOW" + + + + + true + + + < 80 + + + "DECLINE" + + + + + true + + + [80..90) + + + "HIGH" + + + + + true + + + [90..110] + + + "MEDIUM" + + + + + true + + + > 110 + + + "LOW" + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Application Risk Score&nbsp;</span></strong><span lang="JA">decision logic invokes the Application&nbsp;</span>risk score model business knowledge model, passing Applicant data.Age as the Age parameter, Applicant data.MaritalStatus as the Marital Status parameter and Applicant data.EmploymentStatus as the 
Employment Status parameter.</span></p> + + + + + + + + + + Application risk score model + + + + + Applicant data.Age + + + + + + Applicant data.MartitalStatus + + + + + + Applicant data.EmploymentStatus + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Application risk score model&nbsp;</span></strong><span lang="JA">decision logic&nbsp;defines a complete, no-order multiple-hit table&nbsp;</span>with aggregation, deriving Application risk score from Age, Marital Status and Employment Status, as the sum of the Partial scores of all matching rows (this is therefore a predictive scorecard represented as a decision table).</span></p> + + + + + + + + + Age + + + [18..120] + + + + + Marital Status + + + "S","M" + + + + + Employment Status + + + "UNEMPLOYED","STUDENT","EMPLOYED","SELF-EMPLOYED" + + + + + + [18..22) + + + - + + + - + + + 32 + + + + + [22..26) + + + - + + + - + + + 35 + + + + + [26..36) + + + - + + + - + + + 40 + + + + + [36..50) + + + - + + + - + + + 43 + + + + + >=50 + + + - + + + - + + + 48 + + + + + - + + + "S" + + + - + + + 25 + + + + + - + + + "M" + + + - + + + 45 + + + + + - + + + - + + + "UNEMPLOYED" + + + 15 + + + + + - + + + - + + + "STUDENT" + + + 18 + + + + + - + + + - + + + "EMPLOYED" + + + 45 + + + + + - + + + - + + + "SELF-EMPLOYED" + + + 36 + + + + + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Required monthly installment&nbsp;</span></strong><span lang="JA">decision logic invokes the&nbsp;</span>Installment calculation business knowledge model, passing Requested product.ProductType as the Product Type parameter, Requested product.Rate as the Rate parameter, Requested product.Term as the Term parameter, and Requested product.Amount as the Amount parameter.</span></p> + + + + + + + + + + Installment calculation + + + + + Requested product.ProductType + + + + + + Requested product.Rate + + + + + + Requested product.Term + + + + + + Requested product.Amount + + + + + + + + + <p><span style="font-family: arial, helvetica, sans-serif; font-size: 10pt;"><span lang="JA">The&nbsp;</span><strong><span lang="JA">Installment calculation&nbsp;</span></strong><span lang="JA">decision logic&nbsp;defines a boxed function deriving monthly installment&nbsp;</span>from Product Type, Rate, Term and Amount.</span></p> + + + + + + + + + + + if Product Type = "STANDARD LOAN" +then 20.00 +else if Product Type = "SPECIAL LOAN" +then 25.00 +else null + + + + + + Financial.PMT(Rate, Term, Amount) + + + + + Monthly Repayment + Monthly Fee + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-decision-service.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-decision-service.dmn
new file mode 100644
index 000000000..4f9baa33f
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-decision-service.dmn
@@ -0,0 +1,27 @@
+[DMN 1.2 XML: diagram interchange decision service example]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-dish-example.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-dish-example.dmn
new file mode 100644
index 000000000..4afe54dba
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_2_unsupported/diagram-interchange-dish-example.dmn
@@ -0,0 +1,223 @@
+[DMN 1.2 XML: dish example decision tables over season, guestCount, temperature and dayType]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-decision-with-listed-input-data.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-decision-with-listed-input-data.dmn
new file mode 100644
index 000000000..8284abf8a
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-decision-with-listed-input-data.dmn
@@ -0,0 +1,45 @@
+[DMN 1.3 XML: decision with listed input data]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-shape-with-label-text.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-shape-with-label-text.dmn
new file mode 100644
index 000000000..fda0fc42d
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_supported/diagram-interchange-shape-with-label-text.dmn
@@ -0,0 +1,24 @@
+[DMN 1.3 XML: diagram shape with label text]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/chapter-11-example.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/chapter-11-example.dmn
new file mode 100644
index 000000000..deaf94a23
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/chapter-11-example.dmn
@@ -0,0 +1,2995 @@
+[DMN 1.3 XML: chapter 11 loan origination example — Strategy, Eligibility, Routing, Bureau call type, pre/post-bureau risk category, affordability, application risk score and installment calculation decisions]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-decision-service.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-decision-service.dmn
new file mode 100644
index 000000000..ba8febdb3
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-decision-service.dmn
@@ -0,0 +1,27 @@
+[DMN 1.3 XML: diagram interchange decision service example]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-dish-example.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-dish-example.dmn
new file mode 100644
index 000000000..7ad1c9ea1
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/diagram-interchange-dish-example.dmn
@@ -0,0 +1,223 @@
+[DMN 1.3 XML: dish example decision tables over season, guestCount, temperature and dayType]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/financial.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/financial.dmn
new file mode 100644
index 000000000..5a722a9bc
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/financial.dmn
@@ -0,0 +1,31 @@
+[DMN 1.3 XML: standard monthly installment calculation from Rate, Term and Amount — (Amount*Rate/12) / (1 - (1 + Rate/12)**-Term)]
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/loan-info.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/loan-info.dmn
new file mode 100644
index 000000000..64ae12740
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/loan-info.dmn
@@ -0,0 +1,758 @@
+[DMN 1.3 XML: loan info model — product, asset and liability type definitions, note amount, LTV, closing cost and payment calculations, and a rate adjustment table keyed on Credit Score and LTV]
\ No newline at end of file
diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/recommended-loan-products.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/recommended-loan-products.dmn
new file mode 100644
index 000000000..3c1983eeb
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/data/dmn_version_test/v1_3_unsupported/recommended-loan-products.dmn
@@ -0,0 +1,1412 @@
+[DMN 1.3 XML: recommended loan products model — lender product table, minimum credit score table keyed on DTI, LTV and Reserves, eligibility and recommendation logic, and row sorting and formatting]
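The feel_engine test modules later in this commit all drive their DMN fixtures through the same small pattern: construct a FeelDecisionRunner around a .dmn file and assert on the description (the row annotation) of the matched rule. A minimal sketch of that pattern, based only on the decide()/description usage visible in those tests; running it as a stand-alone snippet outside the test package is an assumption:

    # Sketch only: mirrors the relative import used inside tests/SpiffWorkflow/dmn/feel_engine/
    from .FeelDecisionRunner import FeelDecisionRunner

    # FeelDecisionRunner plugs a FeelLikeScriptEngine into the shared DecisionRunner
    # and loads the named fixture from the feel_engine/data directory.
    runner = FeelDecisionRunner('string_decision_feel.dmn')

    # decide() evaluates the decision table against the given context and returns
    # the matched rule; its description carries the row annotation asserted on below.
    result = runner.decide({"Gender": 'm'})
    assert result.description == 'm Row Annotation'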
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelBoolDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelBoolDecisionTest.py
new file mode 100644
index 000000000..3addab0e3
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelBoolDecisionTest.py
@@ -0,0 +1,31 @@
+import unittest
+
+from .FeelDecisionRunner import FeelDecisionRunner
+
+
+class FeelBoolDecisionTestClass(unittest.TestCase):
+    """
+    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        cls.runner = FeelDecisionRunner('bool_decision_feel.dmn')
+
+    def test_bool_decision_string_output1(self):
+        res = self.runner.decide(True)
+        self.assertEqual(res.description, 'Y Row Annotation')
+
+    def test_bool_decision_string_output2(self):
+        res = self.runner.decide(False)
+        self.assertEqual(res.description, 'N Row Annotation')
+
+    def test_bool_decision_string_output3(self):
+        res = self.runner.decide(None)
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(FeelBoolDecisionTestClass)
+
+if __name__ == '__main__':
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelDateDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelDateDecisionTest.py
new file mode 100644
index 000000000..42aaaa580
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelDateDecisionTest.py
@@ -0,0 +1,42 @@
+import unittest
+from datetime import datetime
+
+from SpiffWorkflow.dmn.parser.DMNParser import DMNParser
+
+from .FeelDecisionRunner import FeelDecisionRunner
+
+
+class FeelDateDecisionTestClass(unittest.TestCase):
+    """
+    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        cls.runner = FeelDecisionRunner('date_decision_feel.dmn')
+
+    def test_date_decision_string_output1(self):
+        res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT))
+        self.assertEqual(res.description, '111 Row Annotation')
+
+    def test_date_decision_string_output2(self):
+        res = self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT))
+        self.assertEqual(res.description, '311 Row Annotation')
+
+    def test_date_decision_string_output3(self):
+        res =
self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '<3.11 Row Annotation') + + def test_date_decision_string_output4(self): + res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>3.11 Row Annotation') + + def test_date_decision_string_output5(self): + res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>13.11<14.11 Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelDateDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelDecisionRunner.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelDecisionRunner.py new file mode 100644 index 000000000..f0737d203 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelDecisionRunner.py @@ -0,0 +1,8 @@ +from SpiffWorkflow.bpmn.FeelLikeScriptEngine import FeelLikeScriptEngine + +from ..DecisionRunner import DecisionRunner + +class FeelDecisionRunner(DecisionRunner): + + def __init__(self, filename): + super().__init__(FeelLikeScriptEngine(), filename, 'feel_engine') \ No newline at end of file diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDecisionTest.py new file mode 100644 index 000000000..1ab727f9d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDecisionTest.py @@ -0,0 +1,39 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelDictDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('dict_decision_feel.dmn') + + def test_string_decision_string_output1(self): + data = {"allergies": { + "PEANUTS": {"delicious": True}, + "SPAM": {"delicious": False} + }} + PythonScriptEngine.convert_to_box(PythonScriptEngine(), data) + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + data = {"allergies": { + "SpAm": {"delicious": False}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelDictDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDotNotationDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDotNotationDecisionTest.py new file mode 100644 index 000000000..bf19b44e5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDotNotationDecisionTest.py @@ -0,0 +1,39 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelDictDotNotationDecisionTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('dict_dot_notation_decision_feel.dmn') + + def test_string_decision_string_output1(self): + data = {"foods": { + "spam": {"delicious": False} + }} + res = self.runner.decide(Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') 
+
+    def test_string_decision_string_output2(self):
+        data = {"foods": {
+            "spam": {"delicious": True}
+        }}
+        res = self.runner.decide(Box(data))
+        self.assertEqual(res.description, 'This person is lacking many '
+                                          'critical decision making skills, '
+                                          'or is a viking.')
+
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(FeelDictDotNotationDecisionTestClass)
+
+if __name__ == '__main__':
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionComparisonTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionComparisonTest.py
new file mode 100644
index 000000000..680206205
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionComparisonTest.py
@@ -0,0 +1,31 @@
+import unittest
+
+from .FeelDecisionRunner import FeelDecisionRunner
+
+
+class FeelIntegerDecisionComparisonTestClass(unittest.TestCase):
+    """
+    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        cls.runner = FeelDecisionRunner('integer_decision_comparison_feel.dmn')
+
+    def test_integer_decision_string_output1(self):
+        res = self.runner.decide(30)
+        self.assertEqual(res.description, '30 Row Annotation')
+
+    def test_integer_decision_string_output2(self):
+        res = self.runner.decide(24)
+        self.assertEqual(res.description, 'L Row Annotation')
+
+    def test_integer_decision_string_output3(self):
+        res = self.runner.decide(25)
+        self.assertEqual(res.description, 'H Row Annotation')
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionComparisonTestClass)
+
+if __name__ == '__main__':
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionRangeTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionRangeTest.py
new file mode 100644
index 000000000..6baf7d587
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionRangeTest.py
@@ -0,0 +1,74 @@
+import unittest
+
+from .FeelDecisionRunner import FeelDecisionRunner
+
+
+class FeelIntegerDecisionRangeTestClass(unittest.TestCase):
+    """
+    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
+    """
+    def test_integer_decision_string_output_inclusive(self):
+        runner = FeelDecisionRunner('integer_decision_range_inclusive_feel.dmn')
+
+        res = runner.decide({"Age":100})
+        self.assertEqual(res.description, '100-110 Inclusive Annotation')
+
+        res = runner.decide({"Age":99})
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+        res = runner.decide({"Age":110})
+        self.assertEqual(res.description, '100-110 Inclusive Annotation')
+
+        res = runner.decide({"Age":111})
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+    def test_integer_decision_string_output_exclusive(self):
+        runner = FeelDecisionRunner('integer_decision_range_exclusive_feel.dmn')
+
+        res = runner.decide({"Age":100})
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+        res = runner.decide({"Age":101})
+        self.assertEqual(res.description, '100-110 Exclusive Annotation')
+
+        res = runner.decide({"Age":110})
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+        res = runner.decide({"Age":109})
+        self.assertEqual(res.description, '100-110 Exclusive Annotation')
+
+    def test_integer_decision_string_output_excl_inclusive(self):
+        runner = FeelDecisionRunner('integer_decision_range_excl_inclusive_feel.dmn')
+
+        res = runner.decide({'Age': 100})
+ self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({'Age':101}) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide({'Age':110}) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide({'Age':111}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_incl_exclusive(self): + runner = FeelDecisionRunner('integer_decision_range_incl_exclusive_feel.dmn') + + res = runner.decide({"Age":100}) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + + res = runner.decide({"Age":99}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":110}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":109}) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelKwargsParameterTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelKwargsParameterTest.py new file mode 100644 index 000000000..a919ebae2 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelKwargsParameterTest.py @@ -0,0 +1,23 @@ +import unittest + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelStringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('kwargs_parameter_feel.dmn') + + def test_string_decision_string_output1(self): + res = self.runner.decide({"Gender":'m'}) + self.assertEqual(res.description, 'm Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelListDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelListDecisionTest.py new file mode 100644 index 000000000..5a21075a0 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelListDecisionTest.py @@ -0,0 +1,27 @@ +import unittest + +from .FeelDecisionRunner import FeelDecisionRunner + +class FeelListDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('list_decision_feel.dmn') + + def test_string_decision_string_output1(self): + res = self.runner.decide({'allergies':["PEANUTS", "SPAM"]}) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output1(self): + res = self.runner.decide({'allergies':["SPAM", "SPAM"]}) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelListDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelLongDoubleComparisonTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelLongDoubleComparisonTest.py new file mode 100644 index 000000000..9376a7499 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelLongDoubleComparisonTest.py @@ -0,0 +1,33 @@ +import unittest + +from decimal import Decimal + +from .FeelDecisionRunner 
import FeelDecisionRunner + + +class FeelLongOrDoubleDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('long_or_double_decision_comparison_feel.dmn') + + def test_long_or_double_decision_string_output1(self): + res = self.runner.decide({"Age":Decimal('30.5')}) + self.assertEqual(res.description, '30.5 Row Annotation') + + def test_long_or_double_decision_stringz_output2(self): + res = self.runner.decide({"Age":Decimal('25.3')}) + self.assertEqual(res.description, 'L Row Annotation') + + def test_long_or_double_decision_string_output3(self): + res = self.runner.decide({"Age":Decimal('25.4')}) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelLongOrDoubleRangeTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelLongOrDoubleRangeTest.py new file mode 100644 index 000000000..1c16de750 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelLongOrDoubleRangeTest.py @@ -0,0 +1,77 @@ +import unittest + +from decimal import Decimal + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelLongOrDoubleDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_long_or_double_decision_string_output_inclusive(self): + runner = FeelDecisionRunner('long_or_double_decision_range_inclusive_feel.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide({"Age":Decimal('99')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide({"Age":Decimal('111')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_exclusive(self): + runner = FeelDecisionRunner('long_or_double_decision_range_exclusive_feel.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('101')}) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('109')}) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + def test_long_or_double_decision_string_output_excl_inclusive(self): + runner = FeelDecisionRunner('long_or_double_decision_range_excl_inclusive_feel.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('101')}) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide({"Age":Decimal('111')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_incl_exclusive(self): + runner = FeelDecisionRunner('long_or_double_decision_range_incl_exclusive_feel.dmn') + + res = 
runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + + res = runner.decide({"Age":Decimal('99')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('109')}) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelNearMissNameTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelNearMissNameTest.py new file mode 100644 index 000000000..1ecbca08b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelNearMissNameTest.py @@ -0,0 +1,53 @@ +import unittest + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelNearMissTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.data = { + "Exclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "eXclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "EXCLUSIVE": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "personnel": [ + { + "PersonnelType": "Faculty", + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + } + ], + + "shared": [] + } + + cls.runner = FeelDecisionRunner('exclusive_feel.dmn') + + def test_string_decision_string_output1(self): + self.assertRaisesRegex(Exception, + ".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\].+", + self.runner.decide, + self.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelNearMissTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelStringDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelStringDecisionTest.py new file mode 100644 index 000000000..f9601b5ca --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/FeelStringDecisionTest.py @@ -0,0 +1,35 @@ +import unittest + +from .FeelDecisionRunner import FeelDecisionRunner + + +class FeelStringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = FeelDecisionRunner('string_decision_feel.dmn') + + def test_string_decision_string_output1(self): + res = self.runner.decide({"Gender":'m'}) + self.assertEqual(res.description, 'm Row Annotation') + + def test_string_decision_string_output2(self): + res = self.runner.decide({"Gender":'f'}) + self.assertEqual(res.description, 'f Row Annotation') + + def test_string_decision_string_output3(self): + res = self.runner.decide({"Gender":'y'}) + self.assertEqual(res.description, 'NOT x Row Annotation') + + def test_string_decision_string_output4(self): + res = self.runner.decide({"Gender":'x'}) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/feel_engine/FeelStringIntegerDecisionTest.py b/tests/SpiffWorkflow/dmn/feel_engine/FeelStringIntegerDecisionTest.py new file mode 100644 index 000000000..47cc5381e --- /dev/null +++ 
b/tests/SpiffWorkflow/dmn/feel_engine/FeelStringIntegerDecisionTest.py
@@ -0,0 +1,39 @@
+import unittest
+
+from .FeelDecisionRunner import FeelDecisionRunner
+
+
+class FeelStringIntegerDecisionTestClass(unittest.TestCase):
+    """
+    Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        cls.runner = FeelDecisionRunner('string_integer_decision_feel.dmn')
+
+    def test_string_integer_decision_string_output1(self):
+        res = self.runner.decide({"Gender":'m', "Age": 30})
+        self.assertEqual(res.description, 'm30 Row Annotation')
+
+    def test_string_integer_decision_string_output2(self):
+        res = self.runner.decide({"Gender":'m', "Age": 24})
+        self.assertEqual(res.description, 'mL Row Annotation')
+
+    def test_string_integer_decision_string_output3(self):
+        res = self.runner.decide({"Gender":'m', "Age": 25})
+        self.assertEqual(res.description, 'mH Row Annotation')
+
+    def test_string_integer_decision_string_output4(self):
+        res = self.runner.decide({"Gender":'f', "Age": -1})
+        self.assertEqual(res.description, 'fL Row Annotation')
+
+    def test_string_integer_decision_string_output5(self):
+        res = self.runner.decide({"Gender":'x', "Age": 0})
+        self.assertEqual(res.description, 'ELSE Row Annotation')
+
+def suite():
+    return unittest.TestLoader().loadTestsFromTestCase(FeelStringIntegerDecisionTestClass)
+
+if __name__ == '__main__':
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/__init__.py b/tests/SpiffWorkflow/dmn/feel_engine/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/bool_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/bool_decision_feel.dmn
new file mode 100644
index 000000000..1bd3cc1d3
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/data/bool_decision_feel.dmn
@@ -0,0 +1,40 @@
+[DMN XML: boolean decision table — true -> "Yesss" (Y Row Annotation), false -> "Noooo" (N Row Annotation), otherwise "ELSE" (ELSE Row Annotation)]
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/date_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/date_decision_feel.dmn
new file mode 100644
index 000000000..19deb3c5c
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/data/date_decision_feel.dmn
@@ -0,0 +1,58 @@
+[DMN XML: date decision table using date and time() comparisons and ranges over November 2017 dates]
diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/dict_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_decision_feel.dmn
new file mode 100644
index 000000000..22d550476
--- /dev/null
+++ b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_decision_feel.dmn
@@ -0,0 +1,35 @@
+[DMN XML: dict decision table over allergies.keys() using contains("PEANUTS")]
b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_dot_notation_decision_feel.dmn new file mode 100644 index 000000000..81aca89f3 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_dot_notation_decision_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + foods.spam.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/dict_dot_notation_decision_weird_characters_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_dot_notation_decision_weird_characters_feel.dmn new file mode 100644 index 000000000..c7a9602ec --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/dict_dot_notation_decision_weird_characters_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + odd_foods.SPAM_LIKE.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + true + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + false + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/exclusive_feel.dmn new file mode 100644 index 000000000..902140306 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/exclusive_feel.dmn @@ -0,0 +1,31 @@ + + + + + + + sum([1 for x in exclusive if x.ExclusiveSpaceAMComputingID is None]) + + + + + No exclusive spaces without Area Monitor + + 0 + + + true + + + + More than one exclusive space without an Area Monitor + + > 0 + + + false + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_comparison_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_comparison_feel.dmn new file mode 100644 index 000000000..42a421312 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_comparison_feel.dmn @@ -0,0 +1,49 @@ + + + + + + + input + + + + + 30 Row Annotation + + 30 + + + "30" + + + + L Row Annotation + + < 25 + + + "low" + + + + H Row Annotation + + >= 25 + + + "high" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_excl_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_excl_inclusive_feel.dmn new file mode 100644 index 000000000..d3f0de87b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_excl_inclusive_feel.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 ExclInclusive Annotation + + ]100..110] + + + "100-110 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_exclusive_feel.dmn new file mode 100644 index 000000000..74200deef --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 Exclusive Annotation + ]100..110[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_incl_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_incl_exclusive_feel.dmn new file mode 100644 index 000000000..792d74793 --- /dev/null +++ 
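
The four integer range fixtures differ only in bracket direction: a square bracket facing the number includes that endpoint, one facing away excludes it, which is why the python-engine equivalents later in the diff read 100 < ? <= 110, 100 <= ? < 110, and so on, and why the range tests expect 100 or 110 to fall through to the ELSE row in the exclusive variants. An illustrative helper (names are ours, not part of SpiffWorkflow) mirroring the four variants:

def in_range(value, low=100, high=110, incl_low=True, incl_high=True):
    # incl_low=True,  incl_high=True   -> [100..110]  both endpoints match
    # incl_low=False, incl_high=True   -> ]100..110]  100 falls through to ELSE
    # incl_low=True,  incl_high=False  -> [100..110[  110 falls through to ELSE
    # incl_low=False, incl_high=False  -> ]100..110[  both endpoints fall through
    ok_low = value >= low if incl_low else value > low
    ok_high = value <= high if incl_high else value < high
    return ok_low and ok_high

assert in_range(100) and not in_range(100, incl_low=False)
assert in_range(110) and not in_range(110, incl_high=False)
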
b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_incl_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 InclExclusive Annotation + [100..110[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_inclusive_feel.dmn new file mode 100644 index 000000000..d667fd18b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/integer_decision_range_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 Inclusive Annotation + [100..110] + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/invalid_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/invalid_decision_feel.dmn new file mode 100644 index 000000000..cbafd50c1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/invalid_decision_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + spam + + + + + This is complletely wrong. + + mGender Description + = 1 + + + "wrong" + + + + so is this. + + >= 100 + + + "My cat's breath smells like cat food." + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/kwargs_parameter_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/kwargs_parameter_feel.dmn new file mode 100644 index 000000000..d470b04b0 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/kwargs_parameter_feel.dmn @@ -0,0 +1,41 @@ + + + + + + + Gender + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/list_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/list_decision_feel.dmn new file mode 100644 index 000000000..7d993b612 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/list_decision_feel.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + contains("PEANUTS") + + + "isPeanuts" + + + + They are not allergic to peanuts + + not contains("PEANUTS") + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_comparison_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_comparison_feel.dmn new file mode 100644 index 000000000..0ccf4a30d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_comparison_feel.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30.5 Row Annotation + 30.5 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25.4]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_excl_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_excl_inclusive_feel.dmn new file mode 100644 index 000000000..7c16ef3d8 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_excl_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 ExclInclusive Annotation + ]100.05..110.05] + + + + + + ELSE Row Annotation + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_exclusive_feel.dmn new file mode 100644 index 000000000..cd689a935 --- /dev/null +++ 
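
The long-or-double fixtures put their boundaries exactly at 100.05 and 110.05, and the corresponding tests pass Decimal('100.05') rather than the float 100.05; constructing Decimal from a string keeps the boundary exact, whereas constructing it from a float would carry the float's binary rounding. A small sketch of the difference (our illustration of why the tests use string-built Decimals):

from decimal import Decimal

exact = Decimal('100.05')     # exactly 100.05
from_float = Decimal(100.05)  # exact value of the nearest binary float, not 100.05

assert exact != from_float
assert Decimal('100.05') <= exact <= Decimal('110.05')  # inclusive boundary row matches
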
b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 Exclusive Annotation + ]100.05..110.05[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_incl_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_incl_exclusive_feel.dmn new file mode 100644 index 000000000..ed30196af --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_incl_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 InclExclusive Annotation + [100.05..110.05[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_inclusive_feel.dmn new file mode 100644 index 000000000..04a5515ab --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/long_or_double_decision_range_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 Inclusive Annotation + [100.05..110.05] + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/string_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/string_decision_feel.dmn new file mode 100644 index 000000000..b18d9b053 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/string_decision_feel.dmn @@ -0,0 +1,43 @@ + + + + + + + + + + + + m Row Annotation + + mGender Description + + + + + + + f Row Annotation + + + + + + + NOT x Row Annotation + + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/string_integer_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/string_integer_decision_feel.dmn new file mode 100644 index 000000000..e21f1ca05 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/string_integer_decision_feel.dmn @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + m30 Row Annotation + + mGender Description + + + 30 + + + + + + mL Row Annotation + + + + + + + + + mH Row Annotation + + + = 25]]> + + + + + + fL Row Annotation + + + + + + + + + fH Row Annotation + + + = 20]]> + + + + + + ELSE Row Annotation + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/feel_engine/data/test_integer_decision_feel.dmn b/tests/SpiffWorkflow/dmn/feel_engine/data/test_integer_decision_feel.dmn new file mode 100644 index 000000000..86b41068c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/feel_engine/data/test_integer_decision_feel.dmn @@ -0,0 +1,49 @@ + + + + + + + x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/BoolDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/BoolDecisionTest.py new file mode 100644 index 000000000..aedddb4ab --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/BoolDecisionTest.py @@ -0,0 +1,28 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class BoolDecisionTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('bool_decision.dmn') + + def test_bool_decision_string_output1(self): + res = self.runner.decide({'input': True}) + self.assertEqual(res.description, 'Y Row Annotation') + + def test_bool_decision_string_output2(self): + res = 
self.runner.decide({'input': False}) + self.assertEqual(res.description, 'N Row Annotation') + + def test_bool_decision_string_output3(self): + res = self.runner.decide(None) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BoolDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/DateDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/DateDecisionTest.py new file mode 100644 index 000000000..32e7735f1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/DateDecisionTest.py @@ -0,0 +1,41 @@ +import unittest +from datetime import datetime + +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser +from .PythonDecisionRunner import PythonDecisionRunner + + +class DateDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('date_decision.dmn') + + def test_date_decision_string_output1(self): + res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '111 Row Annotation') + + def test_date_decision_string_output2(self): + res = self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '311 Row Annotation') + + def test_date_decision_string_output3(self): + res = self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '<3.11 Row Annotation') + + def test_date_decision_string_output4(self): + res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>3.11 Row Annotation') + + def test_date_decision_string_output5(self): + res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>13.11<14.11 Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DateDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/DictDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/DictDecisionTest.py new file mode 100644 index 000000000..910e26e96 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/DictDecisionTest.py @@ -0,0 +1,36 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class DictDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('dict_decision.dmn') + + def test_string_decision_string_output1(self): + data = {"allergies": { + "PEANUTS": {"delicious": True}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + data = {"allergies": { + "SpAm": {"delicious": False}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DictDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git 
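
DictDecisionTest above feeds an allergies dict whose keys drive the table: the fixture's input expression is allergies.keys(), and the rules (in the python-engine fixture later in this diff) are "PEANUTS" in ? and "PEANUTS" not in ?. Stripped of the DMN machinery, the decision reduces to a key-membership check:

allergies = {
    "PEANUTS": {"delicious": True},
    "SPAM": {"delicious": False},
}

# Row 1: "PEANUTS" in ?  -> 'They are allergic to peanuts'
assert "PEANUTS" in allergies.keys()
# Row 2: "PEANUTS" not in ?  -> 'They are not allergic to peanuts'
assert "PEANUTS" not in {"SpAm": {"delicious": False}, "SPAM": {"delicious": False}}
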
a/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionTest.py new file mode 100644 index 000000000..375b9fd16 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionTest.py @@ -0,0 +1,46 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + +from .PythonDecisionRunner import PythonDecisionRunner + +class DictDotNotationDecisionTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + dmn_files =[ + 'dict_dot_notation_decision.dmn', + 'dict_dot_notation_decision_v1_3.dmn', + ] + cls.runners = [PythonDecisionRunner(d) for d in dmn_files] + + def test_string_decision_string_output1(self): + for runner in self.runners: + data = {"foods": { + "spam": {"delicious": False} + }} + data = Box(data) + res = runner.decide(data) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + data = Box({"foods": { + "spam": {"delicious": False} + }}) + + def test_string_decision_string_output2(self): + for runner in self.runners: + data = {"foods": { + "spam": {"delicious": True} + }} + res = runner.decide(Box(data)) + self.assertEqual(res.description, 'This person is lacking many ' + 'critical decision making skills, ' + 'or is a viking.') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DictDotNotationDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionWeirdCharactersTest.py b/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionWeirdCharactersTest.py new file mode 100644 index 000000000..d81acb472 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/DictDotNotationDecisionWeirdCharactersTest.py @@ -0,0 +1,36 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + +from .PythonDecisionRunner import PythonDecisionRunner + + +class DictDotNotationDecisionWeirdCharactersTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + dmn_files =[ + 'dict_dot_notation_decision_weird_characters.dmn', + 'dict_dot_notation_decision_weird_characters_v1_3.dmn', + ] + cls.runners = [PythonDecisionRunner(d) for d in dmn_files] + + def test_string_decision_string_output1(self): + for runner in self.runners: + data = {"odd_foods": { + "SPAM_LIKE": {"delicious": False} + }} + res = runner.decide(Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + data = {"foods": { + "spam": {"delicious": False} + }} + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase( + DictDotNotationDecisionWeirdCharactersTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionComparisonTest.py b/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionComparisonTest.py new file mode 100644 index 000000000..a5b825593 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionComparisonTest.py @@ -0,0 +1,31 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class IntegerDecisionComparisonTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('integer_decision_comparison.dmn') + + def test_integer_decision_string_output1(self): + res = 
self.runner.decide({"Age":30}) + self.assertEqual(res.description, '30 Row Annotation') + + def test_integer_decision_string_output2(self): + res = self.runner.decide({"Age":24}) + self.assertEqual(res.description, 'L Row Annotation') + + def test_integer_decision_string_output3(self): + res = self.runner.decide({"Age":25}) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionComparisonTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionRangeTest.py b/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionRangeTest.py new file mode 100644 index 000000000..e566a685d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionRangeTest.py @@ -0,0 +1,75 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class IntegerDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_integer_decision_string_output_inclusive(self): + runner = PythonDecisionRunner('integer_decision_range_inclusive.dmn') + + res = runner.decide({"Age":100}) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = runner.decide({"Age":99}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":110}) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = runner.decide({"Age":111}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_exclusive(self): + runner = PythonDecisionRunner('integer_decision_range_exclusive.dmn') + + res = runner.decide({"Age":100}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":101}) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + res = runner.decide({"Age":110}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":109}) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + def test_integer_decision_string_output_excl_inclusive(self): + runner = PythonDecisionRunner('integer_decision_range_excl_inclusive.dmn') + + res = runner.decide({"Age":100}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":101}) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide({"Age":110}) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide({"Age":111}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_incl_exclusive(self): + runner = PythonDecisionRunner('integer_decision_range_incl_exclusive.dmn') + + res = runner.decide({"Age":100}) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + + res = runner.decide({"Age":99}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":110}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":109}) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/InvalidBusinessRuleNameErrorTest.py 
b/tests/SpiffWorkflow/dmn/python_engine/InvalidBusinessRuleNameErrorTest.py new file mode 100644 index 000000000..4e6130576 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/InvalidBusinessRuleNameErrorTest.py @@ -0,0 +1,19 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class InvalidBusinessRuleNameErrorTest(unittest.TestCase): + + def test_integer_decision_string_output_inclusive(self): + runner = PythonDecisionRunner('invalid_decision_name_error.dmn') + try: + res = runner.decide({'spam': 1}) + except Exception as e: + self.assertRegexpMatches(str(e), "Did you mean 'spam'") + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InvalidBusinessRuleNameErrorTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/ListDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/ListDecisionTest.py new file mode 100644 index 000000000..c4aff2521 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/ListDecisionTest.py @@ -0,0 +1,28 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class ListDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('list_decision.dmn') + + def test_string_decision_string_output1(self): + res = self.runner.decide({'allergies',["PEANUTS", "SPAM"]}) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output1(self): + res = self.runner.decide({'allergies':["SPAM", "SPAM"]}) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ListDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/LongDoubleComparisonTest.py b/tests/SpiffWorkflow/dmn/python_engine/LongDoubleComparisonTest.py new file mode 100644 index 000000000..b129a3b0d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/LongDoubleComparisonTest.py @@ -0,0 +1,33 @@ +import unittest + +from decimal import Decimal + +from .PythonDecisionRunner import PythonDecisionRunner + + +class LongOrDoubleDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('long_or_double_decision_comparison.dmn') + + def test_long_or_double_decision_string_output1(self): + res = self.runner.decide({"Age":Decimal('30.5')}) + self.assertEqual(res.description, '30.5 Row Annotation') + + def test_long_or_double_decision_string_output2(self): + res = self.runner.decide({"Age":Decimal('25.3')}) + self.assertEqual(res.description, 'L Row Annotation') + + def test_long_or_double_decision_string_output3(self): + res = self.runner.decide({"Age":Decimal('25.4')}) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/LongOrDoubleRangeTest.py b/tests/SpiffWorkflow/dmn/python_engine/LongOrDoubleRangeTest.py new file mode 100644 index 000000000..e00b5913f --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/LongOrDoubleRangeTest.py @@ -0,0 
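
Two details in the python_engine tests above are worth flagging: InvalidBusinessRuleNameErrorTest uses assertRegexpMatches, long deprecated in favour of assertRegex, and ListDecisionTest's first case builds {'allergies', [...]} — a set literal that could not even be constructed (lists are unhashable) — while also reusing the method name test_string_decision_string_output1, so only the second definition is collected and the first never runs. A corrected sketch of that pair, as the methods would read inside ListDecisionTestClass (distinct names, dict payload):

def test_list_decision_peanut_allergy(self):
    # dict (key -> list of allergies), not a set literal
    res = self.runner.decide({'allergies': ["PEANUTS", "SPAM"]})
    self.assertEqual(res.description, 'They are allergic to peanuts')

def test_list_decision_no_peanut_allergy(self):
    res = self.runner.decide({'allergies': ["SPAM", "SPAM"]})
    self.assertEqual(res.description, 'They are not allergic to peanuts')
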
+1,77 @@ +import unittest + +from decimal import Decimal + +from .PythonDecisionRunner import PythonDecisionRunner + + +class LongOrDoubleDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_long_or_double_decision_string_output_inclusive(self): + runner = PythonDecisionRunner('long_or_double_decision_range_inclusive.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide({"Age":Decimal('99')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide({"Age":Decimal('111')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_exclusive(self): + runner = PythonDecisionRunner('long_or_double_decision_range_exclusive.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('101')}) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('109')}) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + def test_long_or_double_decision_string_output_excl_inclusive(self): + runner = PythonDecisionRunner('long_or_double_decision_range_excl_inclusive.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('101')}) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide({"Age":Decimal('111')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_incl_exclusive(self): + runner = PythonDecisionRunner('long_or_double_decision_range_incl_exclusive.dmn') + + res = runner.decide({"Age":Decimal('100.05')}) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + + res = runner.decide({"Age":Decimal('99')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('110.05')}) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide({"Age":Decimal('109')}) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/NearMissNameTest.py b/tests/SpiffWorkflow/dmn/python_engine/NearMissNameTest.py new file mode 100644 index 000000000..6424dd822 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/NearMissNameTest.py @@ -0,0 +1,53 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class NearMissTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.data = { + "Exclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "eXclusive": [ + { + 
"ExclusiveSpaceRoomID": "121", + } + ], + "EXCLUSIVE": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "personnel": [ + { + "PersonnelType": "Faculty", + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + } + ], + + "shared": [] + } + + cls.runner = PythonDecisionRunner('exclusive.dmn') + + def test_string_decision_string_output1(self): + self.assertRaisesRegex(Exception, + ".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\].+", + self.runner.decide, + self.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NearMissTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/PythonDecisionRunner.py b/tests/SpiffWorkflow/dmn/python_engine/PythonDecisionRunner.py new file mode 100644 index 000000000..7abeb88ad --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/PythonDecisionRunner.py @@ -0,0 +1,10 @@ +from decimal import Decimal + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine + +from ..DecisionRunner import DecisionRunner + +class PythonDecisionRunner(DecisionRunner): + + def __init__(self, filename): + super().__init__(PythonScriptEngine(scripting_additions={'Decimal': Decimal}), filename, 'python_engine') \ No newline at end of file diff --git a/tests/SpiffWorkflow/dmn/python_engine/StringDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/StringDecisionTest.py new file mode 100644 index 000000000..a3b808437 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/StringDecisionTest.py @@ -0,0 +1,35 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class StringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('string_decision.dmn') + + def test_string_decision_string_output1(self): + res = self.runner.decide({"Gender":'m'}) + self.assertEqual(res.description, 'm Row Annotation') + + def test_string_decision_string_output2(self): + res = self.runner.decide({"Gender":'f'}) + self.assertEqual(res.description, 'f Row Annotation') + + def test_string_decision_string_output3(self): + res = self.runner.decide({"Gender":'y'}) + self.assertEqual(res.description, 'NOT x Row Annotation') + + def test_string_decision_string_output4(self): + res = self.runner.decide({"Gender":'x'}) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/StringIntegerDecisionTest.py b/tests/SpiffWorkflow/dmn/python_engine/StringIntegerDecisionTest.py new file mode 100644 index 000000000..1b13cafd1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/StringIntegerDecisionTest.py @@ -0,0 +1,39 @@ +import unittest + +from .PythonDecisionRunner import PythonDecisionRunner + + +class StringIntegerDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = PythonDecisionRunner('string_integer_decision.dmn') + + def test_string_integer_decision_string_output1(self): + res = self.runner.decide({"Gender":'m', "Age":30}) + self.assertEqual(res.description, 'm30 Row Annotation') + + def test_string_integer_decision_string_output2(self): + res = 
self.runner.decide({"Gender":'m', "Age":24}) + self.assertEqual(res.description, 'mL Row Annotation') + + def test_string_integer_decision_string_output3(self): + res = self.runner.decide({"Gender":'m', "Age":25}) + self.assertEqual(res.description, 'mH Row Annotation') + + def test_string_integer_decision_string_output4(self): + res = self.runner.decide({"Gender":'f', "Age":-1}) + self.assertEqual(res.description, 'fL Row Annotation') + + def test_string_integer_decision_string_output5(self): + res = self.runner.decide({"Gender":'x', "Age":0}) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StringIntegerDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/python_engine/__init__.py b/tests/SpiffWorkflow/dmn/python_engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/bool_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/bool_decision.dmn new file mode 100644 index 000000000..aeb4b9d6f --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/bool_decision.dmn @@ -0,0 +1,40 @@ + + + + + + + input + + + + + Y Row Annotation + + True + + + "Yesss" + + + + N Row Annotation + + False + + + "Noooo" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/date_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/date_decision.dmn new file mode 100644 index 000000000..ac14e5832 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/date_decision.dmn @@ -0,0 +1,58 @@ + + + + + + + input + + + + + >13.11<14.11 Row Annotation + + datetime.datetime(2017,11,13) <= ? <= datetime.datetime(2017,11,14,23,59,59) + + + "between 13.11 and 14.11" + + + + 111 Row Annotation + + datetime.datetime(2017,11,1,10) + + + "01.11" + + + + 311 Row Annotation + + datetime.datetime(2017,11,3) + + + "03.11" + + + + <3.11 Row Annotation + + < datetime.datetime(2017,11,3) + + + "before 03.11" + + + + >3.11 Row Annotation + + > datetime.datetime(2017,11,3) + + + "after 03.11" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/dict_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/dict_decision.dmn new file mode 100644 index 000000000..4c26b6151 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/dict_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + allergies.keys() + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision.dmn new file mode 100644 index 000000000..42e75fbf1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + foods.spam.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. 
+ + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_v1_3.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_v1_3.dmn new file mode 100644 index 000000000..69da25695 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_v1_3.dmn @@ -0,0 +1,39 @@ + + + + + + + foods.spam.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters.dmn new file mode 100644 index 000000000..3ebb9dc4e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + odd_foods.SPAM_LIKE.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters_v1_3.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters_v1_3.dmn new file mode 100644 index 000000000..864ea180a --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/dict_dot_notation_decision_weird_characters_v1_3.dmn @@ -0,0 +1,39 @@ + + + + + + + odd_foods.SPAM_LIKE.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/exclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/exclusive.dmn new file mode 100644 index 000000000..95065d0e4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/exclusive.dmn @@ -0,0 +1,31 @@ + + + + + + + sum([1 for x in exclusive if x.ExclusiveSpaceAMComputingID is None]) + + + + + No exclusive spaces without Area Monitor + + 0 + + + true + + + + More than one exclusive space without an Area Monitor + + > 0 + + + false + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_comparison.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_comparison.dmn new file mode 100644 index 000000000..6ef46d6af --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_comparison.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30 Row Annotation + 30 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_excl_inclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_excl_inclusive.dmn new file mode 100644 index 000000000..5786e6ac9 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_excl_inclusive.dmn @@ -0,0 +1,38 @@ + + + + + + + + + + Assure that ? is not incorrectly interpreted when quoted. + + "RedHarring?" 
+ + + "Possible Bug" + + + + 100-110 ExclInclusive Annotation + + 100 < ? <= 110 + + + "100-110 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_exclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_exclusive.dmn new file mode 100644 index 000000000..53dfb5217 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 Exclusive Annotation + + 100 < ? < 110 + + + "100-110 Exclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_incl_exclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_incl_exclusive.dmn new file mode 100644 index 000000000..34d739bd2 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_incl_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 InclExclusive Annotation + + 100 <= ? < 110 + + + "100-110 InclExclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_inclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_inclusive.dmn new file mode 100644 index 000000000..27afab635 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/integer_decision_range_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 Inclusive Annotation + + 100 <= ? <= 110 + + + "100-110 Inclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/invalid_decision_name_error.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/invalid_decision_name_error.dmn new file mode 100644 index 000000000..8c8e343fe --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/invalid_decision_name_error.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + sparm + + + + + I need sleep. + + mGender Description + == 1 + + + "Apes plague my brain" + + + + so is this. + + >= 100 + + + "My cat's breath smells like cat food." + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/kwargs_parameter.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/kwargs_parameter.dmn new file mode 100644 index 000000000..351397cf4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/kwargs_parameter.dmn @@ -0,0 +1,41 @@ + + + + + + + Gender + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/list_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/list_decision.dmn new file mode 100644 index 000000000..c3ecf312e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/list_decision.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? 
+ + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_comparison.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_comparison.dmn new file mode 100644 index 000000000..ef74e30b6 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_comparison.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30.5 Row Annotation + 30.5 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25.4]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_excl_inclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_excl_inclusive.dmn new file mode 100644 index 000000000..d095d2147 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_excl_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 ExclInclusive Annotation + + Decimal('100.05') < ? <= Decimal('110.05') + + + "100.05-110.05 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_exclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_exclusive.dmn new file mode 100644 index 000000000..1a8e79b5f --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 Exclusive Annotation + + Decimal('100.05') < ? < Decimal('110.05') + + + "100.05-110.05 Exclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_incl_exclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_incl_exclusive.dmn new file mode 100644 index 000000000..3cb396b28 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_incl_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 InclExclusive Annotation + + 100.05 <= ? < 110.05 + + + "100.05-110.05 InclExclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_inclusive.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_inclusive.dmn new file mode 100644 index 000000000..325d6134b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/long_or_double_decision_range_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 Inclusive Annotation + + Decimal('100.05') <= ? <= Decimal('110.05') + + + "100.05-110.05 Inclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/string_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/string_decision.dmn new file mode 100644 index 000000000..4860f018c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/string_decision.dmn @@ -0,0 +1,50 @@ + + + + + + + + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + NOT x Row Annotation + + ? 
!= "x" + + + "notX" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/string_integer_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/string_integer_decision.dmn new file mode 100644 index 000000000..3fb05c367 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/string_integer_decision.dmn @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + m30 Row Annotation + + mGender Description + + + 30 + + + + + + mL Row Annotation + + + + + + + + + mH Row Annotation + + + = 25]]> + + + + + + fL Row Annotation + + + + + + + + + fH Row Annotation + + + = 20]]> + + + + + + ELSE Row Annotation + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/python_engine/data/test_integer_decision.dmn b/tests/SpiffWorkflow/dmn/python_engine/data/test_integer_decision.dmn new file mode 100644 index 000000000..9986d16ad --- /dev/null +++ b/tests/SpiffWorkflow/dmn/python_engine/data/test_integer_decision.dmn @@ -0,0 +1,49 @@ + + + + + + + x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/docTest.py b/tests/SpiffWorkflow/docTest.py new file mode 100644 index 000000000..3a1ef1cf5 --- /dev/null +++ b/tests/SpiffWorkflow/docTest.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +dirname = os.path.abspath(os.path.dirname(__file__)) +sys.path.insert(0, os.path.join(dirname, '..', '..')) +doc_dir = os.path.join(dirname, '..', '..', 'doc') + + +class TutorialTest(object): + + """ + Tests the examples that are included in the docs. + """ + tutorial_dir = None + + def setUp(self): + os.chdir(self.tutorial_dir) + sys.path.insert(0, self.tutorial_dir) + + def tearDown(self): + sys.path.pop(0) + os.chdir(dirname) + + def testTutorial(self): + from start import workflow + self.assertTrue(workflow.is_completed()) + + +class Tutorial1Test(TutorialTest, unittest.TestCase): + tutorial_dir = os.path.join(doc_dir, 'non-bpmn', 'tutorial') + + +class Tutorial2Test(TutorialTest, unittest.TestCase): + tutorial_dir = os.path.join(doc_dir, 'non-bpmn', 'custom-tasks') + + +def suite(): + tests = unittest.TestLoader().loadTestsFromTestCase(Tutorial1Test) + tests.addTests( + unittest.defaultTestLoader.loadTestsFromTestCase(Tutorial2Test)) + return tests +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/serializer/__init__.py b/tests/SpiffWorkflow/serializer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/serializer/baseTest.py b/tests/SpiffWorkflow/serializer/baseTest.py new file mode 100644 index 000000000..9bb79b137 --- /dev/null +++ b/tests/SpiffWorkflow/serializer/baseTest.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +from builtins import str +import sys +import unittest +import os +import warnings +dirname = os.path.dirname(__file__) +data_dir = os.path.join(dirname, '..', 'data') +sys.path.insert(0, os.path.join(dirname, '..')) + +from PatternTest import run_workflow, PatternTest +from SpiffWorkflow.serializer.base import Serializer +from SpiffWorkflow.specs import WorkflowSpec +from SpiffWorkflow.workflow import Workflow +from SpiffWorkflow.serializer.exceptions import TaskNotSupportedError + + +class SerializerTest(PatternTest): + + def setUp(self): + super(SerializerTest, self).setUp() + self.serializer = Serializer() + self.return_type = None + + def _prepare_result(self, 
item): + return item + + def _compare_results(self, item1, item2, exclude_dynamic=False, + exclude_items=None): + #with open('1.xml', 'w') as fp: fp.write(item1) + #with open('2.xml', 'w') as fp: fp.write(item2) + self.assertEqual(item1.decode('utf8'), item2.decode('utf8')) + + def _test_roundtrip_serialization(self, obj): + # Test round trip serialization. + try: + serialized1 = obj.serialize(self.serializer) + restored = obj.__class__.deserialize(self.serializer, serialized1) + serialized2 = restored.serialize(self.serializer) + except TaskNotSupportedError as e: + warnings.warn('unsupported task spec: ' + str(e)) + return + self.assertIsInstance(serialized1, self.return_type) + self.assertIsInstance(serialized2, self.return_type) + serialized1 = self._prepare_result(serialized1) + serialized2 = self._prepare_result(serialized2) + self._compare_results(serialized1, serialized2) + return serialized1 + + def _test_workflow_spec(self, test): + spec_result1 = self._test_roundtrip_serialization(test.spec) + spec_result2 = self._test_roundtrip_serialization(test.spec) + self.assertEqual(spec_result1, spec_result2) + self._compare_results(spec_result1, spec_result2) + + workflow = run_workflow(self, test.spec, test.path, test.data) + spec_result3 = self._test_roundtrip_serialization(test.spec) + wf_result3 = self._test_roundtrip_serialization(workflow) + # We can't compare spec_result 2 and 3, because starting a workflow + # implicitely causes a Root node to be added to the workflow spec. + # (No, that doesn't seem to be a clean solution.) + # self.assertEqual(spec_result2, spec_result3) + # self._compare_results(spec_result2, spec_result3) + + def testWorkflowSpec(self): + if type(self.serializer) is Serializer: + spec = self.workflows[0].spec + wf = Workflow(spec) + self.assertRaises(NotImplementedError, spec.serialize, + self.serializer) + self.assertRaises(NotImplementedError, + WorkflowSpec.deserialize, self.serializer, None) + self.assertRaises(NotImplementedError, wf.serialize, + self.serializer) + self.assertRaises(NotImplementedError, + Workflow.deserialize, self.serializer, None) + return + + for test in self.workflows: + print(test.filename) + self._test_workflow_spec(test) + + +def suite(): + return unittest.defaultTestLoader.loadTestsFromTestCase(SerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/serializer/dictTest.py b/tests/SpiffWorkflow/serializer/dictTest.py new file mode 100644 index 000000000..4143ec605 --- /dev/null +++ b/tests/SpiffWorkflow/serializer/dictTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +from builtins import str +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +import uuid +from SpiffWorkflow.serializer.dict import DictionarySerializer +from .baseTest import SerializerTest +from SpiffWorkflow.workflow import Workflow + + +class DictionarySerializerTest(SerializerTest): + + def setUp(self): + super(DictionarySerializerTest, self).setUp() + self.serializer = DictionarySerializer() + self.return_type = dict + + def _compare_results(self, item1, item2, + exclude_dynamic=False, + exclude_items=None): + exclude_items = exclude_items if exclude_items is not None else [] + if exclude_dynamic: + if 'last_state_change' not in exclude_items: + exclude_items.append('last_state_change') + if 'last_task' not in exclude_items: + exclude_items.append('last_task') + if uuid.UUID not in exclude_items: + 
exclude_items.append(uuid.UUID) + if type(item1) in exclude_items: + return + + if isinstance(item1, dict): + self.assertIsInstance(item2, dict) + for key, value in list(item1.items()): + self.assertIn(key, item2) + if key in exclude_items: + continue + self._compare_results(value, item2[key], + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + for key in item2: + self.assertIn(key, item1) + + elif isinstance(item1, list): + msg = "item is not a list (is a " + str(type(item2)) + ")" + self.assertIsInstance(item2, list, msg) + msg = "list lengths differ: {} vs {}".format( + len(item1), len(item2)) + self.assertEqual(len(item1), len(item2), msg) + for i, listitem in enumerate(item1): + self._compare_results(listitem, item2[i], + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + + elif isinstance(item1, Workflow): + raise Exception("Item is a Workflow") + + else: + msg = "{}: types differ: {} vs {}".format( + str(item2), type(item1), type(item2)) + self.assertEqual(type(item1), type(item2), msg) + self.assertEqual(item1, item2) + + +def suite(): + return unittest.defaultTestLoader.loadTestsFromTestCase(DictionarySerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/serializer/jsonTest.py b/tests/SpiffWorkflow/serializer/jsonTest.py new file mode 100644 index 000000000..cfdaa4d6b --- /dev/null +++ b/tests/SpiffWorkflow/serializer/jsonTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +import json +from SpiffWorkflow.serializer.json import JSONSerializer +from .dictTest import DictionarySerializerTest + + +class JSONSerializerTest(DictionarySerializerTest): + + def setUp(self): + super(JSONSerializerTest, self).setUp() + self.serializer = JSONSerializer() + self.return_type = str + + def _prepare_result(self, item): + return json.loads(item) + + def _compare_results(self, item1, item2, exclude_dynamic=False, + exclude_items=None): + if exclude_dynamic: + exclude_items = ['__uuid__'] + else: + exclude_items = [] + super(JSONSerializerTest, self)._compare_results(item1, item2, + exclude_dynamic=exclude_dynamic, + exclude_items=exclude_items) + + +def suite(): + return unittest.defaultTestLoader.loadTestsFromTestCase(JSONSerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/serializer/prettyxmlTest.py b/tests/SpiffWorkflow/serializer/prettyxmlTest.py new file mode 100644 index 000000000..25a604c66 --- /dev/null +++ b/tests/SpiffWorkflow/serializer/prettyxmlTest.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +data_dir = os.path.join(dirname, '..', 'data') +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from SpiffWorkflow.serializer.prettyxml import XmlSerializer +from .baseTest import SerializerTest + + +class XmlSerializerTest(SerializerTest): + + def setUp(self): + super(XmlSerializerTest, self).setUp() + self.serializer = XmlSerializer() + self.return_type = str + + def testWorkflowSpec(self): + # Nothing to test here: The deserialization is already used in setUp() + # to load all specs, and serialization is not supported. 
+ pass + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(XmlSerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/serializer/xmlTest.py b/tests/SpiffWorkflow/serializer/xmlTest.py new file mode 100644 index 000000000..bdffc6e9e --- /dev/null +++ b/tests/SpiffWorkflow/serializer/xmlTest.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..')) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from lxml import etree +from SpiffWorkflow.serializer.xml import XmlSerializer +from serializer.baseTest import SerializerTest + + +class XmlSerializerTest(SerializerTest): + + def setUp(self): + super(XmlSerializerTest, self).setUp() + self.serializer = XmlSerializer() + self.return_type = etree._Element + + def _prepare_result(self, item): + return etree.tostring(item, pretty_print=True) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(XmlSerializerTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/CeleryTest.py b/tests/SpiffWorkflow/specs/CeleryTest.py new file mode 100644 index 000000000..9190dd33f --- /dev/null +++ b/tests/SpiffWorkflow/specs/CeleryTest.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + +import os +import sys +import unittest +import pickle +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from .TaskSpecTest import TaskSpecTest +from SpiffWorkflow.specs import Celery, WorkflowSpec +from SpiffWorkflow.operators import Attrib +from SpiffWorkflow.serializer.dict import DictionarySerializer +from base64 import b64encode + + +class CeleryTest(TaskSpecTest): + CORRELATE = Celery + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + return Celery(self.wf_spec, + 'testtask', 'call.name', + call_args=[Attrib('the_attribute'), 1], + description='foo', + named_kw=[], + dict_kw={} + ) + + def testTryFire(self): + pass + + def testRetryFire(self): + pass + + def testSerializationWithoutKwargs(self): + new_wf_spec = WorkflowSpec() + serializer = DictionarySerializer() + nokw = Celery(self.wf_spec, 'testnokw', 'call.name', + call_args=[Attrib('the_attribute'), 1]) + data = nokw.serialize(serializer) + nokw2 = Celery.deserialize(serializer, new_wf_spec, data) + self.assertDictEqual(nokw.kwargs, nokw2.kwargs) + + kw = Celery(self.wf_spec, 'testkw', 'call.name', + call_args=[Attrib('the_attribute'), 1], + some_arg={"key": "value"}) + data = kw.serialize(serializer) + kw2 = Celery.deserialize(serializer, new_wf_spec, data) + self.assertDictEqual(kw.kwargs, kw2.kwargs) + + # Has kwargs, but they belong to TaskSpec + kw_defined = Celery(self.wf_spec, 'testkwdef', 'call.name', + call_args=[Attrib('the_attribute'), 1], + some_ref=Attrib('value'), + defines={"key": "value"}) + data = kw_defined.serialize(serializer) + kw_defined2 = Celery.deserialize(serializer, new_wf_spec, data) + self.assertIsInstance(kw_defined2.kwargs['some_ref'], Attrib) + + args = [b64encode(pickle.dumps(v)) + for v in [Attrib('the_attribute'), 'ip', 'dc455016e2e04a469c01a866f11c0854']] + + data = {'R': b64encode(pickle.dumps('1'))} + # Comes from live data. Bug not identified, but there we are... 
+ data = {'inputs': ['Wait:1'], 'lookahead': 2, 'description': '', + 'outputs': [], 'args': args, + 'manual': False, + 'data': data, 'locks': [], 'pre_assign': [], + 'call': 'call.x', + 'internal': False, 'post_assign': [], 'id': 8, + 'result_key': None, 'defines': data, + 'class': 'SpiffWorkflow.specs.Celery.Celery', + 'name': 'RS1:1'} + Celery.deserialize(serializer, new_wf_spec, data) + + +def suite(): + try: + import celery + except ImportError: + print("WARNING: Celery not found, not all tests are running!") + return lambda x: None + else: + return unittest.TestLoader().loadTestsFromTestCase(CeleryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/DeepMergeTest.py b/tests/SpiffWorkflow/specs/DeepMergeTest.py new file mode 100644 index 000000000..3fa3944ef --- /dev/null +++ b/tests/SpiffWorkflow/specs/DeepMergeTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + + + +import os +import sys +import unittest + +from SpiffWorkflow.util.deep_merge import DeepMerge + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from .TaskSpecTest import TaskSpecTest + + +class DeepMergeTest(TaskSpecTest): + CORRELATE = DeepMerge + + def testBasicMerge(self): + """ + Tests that we can merge one dictionary into another dictionary deeply + and that dot-notation is correctly parsed and processed. + """ + a = {"fruit": {"apples": "tasty"}} + b = {"fruit": {"oranges": "also tasty"}} + c = DeepMerge.merge(a, b) + self.assertEqual({"fruit": + {"apples": "tasty", + "oranges": "also tasty" + } + }, c) + + + def testOutOfOrderMerge(self): + a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]} + b = {"foods": [{"fruit": {"oranges": "also tasty", "apples": "tasty"}}, + {"canned meats": {"spam": "nope."}}]} + c = DeepMerge.merge(a, b) + self.assertEqual({"foods": [ + {"fruit": + {"apples": "tasty", + "oranges": "also tasty" + } + }, + {"canned meats": + {"spam": "nope."} + } + ]}, c) + + def testMixOfArrayTypes(self): + a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["spam", "more spam"]}]} + b = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["wonderful spam", "spam", "more spam"]}]} + + c = DeepMerge.merge(a, b) + + self.assertEqual({"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["spam", "more spam", "wonderful spam"]}]}, c) + + def testRemovingItemsFromArrays(self): + a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["spam", "more spam"]}]} + b = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]} + + c = DeepMerge.merge(a, b) + + self.assertEqual({"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]}, c) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DeepMergeTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/ExecuteTest.py b/tests/SpiffWorkflow/specs/ExecuteTest.py new file mode 100644 index 000000000..bd3b06fd5 --- /dev/null +++ b/tests/SpiffWorkflow/specs/ExecuteTest.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- + + + +import os +import sys +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from tests.SpiffWorkflow.util import run_workflow +from .TaskSpecTest import TaskSpecTest +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.specs import 
Execute + + +class ExecuteTest(TaskSpecTest): + CORRELATE = Execute + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + return Execute(self.wf_spec, + 'testtask', + description='foo', + args=self.cmd_args) + + def setUp(self): + self.cmd_args = ["python", "ExecuteProcessMock.py"] + TaskSpecTest.setUp(self) + + def testConstructor(self): + TaskSpecTest.testConstructor(self) + self.assertEqual(self.spec.args, self.cmd_args) + + def testPattern(self): + """ + Tests that we can create a task that executes a shell command + and that the workflow can be called to complete such tasks. + """ + self.wf_spec.start.connect(self.spec) + expected = 'Start\n testtask\n' + workflow = run_workflow(self, self.wf_spec, expected, '') + task = workflow.get_tasks_from_spec_name('testtask')[0] + self.assertEqual(task.state_history, [TaskState.FUTURE, + TaskState.WAITING, + TaskState.READY, + TaskState.COMPLETED]) + self.assertIn(b'127.0.0.1', task.results[0]) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExecuteTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/JoinTest.py b/tests/SpiffWorkflow/specs/JoinTest.py new file mode 100644 index 000000000..3eeb92866 --- /dev/null +++ b/tests/SpiffWorkflow/specs/JoinTest.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + + + +import os +import sys +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from .TaskSpecTest import TaskSpecTest +from SpiffWorkflow.specs import Join + + +class JoinTest(TaskSpecTest): + CORRELATE = Join + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + + return Join(self.wf_spec, + 'testtask', + description='foo') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(JoinTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/MergeTest.py b/tests/SpiffWorkflow/specs/MergeTest.py new file mode 100644 index 000000000..68f028d90 --- /dev/null +++ b/tests/SpiffWorkflow/specs/MergeTest.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- + + + +import os +import sys +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from .JoinTest import JoinTest +from SpiffWorkflow.specs import Merge, WorkflowSpec, Simple +from SpiffWorkflow.workflow import Workflow + + +class MergeTest(JoinTest): + CORRELATE = Merge + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + + return Merge(self.wf_spec, + 'testtask', + description='foo') + + def test_Merge_data_merging(self): + """Test that Merge task actually merges data""" + wf_spec = WorkflowSpec() + first = Simple(wf_spec, 'first') + second = Simple(wf_spec, 'second') + third = Simple(wf_spec, 'third') + bump = Simple(wf_spec, 'bump') + fourth = Simple(wf_spec, 'fourth') + merge1 = Merge(wf_spec, 'merge 1') + simple1 = Simple(wf_spec, 'simple 1') + merge2 = Merge(wf_spec, 'merge 2') + simple2 = Simple(wf_spec, 'simple 2') + unmerged = Simple(wf_spec, 'unmerged') + + wf_spec.start.connect(first) + wf_spec.start.connect(second) + wf_spec.start.connect(third) + wf_spec.start.connect(bump) + bump.connect(fourth) # Test join at different depths in tree + + first.connect(merge1) + second.connect(merge1) + second.connect(unmerged) + + first.connect(merge2) + 
second.connect(merge2) + third.connect(merge2) + fourth.connect(merge2) + + merge1.connect(simple1) + merge2.connect(simple2) + + workflow = Workflow(wf_spec) + workflow.task_tree.set_data(everywhere=1) + for task in workflow.get_tasks(): + task.set_data(**{'name': task.get_name(), task.get_name(): 1}) + workflow.complete_all() + self.assertTrue(workflow.is_completed()) + found = {} + for task in workflow.get_tasks(): + if task.task_spec is simple1: + self.assertIn('first', task.data) + self.assertIn('second', task.data) + self.assertEqual(task.data, {'Start': 1, + 'merge 1': 1, 'name': 'Start', 'simple 1': 1, + 'second': 1, 'first': 1}) + found['simple1'] = task + if task.task_spec is simple2: + self.assertIn('first', task.data) + self.assertIn('second', task.data) + self.assertIn('third', task.data) + self.assertIn('fourth', task.data) + self.assertEqual(task.data, {'merge 2': 1, + 'simple 2': 1, 'name': 'Start', 'third': 1, 'bump': 1, + 'Start': 1, 'second': 1, 'first': 1, 'fourth': 1}) + found['simple2'] = task + if task.task_spec is unmerged: + self.assertEqual(task.data, {'Start': 1, + 'second': 1, 'name': 'Start', 'unmerged': 1}) + found['unmerged'] = task + self.assertIn('simple1', found) + self.assertIn('simple2', found) + self.assertIn('unmerged', found) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MergeTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/SubWorkflowTest.py b/tests/SpiffWorkflow/specs/SubWorkflowTest.py new file mode 100644 index 000000000..aed09fc27 --- /dev/null +++ b/tests/SpiffWorkflow/specs/SubWorkflowTest.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from SpiffWorkflow.specs import WorkflowSpec +from SpiffWorkflow.specs.SubWorkflow import SubWorkflow +from SpiffWorkflow.serializer.prettyxml import XmlSerializer +from SpiffWorkflow.task import TaskState +from SpiffWorkflow.workflow import Workflow + + +class TaskSpecTest(unittest.TestCase): + CORRELATE = SubWorkflow + + def testConstructor(self): + pass # FIXME + + def testSerialize(self): + pass # FIXME + + def testTest(self): + pass # FIXME + + def load_workflow_spec(self, folder, f): + file = os.path.join( + os.path.dirname(__file__), '..', 'data', 'spiff', folder, f) + serializer = XmlSerializer() + with open(file) as fp: + xml = fp.read() + self.wf_spec = WorkflowSpec.deserialize( + serializer, xml, filename=file) + self.workflow = Workflow(self.wf_spec) + + def do_next_unique_task(self, name): + # This method asserts that there is only one ready task! 
The specified + # one - and then completes it + ready_tasks = self.workflow.get_tasks(TaskState.READY) + self.assertEqual(1, len(ready_tasks)) + task = ready_tasks[0] + self.assertEqual(name, task.task_spec.name) + task.complete() + + def do_next_named_step(self, name, other_ready_tasks): + # This method completes a single task from the specified set of ready + # tasks + ready_tasks = self.workflow.get_tasks(TaskState.READY) + all_tasks = sorted([name] + other_ready_tasks) + self.assertEqual( + all_tasks, sorted([t.task_spec.name for t in ready_tasks])) + task = list([t for t in ready_tasks if t.task_spec.name == name])[0] + task.complete() + + def test_block_to_subworkflow(self): + self.load_workflow_spec('data', 'block_to_subworkflow.xml') + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_unique_task('sub_workflow_1') + # Inner: + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_unique_task('last') + self.do_next_unique_task('End') + # Back to outer: + self.do_next_unique_task('last') + self.do_next_unique_task('End') + + def test_subworkflow_to_block(self): + self.load_workflow_spec('data', 'subworkflow_to_block.xml') + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_unique_task('sub_workflow_1') + # Inner: + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_unique_task('last') + self.do_next_unique_task('End') + # Back to outer: + self.do_next_unique_task('last') + self.do_next_unique_task('End') + + def test_subworkflow_to_join(self): + self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml') + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_named_step('second', ['sub_workflow_1']) + self.do_next_unique_task('sub_workflow_1') + # Inner: + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_unique_task('last') + self.do_next_unique_task('End') + # Back to outer: + self.do_next_unique_task('join') + self.do_next_unique_task('last') + self.do_next_unique_task('End') + + def test_subworkflow_to_join_refresh_waiting(self): + self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml') + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + self.do_next_named_step('second', ['sub_workflow_1']) + self.do_next_unique_task('sub_workflow_1') + # Inner: + self.do_next_unique_task('Start') + self.do_next_unique_task('first') + + # Now refresh waiting tasks: + # Update the state of every WAITING task. 
+ for thetask in self.workflow._get_waiting_tasks(): + thetask.task_spec._update(thetask) + + self.do_next_unique_task('last') + self.do_next_unique_task('End') + # Back to outer: + self.do_next_unique_task('join') + self.do_next_unique_task('last') + self.do_next_unique_task('End') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TaskSpecTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/TaskSpecTest.py b/tests/SpiffWorkflow/specs/TaskSpecTest.py new file mode 100644 index 000000000..2b213b4af --- /dev/null +++ b/tests/SpiffWorkflow/specs/TaskSpecTest.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + +import sys +import unittest +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from SpiffWorkflow.specs import WorkflowSpec, Simple, Join +from SpiffWorkflow.exceptions import WorkflowException +from SpiffWorkflow.specs import TaskSpec +from SpiffWorkflow.serializer.dict import DictionarySerializer + + +class TaskSpecTest(unittest.TestCase): + CORRELATE = TaskSpec + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + return TaskSpec(self.wf_spec, 'testtask', description='foo') + + def setUp(self): + self.wf_spec = WorkflowSpec() + self.spec = self.create_instance() + + def testConstructor(self): + self.assertEqual(self.spec.name, 'testtask') + self.assertEqual(self.spec.description, 'foo') + self.assertEqual(self.spec.data, {}) + self.assertEqual(self.spec.defines, {}) + self.assertEqual(self.spec.pre_assign, []) + self.assertEqual(self.spec.post_assign, []) + self.assertEqual(self.spec.locks, []) + + def testSetData(self): + self.assertEqual(self.spec.get_data('foo'), None) + self.assertEqual(self.spec.get_data('foo', 'bar'), 'bar') + self.spec.set_data(foo='foobar') + self.assertEqual(self.spec.get_data('foo'), 'foobar') + self.assertEqual(self.spec.get_data('foo', 'bar'), 'foobar') + + def testGetData(self): + return self.testSetData() + + def testConnect(self): + self.assertEqual(self.spec.outputs, []) + self.assertEqual(self.spec.inputs, []) + spec = self.create_instance() + self.spec.connect(spec) + self.assertEqual(self.spec.outputs, [spec]) + self.assertEqual(spec.inputs, [self.spec]) + + def testFollow(self): + self.assertEqual(self.spec.outputs, []) + self.assertEqual(self.spec.inputs, []) + spec = self.create_instance() + self.spec.follow(spec) + self.assertEqual(spec.outputs, [self.spec]) + self.assertEqual(self.spec.inputs, [spec]) + + def testTest(self): + # Should fail because the TaskSpec has no id yet. + spec = self.create_instance() + self.assertRaises(WorkflowException, spec.test) + + # Should fail because the task has no inputs. + self.spec.id = 1 + self.assertRaises(WorkflowException, spec.test) + + # Connect another task to make sure that it has an input. 
+ self.spec.connect(spec)
+ self.assertEqual(spec.test(), None)
+
+ def testSerialize(self):
+ serializer = DictionarySerializer()
+ spec = self.create_instance()
+
+ try:
+ serialized = spec.serialize(serializer)
+ self.assertIsInstance(serialized, dict)
+ except NotImplementedError:
+ self.assertIsInstance(spec, TaskSpec)
+ self.assertRaises(NotImplementedError,
+ spec.__class__.deserialize, None, None, None)
+ return
+
+ new_wf_spec = WorkflowSpec()
+ new_spec = spec.__class__.deserialize(serializer, new_wf_spec,
+ serialized)
+ before = spec.serialize(serializer)
+ after = new_spec.serialize(serializer)
+ self.assertEqual(before, after, 'Before:\n%s\nAfter:\n%s\n' % (before,
+ after))
+
+ def testAncestors(self):
+ T1 = Simple(self.wf_spec, 'T1')
+ T2A = Simple(self.wf_spec, 'T2A')
+ T2B = Simple(self.wf_spec, 'T2B')
+ M = Join(self.wf_spec, 'M')
+ T3 = Simple(self.wf_spec, 'T3')
+
+ T1.follow(self.wf_spec.start)
+ T2A.follow(T1)
+ T2B.follow(T1)
+ T2A.connect(M)
+ T2B.connect(M)
+ T3.follow(M)
+
+ self.assertEqual(T1.ancestors(), [self.wf_spec.start])
+ self.assertEqual(T2A.ancestors(), [T1, self.wf_spec.start])
+ self.assertEqual(T2B.ancestors(), [T1, self.wf_spec.start])
+ self.assertEqual(M.ancestors(), [T2A, T1, self.wf_spec.start, T2B])
+ self.assertEqual(len(T3.ancestors()), 5)
+
+ def test_ancestors_cyclic(self):
+ T1 = Join(self.wf_spec, 'T1')
+ T2 = Simple(self.wf_spec, 'T2')
+
+ T1.follow(self.wf_spec.start)
+ T2.follow(T1)
+ T1.connect(T2)
+
+ self.assertEqual(T1.ancestors(), [self.wf_spec.start])
+ self.assertEqual(T2.ancestors(), [T1, self.wf_spec.start])
+
+
+def suite():
+ return unittest.TestLoader().loadTestsFromTestCase(TaskSpecTest)
+if __name__ == '__main__':
+ unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/tests/SpiffWorkflow/specs/TransformTest.py b/tests/SpiffWorkflow/specs/TransformTest.py
new file mode 100644
index 000000000..ca2aaf1c3
--- /dev/null
+++ b/tests/SpiffWorkflow/specs/TransformTest.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+
+
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+
+from tests.SpiffWorkflow.util import run_workflow
+from .TaskSpecTest import TaskSpecTest
+from SpiffWorkflow.specs import Transform, Simple
+
+
+class TransformTest(TaskSpecTest):
+ CORRELATE = Transform
+
+ def create_instance(self):
+ if 'testtask' in self.wf_spec.task_specs:
+ del self.wf_spec.task_specs['testtask']
+
+ return Transform(self.wf_spec,
+ 'testtask',
+ description='foo',
+ transforms=[''])
+
+ def testPattern(self):
+ """
+ Tests that a Transform task can modify task data
+ and that the workflow can be called to complete such tasks.
+ """ + task1 = Transform(self.wf_spec, 'First', transforms=[ + "my_task.set_data(foo=1)"]) + self.wf_spec.start.connect(task1) + task2 = Transform(self.wf_spec, 'Second', transforms=[ + "my_task.set_data(foo=my_task.data['foo']+1)", + "my_task.set_data(copy=my_task.data['foo'])" + ]) + task1.connect(task2) + task3 = Simple(self.wf_spec, 'Last') + task2.connect(task3) + + expected = 'Start\n First\n Second\n Last\n' + workflow = run_workflow(self, self.wf_spec, expected, '') + first = workflow.get_tasks_from_spec_name('First')[0] + last = workflow.get_tasks_from_spec_name('Last')[0] + self.assertEqual(first.data.get('foo'), 1) + self.assertEqual(last.data.get('foo'), 2) + self.assertEqual(last.data.get('copy'), 2) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TransformTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/WorkflowSpecTest.py b/tests/SpiffWorkflow/specs/WorkflowSpecTest.py new file mode 100644 index 000000000..8b0289b8d --- /dev/null +++ b/tests/SpiffWorkflow/specs/WorkflowSpecTest.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- + +from builtins import zip +from builtins import range +import os +import sys +import unittest +data_dir = os.path.join(os.path.dirname(__file__), '..', 'data') +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +import pickle +from random import randint +try: + from util import track_workflow +except ImportError as e: + from tests.SpiffWorkflow.util import track_workflow +from SpiffWorkflow.workflow import Workflow +from SpiffWorkflow.specs import Join, WorkflowSpec +from SpiffWorkflow.serializer.prettyxml import XmlSerializer + +serializer = XmlSerializer() +data_file = 'data.pkl' + + +class WorkflowSpecTest(unittest.TestCase): + CORRELATE = WorkflowSpec + + def setUp(self): + self.wf_spec = WorkflowSpec() + + def testConstructor(self): + spec = WorkflowSpec('my spec') + self.assertEqual('my spec', spec.name) + + def testGetTaskSpecFromName(self): + pass # FIXME + + def testGetDump(self): + pass # FIXME + + def testDump(self): + pass # FIXME + + def doPickleSingle(self, workflow, expected_path): + taken_path = track_workflow(workflow.spec) + + # Execute a random number of steps. + for i in range(randint(0, len(workflow.spec.task_specs))): + workflow.complete_next() + + # Store the workflow instance in a file. + with open(data_file, 'wb') as fp: + pickle.dump(workflow, fp, -1) + before = workflow.get_dump() + + # Load the workflow instance from a file and delete the file. + with open(data_file, 'rb') as fp: + workflow = pickle.load(fp) + os.remove(data_file) + after = workflow.get_dump() + + # Make sure that the state of the workflow did not change. + self.assertEqual(before, after) + + # Re-connect signals, because the pickle dump now only contains a + # copy of taken_path. + taken_path = track_workflow(workflow.spec, taken_path) + + # Run the rest of the workflow. + workflow.complete_all() + after = workflow.get_dump() + self.assertTrue(workflow.is_completed(), 'Workflow not complete:' + after) + # taken_path = '\n'.join(taken_path) + '\n' + if taken_path != expected_path: + for taken, expected in zip(taken_path, expected_path): + print("TAKEN: ", taken) + print("EXPECTED:", expected) + self.assertEqual(expected_path, taken_path) + + def testSerialize(self): + # Read a complete workflow spec. 
+ xml_file = os.path.join(data_dir, 'spiff', 'workflow1.xml') + with open(xml_file) as fp: + xml = fp.read() + path_file = os.path.splitext(xml_file)[0] + '.path' + with open(path_file) as fp: + expected_path = fp.read().strip().split('\n') + wf_spec = WorkflowSpec.deserialize(serializer, xml) + + for i in range(5): + workflow = Workflow(wf_spec) + self.doPickleSingle(workflow, expected_path) + + def testValidate(self): + """ + Tests that we can detect when two wait tasks are waiting on each + other. + """ + task1 = Join(self.wf_spec, 'First') + self.wf_spec.start.connect(task1) + task2 = Join(self.wf_spec, 'Second') + task1.connect(task2) + + task2.follow(task1) + task1.follow(task2) + + results = self.wf_spec.validate() + self.assertIn("Found loop with 'Second': Second->First then 'Second' " + "again", results) + self.assertIn("Found loop with 'First': First->Second then 'First' " + "again", results) + + def testGetTaskSpecFromId(self): + pass + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(WorkflowSpecTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/__init__.py b/tests/SpiffWorkflow/specs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/spiff/BaseTestCase.py b/tests/SpiffWorkflow/spiff/BaseTestCase.py new file mode 100644 index 000000000..248f84a1f --- /dev/null +++ b/tests/SpiffWorkflow/spiff/BaseTestCase.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +import os + +from SpiffWorkflow.spiff.parser import SpiffBpmnParser +from SpiffWorkflow.spiff.serializer import NoneTaskConverter, \ + ManualTaskConverter, UserTaskConverter, ScriptTaskConverter, \ + SubWorkflowTaskConverter, TransactionSubprocessConverter, \ + CallActivityTaskConverter, \ + StartEventConverter, EndEventConverter, BoundaryEventConverter, \ + SendTaskConverter, ReceiveTaskConverter, \ + IntermediateCatchEventConverter, IntermediateThrowEventConverter, \ + ServiceTaskConverter +from SpiffWorkflow.dmn.serializer.task_spec_converters import BusinessRuleTaskConverter +from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([ + NoneTaskConverter, ManualTaskConverter, UserTaskConverter, ScriptTaskConverter, + SubWorkflowTaskConverter, TransactionSubprocessConverter, CallActivityTaskConverter, + StartEventConverter, EndEventConverter, BoundaryEventConverter, SendTaskConverter, ReceiveTaskConverter, + IntermediateCatchEventConverter, IntermediateThrowEventConverter, BusinessRuleTaskConverter, + ServiceTaskConverter +]) + +class BaseTestCase(BpmnWorkflowTestCase): + """ Provides some basic tools for loading up and parsing Spiff extensions""" + + serializer = BpmnWorkflowSerializer(wf_spec_converter) + + def load_workflow_spec(self, filename, process_name, dmn_filename=None): + bpmn = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = SpiffBpmnParser() + parser.add_bpmn_files_by_glob(bpmn) + if dmn_filename is not None: + dmn = os.path.join(os.path.dirname(__file__), 'data', 'dmn', dmn_filename) + parser.add_dmn_files_by_glob(dmn) + top_level_spec = parser.get_spec(process_name) + subprocesses = parser.get_subprocess_specs(process_name) + return top_level_spec, subprocesses + + def load_collaboration(self, filename, collaboration_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = 
SpiffBpmnParser() + parser.add_bpmn_files_by_glob(f) + return parser.get_collaboration(collaboration_name) + + def get_all_specs(self, filename): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + parser = SpiffBpmnParser() + parser.add_bpmn_files_by_glob(f) + return parser.find_all_specs() diff --git a/tests/SpiffWorkflow/spiff/BusinessRuleTaskTest.py b/tests/SpiffWorkflow/spiff/BusinessRuleTaskTest.py new file mode 100644 index 000000000..f738b7da7 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/BusinessRuleTaskTest.py @@ -0,0 +1,12 @@ +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + +class BusinessRuleTaskTest(BaseTestCase): + + def testBusinessRule(self): + spec, subprocesses = self.load_workflow_spec('business_rule_task.bpmn', 'Process_bd2e724', 'business_rules.dmn') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.save_restore() + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) \ No newline at end of file diff --git a/tests/SpiffWorkflow/spiff/CorrelationTest.py b/tests/SpiffWorkflow/spiff/CorrelationTest.py new file mode 100644 index 000000000..1b1a2be30 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/CorrelationTest.py @@ -0,0 +1,62 @@ +import os +import sys +import unittest + +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from .BaseTestCase import BaseTestCase + +class CorrelationTest(BaseTestCase): + + def testMessagePayload(self): + self.actual_test(False) + + def testMessagePayloadSaveRestore(self): + self.actual_test(True) + + def actual_test(self,save_restore): + + specs = self.get_all_specs('correlation.bpmn') + proc_1 = specs['proc_1'] + self.workflow = BpmnWorkflow(proc_1, specs) + if save_restore: + self.save_restore() + self.workflow.do_engine_steps() + # Set up some data to evaluate the payload expression against + for idx, task in enumerate(self.workflow.get_ready_user_tasks()): + task.data['task_num'] = idx + task.data['task_name'] = f'subprocess {idx}' + task.data['extra_data'] = f'unused data' + task.complete() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + for task in ready_tasks: + self.assertEqual(task.task_spec.name, 'prepare_response') + response = 'OK' if task.data['source_task']['num'] else 'No' + task.data.update(response=response) + task.complete() + self.workflow.do_engine_steps() + # If the messages were routed properly, the task number should match the response id + for task in self.workflow.get_tasks_from_spec_name('subprocess_end'): + self.assertEqual(task.data['response']['init_id'], task.data['task_num']) + self.assertEqual(task.data['response']['response'], 'OK' if task.data['task_num'] else 'No') + + +class DualConversationTest(BaseTestCase): + + def testTwoCorrelatonKeys(self): + + spec, subprocesses = self.load_workflow_spec('correlation_two_conversations.bpmn', 'message_send_process') + workflow = BpmnWorkflow(spec, subprocesses) + workflow.do_engine_steps() + messages = workflow.get_bpmn_messages() + self.assertEqual(len(messages), 2) + message_one = [ msg for msg in messages if msg.name== 'Message Send One' ][0] + message_two = [ msg for msg in messages if msg.name== 'Message Send Two' ][0] + self.assertIn('message_correlation_key_one', message_one.correlations) + self.assertNotIn('message_correlation_key_one', message_two.correlations) + self.assertIn('message_correlation_key_two', 
message_two.correlations)
+ self.assertNotIn('message_correlation_key_two', message_one.correlations)
diff --git a/tests/SpiffWorkflow/spiff/PrescriptPostscriptTest.py b/tests/SpiffWorkflow/spiff/PrescriptPostscriptTest.py
new file mode 100644
index 000000000..a07341096
--- /dev/null
+++ b/tests/SpiffWorkflow/spiff/PrescriptPostscriptTest.py
@@ -0,0 +1,57 @@
+from SpiffWorkflow.task import TaskState
+from .BaseTestCase import BaseTestCase
+from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
+
+
+class PrescriptPostscriptTest(BaseTestCase):
+
+ def testTask(self):
+ self.task_test()
+
+ def testCallActivity(self):
+ self.call_activity_test()
+
+ def testTaskSaveRestore(self):
+ self.task_test(True)
+
+ def testCallActivitySaveRestore(self):
+ self.call_activity_test(True)
+
+ def task_test(self, save_restore=False):
+
+ spec, subprocesses = self.load_workflow_spec('prescript_postscript.bpmn', 'Process_1')
+ self.workflow = BpmnWorkflow(spec, subprocesses)
+ if save_restore:
+ self.save_restore()
+
+ self.set_process_data({'a': 1, 'b': 2})
+ ready_tasks = self.workflow.get_tasks(TaskState.READY)
+ # The prescript sets x, y = a * 2, b * 2 and creates the variable z = x + y
+ # The postscript sets c = z * 2 and deletes x and y
+ # a and b should remain unchanged, and c and z should be added
+ ready_tasks[0].complete()
+ self.assertDictEqual({'a': 1, 'b': 2, 'c': 12, 'z': 6}, ready_tasks[0].data)
+
+ def call_activity_test(self, save_restore=False):
+
+ spec, subprocesses = self.load_workflow_spec('prescript_postscript_*.bpmn', 'parent')
+ self.workflow = BpmnWorkflow(spec, subprocesses)
+ if save_restore:
+ self.save_restore()
+
+ # Set the data and proceed. The call activity needs in_data and creates out_data
+ # The prescript sets in_data = old and creates out_data; the postscript copies out_data into new
+ # in_data and out_data remain (they're created by the calling task, NOT the subprocess) and
+ # we did not explicitly remove them. We don't implicitly remove them because this would be
+ # the wrong behavior for regular tasks.
+ self.set_process_data({'old': 'hello'})
+ task = self.workflow.get_tasks_from_spec_name('Activity_0g9bcsc')[0]
+ # The original data is still present and unchanged
+ self.assertEqual(task.data.get('old'), 'hello')
+ # The new data has been added
+ self.assertEqual(task.data.get('new'), 'HELLO')
+
+ def set_process_data(self, data):
+ ready_tasks = self.workflow.get_tasks(TaskState.READY)
+ ready_tasks[0].set_data(**data)
+ self.workflow.do_engine_steps()
diff --git a/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py b/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py
new file mode 100644
index 000000000..9e78fd602
--- /dev/null
+++ b/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py
@@ -0,0 +1,42 @@
+from .BaseTestCase import BaseTestCase
+from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
+
+# Assure we correctly parse and pass on the Spiffworkflow script unit tests
+# defined in an extension.
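For orientation, a hedged sketch of the structure the following test expects the parser to attach to the script task spec; the shape is inferred from SpiffTaskParser._parse_script_unit_tests in the patch near the end of this commit, and the literal values are taken from the assertions below:

    # Sketch only: the parsed spiffworkflow:unitTests extension is a plain list of dicts,
    # one per spiffworkflow:unitTest element in the task's extensionElements.
    parsed_unit_tests = [
        {
            'id': 'sets_hey_to_true_if_hey_is_false',
            'inputJson': '{"hey": false}',
            'expectedOutputJson': '{"hey": true}',
        },
    ]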
+class ScriptUnitTestExtensionsTest(BaseTestCase): + + def testTask(self): + self.task_test() + + def testTaskSaveRestore(self): + self.task_test(True) + + def task_test(self, save_restore=False): + + spec, subprocesses = self.load_workflow_spec('script_task_with_unit_tests.bpmn', 'Process_ScriptTaskWithUnitTests') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + + # unitTests should be a list of dicts + expected_unit_tests_wrapper_class = list + expected_unit_test_class = dict + + script_with_unit_tests = [t for t in self.workflow.get_tasks() if + t.task_spec.name == 'script_with_unit_test_id'][0] + + extensions = script_with_unit_tests.task_spec.extensions + unit_test_extensions = extensions['unitTests'] + + self.assertEqual(len(unit_test_extensions), 2) + self.assertIsInstance(unit_test_extensions, expected_unit_tests_wrapper_class) + + first_unit_test = unit_test_extensions[0] + self.assertIsInstance(first_unit_test, expected_unit_test_class) + + expected_first_unit_test = { + 'id': 'sets_hey_to_true_if_hey_is_false', + 'inputJson': '{"hey": false}', 'expectedOutputJson': '{"hey": true}' + } + self.assertDictEqual(first_unit_test, expected_first_unit_test) diff --git a/tests/SpiffWorkflow/spiff/ServiceTaskTest.py b/tests/SpiffWorkflow/spiff/ServiceTaskTest.py new file mode 100644 index 000000000..9417b42ed --- /dev/null +++ b/tests/SpiffWorkflow/spiff/ServiceTaskTest.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +import json +import os +import sys +import unittest + +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException +from .BaseTestCase import BaseTestCase + +class ServiceTaskDelegate: + @staticmethod + def call_connector(name, params, task_data): + if name == 'bamboohr/GetPayRate': + assertEqual(len(params), 3) + assertEqual(params['api_key']['value'], 'secret:BAMBOOHR_API_KEY') + assertEqual(params['employee_id']['value'], 4) + assertEqual(params['subdomain']['value'], 'ServiceTask') + elif name == 'weather/CurrentTemp': + assertEqual(len(params), 1) + assertEqual(params['zipcode']['value'], 22980) + else: + raise AssertionError('unexpected connector name') + + if name == 'bamboohr/GetPayRate': + sample_response = { + "amount": "65000.00", + "currency": "USD", + "id": "4", + "payRate": "65000.00 USD", + } + elif name == 'weather/CurrentTemp': + sample_response = { + "temp": "72F", + } + + return json.dumps(sample_response) + +class ExampleCustomScriptEngine(PythonScriptEngine): + def call_service(self, operation_name, operation_params, task_data): + return ServiceTaskDelegate.call_connector(operation_name, operation_params, + task_data) + +class ServiceTaskTest(BaseTestCase): + + def setUp(self): + global assertEqual + assertEqual = self.assertEqual + + spec, subprocesses = self.load_workflow_spec('service_task.bpmn', + 'service_task_example1') + self.script_engine = ExampleCustomScriptEngine() + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=self.script_engine) + + def testRunThroughHappy(self): + self.workflow.do_engine_steps() + self._assert_service_tasks() + + def testRunThroughSaveRestore(self): + self.save_restore() + # Engine isn't preserved through save/restore, so we have to reset it. 
+ self.workflow.script_engine = self.script_engine + self.workflow.do_engine_steps() + self.save_restore() + self._assert_service_tasks() + + def _assert_service_tasks(self): + # service task without result variable name specified, mock + # bamboohr/GetPayRate response + result = self.workflow.data['spiff__Activity_1inxqgx_result'] + self.assertEqual(len(result), 4) + self.assertEqual(result['amount'], '65000.00') + self.assertEqual(result['currency'], 'USD') + self.assertEqual(result['id'], '4') + self.assertEqual(result['payRate'], '65000.00 USD') + + # service task with result variable specified, mock weather response + result = self.workflow.data['waynesboroWeatherResult'] + self.assertEqual(len(result), 1) + self.assertEqual(result['temp'], '72F') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ServiceTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/spiff/ServiceTaskVariableTest.py b/tests/SpiffWorkflow/spiff/ServiceTaskVariableTest.py new file mode 100644 index 000000000..12237ae6b --- /dev/null +++ b/tests/SpiffWorkflow/spiff/ServiceTaskVariableTest.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +import json +import os +import sys +import unittest + +dirname = os.path.dirname(__file__) +sys.path.insert(0, os.path.join(dirname, '..', '..', '..')) + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskExecException +from .BaseTestCase import BaseTestCase + +class ServiceTaskDelegate: + @staticmethod + def call_connector(name, params, task_data): + assertEqual(name, 'bamboohr/GetPayRate') + assertEqual(len(params), 3) + assertEqual(params['api_key']['value'], 'secret:BAMBOOHR_API_KEY') + assertEqual(params['employee_id']['value'], '109') + assertEqual(params['subdomain']['value'], 'statusdemo') + + sample_response = { + "amount": "65000.00", + "currency": "USD", + "id": "4", + "payRate": "65000.00 USD", + } + + return json.dumps(sample_response) + +class ExampleCustomScriptEngine(PythonScriptEngine): + def call_service(self, operation_name, operation_params, task_data): + return ServiceTaskDelegate.call_connector(operation_name, operation_params, + task_data) + +class ServiceTaskVariableTest(BaseTestCase): + + def setUp(self): + global assertEqual + assertEqual = self.assertEqual + + spec, subprocesses = self.load_workflow_spec('service_task_variable.bpmn', + 'Process_bd2e724555') + self.script_engine = ExampleCustomScriptEngine() + self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=self.script_engine) + + def testRunThroughHappy(self): + self.workflow.do_engine_steps() + self._assert_service_task() + + def testRunThroughSaveRestore(self): + self.save_restore() + # Engine isn't preserved through save/restore, so we have to reset it. 
+ self.workflow.script_engine = self.script_engine + self.workflow.do_engine_steps() + self.save_restore() + self._assert_service_task() + + def _assert_service_task(self): + result = self.workflow.data['spiff__Activity_0xhr131_result'] + self.assertEqual(len(result), 4) + self.assertEqual(result['amount'], '65000.00') + self.assertEqual(result['currency'], 'USD') + self.assertEqual(result['id'], '4') + self.assertEqual(result['payRate'], '65000.00 USD') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ServiceTaskVariableTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py b/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py new file mode 100644 index 000000000..9087aae01 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py @@ -0,0 +1,29 @@ +from SpiffWorkflow.task import TaskState +from .BaseTestCase import BaseTestCase +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +# Assure we correctly parse and pass on the Spiffworkflow properties in +# an extension. +class SpiffPropertiesTest(BaseTestCase): + + def testTask(self): + self.task_test() + + def testTaskSaveRestore(self): + self.task_test(True) + + def task_test(self, save_restore=False): + + spec, subprocesses = self.load_workflow_spec('spiff_properties.bpmn', 'Process_1') + self.workflow = BpmnWorkflow(spec, subprocesses) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + ready_tasks = self.workflow.get_tasks(TaskState.READY) + # The ready task's spec should contain extension properties + # with name/value pairs. + task = ready_tasks[0] + self.assertDictEqual({'formJsonSchemaFilename': 'my_json_jschema.json', + 'formUiSchemaFilename': 'my_ui_jschema.json'}, + task.task_spec.extensions['properties']) + diff --git a/tests/SpiffWorkflow/spiff/__init__.py b/tests/SpiffWorkflow/spiff/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/spiff/data/business_rule_task.bpmn b/tests/SpiffWorkflow/spiff/data/business_rule_task.bpmn new file mode 100644 index 000000000..8b46dfc3d --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/business_rule_task.bpmn @@ -0,0 +1,57 @@ + + + + + Flow_0lrg65h + + + + Flow_0l8nhib + + + + + decision_1 + + + + Flow_1109ldv + Flow_0l8nhib + + + + Flow_0lrg65h + Flow_1109ldv + question = "X" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/correlation.bpmn b/tests/SpiffWorkflow/spiff/data/correlation.bpmn new file mode 100644 index 000000000..e1cca32cc --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/correlation.bpmn @@ -0,0 +1,181 @@ + + + + + + + process_id + + + + + num + + + init_id + + + + + {'num': task_num, 'name': task_name} + source_task + + + + + {'init_id': source_task['num'], 'response': response} + response + + + + + + + Flow_0lrjj2a + + + Flow_0lrjj2a + Flow_0gp7t8p + + 2 + + + Flow_10qgjde + + + + Flow_17cd3h6 + + + Flow_10qgjde + Flow_02xt17l + + + + + Flow_02xt17l + Flow_0ts36fv + + + + Flow_0ts36fv + Flow_17cd3h6 + + + + Flow_0gp7t8p + + + + + Flow_0qafvbe + + + + + + Flow_0qafvbe + Flow_12j0ayf + + + + Flow_12j0ayf + Flow_0k7rc31 + + + + Flow_0k7rc31 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/correlation_two_conversations.bpmn 
b/tests/SpiffWorkflow/spiff/data/correlation_two_conversations.bpmn new file mode 100644 index 000000000..4865a5c20 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/correlation_two_conversations.bpmn @@ -0,0 +1,231 @@ + + + + + + + + + + + + mcp_topica_one + mcp_topicb_one + + + mcp_topica_two + mcp_topicb_two + + + + + topica_one + + + payload_var_one.topica + + + + + topicb_one + + + payload_var_one.topicb + + + + + + + + + + + Flow_0sxqx67 + + + Flow_01u8qkn + Flow_0sxqx67 + + + + Flow_1yt3owq + Flow_01u8qkn + + + Flow_10conab + + + Flow_10conab + Flow_1ihr88m + import time +timestamp = time.time() +topic_one_a = f"topic_one_a_conversation_{timestamp}" +topic_one_b = f"topic_one_b_conversation_{timestamp}" +del time + + + + the_topic = "first_conversation" + + Flow_1ihr88m + Flow_0n4m9ti + + + + Flow_0q3clix + Flow_1yt3owq + + + + Flow_0n4m9ti + Flow_0q3clix + import time +timestamp = time.time() +topic_two_a = f"topic_two_a_conversation_{timestamp}" +topic_two_b = f"topic_two_b_conversation_{timestamp}" +del time + + + + + { +"topica_one": topic_one_a, +"topicb_one": topic_one_b, +"initial_var_one": 3 +} + + + + + payload_var_one + + + + + payload_var_two + + + + + { +"topica_two": topic_two_a, +"topicb_two": topic_two_b, +"initial_var_two": 5 +} + + + + + topica_two + + + topica_two + + + + + topicb_two + + + topicb_two + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/dmn/business_rules.dmn b/tests/SpiffWorkflow/spiff/data/dmn/business_rules.dmn new file mode 100644 index 000000000..06e190aa0 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/dmn/business_rules.dmn @@ -0,0 +1,44 @@ + + + + + + + question + + + + + + "X" + + + "Y" + + + + + "A" + + + "B" + + + + + + + + "Something is wrog" + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/prescript_postscript.bpmn b/tests/SpiffWorkflow/spiff/data/prescript_postscript.bpmn new file mode 100644 index 000000000..ed0557cf6 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/prescript_postscript.bpmn @@ -0,0 +1,48 @@ + + + + + + Flow_1hjrex4 + + + + x = a * 2 +y = b * 2 +z = x + y + c = z * 2 +del x +del y + + Flow_1hjrex4 + Flow_1xndbxy + + + + Flow_1xndbxy + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/prescript_postscript_call_activity.bpmn b/tests/SpiffWorkflow/spiff/data/prescript_postscript_call_activity.bpmn new file mode 100644 index 000000000..864f9408d --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/prescript_postscript_call_activity.bpmn @@ -0,0 +1,56 @@ + + + + + + + + + Flow_1a4nkhi + + + + Flow_1eqhoeg + + + + Flow_1a4nkhi + Flow_1eqhoeg + out_data = in_data.upper() + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/prescript_postscript_parent.bpmn b/tests/SpiffWorkflow/spiff/data/prescript_postscript_parent.bpmn new file mode 100644 index 000000000..9837fa8df --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/prescript_postscript_parent.bpmn @@ -0,0 +1,44 @@ + + + + + + Flow_1e5oj0e + + + + Flow_089vunc + + + + + in_data = old + new = out_data + + Flow_1e5oj0e + Flow_089vunc + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn b/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn new file mode 100644 index 
000000000..d2873b86d --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn @@ -0,0 +1,69 @@ + + + + + Flow_10jwwqy + + + + Flow_0htxke7 + + + + + + + {"hey": false} + {"hey": true} + + + {} + {"something_else": true} + + + + Flow_0niwe1y + Flow_0htxke7 + if 'hey' in locals(): + hey = True +else: + something_else = True + + + + Flow_10jwwqy + Flow_0niwe1y + hey = False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/service_task.bpmn b/tests/SpiffWorkflow/spiff/data/service_task.bpmn new file mode 100644 index 000000000..078f525b4 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/service_task.bpmn @@ -0,0 +1,68 @@ + + + + + Flow_0l9vzsi + + + + + + + + + + + + + Flow_0l9vzsi + Flow_16rdnn7 + + + Flow_1fpsye7 + + + + + + + + + + + + Flow_16rdnn7 + Flow_1fpsye7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/service_task_variable.bpmn b/tests/SpiffWorkflow/spiff/data/service_task_variable.bpmn new file mode 100644 index 000000000..424a1dc96 --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/service_task_variable.bpmn @@ -0,0 +1,62 @@ + + + + + Flow_1tqygmt + + + + Flow_1h9lfz7 + + + + + + + + + + + + + Flow_1boxww6 + Flow_1h9lfz7 + + + + + Flow_1tqygmt + Flow_1boxww6 + employeeID = "109" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/spiff/data/spiff_properties.bpmn b/tests/SpiffWorkflow/spiff/data/spiff_properties.bpmn new file mode 100644 index 000000000..3e93a3ece --- /dev/null +++ b/tests/SpiffWorkflow/spiff/data/spiff_properties.bpmn @@ -0,0 +1,46 @@ + + + + + + Flow_1hjrex4 + + + + Flow_1vlqqxh + + + + + + + + + 3 + Flow_1hjrex4 + Flow_1vlqqxh + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/util.py b/tests/SpiffWorkflow/util.py new file mode 100644 index 000000000..5bc5a0aad --- /dev/null +++ b/tests/SpiffWorkflow/util.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- + +from builtins import str +from builtins import range +import time +from SpiffWorkflow.task import Task, TaskState +from SpiffWorkflow.workflow import Workflow + + +def on_reached_cb(workflow, task, taken_path): + reached_key = "%s_reached" % str(task.get_name()) + n_reached = task.get_data(reached_key, 0) + 1 + task.set_data(**{reached_key: n_reached, + 'two': 2, + 'three': 3, + 'test_attribute1': 'false', + 'test_attribute2': 'true'}) + + # Collect a list of all data. + atts = [] + for key, value in list(task.data.items()): + if key in ['data', + 'two', + 'three', + 'test_attribute1', + 'test_attribute2']: + continue + if key.endswith('reached'): + continue + atts.append('='.join((key, str(value)))) + + # Collect a list of all task data. + props = [] + for key, value in list(task.task_spec.data.items()): + props.append('='.join((key, str(value)))) + # print "REACHED:", task.get_name(), atts, props + + # Store the list of data in the workflow. + atts = ';'.join(atts) + props = ';'.join(props) + old = task.get_data('data', '') + data = task.get_name() + ': ' + atts + '/' + props + '\n' + task.set_data(data=old + data) + + # In workflows that load a subworkflow, the newly loaded children + # will not have on_reached_cb() assigned. By using this function, we + # re-assign the function in every step, thus making sure that new + # children also call on_reached_cb(). 
+ for child in task.children:
+ track_task(child.task_spec, taken_path)
+ return True
+
+
+def on_complete_cb(workflow, task, taken_path):
+ # Record the path.
+ indent = ' ' * (task._get_depth() - 1)
+ taken_path.append('%s%s' % (indent, task.get_name()))
+ return True
+
+
+def track_task(task_spec, taken_path):
+ if task_spec.reached_event.is_connected(on_reached_cb):
+ task_spec.reached_event.disconnect(on_reached_cb)
+ task_spec.reached_event.connect(on_reached_cb, taken_path)
+ if task_spec.completed_event.is_connected(on_complete_cb):
+ task_spec.completed_event.disconnect(on_complete_cb)
+ task_spec.completed_event.connect(on_complete_cb, taken_path)
+
+
+def track_workflow(wf_spec, taken_path=None):
+ if taken_path is None:
+ taken_path = []
+ for name in wf_spec.task_specs:
+ track_task(wf_spec.task_specs[name], taken_path)
+ return taken_path
+
+
+def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
+ # Execute all tasks within the Workflow.
+ if workflow is None:
+ taken_path = track_workflow(wf_spec)
+ workflow = Workflow(wf_spec)
+ else:
+ taken_path = track_workflow(workflow.spec)
+
+ test.assertFalse(workflow.is_completed())
+ try:
+ # We allow the workflow to require a maximum of 5 seconds to
+ # complete, to allow for testing long running tasks.
+ for i in range(10):
+ workflow.complete_all(False)
+ if workflow.is_completed():
+ break
+ time.sleep(0.5)
+ except:
+ workflow.task_tree.dump()
+ raise
+
+ # workflow.task_tree.dump()
+ test.assertTrue(workflow.is_completed(), workflow.task_tree.get_dump())
+
+ # Make sure that there are no waiting tasks left in the tree.
+ for thetask in Task.Iterator(workflow.task_tree, TaskState.READY):
+ workflow.task_tree.dump()
+ raise Exception('Task with state READY: %s' % thetask.name)
+
+ # Check whether the correct route was taken.
+ if expected_path is not None:
+ taken_path = '\n'.join(taken_path) + '\n'
+ test.assertEqual(taken_path, expected_path)
+
+ # Check data availability.
+ if expected_data is not None: + result = workflow.get_data('data', '') + test.assertIn(result, expected_data) + + return workflow diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/unit_test_extensions.patch b/unit_test_extensions.patch new file mode 100644 index 000000000..ebd0cf797 --- /dev/null +++ b/unit_test_extensions.patch @@ -0,0 +1,348 @@ +diff --git a/SpiffWorkflow/spiff/parser/process.py b/SpiffWorkflow/spiff/parser/process.py +index 4abdb96..2ec536f 100644 +--- a/SpiffWorkflow/spiff/parser/process.py ++++ b/SpiffWorkflow/spiff/parser/process.py +@@ -2,9 +2,9 @@ from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser + from SpiffWorkflow.bpmn.parser.BpmnParser import full_tag + + from SpiffWorkflow.bpmn.specs.events import StartEvent, EndEvent, IntermediateThrowEvent, BoundaryEvent, IntermediateCatchEvent +-from SpiffWorkflow.spiff.specs import NoneTask, ManualTask, UserTask, SubWorkflowTask, TransactionSubprocess, CallActivity, ServiceTask ++from SpiffWorkflow.spiff.specs import NoneTask, ManualTask, UserTask, ScriptTask, SubWorkflowTask, TransactionSubprocess, CallActivity, ServiceTask + from SpiffWorkflow.spiff.specs.events.event_types import SendTask, ReceiveTask +-from SpiffWorkflow.spiff.parser.task_spec import SpiffTaskParser, SubWorkflowParser, CallActivityParser, ServiceTaskParser ++from SpiffWorkflow.spiff.parser.task_spec import SpiffTaskParser, SubWorkflowParser, CallActivityParser, ServiceTaskParser, ScriptTaskParser + from SpiffWorkflow.spiff.parser.event_parsers import (SpiffStartEventParser, SpiffEndEventParser, SpiffBoundaryEventParser, + SpiffIntermediateCatchEventParser, SpiffIntermediateThrowEventParser, SpiffSendTaskParser, SpiffReceiveTaskParser) + from SpiffWorkflow.dmn.specs import BusinessRuleTask +@@ -17,6 +17,7 @@ class SpiffBpmnParser(BpmnDmnParser): + full_tag('task'): (SpiffTaskParser, NoneTask), + full_tag('userTask'): (SpiffTaskParser, UserTask), + full_tag('manualTask'): (SpiffTaskParser, ManualTask), ++ full_tag('scriptTask'): (ScriptTaskParser, ScriptTask), + full_tag('subProcess'): (SubWorkflowParser, SubWorkflowTask), + full_tag('transaction'): (SubWorkflowParser, TransactionSubprocess), + full_tag('callActivity'): (CallActivityParser, CallActivity), +diff --git a/SpiffWorkflow/spiff/parser/task_spec.py b/SpiffWorkflow/spiff/parser/task_spec.py +index 32e8ecf..da3961d 100644 +--- a/SpiffWorkflow/spiff/parser/task_spec.py ++++ b/SpiffWorkflow/spiff/parser/task_spec.py +@@ -6,6 +6,7 @@ from SpiffWorkflow.bpmn.parser.task_parsers import SubprocessParser + from SpiffWorkflow.bpmn.parser.util import xpath_eval + + SPIFFWORKFLOW_MODEL_NS = 'http://spiffworkflow.org/bpmn/schema/1.0/core' ++SPIFFWORKFLOW_MODEL_PREFIX = 'spiffworkflow' + + + class SpiffTaskParser(TaskParser): +@@ -20,36 +21,55 @@ class SpiffTaskParser(TaskParser): + # Too bad doing this works in such a stupid way. + # We should set a namespace and automatically do this. 
+ extensions = {} +- extra_ns = {'spiffworkflow': SPIFFWORKFLOW_MODEL_NS} ++ extra_ns = {SPIFFWORKFLOW_MODEL_PREFIX: SPIFFWORKFLOW_MODEL_NS} + xpath = xpath_eval(node, extra_ns) +- extension_nodes = xpath('.//bpmn:extensionElements/spiffworkflow:*') ++ extension_nodes = xpath(f'.//bpmn:extensionElements/{SPIFFWORKFLOW_MODEL_PREFIX}:*') + for node in extension_nodes: + name = etree.QName(node).localname + if name == 'properties': + extensions['properties'] = SpiffTaskParser._parse_properties(node) ++ elif name == 'unitTests': ++ extensions['unitTests'] = SpiffTaskParser._parse_script_unit_tests(node) + elif name == 'serviceTaskOperator': + extensions['serviceTaskOperator'] = SpiffTaskParser._parse_servicetask_operator(node) + else: + extensions[name] = node.text + return extensions + +- @staticmethod +- def _parse_properties(node): +- extra_ns = {'spiffworkflow': SPIFFWORKFLOW_MODEL_NS} +- xpath = xpath_eval(node, extra_ns) +- property_nodes = xpath('.//spiffworkflow:property') ++ @classmethod ++ def _node_children_by_tag_name(cls, node, tag_name): ++ xpath = cls._spiffworkflow_ready_xpath_for_node(node) ++ return xpath(f'.//{SPIFFWORKFLOW_MODEL_PREFIX}:{tag_name}') ++ ++ @classmethod ++ def _parse_properties(cls, node): ++ property_nodes = cls._node_children_by_tag_name(node, 'property') + properties = {} + for prop_node in property_nodes: + properties[prop_node.attrib['name']] = prop_node.attrib['value'] + return properties + + @staticmethod +- def _parse_servicetask_operator(node): ++ def _spiffworkflow_ready_xpath_for_node(node): ++ extra_ns = {SPIFFWORKFLOW_MODEL_PREFIX: SPIFFWORKFLOW_MODEL_NS} ++ return xpath_eval(node, extra_ns) ++ ++ @classmethod ++ def _parse_script_unit_tests(cls, node): ++ unit_test_nodes = cls._node_children_by_tag_name(node, 'unitTest') ++ unit_tests = [] ++ for unit_test_node in unit_test_nodes: ++ unit_test_dict = {"id": unit_test_node.attrib['id']} ++ unit_test_dict['inputJson'] = cls._node_children_by_tag_name(unit_test_node, 'inputJson')[0].text ++ unit_test_dict['expectedOutputJson'] = cls._node_children_by_tag_name(unit_test_node, 'expectedOutputJson')[0].text ++ unit_tests.append(unit_test_dict) ++ return unit_tests ++ ++ @classmethod ++ def _parse_servicetask_operator(cls, node): + name = node.attrib['id'] + result_variable = node.get('resultVariable', None) +- extra_ns = {'spiffworkflow': SPIFFWORKFLOW_MODEL_NS} +- xpath = xpath_eval(node, extra_ns) +- parameter_nodes = xpath('.//spiffworkflow:parameter') ++ parameter_nodes = cls._node_children_by_tag_name(node, 'parameter') + operator = {'name': name, 'resultVariable': result_variable} + parameters = {} + for param_node in parameter_nodes: +@@ -92,6 +112,20 @@ class SubWorkflowParser(SpiffTaskParser): + postscript=postscript) + + ++class ScriptTaskParser(SpiffTaskParser): ++ def create_task(self): ++ extensions = self.parse_extensions() ++ script = None ++ for child_node in self.node: ++ if child_node.tag.endswith('script'): ++ script = child_node.text ++ # import pdb; pdb.set_trace() ++ return self.spec_class( ++ self.spec, self.get_task_spec_name(), script, ++ lane=self.lane, position=self.position, ++ description=self.node.get('name', None)) ++ ++ + class CallActivityParser(SpiffTaskParser): + + def create_task(self): +diff --git a/SpiffWorkflow/spiff/serializer/__init__.py b/SpiffWorkflow/spiff/serializer/__init__.py +index 4ba82d6..364c3eb 100644 +--- a/SpiffWorkflow/spiff/serializer/__init__.py ++++ b/SpiffWorkflow/spiff/serializer/__init__.py +@@ -1,4 +1,4 @@ +-from .task_spec_converters 
import NoneTaskConverter, ManualTaskConverter, UserTaskConverter ++from .task_spec_converters import NoneTaskConverter, ManualTaskConverter, UserTaskConverter, ScriptTaskConverter + from .task_spec_converters import TransactionSubprocessConverter, CallActivityTaskConverter, SubWorkflowTaskConverter + from .task_spec_converters import StartEventConverter, EndEventConverter, IntermediateCatchEventConverter, IntermediateThrowEventConverter, \ + BoundaryEventConverter, SendTaskConverter, ReceiveTaskConverter, ServiceTaskConverter +diff --git a/SpiffWorkflow/spiff/serializer/task_spec_converters.py b/SpiffWorkflow/spiff/serializer/task_spec_converters.py +index 8c25970..5b24278 100644 +--- a/SpiffWorkflow/spiff/serializer/task_spec_converters.py ++++ b/SpiffWorkflow/spiff/serializer/task_spec_converters.py +@@ -2,7 +2,7 @@ from functools import partial + + from SpiffWorkflow.bpmn.serializer.bpmn_converters import BpmnTaskSpecConverter + from SpiffWorkflow.bpmn.specs.events import EndEvent, StartEvent, IntermediateThrowEvent, IntermediateCatchEvent, BoundaryEvent +-from SpiffWorkflow.spiff.specs import NoneTask, ManualTask, UserTask, ServiceTask, SubWorkflowTask, TransactionSubprocess, CallActivity ++from SpiffWorkflow.spiff.specs import NoneTask, ManualTask, UserTask, ScriptTask, ServiceTask, SubWorkflowTask, TransactionSubprocess, CallActivity + from SpiffWorkflow.spiff.specs.events import SendTask, ReceiveTask + from SpiffWorkflow.spiff.specs.events.event_definitions import MessageEventDefinition + +@@ -35,6 +35,16 @@ class UserTaskConverter(SpiffBpmnTaskConverter): + super().__init__(UserTask, data_converter) + + ++class ScriptTaskConverter(SpiffBpmnTaskConverter): ++ def __init__(self, data_converter=None): ++ super().__init__(ScriptTask, data_converter) ++ ++ def to_dict(self, spec): ++ dct = super().to_dict(spec) ++ dct['script'] = spec.script ++ return dct ++ ++ + class ServiceTaskConverter(SpiffBpmnTaskConverter): + def __init__(self, data_converter=None): + super().__init__(ServiceTask, data_converter) +diff --git a/SpiffWorkflow/spiff/specs/__init__.py b/SpiffWorkflow/spiff/specs/__init__.py +index 5378a5b..1e18b63 100644 +--- a/SpiffWorkflow/spiff/specs/__init__.py ++++ b/SpiffWorkflow/spiff/specs/__init__.py +@@ -2,4 +2,5 @@ from .manual_task import ManualTask + from .none_task import NoneTask + from .subworkflow_task import SubWorkflowTask, TransactionSubprocess, CallActivity + from .user_task import UserTask ++from .script_task import ScriptTask + from .service_task import ServiceTask +diff --git a/SpiffWorkflow/spiff/specs/script_task.py b/SpiffWorkflow/spiff/specs/script_task.py +new file mode 100644 +index 0000000..c56e7e8 +--- /dev/null ++++ b/SpiffWorkflow/spiff/specs/script_task.py +@@ -0,0 +1,6 @@ ++from SpiffWorkflow.spiff.specs.spiff_task import SpiffBpmnTask ++from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask as BpmnScriptTask ++ ++ ++class ScriptTask(BpmnScriptTask, SpiffBpmnTask): ++ pass +diff --git a/tests/SpiffWorkflow/spiff/BaseTestCase.py b/tests/SpiffWorkflow/spiff/BaseTestCase.py +index 92c23eb..248f84a 100644 +--- a/tests/SpiffWorkflow/spiff/BaseTestCase.py ++++ b/tests/SpiffWorkflow/spiff/BaseTestCase.py +@@ -3,7 +3,7 @@ import os + + from SpiffWorkflow.spiff.parser import SpiffBpmnParser + from SpiffWorkflow.spiff.serializer import NoneTaskConverter, \ +- ManualTaskConverter, UserTaskConverter, \ ++ ManualTaskConverter, UserTaskConverter, ScriptTaskConverter, \ + SubWorkflowTaskConverter, TransactionSubprocessConverter, \ + CallActivityTaskConverter, 
\ + StartEventConverter, EndEventConverter, BoundaryEventConverter, \ +@@ -16,7 +16,7 @@ from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer + from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + + wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter([ +- NoneTaskConverter, ManualTaskConverter, UserTaskConverter, ++ NoneTaskConverter, ManualTaskConverter, UserTaskConverter, ScriptTaskConverter, + SubWorkflowTaskConverter, TransactionSubprocessConverter, CallActivityTaskConverter, + StartEventConverter, EndEventConverter, BoundaryEventConverter, SendTaskConverter, ReceiveTaskConverter, + IntermediateCatchEventConverter, IntermediateThrowEventConverter, BusinessRuleTaskConverter, +diff --git a/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py b/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py +new file mode 100644 +index 0000000..87c8335 +--- /dev/null ++++ b/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py +@@ -0,0 +1,46 @@ ++from SpiffWorkflow.task import TaskState ++from .BaseTestCase import BaseTestCase ++from SpiffWorkflow.bpmn.workflow import BpmnWorkflow ++ ++# Assure we correctly parse and pass on the Spiffworkflow properties in ++# an extension. ++class ScriptUnitTestExtensionsTest(BaseTestCase): ++ ++ def testTask(self): ++ self.task_test() ++ ++ def testTaskSaveRestore(self): ++ self.task_test(True) ++ ++ def task_test(self, save_restore=False): ++ ++ spec, subprocesses = self.load_workflow_spec('script_task_with_unit_tests.bpmn', 'Process_ScriptTaskWithUnitTests') ++ self.workflow = BpmnWorkflow(spec, subprocesses) ++ self.workflow.do_engine_steps() ++ if save_restore: ++ self.save_restore() ++ ++ # unitTests should be a list of dicts ++ expected_unit_tests_wrapper_class_name = 'list' ++ expected_unit_test_class_name = 'dict' ++ ++ script_with_unit_tests = [t for t in self.workflow.get_tasks() if ++ t.task_spec.name == 'script_with_unit_test_id'][0] ++ print(f"script_with_unit_tests.task_spec.extensions: {script_with_unit_tests.task_spec.extensions}") ++ extensions = script_with_unit_tests.task_spec.extensions ++ unit_test_extensions = extensions['unitTests'] ++ print(f"unit_test_extensions: {unit_test_extensions}") ++ print(f"unit_test_extensions.class: {unit_test_extensions.__class__.__name__}") ++ unit_test_extensions_class_name = unit_test_extensions.__class__.__name__ ++ self.assertEqual(unit_test_extensions_class_name, expected_unit_tests_wrapper_class_name) ++ self.assertEqual(len(unit_test_extensions), 2) ++ first_unit_test = unit_test_extensions[0] ++ self.assertEqual(first_unit_test.__class__.__name__, expected_unit_test_class_name) ++ expected_first_unit_test = {'id': 'sets_hey_to_true_if_hey_is_false', ++ 'inputJson': '{"hey": false}', 'expectedOutputJson': '{"hey": true}'} ++ self.assertDictEqual(first_unit_test, expected_first_unit_test) ++ # self.assertEqual(len(unit_test_extensions), 2) ++ # self.assertDictEqual({'formJsonSchemaFilename': 'my_json_jschema.json', ++ # 'formUiSchemaFilename': 'my_ui_jschema.json'}, ++ # task.task_spec.extensions) ++ +diff --git a/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py b/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py +index d029f35..9087aae 100644 +--- a/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py ++++ b/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py +@@ -4,7 +4,7 @@ from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + + # Assure we correctly parse and pass on the Spiffworkflow properties in + # an extension. 
+-class SpiffWorkflowProperties(BaseTestCase):
++class SpiffPropertiesTest(BaseTestCase):
+ 
+     def testTask(self):
+         self.task_test()
+diff --git a/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn b/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn
+new file mode 100644
+index 0000000..d2873b8
+--- /dev/null
++++ b/tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn
+@@ -0,0 +1,69 @@
++ [script_task_with_unit_tests.bpmn: BPMN 2.0 definitions for process Process_ScriptTaskWithUnitTests.
++  Flow: start event -> Flow_10jwwqy -> script task (script: hey = False) -> Flow_0niwe1y ->
++  script task script_with_unit_test_id -> Flow_0htxke7 -> end event.
++  The script of script_with_unit_test_id is:
++      if 'hey' in locals():
++          hey = True
++      else:
++          something_else = True
++  Its spiffworkflow:unitTests extension element defines two unit tests:
++      sets_hey_to_true_if_hey_is_false: inputJson {"hey": false} -> expectedOutputJson {"hey": true}
++      second test (id not shown):       inputJson {}             -> expectedOutputJson {"something_else": true}
++  The file ends with a bpmndi diagram section for layout.]
diff --git a/version.sh b/version.sh
new file mode 100755
index 000000000..90d5c20d4
--- /dev/null
+++ b/version.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+# Tag revisions like this:
+# $ git tag -a -m "v0.2" v0.2
+VERSION_IN=VERSION.in
+VERSION_FILE=SpiffWorkflow/version.py
+
+# Check that we are actually in a git managed project.
+if [ ! -e .git -a -z "$1" ]; then
+    echo >&2 Not a git repository.
+    exit 1
+fi
+
+# Make sure that we have permission to modify the version file.
+if [ -r $VERSION_FILE -a ! -w $VERSION_FILE ]; then
+    echo >&2 No permission to modify $VERSION_FILE.
+    exit 1
+fi
+
+# By default, get the version number from "git describe".
+if [ ! -z "$1" ]; then
+    VERSION=$1
+else
+    HEAD=`git log -1 --pretty=format:%H HEAD`
+    VERSION=`git describe $HEAD --tags --match "v[0-9]*" | sed 's/^v//;s/-[^\-]*$//;s/-/./' 2>/dev/null`
+    if [ -z "$VERSION" ]; then
+        echo >&2 No matching tag was found.
+        exit 1
+    fi
+fi
+
+# If the --reset switch was given, reset the version number to 'DEVELOPMENT'.
+[ "$1" = "--reset" ] && VERSION='DEVELOPMENT'
+
+# If there is no version file, we are already done.
+echo Version is $VERSION
+[ ! -r $VERSION_FILE ] && exit 0
+
+# Check whether the version file already contains this number,
+# and only touch it if there is a change to avoid changing
+# the timestamp.
+VERSION_FILE_TMP=`mktemp`
+cat $VERSION_IN | sed "s/@VERSION@/$VERSION/g" > $VERSION_FILE_TMP
+if diff -q $VERSION_FILE_TMP $VERSION_FILE; then
+    echo Version file unchanged.
+    rm $VERSION_FILE_TMP
+    exit 0
+fi
+
+mv $VERSION_FILE_TMP $VERSION_FILE
+echo Version file updated.
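
The pieces added by unit_test_extensions.patch are meant to work together: SpiffTaskParser collects the spiffworkflow:unitTests extension while parsing, ScriptTaskParser builds the new ScriptTask spec, and ScriptTaskConverter has to be registered with the workflow spec converter before a workflow containing such a task can be serialized. The Python sketch below illustrates that end-to-end flow outside the test harness. It is a minimal sketch, not part of the patch: it assumes the data file path used by the tests, the generic parser and serializer entry points (add_bpmn_file, get_spec, configure_workflow_spec_converter, serialize_json), that BpmnWorkflow can be constructed without subprocess specs for a process that has none, and a converter list trimmed to the spec classes this process actually uses; in practice the full converter list from BaseTestCase would be registered.

from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.spiff.parser import SpiffBpmnParser
from SpiffWorkflow.spiff.serializer import (EndEventConverter, ScriptTaskConverter,
                                            StartEventConverter)

# Parse the BPMN file added by the patch.  The spiff parser also picks up the
# spiffworkflow:unitTests extension elements attached to the script task.
parser = SpiffBpmnParser()
parser.add_bpmn_file('tests/SpiffWorkflow/spiff/data/script_task_with_unit_tests.bpmn')
spec = parser.get_spec('Process_ScriptTaskWithUnitTests')

workflow = BpmnWorkflow(spec)
workflow.do_engine_steps()

# The parsed unit tests live on the task spec as a list of dicts with
# 'id', 'inputJson' and 'expectedOutputJson' keys.
script_task = [t for t in workflow.get_tasks()
               if t.task_spec.name == 'script_with_unit_test_id'][0]
for unit_test in script_task.task_spec.extensions['unitTests']:
    print(unit_test['id'], unit_test['inputJson'], '->', unit_test['expectedOutputJson'])

# Serializing requires a converter for every task spec class in the process,
# which is why ScriptTaskConverter is registered alongside the event converters.
spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter(
    [StartEventConverter, EndEventConverter, ScriptTaskConverter])
serializer = BpmnWorkflowSerializer(spec_converter)
workflow_json = serializer.serialize_json(workflow)

The converter registration is the key step: the serializer can only round-trip spec classes that have a registered converter, which is why the patch also adds ScriptTaskConverter to the converter list in tests/SpiffWorkflow/spiff/BaseTestCase.py.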