diff --git a/docs/how_to/bpmn_unit_tests.md b/docs/how_to/bpmn_unit_tests.md
new file mode 100644
index 000000000..5f1c9547d
--- /dev/null
+++ b/docs/how_to/bpmn_unit_tests.md
@@ -0,0 +1,48 @@
+# BPMN Unit Tests
+
+Software Engineers test their code.
+With this feature, BPMN authors can test their creations, too.
+These tests provide faster feedback than simply running your process model, and they allow you to mock out form input and service task connections, as well as provide specific input to exercise different branches of your process model.
+BPMN unit tests are designed to give you greater confidence that your process models will work as designed when they are run in the wild, both the first time they are used by real users and also after you make changes to them.
+
+## Creating BPMN Unit Tests
+
+First, create a process model that you want to test.
+Navigate to the process model and add a JSON file based on the name of one of the BPMN files.
+For example, if you have a process model that includes a file called `awesome_script_task.bpmn`, your test JSON file would be called `test_awesome_script_task.json`.
+If you have multiple BPMN files you want to test, you can have multiple test JSON files.
+The BPMN files you test do not have to be marked as the primary file for the process model in question.
+The structure of your JSON file should be as follows:
+
+    {
+      "test_case_1": {
+        "tasks": {
+          "ServiceTaskProcess:service_task_one": {
+            "data": [{ "the_result": "result_from_service" }]
+          }
+        },
+        "expected_output_json": { "the_result": "result_from_service" }
+      }
+    }
+
+The top-level keys should be names of unit tests.
+In this example, the unit test is named "test_case_1."
+Under that, you can specify "tasks" and "expected_output_json."
+
+Under "tasks," each key is the BPMN id of a specific task.
+If you are testing a file that uses Call Activities and therefore calls other processes, there can be conflicting BPMN ids.
+In this case, you can specify the unique activity by prepending the Process id (in the above example, that is "ServiceTaskProcess").
+For simple processes, "service_task_one" (for example) would be sufficient as the BPMN id.
+For User Tasks, the "data" (under a specific task) represents the data that will be entered by the user in the form.
+For Service Tasks, the data represents the data that will be returned by the service.
+Note that all User Tasks and Service Tasks must have their BPMN ids mentioned in the JSON file (with mock task data as desired), since otherwise we won't know what to do when the flow arrives at these types of tasks.
+
+The "expected_output_json" represents the state of the task data that you expect when the process completes.
+When the test is run, if the actual task data differs from this expectation, the test will fail.
+The test will also fail if the process never completes or if an error occurs.
+
+## Running BPMN Unit Tests
+
+Go to a process model and either click "Run Unit Tests" to run all tests for the process model or click on the "play icon" next to a "test_something.json" file.
+Then you will get a green check mark or a red X.
+You can click on these colored icons to get more details about the passing or failing test.
diff --git a/docs/index.md b/docs/index.md
index c5442c2ee..599b6d71f 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,17 +1,16 @@
-Welcome to SpiffWorkflow's documentation! 
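When a User Task or Service Task is reached more than once (for example inside a loop), its "data" array can hold one entry per visit, and the test runner consumes them in order. Below is a sketch of such a test file; the task id, variable names, and values mirror the `loopback-to-user-task` fixture (`test_loopback_to_user_task.json`) added later in this change, so treat them as illustrative rather than required names.

```json
{
  "test_case_one": {
    "tasks": {
      "user_task_enter_increment": {
        "data": [
          { "user_input_variable": 7 },
          { "user_input_variable": 8 }
        ]
      }
    },
    "expected_output_json": { "the_var": 15, "counter": 3, "user_input_variable": 8 }
  }
}
```

Each visit to `user_task_enter_increment` takes the next entry from "data"; if the task runs more times than there are entries, the test fails with a missing-input-task-data error.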
-======================================= +# Welcome to SpiffWorkflow's documentation ```{toctree} :maxdepth: 2 :caption: Contents quick_start/quick_start.md documentation/documentation.md +how_to/bpmn_unit_tests.md ``` This is great! -Indices and tables -================== +## Indices and tables * [](genindex) * [](modindex) diff --git a/docs/quick_start/quick_start.md b/docs/quick_start/quick_start.md index cf86b21e1..5f504f72e 100644 --- a/docs/quick_start/quick_start.md +++ b/docs/quick_start/quick_start.md @@ -305,4 +305,4 @@ Ensure that all required details have been included such as Process name, Proces  -By following these steps, you can request the special permissions needed to carry out your tasks effectively. \ No newline at end of file +By following these steps, you can request the special permissions needed to carry out your tasks effectively. diff --git a/spiffworkflow-backend/bin/recreate_db b/spiffworkflow-backend/bin/recreate_db index 14b23cf8f..13a3bede6 100755 --- a/spiffworkflow-backend/bin/recreate_db +++ b/spiffworkflow-backend/bin/recreate_db @@ -23,6 +23,11 @@ if [[ -z "${SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR:-}" ]]; then export SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR fi +database_host="localhost" +if [[ -n "${SPIFFWORKFLOW_BACKEND_DATABASE_URI:-}" ]]; then + database_host=$(grep -oP "^[^:]+://.*@\K(.+?)[:/]" <<<"$SPIFFWORKFLOW_BACKEND_DATABASE_URI" | sed -E 's/[:\/]$//') +fi + tasks="" if [[ "${1:-}" == "clean" ]]; then subcommand="${2:-}" @@ -37,8 +42,8 @@ if [[ "${1:-}" == "clean" ]]; then if [[ "${SPIFFWORKFLOW_BACKEND_DATABASE_TYPE:-mysql}" != "mysql" ]]; then rm -f ./src/instance/*.sqlite3 else - mysql -uroot -e "DROP DATABASE IF EXISTS spiffworkflow_backend_local_development" - mysql -uroot -e "DROP DATABASE IF EXISTS spiffworkflow_backend_unit_testing" + mysql -h "$database_host" -uroot -e "DROP DATABASE IF EXISTS spiffworkflow_backend_local_development" + mysql -h "$database_host" -uroot -e "DROP DATABASE IF EXISTS spiffworkflow_backend_unit_testing" fi # TODO: check to see if the db already exists and we can connect to it. also actually clean it up. @@ -74,8 +79,8 @@ else fi if [[ "${SPIFFWORKFLOW_BACKEND_DATABASE_TYPE:-mysql}" == "mysql" ]]; then - mysql -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_local_development" - mysql -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_unit_testing" + mysql -h "$database_host" -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_local_development" + mysql -h "$database_host" -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_unit_testing" fi for task in $tasks; do @@ -85,7 +90,7 @@ done SPIFFWORKFLOW_BACKEND_ENV=unit_testing FLASK_APP=src/spiffworkflow_backend poetry run flask db upgrade if [[ -n "${SPIFFWORKFLOW_BACKEND_ENV:-}" ]] && ! 
grep -Eq '^(local_development|unit_testing)$' <<< "$SPIFFWORKFLOW_BACKEND_ENV"; then if [[ "${SPIFFWORKFLOW_BACKEND_DATABASE_TYPE:-mysql}" == "mysql" ]]; then - mysql -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_$SPIFFWORKFLOW_BACKEND_ENV" + mysql -h "$database_host" -uroot -e "CREATE DATABASE IF NOT EXISTS spiffworkflow_backend_$SPIFFWORKFLOW_BACKEND_ENV" fi FLASK_APP=src/spiffworkflow_backend poetry run flask db upgrade fi diff --git a/spiffworkflow-backend/conftest.py b/spiffworkflow-backend/conftest.py index dc0cf650f..2cd60296e 100644 --- a/spiffworkflow-backend/conftest.py +++ b/spiffworkflow-backend/conftest.py @@ -48,9 +48,8 @@ def with_db_and_bpmn_file_cleanup() -> None: try: yield finally: - process_model_service = ProcessModelService() - if os.path.exists(process_model_service.root_path()): - shutil.rmtree(process_model_service.root_path()) + if os.path.exists(ProcessModelService.root_path()): + shutil.rmtree(ProcessModelService.root_path()) @pytest.fixture() diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/api.yml b/spiffworkflow-backend/src/spiffworkflow_backend/api.yml index fd308f44e..fed169ec6 100755 --- a/spiffworkflow-backend/src/spiffworkflow_backend/api.yml +++ b/spiffworkflow-backend/src/spiffworkflow_backend/api.yml @@ -451,6 +451,48 @@ paths: schema: $ref: "#/components/schemas/ProcessModel" + /process-model-tests/{modified_process_model_identifier}: + parameters: + - name: modified_process_model_identifier + in: path + required: true + description: The process_model_id, modified to replace slashes (/) + schema: + type: string + - name: test_case_file + in: query + required: false + description: The name of the test case file to run + schema: + type: string + - name: test_case_identifier + in: query + required: false + description: The name of the test case file to run + schema: + type: string + post: + operationId: spiffworkflow_backend.routes.process_models_controller.process_model_test_run + summary: Run a test for a process model + tags: + - Process Model Tests + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + file: + type: string + format: binary + responses: + "201": + description: Metadata about the uploaded file, but not the file content. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/File" + /process-models/{modified_process_model_identifier}/files: parameters: - name: modified_process_model_identifier diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/config/__init__.py b/spiffworkflow-backend/src/spiffworkflow_backend/config/__init__.py index 224791108..cc2a9c1f3 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/config/__init__.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/config/__init__.py @@ -10,7 +10,7 @@ from spiffworkflow_backend.services.logging_service import setup_logger class ConfigurationError(Exception): - """ConfigurationError.""" + pass def setup_database_configs(app: Flask) -> None: diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_groups_controller.py b/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_groups_controller.py index 6d1a479a8..1901300d4 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_groups_controller.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_groups_controller.py @@ -53,7 +53,7 @@ def process_group_delete(modified_process_group_id: str) -> flask.wrappers.Respo process_group_id = _un_modify_modified_process_model_id(modified_process_group_id) try: - ProcessModelService().process_group_delete(process_group_id) + ProcessModelService.process_group_delete(process_group_id) except ProcessModelWithInstancesNotDeletableError as exception: raise ApiError( error_code="existing_instances", @@ -88,7 +88,7 @@ def process_group_list( process_group_identifier: Optional[str] = None, page: int = 1, per_page: int = 100 ) -> flask.wrappers.Response: process_groups = ProcessModelService.get_process_groups_for_api(process_group_identifier) - batch = ProcessModelService().get_batch(items=process_groups, page=page, per_page=per_page) + batch = ProcessModelService.get_batch(items=process_groups, page=page, per_page=per_page) pages = len(process_groups) // per_page remainder = len(process_groups) % per_page if remainder > 0: @@ -128,7 +128,7 @@ def process_group_show( def process_group_move(modified_process_group_identifier: str, new_location: str) -> flask.wrappers.Response: """Process_group_move.""" original_process_group_id = _un_modify_modified_process_model_id(modified_process_group_identifier) - new_process_group = ProcessModelService().process_group_move(original_process_group_id, new_location) + new_process_group = ProcessModelService.process_group_move(original_process_group_id, new_location) _commit_and_push_to_git( f"User: {g.user.username} moved process group {original_process_group_id} to {new_process_group.id}" ) diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_models_controller.py b/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_models_controller.py index eb7f2f9b8..acaa22eb0 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_models_controller.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/routes/process_models_controller.py @@ -30,6 +30,7 @@ from spiffworkflow_backend.routes.process_api_blueprint import _get_process_mode from spiffworkflow_backend.routes.process_api_blueprint import ( _un_modify_modified_process_model_id, ) +from spiffworkflow_backend.services.file_system_service import FileSystemService from spiffworkflow_backend.services.git_service import GitCommandError from spiffworkflow_backend.services.git_service import GitService from 
spiffworkflow_backend.services.git_service import MissingGitConfigsError @@ -43,6 +44,7 @@ from spiffworkflow_backend.services.process_model_service import ProcessModelSer from spiffworkflow_backend.services.process_model_service import ( ProcessModelWithInstancesNotDeletableError, ) +from spiffworkflow_backend.services.process_model_test_runner_service import ProcessModelTestRunner from spiffworkflow_backend.services.spec_file_service import ( ProcessModelFileInvalidError, ) @@ -104,7 +106,7 @@ def process_model_delete( """Process_model_delete.""" process_model_identifier = modified_process_model_identifier.replace(":", "/") try: - ProcessModelService().process_model_delete(process_model_identifier) + ProcessModelService.process_model_delete(process_model_identifier) except ProcessModelWithInstancesNotDeletableError as exception: raise ApiError( error_code="existing_instances", @@ -182,7 +184,7 @@ def process_model_show(modified_process_model_identifier: str, include_file_refe def process_model_move(modified_process_model_identifier: str, new_location: str) -> flask.wrappers.Response: """Process_model_move.""" original_process_model_id = _un_modify_modified_process_model_id(modified_process_model_identifier) - new_process_model = ProcessModelService().process_model_move(original_process_model_id, new_location) + new_process_model = ProcessModelService.process_model_move(original_process_model_id, new_location) _commit_and_push_to_git( f"User: {g.user.username} moved process model {original_process_model_id} to {new_process_model.id}" ) @@ -219,7 +221,7 @@ def process_model_list( recursive=recursive, filter_runnable_by_user=filter_runnable_by_user, ) - process_models_to_return = ProcessModelService().get_batch(process_models, page=page, per_page=per_page) + process_models_to_return = ProcessModelService.get_batch(process_models, page=page, per_page=per_page) if include_parent_groups: process_group_cache = IdToProcessGroupMapping({}) @@ -314,6 +316,29 @@ def process_model_file_show(modified_process_model_identifier: str, file_name: s return make_response(jsonify(file), 200) +def process_model_test_run( + modified_process_model_identifier: str, + test_case_file: Optional[str] = None, + test_case_identifier: Optional[str] = None, +) -> flask.wrappers.Response: + process_model_identifier = modified_process_model_identifier.replace(":", "/") + process_model = _get_process_model(process_model_identifier) + process_model_test_runner = ProcessModelTestRunner( + process_model_directory_path=FileSystemService.root_path(), + process_model_directory_for_test_discovery=FileSystemService.full_path_from_id(process_model.id), + test_case_file=test_case_file, + test_case_identifier=test_case_identifier, + ) + process_model_test_runner.run() + + response_json = { + "all_passed": process_model_test_runner.all_test_cases_passed(), + "passing": process_model_test_runner.passing_tests(), + "failing": process_model_test_runner.failing_tests(), + } + return make_response(jsonify(response_json), 200) + + # { # "natural_language_text": "Create a bug tracker process model \ # with a bug-details form that collects summary, description, and priority" diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/file_system_service.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/file_system_service.py index 5cad69ad3..be38f886d 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/services/file_system_service.py +++ 
b/spiffworkflow-backend/src/spiffworkflow_backend/services/file_system_service.py @@ -49,13 +49,12 @@ class FileSystemService: """Id_string_to_relative_path.""" return id_string.replace("/", os.sep) - @staticmethod - def process_group_path(name: str) -> str: - """Category_path.""" + @classmethod + def full_path_from_id(cls, id: str) -> str: return os.path.abspath( os.path.join( - FileSystemService.root_path(), - FileSystemService.id_string_to_relative_path(name), + cls.root_path(), + cls.id_string_to_relative_path(id), ) ) @@ -65,36 +64,35 @@ class FileSystemService: return os.path.join(FileSystemService.root_path(), relative_path) @staticmethod - def process_model_relative_path(spec: ProcessModelInfo) -> str: + def process_model_relative_path(process_model: ProcessModelInfo) -> str: """Get the file path to a process model relative to SPIFFWORKFLOW_BACKEND_BPMN_SPEC_ABSOLUTE_DIR. If the full path is /path/to/process-group-a/group-b/process-model-a, it will return: process-group-a/group-b/process-model-a """ - workflow_path = FileSystemService.workflow_path(spec) + workflow_path = FileSystemService.process_model_full_path(process_model) return os.path.relpath(workflow_path, start=FileSystemService.root_path()) @staticmethod - def process_group_path_for_spec(spec: ProcessModelInfo) -> str: - """Category_path_for_spec.""" + def process_group_path_for_spec(process_model: ProcessModelInfo) -> str: # os.path.split apparently returns 2 element tulple like: (first/path, last_item) - process_group_id, _ = os.path.split(spec.id_for_file_path()) - return FileSystemService.process_group_path(process_group_id) + process_group_id, _ = os.path.split(process_model.id_for_file_path()) + return FileSystemService.full_path_from_id(process_group_id) + + @classmethod + def process_model_full_path(cls, process_model: ProcessModelInfo) -> str: + return cls.full_path_from_id(process_model.id) @staticmethod - def workflow_path(spec: ProcessModelInfo) -> str: - """Workflow_path.""" - process_model_path = os.path.join(FileSystemService.root_path(), spec.id_for_file_path()) - return process_model_path - - @staticmethod - def full_path_to_process_model_file(spec: ProcessModelInfo) -> str: + def full_path_to_process_model_file(process_model: ProcessModelInfo) -> str: """Full_path_to_process_model_file.""" - return os.path.join(FileSystemService.workflow_path(spec), spec.primary_file_name) # type: ignore + return os.path.join( + FileSystemService.process_model_full_path(process_model), process_model.primary_file_name # type: ignore + ) - def next_display_order(self, spec: ProcessModelInfo) -> int: + def next_display_order(self, process_model: ProcessModelInfo) -> int: """Next_display_order.""" - path = self.process_group_path_for_spec(spec) + path = self.process_group_path_for_spec(process_model) if os.path.exists(path): return len(next(os.walk(path))[1]) else: diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_processor.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_processor.py index f82f92ce8..29cbab04a 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_processor.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_processor.py @@ -420,7 +420,6 @@ class ProcessInstanceProcessor: ) self.process_instance_model = process_instance_model - self.process_model_service = ProcessModelService() bpmn_process_spec = None self.full_bpmn_process_dict: dict = {} @@ -1018,7 +1017,7 @@ class 
ProcessInstanceProcessor: ready_or_waiting_tasks = self.get_all_ready_or_waiting_tasks() process_model_display_name = "" - process_model_info = self.process_model_service.get_process_model( + process_model_info = ProcessModelService.get_process_model( self.process_instance_model.process_model_identifier ) if process_model_info is not None: diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_service.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_service.py index 0bded4b44..ac9b10a3f 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_service.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_instance_service.py @@ -196,8 +196,7 @@ class ProcessInstanceService: """ # navigation = processor.bpmn_process_instance.get_deep_nav_list() # ProcessInstanceService.update_navigation(navigation, processor) - process_model_service = ProcessModelService() - process_model_service.get_process_model(processor.process_model_identifier) + ProcessModelService.get_process_model(processor.process_model_identifier) process_instance_api = ProcessInstanceApi( id=processor.get_process_instance_id(), status=processor.get_status(), diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_service.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_service.py index 1cfc33398..dd771089a 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_service.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_service.py @@ -60,12 +60,7 @@ class ProcessModelService(FileSystemService): def is_process_group_identifier(cls, process_group_identifier: str) -> bool: """Is_process_group_identifier.""" if os.path.exists(FileSystemService.root_path()): - process_group_path = os.path.abspath( - os.path.join( - FileSystemService.root_path(), - FileSystemService.id_string_to_relative_path(process_group_identifier), - ) - ) + process_group_path = FileSystemService.full_path_from_id(process_group_identifier) return cls.is_process_group(process_group_path) return False @@ -82,12 +77,7 @@ class ProcessModelService(FileSystemService): def is_process_model_identifier(cls, process_model_identifier: str) -> bool: """Is_process_model_identifier.""" if os.path.exists(FileSystemService.root_path()): - process_model_path = os.path.abspath( - os.path.join( - FileSystemService.root_path(), - FileSystemService.id_string_to_relative_path(process_model_identifier), - ) - ) + process_model_path = FileSystemService.full_path_from_id(process_model_identifier) return cls.is_process_model(process_model_path) return False @@ -104,7 +94,6 @@ class ProcessModelService(FileSystemService): page: int = 1, per_page: int = 10, ) -> list[T]: - """Get_batch.""" start = (page - 1) * per_page end = start + per_page return items[start:end] @@ -139,8 +128,8 @@ class ProcessModelService(FileSystemService): cls.write_json_file(json_path, json_data) process_model.id = process_model_id - def process_model_delete(self, process_model_id: str) -> None: - """Delete Procecss Model.""" + @classmethod + def process_model_delete(cls, process_model_id: str) -> None: instances = ProcessInstanceModel.query.filter( ProcessInstanceModel.process_model_identifier == process_model_id ).all() @@ -148,19 +137,19 @@ class ProcessModelService(FileSystemService): raise ProcessModelWithInstancesNotDeletableError( f"We cannot delete the model `{process_model_id}`, there are 
existing instances that depend on it." ) - process_model = self.get_process_model(process_model_id) - path = self.workflow_path(process_model) + process_model = cls.get_process_model(process_model_id) + path = cls.process_model_full_path(process_model) shutil.rmtree(path) - def process_model_move(self, original_process_model_id: str, new_location: str) -> ProcessModelInfo: - """Process_model_move.""" - process_model = self.get_process_model(original_process_model_id) - original_model_path = self.workflow_path(process_model) + @classmethod + def process_model_move(cls, original_process_model_id: str, new_location: str) -> ProcessModelInfo: + process_model = cls.get_process_model(original_process_model_id) + original_model_path = cls.process_model_full_path(process_model) _, model_id = os.path.split(original_model_path) new_relative_path = os.path.join(new_location, model_id) new_model_path = os.path.abspath(os.path.join(FileSystemService.root_path(), new_relative_path)) shutil.move(original_model_path, new_model_path) - new_process_model = self.get_process_model(new_relative_path) + new_process_model = cls.get_process_model(new_relative_path) return new_process_model @classmethod @@ -314,12 +303,7 @@ class ProcessModelService(FileSystemService): def get_process_group(cls, process_group_id: str, find_direct_nested_items: bool = True) -> ProcessGroup: """Look for a given process_group, and return it.""" if os.path.exists(FileSystemService.root_path()): - process_group_path = os.path.abspath( - os.path.join( - FileSystemService.root_path(), - FileSystemService.id_string_to_relative_path(process_group_id), - ) - ) + process_group_path = FileSystemService.full_path_from_id(process_group_id) if cls.is_process_group(process_group_path): return cls.find_or_create_process_group( process_group_path, @@ -330,13 +314,11 @@ class ProcessModelService(FileSystemService): @classmethod def add_process_group(cls, process_group: ProcessGroup) -> ProcessGroup: - """Add_process_group.""" return cls.update_process_group(process_group) @classmethod def update_process_group(cls, process_group: ProcessGroup) -> ProcessGroup: - """Update_process_group.""" - cat_path = cls.process_group_path(process_group.id) + cat_path = cls.full_path_from_id(process_group.id) os.makedirs(cat_path, exist_ok=True) json_path = os.path.join(cat_path, cls.PROCESS_GROUP_JSON_FILE) serialized_process_group = process_group.serialized @@ -346,33 +328,33 @@ class ProcessModelService(FileSystemService): cls.write_json_file(json_path, serialized_process_group) return process_group - def process_group_move(self, original_process_group_id: str, new_location: str) -> ProcessGroup: - """Process_group_move.""" - original_group_path = self.process_group_path(original_process_group_id) + @classmethod + def process_group_move(cls, original_process_group_id: str, new_location: str) -> ProcessGroup: + original_group_path = cls.full_path_from_id(original_process_group_id) _, original_group_id = os.path.split(original_group_path) new_root = os.path.join(FileSystemService.root_path(), new_location) new_group_path = os.path.abspath(os.path.join(FileSystemService.root_path(), new_root, original_group_id)) destination = shutil.move(original_group_path, new_group_path) - new_process_group = self.get_process_group(destination) + new_process_group = cls.get_process_group(destination) return new_process_group - def __get_all_nested_models(self, group_path: str) -> list: - """__get_all_nested_models.""" + @classmethod + def __get_all_nested_models(cls, group_path: 
str) -> list: all_nested_models = [] for _root, dirs, _files in os.walk(group_path): for dir in dirs: model_dir = os.path.join(group_path, dir) if ProcessModelService.is_process_model(model_dir): - process_model = self.get_process_model(model_dir) + process_model = cls.get_process_model(model_dir) all_nested_models.append(process_model) return all_nested_models - def process_group_delete(self, process_group_id: str) -> None: - """Delete_process_group.""" + @classmethod + def process_group_delete(cls, process_group_id: str) -> None: problem_models = [] - path = self.process_group_path(process_group_id) + path = cls.full_path_from_id(process_group_id) if os.path.exists(path): - nested_models = self.__get_all_nested_models(path) + nested_models = cls.__get_all_nested_models(path) for process_model in nested_models: instances = ProcessInstanceModel.query.filter( ProcessInstanceModel.process_model_identifier == process_model.id @@ -386,15 +368,15 @@ class ProcessModelService(FileSystemService): f" {problem_models}" ) shutil.rmtree(path) - self.cleanup_process_group_display_order() + cls._cleanup_process_group_display_order() - def cleanup_process_group_display_order(self) -> List[Any]: - """Cleanup_process_group_display_order.""" - process_groups = self.get_process_groups() # Returns an ordered list + @classmethod + def _cleanup_process_group_display_order(cls) -> List[Any]: + process_groups = cls.get_process_groups() # Returns an ordered list index = 0 for process_group in process_groups: process_group.display_order = index - self.update_process_group(process_group) + cls.update_process_group(process_group) index += 1 return process_groups diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_test_runner_service.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_test_runner_service.py new file mode 100644 index 000000000..db5a0d449 --- /dev/null +++ b/spiffworkflow-backend/src/spiffworkflow_backend/services/process_model_test_runner_service.py @@ -0,0 +1,407 @@ +import glob +import json +import os +import re +import traceback +from dataclasses import dataclass +from typing import Any +from typing import Callable +from typing import Optional +from typing import Union + +from lxml import etree # type: ignore +from SpiffWorkflow.bpmn.exceptions import WorkflowTaskException # type: ignore +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow # type: ignore +from SpiffWorkflow.task import Task as SpiffTask # type: ignore +from SpiffWorkflow.task import TaskState + +from spiffworkflow_backend.services.custom_parser import MyCustomParser + + +class UnrunnableTestCaseError(Exception): + pass + + +class MissingBpmnFileForTestCaseError(Exception): + pass + + +class NoTestCasesFoundError(Exception): + pass + + +class MissingInputTaskData(Exception): + pass + + +@dataclass +class TestCaseErrorDetails: + error_messages: list[str] + task_error_line: Optional[str] = None + task_trace: Optional[list[str]] = None + task_bpmn_identifier: Optional[str] = None + task_bpmn_name: Optional[str] = None + task_line_number: Optional[int] = None + stacktrace: Optional[list[str]] = None + + +@dataclass +class TestCaseResult: + passed: bool + bpmn_file: str + test_case_identifier: str + test_case_error_details: Optional[TestCaseErrorDetails] = None + + +DEFAULT_NSMAP = { + "bpmn": "http://www.omg.org/spec/BPMN/20100524/MODEL", + "bpmndi": "http://www.omg.org/spec/BPMN/20100524/DI", + "dc": "http://www.omg.org/spec/DD/20100524/DC", +} + + +""" +JSON file name: + The 
name should be in format "test_BPMN_FILE_NAME_IT_TESTS.json". + +BPMN_TASK_IDENTIIFER: + can be either task bpmn identifier or in format: + [BPMN_PROCESS_ID]:[TASK_BPMN_IDENTIFIER] + example: 'BasicServiceTaskProcess:service_task_one' + this allows for tasks to share bpmn identifiers across models + which is useful for call activities + +DATA for tasks: + This is an array of task data. This allows for the task to + be called multiple times and given different data each time. + This is useful for testing loops where each iteration needs + different input. The test will fail if the task is called + multiple times without task data input for each call. + +JSON file format: +{ + TEST_CASE_NAME: { + "tasks": { + BPMN_TASK_IDENTIIFER: { + "data": [DATA] + } + }, + "expected_output_json": DATA + } +} +""" + + +class ProcessModelTestRunner: + """Generic test runner code. May move into own library at some point. + + KEEP THIS GENERIC. do not add backend specific code here. + """ + + def __init__( + self, + process_model_directory_path: str, + process_model_directory_for_test_discovery: Optional[str] = None, + instantiate_executer_callback: Optional[Callable[[str], Any]] = None, + execute_task_callback: Optional[Callable[[Any, Optional[str], Optional[dict]], Any]] = None, + get_next_task_callback: Optional[Callable[[Any], Any]] = None, + test_case_file: Optional[str] = None, + test_case_identifier: Optional[str] = None, + ) -> None: + self.process_model_directory_path = process_model_directory_path + self.process_model_directory_for_test_discovery = ( + process_model_directory_for_test_discovery or process_model_directory_path + ) + self.instantiate_executer_callback = instantiate_executer_callback + self.execute_task_callback = execute_task_callback + self.get_next_task_callback = get_next_task_callback + self.test_case_file = test_case_file + self.test_case_identifier = test_case_identifier + + # keep track of the current task data index + self.task_data_index: dict[str, int] = {} + + self.test_case_results: list[TestCaseResult] = [] + self.bpmn_processes_to_file_mappings: dict[str, str] = {} + self.bpmn_files_to_called_element_mappings: dict[str, list[str]] = {} + + self.test_mappings = self._discover_process_model_test_cases() + self._discover_process_model_processes() + + def all_test_cases_passed(self) -> bool: + failed_tests = self.failing_tests() + return len(failed_tests) < 1 + + def failing_tests(self) -> list[TestCaseResult]: + return [t for t in self.test_case_results if t.passed is False] + + def passing_tests(self) -> list[TestCaseResult]: + return [t for t in self.test_case_results if t.passed is True] + + def failing_tests_formatted(self) -> str: + formatted_tests = ["FAILING TESTS:"] + for failing_test in self.failing_tests(): + msg = "" + if failing_test.test_case_error_details is not None: + msg = "\n\t\t".join(failing_test.test_case_error_details.error_messages) + formatted_tests.append(f"\t{failing_test.bpmn_file}: {failing_test.test_case_identifier}: {msg}") + return "\n".join(formatted_tests) + + def run(self) -> None: + if len(self.test_mappings.items()) < 1: + raise NoTestCasesFoundError( + f"Could not find any test cases in given directory: {self.process_model_directory_for_test_discovery}" + ) + for json_test_case_file, bpmn_file in self.test_mappings.items(): + with open(json_test_case_file) as f: + json_file_contents = json.loads(f.read()) + + for test_case_identifier, test_case_contents in json_file_contents.items(): + if self.test_case_identifier is None or 
test_case_identifier == self.test_case_identifier: + self.task_data_index = {} + try: + self.run_test_case(bpmn_file, test_case_identifier, test_case_contents) + except Exception as ex: + self._add_test_result(False, bpmn_file, test_case_identifier, exception=ex) + + def run_test_case(self, bpmn_file: str, test_case_identifier: str, test_case_contents: dict) -> None: + bpmn_process_instance = self._instantiate_executer(bpmn_file) + next_task = self._get_next_task(bpmn_process_instance) + while next_task is not None: + test_case_task_properties = None + test_case_task_key = next_task.task_spec.bpmn_id + if "tasks" in test_case_contents: + if test_case_task_key not in test_case_contents["tasks"]: + # we may need to go to the top level workflow of a given bpmn file + test_case_task_key = f"{next_task.workflow.spec.name}:{next_task.task_spec.bpmn_id}" + if test_case_task_key in test_case_contents["tasks"]: + test_case_task_properties = test_case_contents["tasks"][test_case_task_key] + + task_type = next_task.task_spec.__class__.__name__ + if task_type in ["ServiceTask", "UserTask", "CallActivity"] and test_case_task_properties is None: + raise UnrunnableTestCaseError( + f"Cannot run test case '{test_case_identifier}'. It requires task data for" + f" {next_task.task_spec.bpmn_id} because it is of type '{task_type}'" + ) + self._execute_task(next_task, test_case_task_key, test_case_task_properties) + next_task = self._get_next_task(bpmn_process_instance) + + error_message = None + if bpmn_process_instance.is_completed() is False: + error_message = [ + "Expected process instance to complete but it did not.", + f"Final data was: {bpmn_process_instance.last_task.data}", + f"Last task bpmn id: {bpmn_process_instance.last_task.task_spec.bpmn_id}", + f"Last task type: {bpmn_process_instance.last_task.task_spec.__class__.__name__}", + ] + elif bpmn_process_instance.success is False: + error_message = [ + "Expected process instance to succeed but it did not.", + f"Final data was: {bpmn_process_instance.data}", + ] + elif test_case_contents["expected_output_json"] != bpmn_process_instance.data: + error_message = [ + "Expected output did not match actual output:", + f"expected: {test_case_contents['expected_output_json']}", + f"actual: {bpmn_process_instance.data}", + ] + self._add_test_result(error_message is None, bpmn_file, test_case_identifier, error_message) + + def _discover_process_model_test_cases( + self, + ) -> dict[str, str]: + test_mappings = {} + + json_test_file_glob = os.path.join(self.process_model_directory_for_test_discovery, "**", "test_*.json") + + for file in glob.glob(json_test_file_glob, recursive=True): + file_norm = os.path.normpath(file) + file_dir = os.path.dirname(file_norm) + json_file_name = os.path.basename(file_norm) + if self.test_case_file is None or json_file_name == self.test_case_file: + bpmn_file_name = re.sub(r"^test_(.*)\.json", r"\1.bpmn", json_file_name) + bpmn_file_path = os.path.join(file_dir, bpmn_file_name) + if os.path.isfile(bpmn_file_path): + test_mappings[file_norm] = bpmn_file_path + else: + raise MissingBpmnFileForTestCaseError( + f"Cannot find a matching bpmn file for test case json file: '{file_norm}'" + ) + return test_mappings + + def _discover_process_model_processes( + self, + ) -> None: + process_model_bpmn_file_glob = os.path.join(self.process_model_directory_path, "**", "*.bpmn") + + for file in glob.glob(process_model_bpmn_file_glob, recursive=True): + file_norm = os.path.normpath(file) + if file_norm not in 
self.bpmn_files_to_called_element_mappings: + self.bpmn_files_to_called_element_mappings[file_norm] = [] + with open(file_norm, "rb") as f: + file_contents = f.read() + etree_xml_parser = etree.XMLParser(resolve_entities=False) + + # if we cannot load process model then ignore it since it can cause errors unrelated + # to the test and if it is related, it will most likely be caught further along the test + try: + root = etree.fromstring(file_contents, parser=etree_xml_parser) + except etree.XMLSyntaxError: + continue + + call_activities = root.findall(".//bpmn:callActivity", namespaces=DEFAULT_NSMAP) + for call_activity in call_activities: + if "calledElement" in call_activity.attrib: + called_element = call_activity.attrib["calledElement"] + self.bpmn_files_to_called_element_mappings[file_norm].append(called_element) + bpmn_process_element = root.find('.//bpmn:process[@isExecutable="true"]', namespaces=DEFAULT_NSMAP) + if bpmn_process_element is not None: + bpmn_process_identifier = bpmn_process_element.attrib["id"] + self.bpmn_processes_to_file_mappings[bpmn_process_identifier] = file_norm + + def _execute_task( + self, spiff_task: SpiffTask, test_case_task_key: Optional[str], test_case_task_properties: Optional[dict] + ) -> None: + if self.execute_task_callback: + self.execute_task_callback(spiff_task, test_case_task_key, test_case_task_properties) + self._default_execute_task(spiff_task, test_case_task_key, test_case_task_properties) + + def _get_next_task(self, bpmn_process_instance: BpmnWorkflow) -> Optional[SpiffTask]: + if self.get_next_task_callback: + return self.get_next_task_callback(bpmn_process_instance) + return self._default_get_next_task(bpmn_process_instance) + + def _instantiate_executer(self, bpmn_file: str) -> BpmnWorkflow: + if self.instantiate_executer_callback: + return self.instantiate_executer_callback(bpmn_file) + return self._default_instantiate_executer(bpmn_file) + + def _default_get_next_task(self, bpmn_process_instance: BpmnWorkflow) -> Optional[SpiffTask]: + ready_tasks = list([t for t in bpmn_process_instance.get_tasks(TaskState.READY)]) + if len(ready_tasks) > 0: + return ready_tasks[0] + return None + + def _default_execute_task( + self, spiff_task: SpiffTask, test_case_task_key: Optional[str], test_case_task_properties: Optional[dict] + ) -> None: + if spiff_task.task_spec.manual or spiff_task.task_spec.__class__.__name__ == "ServiceTask": + if test_case_task_key and test_case_task_properties and "data" in test_case_task_properties: + if test_case_task_key not in self.task_data_index: + self.task_data_index[test_case_task_key] = 0 + task_data_length = len(test_case_task_properties["data"]) + test_case_index = self.task_data_index[test_case_task_key] + if task_data_length <= test_case_index: + raise MissingInputTaskData( + f"Missing input task data for task: {test_case_task_key}. 
" + f"Only {task_data_length} given in the json but task was called {test_case_index + 1} times" + ) + spiff_task.update_data(test_case_task_properties["data"][test_case_index]) + self.task_data_index[test_case_task_key] += 1 + spiff_task.complete() + else: + spiff_task.run() + + def _find_related_bpmn_files(self, bpmn_file: str) -> list[str]: + related_bpmn_files = [] + if bpmn_file in self.bpmn_files_to_called_element_mappings: + for bpmn_process_identifier in self.bpmn_files_to_called_element_mappings[bpmn_file]: + if bpmn_process_identifier in self.bpmn_processes_to_file_mappings: + new_file = self.bpmn_processes_to_file_mappings[bpmn_process_identifier] + related_bpmn_files.append(new_file) + related_bpmn_files.extend(self._find_related_bpmn_files(new_file)) + return related_bpmn_files + + def _get_etree_from_bpmn_file(self, bpmn_file: str) -> etree._Element: + data = None + with open(bpmn_file, "rb") as f_handle: + data = f_handle.read() + etree_xml_parser = etree.XMLParser(resolve_entities=False) + return etree.fromstring(data, parser=etree_xml_parser) + + def _default_instantiate_executer(self, bpmn_file: str) -> BpmnWorkflow: + parser = MyCustomParser() + bpmn_file_etree = self._get_etree_from_bpmn_file(bpmn_file) + parser.add_bpmn_xml(bpmn_file_etree, filename=os.path.basename(bpmn_file)) + all_related = self._find_related_bpmn_files(bpmn_file) + for related_file in all_related: + related_file_etree = self._get_etree_from_bpmn_file(related_file) + parser.add_bpmn_xml(related_file_etree, filename=os.path.basename(related_file)) + sub_parsers = list(parser.process_parsers.values()) + executable_process = None + for sub_parser in sub_parsers: + if sub_parser.process_executable: + executable_process = sub_parser.bpmn_id + if executable_process is None: + raise BpmnFileMissingExecutableProcessError( + f"Executable process cannot be found in {bpmn_file}. Test cannot run." 
+ ) + bpmn_process_spec = parser.get_spec(executable_process) + bpmn_process_instance = BpmnWorkflow(bpmn_process_spec) + return bpmn_process_instance + + def _get_relative_path_of_bpmn_file(self, bpmn_file: str) -> str: + return os.path.relpath(bpmn_file, start=self.process_model_directory_path) + + def _exception_to_test_case_error_details( + self, exception: Union[Exception, WorkflowTaskException] + ) -> TestCaseErrorDetails: + error_messages = str(exception).split("\n") + test_case_error_details = TestCaseErrorDetails(error_messages=error_messages) + if isinstance(exception, WorkflowTaskException): + test_case_error_details.task_error_line = exception.error_line + test_case_error_details.task_trace = exception.task_trace + test_case_error_details.task_line_number = exception.line_number + test_case_error_details.task_bpmn_identifier = exception.task_spec.bpmn_id + test_case_error_details.task_bpmn_name = exception.task_spec.bpmn_name + else: + test_case_error_details.stacktrace = traceback.format_exc().split("\n") + + return test_case_error_details + + def _add_test_result( + self, + passed: bool, + bpmn_file: str, + test_case_identifier: str, + error_messages: Optional[list[str]] = None, + exception: Optional[Exception] = None, + ) -> None: + test_case_error_details = None + if exception is not None: + test_case_error_details = self._exception_to_test_case_error_details(exception) + elif error_messages: + test_case_error_details = TestCaseErrorDetails(error_messages=error_messages) + + bpmn_file_relative = self._get_relative_path_of_bpmn_file(bpmn_file) + test_result = TestCaseResult( + passed=passed, + bpmn_file=bpmn_file_relative, + test_case_identifier=test_case_identifier, + test_case_error_details=test_case_error_details, + ) + self.test_case_results.append(test_result) + + +class BpmnFileMissingExecutableProcessError(Exception): + pass + + +class ProcessModelTestRunnerService: + def __init__( + self, + process_model_directory_path: str, + test_case_file: Optional[str] = None, + test_case_identifier: Optional[str] = None, + ) -> None: + self.process_model_test_runner = ProcessModelTestRunner( + process_model_directory_path, + test_case_file=test_case_file, + test_case_identifier=test_case_identifier, + # instantiate_executer_callback=self._instantiate_executer_callback, + # execute_task_callback=self._execute_task_callback, + # get_next_task_callback=self._get_next_task_callback, + ) + + def run(self) -> None: + self.process_model_test_runner.run() diff --git a/spiffworkflow-backend/src/spiffworkflow_backend/services/spec_file_service.py b/spiffworkflow-backend/src/spiffworkflow_backend/services/spec_file_service.py index e8771738c..9169c5d60 100644 --- a/spiffworkflow-backend/src/spiffworkflow_backend/services/spec_file_service.py +++ b/spiffworkflow-backend/src/spiffworkflow_backend/services/spec_file_service.py @@ -221,37 +221,37 @@ class SpecFileService(FileSystemService): return spec_file_data @staticmethod - def full_file_path(spec: ProcessModelInfo, file_name: str) -> str: + def full_file_path(process_model: ProcessModelInfo, file_name: str) -> str: """File_path.""" - return os.path.abspath(os.path.join(SpecFileService.workflow_path(spec), file_name)) + return os.path.abspath(os.path.join(SpecFileService.process_model_full_path(process_model), file_name)) @staticmethod - def last_modified(spec: ProcessModelInfo, file_name: str) -> datetime: + def last_modified(process_model: ProcessModelInfo, file_name: str) -> datetime: """Last_modified.""" - full_file_path = 
SpecFileService.full_file_path(spec, file_name) + full_file_path = SpecFileService.full_file_path(process_model, file_name) return FileSystemService._last_modified(full_file_path) @staticmethod - def timestamp(spec: ProcessModelInfo, file_name: str) -> float: + def timestamp(process_model: ProcessModelInfo, file_name: str) -> float: """Timestamp.""" - full_file_path = SpecFileService.full_file_path(spec, file_name) + full_file_path = SpecFileService.full_file_path(process_model, file_name) return FileSystemService._timestamp(full_file_path) @staticmethod - def delete_file(spec: ProcessModelInfo, file_name: str) -> None: + def delete_file(process_model: ProcessModelInfo, file_name: str) -> None: """Delete_file.""" - # Fixme: Remember to remove the lookup files when the spec file is removed. + # Fixme: Remember to remove the lookup files when the process_model file is removed. # lookup_files = session.query(LookupFileModel).filter_by(file_model_id=file_id).all() # for lf in lookup_files: # session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete() # session.query(LookupFileModel).filter_by(id=lf.id).delete() - full_file_path = SpecFileService.full_file_path(spec, file_name) + full_file_path = SpecFileService.full_file_path(process_model, file_name) os.remove(full_file_path) @staticmethod - def delete_all_files(spec: ProcessModelInfo) -> None: + def delete_all_files(process_model: ProcessModelInfo) -> None: """Delete_all_files.""" - dir_path = SpecFileService.workflow_path(spec) + dir_path = SpecFileService.process_model_full_path(process_model) if os.path.exists(dir_path): shutil.rmtree(dir_path) diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/failing_script_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/failing_script_task.bpmn new file mode 100644 index 000000000..4b0463358 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/failing_script_task.bpmn @@ -0,0 +1,41 @@ + + + + + Flow_1xkc1ru + + + + Flow_0tkkq9s + + + + Flow_1xkc1ru + Flow_0tkkq9s + a = 1 +b = a + 'two' + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/process_model.json new file mode 100644 index 000000000..23cc190ba --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "Process that raises an exception", + "display_name": "Failing Process", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "metadata_extraction_paths": null, + "primary_file_name": "failing_task.bpmn", + "primary_process_id": "Process_FailingProcess" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/test_failing_script_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/test_failing_script_task.json new file mode 100644 index 000000000..0c81e0724 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-fail/failing_script_task/test_failing_script_task.json @@ 
-0,0 +1,3 @@ +{ + "test_case_2": {} +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/call_activity.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/call_activity.bpmn new file mode 100644 index 000000000..68a05f280 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/call_activity.bpmn @@ -0,0 +1,39 @@ + + + + + Flow_0ext5lt + + + + Flow_1hzwssi + + + + Flow_0ext5lt + Flow_1hzwssi + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/process_model.json new file mode 100644 index 000000000..302fa24a1 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/process_model.json @@ -0,0 +1,9 @@ +{ + "description": "", + "display_name": "Call Activity", + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "primary_file_name": "call_activity.bpmn", + "primary_process_id": "CallActivityProcess" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/test_call_activity.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/test_call_activity.json new file mode 100644 index 000000000..60e638a97 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/call-activity/test_call_activity.json @@ -0,0 +1,5 @@ +{ + "test_case_1": { + "expected_output_json": {} + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-schema.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-schema.json new file mode 100644 index 000000000..e5f090f4f --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-schema.json @@ -0,0 +1,11 @@ +{ + "title": "Choose Your Branch", + "description": "", + "properties": { + "branch": { + "type": "string", + "title": "branch" + } + }, + "required": [] +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-uischema.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-uischema.json new file mode 100644 index 000000000..b6cc5da64 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/choose-your-branch-uischema.json @@ -0,0 +1,5 @@ +{ + "ui:order": [ + "branch" + ] +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/exclusive_gateway_based_on_user_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/exclusive_gateway_based_on_user_task.bpmn new file mode 100644 index 000000000..63bafd46c --- /dev/null +++ 
b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/exclusive_gateway_based_on_user_task.bpmn @@ -0,0 +1,98 @@ + + + + + Flow_19j3jcx + + + + Flow_0qa66xz + Flow_1ww41l3 + Flow_10m4g0q + + + + branch == 'a' + + + + Flow_1oxbb75 + Flow_1ck9lfk + + + + + + + + + + + Flow_19j3jcx + Flow_0qa66xz + + + Flow_1ww41l3 + Flow_1ck9lfk + chosen_branch = 'A' + + + Flow_10m4g0q + Flow_1oxbb75 + chosen_branch = 'B' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/process_model.json new file mode 100644 index 000000000..25961b506 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "", + "display_name": "Exclusive Gateway Based on User Task", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "metadata_extraction_paths": null, + "primary_file_name": "exclusive_gateway_based_on_user_task.bpmn", + "primary_process_id": "exclusive_gateway_based_on_user_task_process" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/test_exclusive_gateway_based_on_user_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/test_exclusive_gateway_based_on_user_task.json new file mode 100644 index 000000000..e5d86ba05 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/exclusive-gateway-based-on-user-task/test_exclusive_gateway_based_on_user_task.json @@ -0,0 +1,22 @@ +{ + "test_case_one": { + "tasks": { + "user_task_choose_branch": { + "data": [ + { "branch": "a" } + ] + } + }, + "expected_output_json": { "branch": "a", "chosen_branch": "A"} + }, + "test_case_two": { + "tasks": { + "user_task_choose_branch": { + "data": [ + { "branch": "b" } + ] + } + }, + "expected_output_json": { "branch": "b", "chosen_branch": "B"} + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/loopback_to_user_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/loopback_to_user_task.bpmn new file mode 100644 index 000000000..0298a1d65 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/loopback_to_user_task.bpmn @@ -0,0 +1,110 @@ + + + + + Flow_12xxe7w + + + + Flow_1s3znr2 + Flow_0utss6p + Flow_1sg0c65 + + + + Flow_0utss6p + + + counter == 3 + + + Flow_12xxe7w + Flow_08tc3r7 + counter = 1 +the_var = 0 + + + + + + + + + + + + Flow_08tc3r7 + Flow_1sg0c65 + Flow_0wnc5ju + + + + Flow_0wnc5ju + Flow_1s3znr2 + the_var = user_input_variable + the_var +counter += 1 + + + loop back if a < 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/process_model.json 
b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/process_model.json new file mode 100644 index 000000000..069e8a905 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "", + "display_name": "Loopback to User Task", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "metadata_extraction_paths": null, + "primary_file_name": "loopback_to_user_task.bpmn", + "primary_process_id": "loopback_to_user_task_process" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/test_loopback_to_user_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/test_loopback_to_user_task.json new file mode 100644 index 000000000..b78fc373d --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/test_loopback_to_user_task.json @@ -0,0 +1,13 @@ +{ + "test_case_one": { + "tasks": { + "user_task_enter_increment": { + "data": [ + { "user_input_variable": 7 }, + { "user_input_variable": 8 } + ] + } + }, + "expected_output_json": { "the_var": 15, "counter": 3, "user_input_variable": 8 } + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-schema.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-schema.json new file mode 100644 index 000000000..84b15e067 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-schema.json @@ -0,0 +1,11 @@ +{ + "title": "User Input", + "description": "", + "properties": { + "user_input_variable": { + "type": "integer", + "title": "user_input_variable" + } + }, + "required": [] +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-uischema.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-uischema.json new file mode 100644 index 000000000..6083eec87 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback-to-user-task/user-input-uischema.json @@ -0,0 +1,5 @@ +{ + "ui:order": [ + "user_input_variable" + ] +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/loopback.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/loopback.bpmn new file mode 100644 index 000000000..0d742ca72 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/loopback.bpmn @@ -0,0 +1,92 @@ + + + + + Flow_12xxe7w + + + + Flow_0wnc5ju + Flow_0utss6p + Flow_1sg0c65 + + + + Flow_0utss6p + + + a == 3 + + + Flow_12xxe7w + Flow_08tc3r7 + a = 1 + + + + + + + Flow_08tc3r7 + Flow_1sg0c65 + Flow_0wnc5ju + a += 1 + + + + loop back if a < 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/process_model.json 
b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/process_model.json new file mode 100644 index 000000000..96903e5b9 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "", + "display_name": "Loopback", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "metadata_extraction_paths": null, + "primary_file_name": "loopback.bpmn", + "primary_process_id": "loopback_process" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/test_loopback.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/test_loopback.json new file mode 100644 index 000000000..336551376 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/loopback/test_loopback.json @@ -0,0 +1,5 @@ +{ + "test_case_1": { + "expected_output_json": { "a": 3 } + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/manual_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/manual_task.bpmn new file mode 100644 index 000000000..3ee85d4fc --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/manual_task.bpmn @@ -0,0 +1,39 @@ + + + + + Flow_0gz6i84 + + + + Flow_0ikklg6 + + + + Flow_0gz6i84 + Flow_0ikklg6 + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/process_model.json new file mode 100644 index 000000000..f843745dd --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/process_model.json @@ -0,0 +1,9 @@ +{ + "description": "Manual Task", + "display_name": "Manual Task", + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "primary_file_name": "manual_task.bpmn", + "primary_process_id": "ManualTaskProcess" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/test_manual_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/test_manual_task.json new file mode 100644 index 000000000..889af9374 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/manual-task/test_manual_task.json @@ -0,0 +1,10 @@ +{ + "test_case_1": { + "tasks": { + "manual_task_one": { + "data": [{}] + } + }, + "expected_output_json": {} + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/a.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/a.bpmn new file mode 100644 index 000000000..6eb2e3313 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/a.bpmn @@ -0,0 +1,39 @@ + + + + + Flow_0jk46kf + + + + Flow_0pw6euz + + + + Flow_0jk46kf + Flow_0pw6euz + a = 1 + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/b.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/b.bpmn new file mode 100644 index 000000000..33eaa6084 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/b.bpmn @@ -0,0 +1,42 @@ + + + + + Flow_1qgv480 + + + + Flow_1sbj39z + + + + Flow_1qgv480 + Flow_1sbj39z + b = 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/process_model.json new file mode 100644 index 000000000..737847d6e --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "", + "display_name": "Multiple Test Files", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "metadata_extraction_paths": null, + "primary_file_name": "a.bpmn", + "primary_process_id": "ProcessA" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_a.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_a.json new file mode 100644 index 000000000..98ff465b1 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_a.json @@ -0,0 +1,5 @@ +{ + "test_case_1": { + "expected_output_json": { "a": 1 } + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_b.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_b.json new file mode 100644 index 000000000..a1dac99dd --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/multiple-test-files/test_b.json @@ -0,0 +1,8 @@ +{ + "test_case_1": { + "expected_output_json": { "b": 1 } + }, + "test_case_2": { + "expected_output_json": { "b": 1 } + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/process_group.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/process_group.json new file mode 100644 index 000000000..1a77fbe0f --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/process_group.json @@ -0,0 +1,9 @@ +{ + "admin": false, + "description": "", + "display_name": "Expected To Pass", + "display_order": 0, + "parent_groups": null, + "process_groups": [], + "process_models": [] +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/process_model.json new file mode 100644 index 000000000..03d72515d --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/process_model.json @@ -0,0 +1,11 @@ +{ + "description": "", + "display_name": "Script Task", + "display_order": 0, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], 
+ "metadata_extraction_paths": null, + "primary_file_name": "Script.bpmn", + "primary_process_id": "Process_Script_Task" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/script_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/script_task.bpmn new file mode 100644 index 000000000..3a5302e62 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/script_task.bpmn @@ -0,0 +1,39 @@ + + + + + Flow_0qfycuk + + + + Flow_1auiekw + + + + Flow_0qfycuk + Flow_1auiekw + a = 1 + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/test_script_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/test_script_task.json new file mode 100644 index 000000000..98ff465b1 --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/script-task/test_script_task.json @@ -0,0 +1,5 @@ +{ + "test_case_1": { + "expected_output_json": { "a": 1 } + } +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/process_model.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/process_model.json new file mode 100644 index 000000000..b5e63674e --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/process_model.json @@ -0,0 +1,10 @@ +{ + "description": "A.1.0.2", + "display_name": "A.1.0.2 - Service Task", + "display_order": 13, + "exception_notification_addresses": [], + "fault_or_suspend_on_exception": "fault", + "files": [], + "primary_file_name": "A.1.0.2.bpmn", + "primary_process_id": "Process_test_a102_A_1_0_2_bd2e724" +} diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/service_task.bpmn b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/service_task.bpmn new file mode 100644 index 000000000..178c16dae --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/service_task.bpmn @@ -0,0 +1,56 @@ + + + + + Flow_19ephzh + + + + Flow_1dsxn78 + + + + + + + + + + + + + + This is the Service Task Unit Test Screen. 
+ + + Flow_0xx2kop + Flow_1dsxn78 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/test_service_task.json b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/test_service_task.json new file mode 100644 index 000000000..7b5aae53a --- /dev/null +++ b/spiffworkflow-backend/tests/data/bpmn_unit_test_process_models/expected-to-pass/service-task/test_service_task.json @@ -0,0 +1,10 @@ +{ + "test_case_1": { + "tasks": { + "ServiceTaskProcess:service_task_one": { + "data": [{ "the_result": "result_from_service" }] + } + }, + "expected_output_json": { "the_result": "result_from_service" } + } +} diff --git a/spiffworkflow-backend/tests/spiffworkflow_backend/helpers/test_data.py b/spiffworkflow-backend/tests/spiffworkflow_backend/helpers/test_data.py index 5a7a969ef..7af2cfdcb 100644 --- a/spiffworkflow-backend/tests/spiffworkflow_backend/helpers/test_data.py +++ b/spiffworkflow-backend/tests/spiffworkflow_backend/helpers/test_data.py @@ -12,12 +12,10 @@ from spiffworkflow_backend.services.process_model_service import ProcessModelSer def assure_process_group_exists(process_group_id: Optional[str] = None) -> ProcessGroup: - """Assure_process_group_exists.""" process_group = None - process_model_service = ProcessModelService() if process_group_id is not None: try: - process_group = process_model_service.get_process_group(process_group_id) + process_group = ProcessModelService.get_process_group(process_group_id) except ProcessEntityNotFoundError: process_group = None @@ -31,7 +29,7 @@ def assure_process_group_exists(process_group_id: Optional[str] = None) -> Proce admin=False, display_order=0, ) - process_model_service.add_process_group(process_group) + ProcessModelService.add_process_group(process_group) return process_group diff --git a/spiffworkflow-backend/tests/spiffworkflow_backend/unit/test_process_model_test_runner.py b/spiffworkflow-backend/tests/spiffworkflow_backend/unit/test_process_model_test_runner.py new file mode 100644 index 000000000..78e6b2e1e --- /dev/null +++ b/spiffworkflow-backend/tests/spiffworkflow_backend/unit/test_process_model_test_runner.py @@ -0,0 +1,130 @@ +import os +from typing import Optional + +import pytest +from flask import current_app +from flask import Flask +from tests.spiffworkflow_backend.helpers.base_test import BaseTest + +from spiffworkflow_backend.services.process_model_test_runner_service import NoTestCasesFoundError +from spiffworkflow_backend.services.process_model_test_runner_service import ProcessModelTestRunner + + +class TestProcessModelTestRunner(BaseTest): + def test_can_test_a_simple_process_model( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests("script-task") + assert len(process_model_test_runner.test_case_results) == 1 + + def test_will_raise_if_no_tests_found( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = ProcessModelTestRunner(os.path.join(self.root_path(), "DNE")) + with pytest.raises(NoTestCasesFoundError): + process_model_test_runner.run() + assert process_model_test_runner.all_test_cases_passed(), process_model_test_runner.test_case_results + + def test_can_test_multiple_process_models_with_all_passing_tests( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests() + assert 
len(process_model_test_runner.test_case_results) > 1 + + def test_can_test_multiple_process_models_with_failing_tests( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests(parent_directory="expected-to-fail") + assert len(process_model_test_runner.test_case_results) == 1 + + def test_can_test_process_model_with_multiple_files( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests(bpmn_process_directory_name="multiple-test-files") + assert len(process_model_test_runner.test_case_results) == 3 + + process_model_test_runner = self._run_model_tests( + bpmn_process_directory_name="multiple-test-files", test_case_file="test_a.json" + ) + assert len(process_model_test_runner.test_case_results) == 1 + + process_model_test_runner = self._run_model_tests( + bpmn_process_directory_name="multiple-test-files", test_case_file="test_b.json" + ) + assert len(process_model_test_runner.test_case_results) == 2 + + process_model_test_runner = self._run_model_tests( + bpmn_process_directory_name="multiple-test-files", + test_case_file="test_b.json", + test_case_identifier="test_case_2", + ) + assert len(process_model_test_runner.test_case_results) == 1 + + def test_can_test_process_model_call_activity( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests(bpmn_process_directory_name="call-activity") + assert len(process_model_test_runner.test_case_results) == 1 + + def test_can_test_process_model_with_service_task( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests(bpmn_process_directory_name="service-task") + assert len(process_model_test_runner.test_case_results) == 1 + + def test_can_test_process_model_with_loopback_to_user_task( + self, + app: Flask, + with_db_and_bpmn_file_cleanup: None, + ) -> None: + process_model_test_runner = self._run_model_tests(bpmn_process_directory_name="loopback-to-user-task") + assert len(process_model_test_runner.test_case_results) == 1 + + def _run_model_tests( + self, + bpmn_process_directory_name: Optional[str] = None, + parent_directory: str = "expected-to-pass", + test_case_file: Optional[str] = None, + test_case_identifier: Optional[str] = None, + ) -> ProcessModelTestRunner: + base_process_model_dir_path_segments = [self.root_path(), parent_directory] + path_segments = base_process_model_dir_path_segments + if bpmn_process_directory_name: + path_segments = path_segments + [bpmn_process_directory_name] + process_model_test_runner = ProcessModelTestRunner( + process_model_directory_path=os.path.join(*base_process_model_dir_path_segments), + process_model_directory_for_test_discovery=os.path.join(*path_segments), + test_case_file=test_case_file, + test_case_identifier=test_case_identifier, + ) + process_model_test_runner.run() + + all_tests_expected_to_pass = parent_directory == "expected-to-pass" + assert ( + process_model_test_runner.all_test_cases_passed() is all_tests_expected_to_pass + ), process_model_test_runner.failing_tests_formatted() + return process_model_test_runner + + def root_path(self) -> str: + return os.path.join( + current_app.instance_path, + "..", + "..", + "tests", + "data", + "bpmn_unit_test_process_models", + ) diff --git a/spiffworkflow-frontend/src/components/ErrorDisplay.tsx b/spiffworkflow-frontend/src/components/ErrorDisplay.tsx index 67e6e18dc..760094188 
100644 --- a/spiffworkflow-frontend/src/components/ErrorDisplay.tsx +++ b/spiffworkflow-frontend/src/components/ErrorDisplay.tsx @@ -4,6 +4,7 @@ import { ErrorForDisplay, ProcessInstanceEventErrorDetail, ProcessInstanceLogEntry, + TestCaseErrorDetails, } from '../interfaces'; function errorDetailDisplay( @@ -40,6 +41,22 @@ export const errorForDisplayFromProcessInstanceErrorDetail = ( return errorForDisplay; }; +export const errorForDisplayFromTestCaseErrorDetails = ( + testCaseErrorDetails: TestCaseErrorDetails +) => { + const errorForDisplay: ErrorForDisplay = { + message: testCaseErrorDetails.error_messages.join('\n'), + messageClassName: 'failure-string', + task_name: testCaseErrorDetails.task_bpmn_name, + task_id: testCaseErrorDetails.task_bpmn_identifier, + line_number: testCaseErrorDetails.task_line_number, + error_line: testCaseErrorDetails.task_line_contents, + task_trace: testCaseErrorDetails.task_trace, + stacktrace: testCaseErrorDetails.stacktrace, + }; + return errorForDisplay; +}; + export const childrenForErrorObject = (errorObject: ErrorForDisplay) => { let sentryLinkTag = null; if (errorObject.sentry_link) { diff --git a/spiffworkflow-frontend/src/components/ProcessModelTestRun.tsx b/spiffworkflow-frontend/src/components/ProcessModelTestRun.tsx new file mode 100644 index 000000000..9938431a0 --- /dev/null +++ b/spiffworkflow-frontend/src/components/ProcessModelTestRun.tsx @@ -0,0 +1,188 @@ +import { PlayOutline, Checkmark, Close } from '@carbon/icons-react'; +import { Button, Modal } from '@carbon/react'; +import { useState } from 'react'; +import { useUriListForPermissions } from '../hooks/UriListForPermissions'; +import HttpService from '../services/HttpService'; +import { ProcessFile, TestCaseResult, TestCaseResults } from '../interfaces'; +import { + childrenForErrorObject, + errorForDisplayFromTestCaseErrorDetails, +} from './ErrorDisplay'; + +type OwnProps = { + processModelFile?: ProcessFile; + buttonType?: string; +}; + +export default function ProcessModelTestRun({ + processModelFile, + buttonType = 'icon', +}: OwnProps) { + const [testCaseResults, setTestCaseResults] = + useState(null); + const [showTestCaseResultsModal, setShowTestCaseResultsModal] = + useState(false); + const { targetUris } = useUriListForPermissions(); + + const onProcessModelTestRunSuccess = (result: TestCaseResults) => { + setTestCaseResults(result); + }; + + const processModelTestRunResultTag = () => { + if (testCaseResults) { + if (testCaseResults.all_passed) { + return ( + setShowTestCaseResultsModal(true)} + /> + ); + } + return ( + setShowTestCaseResultsModal(true)} + /> + ); + } + return null; + }; + + const onProcessModelTestRun = () => { + const httpMethod = 'POST'; + setTestCaseResults(null); + + let queryParams = ''; + if (processModelFile) { + queryParams = `?test_case_file=${processModelFile.name}`; + } + + HttpService.makeCallToBackend({ + path: `${targetUris.processModelTestsPath}${queryParams}`, + successCallback: onProcessModelTestRunSuccess, + httpMethod, + }); + }; + + const testCaseFormattedResultTag = () => { + if (!testCaseResults) { + return null; + } + + const passingRows: any[] = []; + const failingRows: any[] = []; + + testCaseResults.passing.forEach((testCaseResult: TestCaseResult) => { + passingRows.push({testCaseResult.test_case_identifier}); + }); + + testCaseResults.failing + .slice(0, 2) + .forEach((testCaseResult: TestCaseResult) => { + if (testCaseResult.test_case_error_details) { + const errorForDisplay = errorForDisplayFromTestCaseErrorDetails( + 
testCaseResult.test_case_error_details + ); + const errorChildren = childrenForErrorObject(errorForDisplay); + failingRows.push( + <> + + + Test Case:{' '} + {testCaseResult.test_case_identifier} + + {errorChildren} + > + ); + } + }); + + return ( + <> + Passing: {testCaseResults.passing.length} + Failing: {testCaseResults.failing.length} + + {failingRows.length > 0 ? ( + <> + Failure Details: + {failingRows} + > + ) : null} + {passingRows.length > 0 ? ( + <> + Successful Test Cases: + {passingRows} + > + ) : null} + > + ); + }; + + const testCaseResultsModal = () => { + if (!testCaseResults) { + return null; + } + + let modalHeading = 'All Tests PASSED'; + if (!testCaseResults.all_passed) { + modalHeading = 'Some Tests FAILED'; + } + return ( + setShowTestCaseResultsModal(false)} + onRequestClose={() => setShowTestCaseResultsModal(false)} + > + {testCaseFormattedResultTag()} + + ); + }; + + const buttonElement = () => { + if (buttonType === 'icon') { + return ( + onProcessModelTestRun()} + /> + ); + } + if (buttonType === 'text') { + return ( + onProcessModelTestRun()} + title="Run all BPMN unit tests for this process model" + > + Run Unit Tests + + ); + } + return null; + }; + + return ( + <> + {testCaseResultsModal()} + {buttonElement()} + {processModelTestRunResultTag()} + > + ); +} diff --git a/spiffworkflow-frontend/src/hooks/UriListForPermissions.tsx b/spiffworkflow-frontend/src/hooks/UriListForPermissions.tsx index e51d961d2..374d17610 100644 --- a/spiffworkflow-frontend/src/hooks/UriListForPermissions.tsx +++ b/spiffworkflow-frontend/src/hooks/UriListForPermissions.tsx @@ -29,6 +29,7 @@ export const useUriListForPermissions = () => { processModelFileShowPath: `/v1.0/process-models/${params.process_model_id}/files/${params.file_name}`, processModelPublishPath: `/v1.0/process-model-publish/${params.process_model_id}`, processModelShowPath: `/v1.0/process-models/${params.process_model_id}`, + processModelTestsPath: `/v1.0/process-model-tests/${params.process_model_id}`, secretListPath: `/v1.0/secrets`, userSearch: `/v1.0/users/search`, userExists: `/v1.0/users/exists/by-username`, diff --git a/spiffworkflow-frontend/src/index.css b/spiffworkflow-frontend/src/index.css index cfea25f48..a36ab97e2 100644 --- a/spiffworkflow-frontend/src/index.css +++ b/spiffworkflow-frontend/src/index.css @@ -418,6 +418,12 @@ td.actions-cell { padding-bottom: 10px; } +.cds--btn--ghost:not([disabled]).red-icon svg { + fill: red; +} +.cds--btn--ghost:not([disabled]).green-icon svg { + fill: #198038; +} .cds--btn--ghost:not([disabled]) svg.red-icon { fill: red; } diff --git a/spiffworkflow-frontend/src/interfaces.ts b/spiffworkflow-frontend/src/interfaces.ts index 02a983673..81a563261 100644 --- a/spiffworkflow-frontend/src/interfaces.ts +++ b/spiffworkflow-frontend/src/interfaces.ts @@ -365,3 +365,26 @@ export interface InterstitialPageResponse { task?: ProcessInstanceTask; process_instance?: ProcessInstance; } + +export interface TestCaseErrorDetails { + error_messages: string[]; + stacktrace?: string[]; + task_bpmn_identifier?: string; + task_bpmn_name?: string; + task_line_contents?: string; + task_line_number?: number; + task_trace?: string[]; +} + +export interface TestCaseResult { + bpmn_file: string; + passed: boolean; + test_case_identifier: string; + test_case_error_details?: TestCaseErrorDetails; +} + +export interface TestCaseResults { + all_passed: boolean; + failing: TestCaseResult[]; + passing: TestCaseResult[]; +} diff --git a/spiffworkflow-frontend/src/routes/ProcessModelShow.tsx 
b/spiffworkflow-frontend/src/routes/ProcessModelShow.tsx index f50ee6a34..407c81151 100644 --- a/spiffworkflow-frontend/src/routes/ProcessModelShow.tsx +++ b/spiffworkflow-frontend/src/routes/ProcessModelShow.tsx @@ -2,11 +2,11 @@ import { useEffect, useState } from 'react'; import { Link, useNavigate, useParams } from 'react-router-dom'; import { Add, - Upload, Download, - TrashCan, - Favorite, Edit, + Favorite, + TrashCan, + Upload, View, // @ts-ignore } from '@carbon/icons-react'; @@ -14,18 +14,18 @@ import { Accordion, AccordionItem, Button, - Grid, - Column, - Stack, ButtonSet, - Modal, + Column, FileUploader, + Grid, + Modal, + Stack, Table, + TableBody, + TableCell, TableHead, TableHeader, TableRow, - TableCell, - TableBody, // @ts-ignore } from '@carbon/react'; import { Can } from '@casl/react'; @@ -49,6 +49,7 @@ import { usePermissionFetcher } from '../hooks/PermissionService'; import { useUriListForPermissions } from '../hooks/UriListForPermissions'; import ProcessInstanceRun from '../components/ProcessInstanceRun'; import { Notification } from '../components/Notification'; +import ProcessModelTestRun from '../components/ProcessModelTestRun'; export default function ProcessModelShow() { const params = useParams(); @@ -68,6 +69,7 @@ export default function ProcessModelShow() { const { targetUris } = useUriListForPermissions(); const permissionRequestData: PermissionsToCheck = { [targetUris.processModelShowPath]: ['PUT', 'DELETE'], + [targetUris.processModelTestsPath]: ['POST'], [targetUris.processModelPublishPath]: ['POST'], [targetUris.processInstanceListPath]: ['GET'], [targetUris.processInstanceCreatePath]: ['POST'], @@ -81,6 +83,18 @@ export default function ProcessModelShow() { `${params.process_model_id}` ); + let hasTestCaseFiles: boolean = false; + + const isTestCaseFile = (processModelFile: ProcessFile) => { + return processModelFile.name.match(/^test_.*\.json$/); + }; + + if (processModel) { + hasTestCaseFiles = !!processModel.files.find( + (processModelFile: ProcessFile) => isTestCaseFile(processModelFile) + ); + } + useEffect(() => { const processResult = (result: ProcessModel) => { setProcessModel(result); @@ -308,6 +322,13 @@ export default function ProcessModelShow() { ); } + if (isTestCaseFile(processModelFile)) { + elements.push( + + + + ); + } return elements; }; @@ -647,6 +668,11 @@ export default function ProcessModelShow() { Publish Changes + + {hasTestCaseFiles ? ( + + ) : null} + {processModelFilesSection()}