2020-05-25 16:29:05 +00:00
|
|
|
import json
|
2020-03-26 16:51:53 +00:00
|
|
|
import logging
|
2020-03-04 22:08:45 +00:00
|
|
|
import os
|
2020-03-03 18:50:22 +00:00
|
|
|
from unittest.mock import patch
|
2020-02-03 20:15:36 +00:00
|
|
|
|
2020-05-25 16:29:05 +00:00
|
|
|
from tests.base_test import BaseTest
|
|
|
|
|
2020-02-25 17:01:25 +00:00
|
|
|
from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
|
2020-05-25 16:29:05 +00:00
|
|
|
from SpiffWorkflow.camunda.specs.UserTask import FormField
|
2020-02-25 17:01:25 +00:00
|
|
|
|
2020-03-04 22:08:45 +00:00
|
|
|
from crc import session, db, app
|
2020-02-18 21:38:56 +00:00
|
|
|
from crc.api.common import ApiError
|
2020-05-25 16:29:05 +00:00
|
|
|
from crc.models.file import FileModel, FileDataModel
|
|
|
|
from crc.models.protocol_builder import ProtocolBuilderStudySchema
|
|
|
|
from crc.services.protocol_builder import ProtocolBuilderService
|
2020-02-10 21:19:23 +00:00
|
|
|
from crc.models.study import StudyModel
|
2020-05-25 16:29:05 +00:00
|
|
|
from crc.models.workflow import WorkflowSpecModel, WorkflowStatus
|
2020-03-04 22:08:45 +00:00
|
|
|
from crc.services.file_service import FileService
|
Created a "StudyService" and moved all complex logic around study manipulation out of the study api, and this service, as things were getting complicated. The Workflow Processor no longer creates the WorkflowModel, the study object handles that, and only passes the model into the workflow processor when it is ready to start the workflow.
Created a Study object (separate from the StudyModel) that can be constructed on request, and contains a different data structure than we store in the DB. This allows us to return underlying Categories and Workflows in a clean way.
Added a new status to workflows called "not_started", meaning we have not yet instantiated a processor or created a BPMN, they have no version yet and no stored data, just the possibility of being started.
The Top Level Workflow or "Master" workflow is now a part of the sample data, and loaded at all times.
Removed the ability to "add a workflow to a study" and "remove a workflow from a study", a study contains all possible workflows by definition.
Example data no longer creates users or studies, it just creates the specs.
2020-03-30 12:00:16 +00:00
|
|
|
from crc.services.study_service import StudyService
|
2020-02-10 21:19:23 +00:00
|
|
|
from crc.services.workflow_processor import WorkflowProcessor
|
2020-05-25 16:29:05 +00:00
|
|
|
from crc.services.workflow_service import WorkflowService
|
2019-12-18 19:02:17 +00:00
|
|
|
|
|
|
|
|
2020-01-24 14:35:14 +00:00
|
|
|
class TestWorkflowProcessor(BaseTest):
|
2019-12-18 19:02:17 +00:00
|
|
|
|
2020-02-07 16:34:44 +00:00
|
|
|
def _populate_form_with_random_data(self, task):
    """Fill the given Spiff task's form with random data.

    Converts the task to its API representation (with docs and forms
    attached) and hands both to the WorkflowService helper that does
    the actual population.
    """
    converted = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
    WorkflowService.populate_form_with_random_data(task, converted)
|
2020-02-04 21:49:28 +00:00
|
|
|
|
Created a "StudyService" and moved all complex logic around study manipulation out of the study api, and this service, as things were getting complicated. The Workflow Processor no longer creates the WorkflowModel, the study object handles that, and only passes the model into the workflow processor when it is ready to start the workflow.
Created a Study object (seperate from the StudyModel) that can cronstructed on request, and contains a different data structure than we store in the DB. This allows us to return underlying Categories and Workflows in a clean way.
Added a new status to workflows called "not_started", meaning we have not yet instantiated a processor or created a BPMN, they have no version yet and no stored data, just the possiblity of being started.
The Top Level Workflow or "Master" workflow is now a part of the sample data, and loaded at all times.
Removed the ability to "add a workflow to a study" and "remove a workflow from a study", a study contains all possible workflows by definition.
Example data no longer creates users or studies, it just creates the specs.
2020-03-30 12:00:16 +00:00
|
|
|
def get_processor(self, study_model, spec_model):
    """Build a workflow model for the study/spec pair and wrap it in a processor."""
    # NOTE(review): relies on the private StudyService helper to create the model.
    return WorkflowProcessor(
        StudyService._create_workflow_model(study_model, spec_model))
|
|
|
|
|
2019-12-18 19:02:17 +00:00
|
|
|
def test_create_and_complete_workflow(self):
    """Run the random_fact workflow end to end: answer its single form,
    let the engine finish, and check the resulting data."""
    self.load_example_data()
    spec = self.load_test_spec("random_fact")
    study = session.query(StudyModel).first()
    processor = self.get_processor(study, spec)

    # The processor should exist and carry the id of the study it was built for.
    self.assertIsNotNone(processor)
    self.assertEqual(study.id, processor.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY])
    self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

    ready_tasks = processor.next_user_tasks()
    self.assertEqual(1, len(ready_tasks))
    task = ready_tasks[0]
    self.assertEqual("Task_User_Select_Type", task.get_name())

    # Answer the one form question and push the workflow forward.
    task.data = task.data or {}
    task.data.update({"type": "buzzword"})
    processor.complete_task(task)
    self.assertEqual(WorkflowStatus.waiting, processor.get_status())

    processor.do_engine_steps()
    self.assertEqual(WorkflowStatus.complete, processor.get_status())
    data = processor.get_data()
    self.assertIsNotNone(data)
    self.assertIn("FactService", data)
|
2019-12-31 21:32:47 +00:00
|
|
|
|
2020-01-23 20:32:53 +00:00
|
|
|
def test_workflow_with_dmn(self):
    """Exercise a workflow backed by a DMN decision table and verify the
    decision's message ends up in the workflow data."""
    self.load_example_data()
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("decision_table")

    # The spec should have been loaded with two files (the BPMN and the DMN).
    spec_files = session.query(FileModel).filter_by(workflow_spec_id='decision_table').all()
    self.assertEqual(2, len(spec_files))

    processor = self.get_processor(study, spec)
    self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
    ready_tasks = processor.next_user_tasks()
    self.assertEqual(1, len(ready_tasks))
    task = ready_tasks[0]
    self.assertEqual("get_num_presents", task.get_name())

    # Supply the input the decision table branches on.
    if task.data is None:
        task.data = {}
    task.data.update({"num_presents": 1})
    processor.complete_task(task)
    processor.do_engine_steps()

    data = processor.get_data()
    self.assertIsNotNone(data)
    self.assertIn("message", data)
    self.assertEqual("Oh, Ginger.", data.get('message'))
    last = processor.bpmn_workflow.last_task
    self.assertEqual("End", last.task_spec.name)
    self.assertEqual("Oh, Ginger.", last.data.get('message'))
|
|
|
|
|
2020-02-03 20:15:36 +00:00
|
|
|
|
|
|
|
def test_workflow_with_parallel_forms(self):
    """Walk a four-branch parallel workflow to completion.

    Each of the 4 parallel branches has a user task with a follow-up user
    task; after both rounds plus one final joining task, the workflow
    should report itself complete.
    """
    self.load_example_data()
    spec = self.load_test_spec("parallel_tasks")
    study = session.query(StudyModel).first()
    processor = self.get_processor(study, spec)
    self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

    # Complete the first steps of the 4 parallel tasks.
    # Populate all forms first, then complete them, preserving the
    # original ordering of operations.
    ready_tasks = processor.next_user_tasks()
    self.assertEqual(4, len(ready_tasks))
    for task in ready_tasks:
        self._populate_form_with_random_data(task)
    for task in ready_tasks:
        processor.complete_task(task)

    # There are another 4 tasks to complete (each parallel task has a follow-up task).
    ready_tasks = processor.next_user_tasks()
    self.assertEqual(4, len(ready_tasks))
    for task in ready_tasks:
        self._populate_form_with_random_data(task)
    for task in ready_tasks:
        processor.complete_task(task)
    processor.do_engine_steps()

    # Should be one last step after the above are complete.
    final_user_tasks = processor.next_user_tasks()
    self.assertEqual(1, len(final_user_tasks))
    self._populate_form_with_random_data(final_user_tasks[0])
    processor.complete_task(final_user_tasks[0])

    processor.do_engine_steps()
    self.assertTrue(processor.bpmn_workflow.is_completed())
|
|
|
|
|
2020-02-07 16:34:44 +00:00
|
|
|
def test_workflow_processor_knows_the_text_task_even_when_parallel(self):
    """When parallel tasks are completed out of order, next_task() should
    recommend a child of the most recently completed task."""
    self.load_example_data()
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("parallel_tasks")
    processor = self.get_processor(study, spec)
    self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

    ready_tasks = processor.next_user_tasks()
    self.assertEqual(4, len(ready_tasks))
    self.assertEqual(ready_tasks[0], processor.next_task(), "First task in list of 4")

    # Complete the third open task, so do things out of order
    # this should cause the system to recommend the first ready task that is a
    # child of the last completed task.
    out_of_order_task = ready_tasks[2]
    self._populate_form_with_random_data(out_of_order_task)
    processor.complete_task(out_of_order_task)

    ready_tasks = processor.next_user_tasks()
    self.assertEqual(processor.bpmn_workflow.last_task, out_of_order_task)
    self.assertEqual(4, len(ready_tasks))
    self.assertEqual(out_of_order_task.children[0], processor.next_task())
|
|
|
|
|
2020-02-25 17:01:25 +00:00
|
|
|
def test_workflow_processor_returns_next_task_as_end_task_if_complete(self):
    """After a workflow finishes, next_task() should hand back the terminal
    EndEvent task, still carrying the accumulated workflow data."""
    self.load_example_data()
    spec = self.load_test_spec("random_fact")
    study = session.query(StudyModel).first()
    processor = self.get_processor(study, spec)
    processor.do_engine_steps()

    # Answer the single form and let the engine run to completion.
    form_task = processor.next_task()
    form_task.data = {"type": "buzzword"}
    processor.complete_task(form_task)
    self.assertEqual(WorkflowStatus.waiting, processor.get_status())
    processor.do_engine_steps()
    self.assertEqual(WorkflowStatus.complete, processor.get_status())

    end_task = processor.next_task()
    self.assertIsNotNone(end_task)
    self.assertIn("FactService", end_task.data)
    self.assertIsInstance(end_task.task_spec, EndEvent)
|
|
|
|
|
2020-03-11 20:33:18 +00:00
|
|
|
def test_workflow_validation_error_is_properly_raised(self):
    """Building a processor from an invalid spec should raise an ApiError
    whose message names the offending bpmn:startEvent element."""
    self.load_example_data()
    spec = self.load_test_spec("invalid_spec")
    study = session.query(StudyModel).first()

    with self.assertRaises(ApiError) as context:
        self.get_processor(study, spec)
    self.assertEqual("workflow_validation_error", context.exception.code)
    self.assertTrue("bpmn:startEvent" in context.exception.message)
|
|
|
|
|
|
|
|
def test_workflow_spec_key_error(self):
    """Frequently seeing errors in the logs about a 'Key' error, where a workflow
    references something that doesn't exist in the midst of processing. Want to
    make sure we produce errors to the front end that allows us to debug this."""
    # Start the two_forms workflow, and enter some data in the first form.
    self.load_example_data()
    study = session.query(StudyModel).first()
    workflow_spec_model = self.load_test_spec("two_forms")
    processor = self.get_processor(study, workflow_spec_model)
    self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
    task = processor.next_task()
    task.data = {"color": "blue"}
    processor.complete_task(task)

    # Modify the specification, with a major change.
    file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn')
    self.replace_file("two_forms.bpmn", file_path)

    # Attempting a soft update on a structural change should raise a sensible error.
    # (The constructed processor is only needed for its side effect of raising,
    # so the result is deliberately not bound to a name.)
    with self.assertRaises(ApiError) as context:
        WorkflowProcessor(processor.workflow_model, soft_reset=True)
    self.assertEqual("unexpected_workflow_structure", context.exception.code)
|
|
|
|
|
2020-02-04 21:49:28 +00:00
|
|
|
def test_workflow_with_bad_expression_raises_sensible_error(self):
    """A workflow containing an invalid expression should surface a
    task_error ApiError when the engine evaluates it."""
    self.load_example_data()
    spec = self.load_test_spec("invalid_expression")
    study = session.query(StudyModel).first()
    processor = self.get_processor(study, spec)
    processor.do_engine_steps()

    ready_tasks = processor.next_user_tasks()
    self.assertEqual(1, len(ready_tasks))
    form_task = ready_tasks[0]
    self._populate_form_with_random_data(form_task)
    processor.complete_task(form_task)

    # The bad expression is hit during the engine steps that follow the form.
    with self.assertRaises(ApiError) as context:
        processor.do_engine_steps()
    self.assertEqual("task_error", context.exception.code)
|
2020-02-04 21:49:28 +00:00
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
def test_workflow_with_docx_template(self):
    """Completing the docx workflow's form should lead the engine step to
    generate exactly one new file with non-empty content."""
    self.load_example_data()
    study = session.query(StudyModel).first()
    self.load_test_spec("docx")

    spec_files = session.query(FileModel).filter_by(workflow_spec_id='docx').all()
    self.assertEqual(2, len(spec_files))
    workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id="docx").first()
    processor = self.get_processor(study, workflow_spec_model)
    self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

    ready_tasks = processor.next_user_tasks()
    self.assertEqual(1, len(ready_tasks))
    task = ready_tasks[0]
    self.assertEqual("task_gather_information", task.get_name())
    self._populate_form_with_random_data(task)
    processor.complete_task(task)

    def files_for_workflow():
        # All files attached to this study/workflow pair.
        return session.query(FileModel).filter_by(
            study_id=study.id, workflow_id=processor.get_workflow_id()).all()

    # No file should exist until the engine steps actually run.
    self.assertEqual(0, len(files_for_workflow()))
    processor.do_engine_steps()
    generated = files_for_workflow()
    self.assertEqual(1, len(generated), "The task should create a new file.")
    file_data = session.query(FileDataModel).filter(
        FileDataModel.file_model_id == generated[0].id).first()
    self.assertIsNotNone(file_data.data)
    self.assertTrue(len(file_data.data) > 0)
    # Not going any farther here, assuming this is tested in libraries correctly.
|
2020-03-03 18:50:22 +00:00
|
|
|
|
|
|
|
def test_load_study_information(self):
    """ Test a workflow that includes requests to pull in Study Details."""
    self.load_example_data()
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("study_details")
    processor = self.get_processor(study, spec)
    processor.do_engine_steps()

    last_task = processor.bpmn_workflow.last_task
    self.assertIsNotNone(last_task.data)
    self.assertIn("StudyInfo", last_task.data)
    self.assertIn("info", last_task.data["StudyInfo"])
    # The study-info script should have pulled in these detail fields.
    info = last_task.data["StudyInfo"]["info"]
    for expected_key in ("title", "last_updated", "sponsor"):
        self.assertIn(expected_key, info)
|
2020-03-03 18:50:22 +00:00
|
|
|
|
2020-03-04 22:08:45 +00:00
|
|
|
def test_spec_versioning(self):
    """The spec version string changes as files are added to or updated in a spec.

    Adding a secondary file bumps the minor portion; updating the primary
    BPMN file bumps the major portion.
    """
    self.load_example_data()
    study = session.query(StudyModel).first()
    workflow_spec_model = self.load_test_spec("decision_table")
    processor = self.get_processor(study, workflow_spec_model)
    self.assertTrue(processor.get_spec_version().startswith('v1.1'))

    file_service = FileService()
    # Adding a new, non-primary file should bump the minor version.
    file_service.add_workflow_spec_file(workflow_spec_model, "new_file.txt", "txt", b'blahblah')
    processor = self.get_processor(study, workflow_spec_model)
    self.assertTrue(processor.get_spec_version().startswith('v1.1.1'))

    # Updating the primary BPMN file should bump the major version.
    file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'docx', 'docx.bpmn')
    # BUG FIX: the file handle was previously opened and never closed.
    with open(file_path, "rb") as file:
        data = file.read()
    file_model = db.session.query(FileModel).filter(FileModel.name == "decision_table.bpmn").first()
    file_service.update_file(file_model, data, "txt")
    processor = self.get_processor(study, workflow_spec_model)
    self.assertTrue(processor.get_spec_version().startswith('v2.1.1'))
def test_restart_workflow(self):
    """A hard reset returns the workflow to its first task, retaining task data."""
    self.load_example_data()
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("two_forms")
    processor = self.get_processor(study, spec)
    self.assertEqual(processor.workflow_model.workflow_spec_id, spec.id)

    # Complete the first form so the workflow advances to the second task.
    first_task = processor.next_task()
    first_task.data = {"key": "Value"}
    processor.complete_task(first_task)
    task_before_restart = processor.next_task()

    processor.hard_reset()
    task_after_restart = processor.next_task()

    # The reset puts us back on the first task, and its data survives.
    self.assertNotEqual(first_task.get_name(), task_before_restart.get_name())
    self.assertEqual(first_task.get_name(), task_after_restart.get_name())
    self.assertEqual(first_task.data, task_after_restart.data)
def test_soft_reset(self):
    """A soft reset picks up a minor (text-only) spec change; a plain reload does not."""
    self.load_example_data()

    # Start the two_forms workflow, and enter some data in the first form.
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("two_forms")
    processor = self.get_processor(study, spec)
    self.assertEqual(processor.workflow_model.workflow_spec_id, spec.id)
    active_task = processor.next_task()
    active_task.data = {"color": "blue"}
    processor.complete_task(active_task)

    # Modify the specification, with a minor text change.
    mod_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn')
    self.replace_file("two_forms.bpmn", mod_path)

    # Setting up another processor should not error out, but doesn't pick up the update.
    processor.workflow_model.bpmn_workflow_json = processor.serialize()
    processor2 = WorkflowProcessor(processor.workflow_model)
    self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
    self.assertNotEqual("# This is some documentation I wanted to add.",
                        processor2.bpmn_workflow.last_task.task_spec.documentation)

    # You can do a soft update and get the right response.
    processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True)
    self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
    self.assertEqual("# This is some documentation I wanted to add.",
                     processor3.bpmn_workflow.last_task.task_spec.documentation)
def test_hard_reset(self):
    """A hard reset restarts against a structurally-changed spec, keeping collected data."""
    self.load_example_data()

    # Start the two_forms workflow, and enter some data in the first form.
    study = session.query(StudyModel).first()
    spec = self.load_test_spec("two_forms")
    processor = self.get_processor(study, spec)
    self.assertEqual(processor.workflow_model.workflow_spec_id, spec.id)
    form_task = processor.next_task()
    form_task.data = {"color": "blue"}
    processor.complete_task(form_task)
    self.assertEqual("Step 2", processor.next_task().task_spec.description)

    # Modify the specification, with a major change that alters the flow
    # and can't be serialized effectively.
    mod_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn')
    self.replace_file("two_forms.bpmn", mod_path)

    # Assure that creating a new processor doesn't cause any issues,
    # and maintains the spec version.
    processor.workflow_model.bpmn_workflow_json = processor.serialize()
    processor2 = WorkflowProcessor(processor.workflow_model)
    self.assertTrue(processor2.get_spec_version().startswith("v1 "))  # Still at version 1.

    # Do a hard reset, which should bring us back to the beginning, but retain the data.
    processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True)
    self.assertEqual("Step 1", processor3.next_task().task_spec.description)
    self.assertEqual({"color": "blue"}, processor3.next_task().data)
    processor3.complete_task(processor3.next_task())
    self.assertEqual("New Step", processor3.next_task().task_spec.description)
    self.assertEqual("blue", processor3.next_task().data["color"])
def test_get_latest_spec_version(self):
    """get_latest_version_string reports the current version of a loaded spec."""
    # The spec must be loaded for its files to exist; the returned model
    # itself is not needed, so the unused local was removed.
    self.load_test_spec("two_forms")
    version = WorkflowProcessor.get_latest_version_string("two_forms")
    self.assertTrue(version.startswith("v1 "))
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
def test_master_bpmn_for_crc(self, mock_details, mock_required_docs, mock_investigators, mock_studies):
    """The master (top level) workflow runs fully automatically and marks each
    workflow as required/disabled based on mocked Protocol Builder data."""
    # Mock Protocol Builder response
    studies_response = self.protocol_builder_response('user_studies.json')
    mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)

    investigators_response = self.protocol_builder_response('investigators.json')
    mock_investigators.return_value = json.loads(investigators_response)

    required_docs_response = self.protocol_builder_response('required_docs.json')
    mock_required_docs.return_value = json.loads(required_docs_response)

    details_response = self.protocol_builder_response('study_details.json')
    mock_details.return_value = json.loads(details_response)

    self.load_example_data(use_crc_data=True)
    ProtocolBuilderService.ENABLED = True

    study = session.query(StudyModel).first()
    workflow_spec_model = db.session.query(WorkflowSpecModel).\
        filter(WorkflowSpecModel.name == "top_level_workflow").first()
    self.assertIsNotNone(workflow_spec_model)

    processor = self.get_processor(study, workflow_spec_model)
    processor.do_engine_steps()
    # BUG FIX: the arguments were previously reversed —
    # assertTrue(msg, condition) — so the truthy message string made the
    # assertion always pass. assertTrue takes (condition, msg).
    self.assertTrue(processor.bpmn_workflow.is_completed(),
                    "Top level process is fully automatic.")
    data = processor.bpmn_workflow.last_task.data

    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)

    # It should mark Enter Core Data as required, because it is always required.
    self.assertTrue("enter_core_info" in data)
    self.assertEqual("required", data["enter_core_info"])

    # It should mark Personnel as required, because StudyInfo.investigators is not empty.
    self.assertTrue("personnel" in data)
    self.assertEqual("required", data["personnel"])

    # It should mark the sponsor funding source as disabled since the funding
    # required (12) is not included in the required docs.
    self.assertTrue("sponsor_funding_source" in data)
    self.assertEqual("disabled", data["sponsor_funding_source"])
def test_enum_with_no_choices_raises_api_error(self):
    """Filling a form whose enum field has no options must raise an ApiError."""
    self.load_example_data()
    spec = self.load_test_spec("random_fact")
    study = session.query(StudyModel).first()
    processor = self.get_processor(study, spec)
    processor.do_engine_steps()
    user_task = processor.next_user_tasks()[0]

    # Inject an enum field that (incorrectly) offers no options at all.
    bad_field = FormField()
    bad_field.id = "test_enum_field"
    bad_field.type = "enum"
    bad_field.options = []
    user_task.task_spec.form.fields.append(bad_field)

    with self.assertRaises(ApiError):
        self._populate_form_with_random_data(user_task)