import json
import logging
import os
from unittest.mock import patch

from tests.base_test import BaseTest

from SpiffWorkflow.bpmn.specs.EndEvent import EndEvent
from SpiffWorkflow.camunda.specs.UserTask import FormField

from crc import session, db, app
from crc.api.common import ApiError
from crc.models.file import FileModel, FileDataModel
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.services.protocol_builder import ProtocolBuilderService
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowSpecModel, WorkflowStatus
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService


class TestWorkflowProcessor(BaseTest):
def _populate_form_with_random_data(self, task):
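        """Convert the Spiff task to an API task and fill its form fields with random data."""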
        api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
        WorkflowService.populate_form_with_random_data(task, api_task, required_only=False)
def get_processor(self, study_model, spec_model):
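        """Create a WorkflowModel for the given study and spec, and return a processor for it."""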
        workflow_model = StudyService._create_workflow_model(study_model, spec_model)
        return WorkflowProcessor(workflow_model)
def test_create_and_complete_workflow(self):
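        """Run the random_fact workflow from start to finish, completing its single user task."""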
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("random_fact")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(study.id, processor.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY])
        self.assertIsNotNone(processor)
        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(1, len(next_user_tasks))
        task = next_user_tasks[0]
        self.assertEqual("Task_User_Select_Type", task.get_name())
        model = {"type": "buzzword"}
        if task.data is None:
            task.data = {}
        task.data.update(model)
        processor.complete_task(task)
        self.assertEqual(WorkflowStatus.waiting, processor.get_status())
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.complete, processor.get_status())
        data = processor.get_data()
        self.assertIsNotNone(data)
        self.assertIn("FactService", data)
def test_workflow_with_dmn(self):
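        """Run a workflow that uses a DMN decision table to compute its message."""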
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("decision_table")
        files = session.query(FileModel).filter_by(workflow_spec_id='decision_table').all()
        self.assertEqual(2, len(files))
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(1, len(next_user_tasks))
        task = next_user_tasks[0]
        self.assertEqual("get_num_presents", task.get_name())
        model = {"num_presents": 1}
        if task.data is None:
            task.data = {}
        task.data.update(model)
        processor.complete_task(task)
        processor.do_engine_steps()
        data = processor.get_data()
        self.assertIsNotNone(data)
        self.assertIn("message", data)
        self.assertEqual("Oh, Ginger.", data.get('message'))
        self.assertEqual("End", processor.bpmn_workflow.last_task.task_spec.name)
        self.assertEqual("Oh, Ginger.", processor.bpmn_workflow.last_task.data.get('message'))
def test_workflow_with_parallel_forms(self):
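        """Complete four parallel tasks, their follow-up tasks, and one final task to finish the workflow."""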
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("parallel_tasks")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

        # Complete the first steps of the 4 parallel tasks
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(4, len(next_user_tasks))
        self._populate_form_with_random_data(next_user_tasks[0])
        self._populate_form_with_random_data(next_user_tasks[1])
        self._populate_form_with_random_data(next_user_tasks[2])
        self._populate_form_with_random_data(next_user_tasks[3])
        processor.complete_task(next_user_tasks[0])
        processor.complete_task(next_user_tasks[1])
        processor.complete_task(next_user_tasks[2])
        processor.complete_task(next_user_tasks[3])

        # There are another 4 tasks to complete (each parallel task has a follow-up task)
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(4, len(next_user_tasks))
        self._populate_form_with_random_data(next_user_tasks[0])
        self._populate_form_with_random_data(next_user_tasks[1])
        self._populate_form_with_random_data(next_user_tasks[2])
        self._populate_form_with_random_data(next_user_tasks[3])
        processor.complete_task(next_user_tasks[0])
        processor.complete_task(next_user_tasks[1])
        processor.complete_task(next_user_tasks[2])
        processor.complete_task(next_user_tasks[3])
        processor.do_engine_steps()

        # Should be one last step after the above are complete
        final_user_tasks = processor.next_user_tasks()
        self.assertEqual(1, len(final_user_tasks))
        self._populate_form_with_random_data(final_user_tasks[0])
        processor.complete_task(final_user_tasks[0])

        processor.do_engine_steps()
        self.assertTrue(processor.bpmn_workflow.is_completed())
    def test_workflow_processor_knows_the_next_task_even_when_parallel(self):
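        """Completing a parallel task out of order should make next_task()
        recommend a child of the last completed task."""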
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("parallel_tasks")
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(4, len(next_user_tasks))
        self.assertEqual(next_user_tasks[0], processor.next_task(), "First task in list of 4")

        # Complete the third open task, doing things out of order; this should
        # cause the system to recommend the first ready task that is a child
        # of the last completed task.
        task = next_user_tasks[2]
        self._populate_form_with_random_data(task)
        processor.complete_task(task)
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(processor.bpmn_workflow.last_task, task)
        self.assertEqual(4, len(next_user_tasks))
        self.assertEqual(task.children[0], processor.next_task())
def test_workflow_processor_returns_next_task_as_end_task_if_complete(self):
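        """Once the workflow is complete, next_task() should return the EndEvent with the final data."""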
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("random_fact")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        task = processor.next_task()
        task.data = {"type": "buzzword"}
        processor.complete_task(task)
        self.assertEqual(WorkflowStatus.waiting, processor.get_status())
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.complete, processor.get_status())
        task = processor.next_task()
        self.assertIsNotNone(task)
        self.assertIn("FactService", task.data)
        self.assertIsInstance(task.task_spec, EndEvent)
def test_workflow_validation_error_is_properly_raised(self):
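        """An invalid BPMN spec should raise an ApiError with the workflow_validation_error code."""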
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("invalid_spec")
        study = session.query(StudyModel).first()
        with self.assertRaises(ApiError) as context:
            self.get_processor(study, workflow_spec_model)
        self.assertEqual("workflow_validation_error", context.exception.code)
        self.assertIn("bpmn:startEvent", context.exception.message)
def test_workflow_with_bad_expression_raises_sensible_error(self):
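        """A bad expression in the spec should surface as an ApiError with the task_error code."""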
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("invalid_expression")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(1, len(next_user_tasks))
        self._populate_form_with_random_data(next_user_tasks[0])
        processor.complete_task(next_user_tasks[0])
        with self.assertRaises(ApiError) as context:
            processor.do_engine_steps()
        self.assertEqual("task_error", context.exception.code)
def test_workflow_with_docx_template(self):
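        """Completing the docx workflow should generate a new file from the Word template."""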
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("docx")
        files = session.query(FileModel).filter_by(workflow_spec_id='docx').all()
        self.assertEqual(2, len(files))
        workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id="docx").first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
        next_user_tasks = processor.next_user_tasks()
        self.assertEqual(1, len(next_user_tasks))
        task = next_user_tasks[0]
        self.assertEqual("task_gather_information", task.get_name())
        self._populate_form_with_random_data(task)
        processor.complete_task(task)

        files = session.query(FileModel).filter_by(workflow_id=processor.get_workflow_id()).all()
        self.assertEqual(0, len(files))
        processor.do_engine_steps()
        files = session.query(FileModel).filter_by(workflow_id=processor.get_workflow_id()).all()
        self.assertEqual(1, len(files), "The task should create a new file.")
        file_data = session.query(FileDataModel).filter(FileDataModel.file_model_id == files[0].id).first()
        self.assertIsNotNone(file_data.data)
        self.assertTrue(len(file_data.data) > 0)
        # Not going any further here; assuming this is tested correctly in the underlying libraries.
def test_load_study_information(self):
""" Test a workflow that includes requests to pull in Study Details."""
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("study_details")
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        task = processor.bpmn_workflow.last_task
        self.assertIsNotNone(task.data)
        self.assertIn("StudyInfo", task.data)
        self.assertIn("info", task.data["StudyInfo"])
        self.assertIn("title", task.data["StudyInfo"]["info"])
        self.assertIn("last_updated", task.data["StudyInfo"]["info"])
        self.assertIn("sponsor", task.data["StudyInfo"]["info"])
def test_spec_versioning(self):
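        """Two spec files yield a 'v1.1' version string; adding a file yields
        'v1.1.1', and changing the primary BPMN file bumps it to 'v2.1.1'."""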
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("decision_table")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_version_string().startswith('v1.1'))

        file_service = FileService()
        file_service.add_workflow_spec_file(workflow_spec_model, "new_file.txt", "txt", b'blahblah')
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_version_string().startswith('v1.1.1'))

        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'docx', 'docx.bpmn')
        with open(file_path, "rb") as file:
            data = file.read()
        file_model = db.session.query(FileModel).filter(FileModel.name == "decision_table.bpmn").first()
        file_service.update_file(file_model, data, "txt")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_version_string().startswith('v2.1.1'))
def test_hard_reset(self):
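        """A hard reset after a structural spec change should restart the workflow
        at Step 1 on the latest spec version, while retaining the data."""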
        self.load_example_data()
        # Start the two_forms workflow, and enter some data in the first form.
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("two_forms")
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
        task = processor.next_task()
        task.data = {"color": "blue"}
        processor.complete_task(task)
        next_task = processor.next_task()
        self.assertEqual("Step 2", next_task.task_spec.description)

        # Modify the specification, with a major change that alters the flow and can't be serialized effectively.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'modified', 'two_forms_struc_mod.bpmn')
        self.replace_file("two_forms.bpmn", file_path)

        # Assure that creating a new processor doesn't cause any issues, and maintains the spec version.
        processor.workflow_model.bpmn_workflow_json = processor.serialize()
        db.session.add(processor.workflow_model)  # Assure this isn't transient, which was causing some errors.
        self.assertIsNotNone(processor.workflow_model.bpmn_workflow_json)
        processor2 = WorkflowProcessor(processor.workflow_model)
        self.assertFalse(processor2.is_latest_spec)  # Still at version 1.

        # Do a hard reset, which should bring us back to the beginning, but retain the data.
        processor2 = WorkflowProcessor.reset(processor2.workflow_model)
        processor3 = WorkflowProcessor(processor.workflow_model)
        processor3.do_engine_steps()
        self.assertEqual("Step 1", processor3.next_task().task_spec.description)
        self.assertTrue(processor3.is_latest_spec)  # Now at version 2.
        task = processor3.next_task()
        task.data = {"color": "blue"}
        processor3.complete_task(task)
        self.assertEqual("New Step", processor3.next_task().task_spec.description)
        self.assertEqual("blue", processor3.next_task().data["color"])
def test_next_task_when_completing_sequential_steps_within_parallel(self):
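        """Completing sequential steps inside one parallel branch should keep
        next_task() on that branch (B1 through B4) before returning to A1."""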
        self.load_example_data()
        # Start the nav_order workflow, which nests sequential steps inside parallel branches.
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("nav_order")
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
        ready_tasks = processor.get_ready_user_tasks()
        task = ready_tasks[2]
        self.assertEqual("B1", task.task_spec.name)
        processor.complete_task(task)
        processor.do_engine_steps()
        task = processor.next_task()
        self.assertEqual("B1_0", task.task_spec.name)
        processor.complete_task(task)
        processor.do_engine_steps()
        task = processor.next_task()
        self.assertEqual("B2", task.task_spec.name)
        processor.complete_task(task)
        processor.do_engine_steps()
        task = processor.next_task()
        self.assertEqual("B3", task.task_spec.name)
        processor.complete_task(task)
        processor.do_engine_steps()
        task = processor.next_task()
        self.assertEqual("B4", task.task_spec.name)
        processor.complete_task(task)
        processor.do_engine_steps()
        task = processor.next_task()
        self.assertEqual("A1", task.task_spec.name)
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
def test_master_bpmn_for_crc(self, mock_details, mock_required_docs, mock_investigators, mock_studies):
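        """The master (top_level_workflow) spec should run to completion on its own
        and mark enter_core_info, personnel, and sponsor_funding_source as required."""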
        # Mock Protocol Builder response
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)

        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        required_docs_response = self.protocol_builder_response('required_docs.json')
        mock_required_docs.return_value = json.loads(required_docs_response)

        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)

        self.load_example_data(use_crc_data=True)
        app.config['PB_ENABLED'] = True

        study = session.query(StudyModel).first()
        workflow_spec_model = db.session.query(WorkflowSpecModel).\
            filter(WorkflowSpecModel.name == "top_level_workflow").first()
        self.assertIsNotNone(workflow_spec_model)

        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        self.assertTrue(processor.bpmn_workflow.is_completed(), "Top level process is fully automatic.")
        data = processor.bpmn_workflow.last_task.data

        logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)

        # It should mark Enter Core Data as required, because it is always required.
        self.assertIn("enter_core_info", data)
        self.assertEqual("required", data["enter_core_info"])

        # It should mark Personnel as required, because StudyInfo.investigators is not empty.
        self.assertIn("personnel", data)
        self.assertEqual("required", data["personnel"])

        # It should mark the sponsor funding source as required.
        self.assertIn("sponsor_funding_source", data)
        self.assertEqual("required", data["sponsor_funding_source"])

def test_enum_with_no_choices_raises_api_error(self):
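        """Filling a form that contains an enum field with no options should raise an ApiError."""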
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("random_fact")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        tasks = processor.next_user_tasks()
        task = tasks[0]

        field = FormField()
        field.id = "test_enum_field"
        field.type = "enum"
        field.options = []
        task.task_spec.form.fields.append(field)

        with self.assertRaises(ApiError):
            self._populate_form_with_random_data(task)
def test_get_role_by_name(self):
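        """The task that follows the first form should sit in the 'supervisor' lane."""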
        self.load_example_data()
        workflow_spec_model = self.load_test_spec("roles")
        study = session.query(StudyModel).first()
        processor = self.get_processor(study, workflow_spec_model)
        processor.do_engine_steps()
        tasks = processor.next_user_tasks()
        task = tasks[0]
        self._populate_form_with_random_data(task)
        processor.complete_task(task)
        supervisor_task = processor.next_user_tasks()[0]
        self.assertEqual("supervisor", supervisor_task.task_spec.lane)