import json
from tests.base_test import BaseTest
from datetime import datetime, timezone
from unittest.mock import patch

from crc import session, app
from crc.models.protocol_builder import ProtocolBuilderStatus, \
    ProtocolBuilderStudySchema
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowSpecModel, WorkflowModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor


class TestStudyApi(BaseTest):

    TEST_STUDY = {
        "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents "
                 "for Interstellar Spacecraft",
        "last_updated": datetime.now(tz=timezone.utc),
        "protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
        "primary_investigator_id": "tmm2x",
        "user_uid": "dhf8r",
    }

    def add_test_study(self):
        rv = self.app.post('/v1.0/study',
                           content_type="application/json",
                           headers=self.logged_in_headers(),
                           data=json.dumps(StudySchema().dump(self.TEST_STUDY)))
        self.assert_success(rv)
        return json.loads(rv.get_data(as_text=True))

    def test_study_basics(self):
        self.load_example_data()
        study = session.query(StudyModel).first()
        self.assertIsNotNone(study)

    def test_get_study(self):
        """A generic but fairly detailed test: the study should return a categorized list of workflows.
        This starts out by loading the example data, to show that all the bases are covered from ground zero.

        NOTE: The protocol builder is not enabled or mocked out; the master workflow (which is empty)
        and the test workflow do not need it, and it is disabled in the configuration."""
        self.load_example_data()
        new_study = self.add_test_study()
        new_study = session.query(StudyModel).filter_by(id=new_study["id"]).first()

        api_response = self.app.get('/v1.0/study/%i' % new_study.id,
                                    headers=self.logged_in_headers(), content_type="application/json")
        self.assert_success(api_response)
        study = StudySchema().loads(api_response.get_data(as_text=True))

        self.assertEqual(study.title, self.TEST_STUDY['title'])
        self.assertEqual(study.primary_investigator_id, self.TEST_STUDY['primary_investigator_id'])
        self.assertEqual(study.user_uid, self.TEST_STUDY['user_uid'])

        # Categories are read only, so switching to sub-scripting here.
        # This assumes there is one test category set up in the example data.
        category = study.categories[0]
        self.assertEqual("test_category", category['name'])
        self.assertEqual("Test Category", category['display_name'])
        self.assertEqual(1, len(category["workflows"]))
        workflow = category["workflows"][0]
        self.assertEqual("random_fact", workflow["name"])
        self.assertEqual("optional", workflow["state"])
        self.assertEqual("not_started", workflow["status"])
        self.assertEqual(0, workflow["total_tasks"])
        self.assertEqual(0, workflow["completed_tasks"])

    def test_get_study_has_details_about_files(self):

        # Set up the study and attach a file to it.
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('file_upload_form')
        processor = WorkflowProcessor(workflow)
        task = processor.next_task()
        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="png",
                                      binary_data=b'1234', irb_doc_code=irb_code)

        api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
                                    headers=self.logged_in_headers(), content_type="application/json")
        self.assert_success(api_response)
        study = StudySchema().loads(api_response.get_data(as_text=True))
        self.assertEqual(1, len(study.files))
        self.assertEqual("UVA Compliance/PRC Approval", study.files[0]["category"])
        self.assertEqual("Cancer Center's PRC Approval Form", study.files[0]["description"])
        self.assertEqual("UVA Compliance/PRC Approval.png", study.files[0]["download_name"])

        # TODO: WRITE A TEST FOR STUDY FILES

    def test_get_study_has_details_about_approvals(self):
        # TODO: WRITE A TEST FOR STUDY APPROVALS
        pass
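        # A hypothetical sketch of what this test might look like, assuming the study endpoint
        # eventually exposes an "approvals" list alongside "files"; that field name and its shape
        # are assumptions for illustration, not part of the current API:
        #
        #     self.load_example_data()
        #     study = self.add_test_study()
        #     api_response = self.app.get('/v1.0/study/%i' % study['id'],
        #                                 headers=self.logged_in_headers(), content_type="application/json")
        #     self.assert_success(api_response)
        #     loaded_study = StudySchema().loads(api_response.get_data(as_text=True))
        #     self.assertEqual(0, len(loaded_study.approvals))  # "approvals" is an assumed field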

    def test_add_study(self):
        self.load_example_data()
        study = self.add_test_study()
        db_study = session.query(StudyModel).filter_by(id=study['id']).first()
        self.assertIsNotNone(db_study)
        self.assertEqual(study["title"], db_study.title)
        self.assertEqual(study["primary_investigator_id"], db_study.primary_investigator_id)
        self.assertEqual(study["sponsor"], db_study.sponsor)
        self.assertEqual(study["ind_number"], db_study.ind_number)
        self.assertEqual(study["user_uid"], db_study.user_uid)

        workflow_spec_count = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.is_master_spec == False).count()
        workflow_count = session.query(WorkflowModel).filter(WorkflowModel.study_id == study['id']).count()
        error_count = len(study["errors"])
        self.assertEqual(workflow_spec_count, workflow_count + error_count)
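        # The check above assumes every non-master workflow spec either produced a workflow
        # attached to the new study or contributed an entry to study["errors"], so the two
        # counts together should account for all of the specs.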

    def test_update_study(self):
        self.load_example_data()
        study: StudyModel = session.query(StudyModel).first()
        study.title = "Pilot Study of Fjord Placement for Single Fraction Outcomes to Cortisol Susceptibility"
        study.protocol_builder_status = ProtocolBuilderStatus.ACTIVE
        rv = self.app.put('/v1.0/study/%i' % study.id,
                          content_type="application/json",
                          headers=self.logged_in_headers(),
                          data=json.dumps(StudySchema().dump(study)))
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        self.assertEqual(study.title, json_data['title'])
        self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status'])

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_investigators
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_get_all_studies(self, mock_studies, mock_details, mock_docs, mock_investigators):
        # Enable the protocol builder for these tests, as the master_workflow and other workflows
        # depend on using the PB for data.
        app.config['PB_ENABLED'] = True
        self.load_example_data()
        s = StudyModel(
            id=54321,  # This matches one of the ids from the study_details_json data.
            title='The impact of pandemics on dog owner sanity after 12 days',
            user_uid='dhf8r',
        )
        session.add(s)
        session.commit()

        num_db_studies_before = session.query(StudyModel).count()

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        # Make the api call to get all studies
        api_response = self.app.get('/v1.0/study', headers=self.logged_in_headers(), content_type="application/json")
        self.assert_success(api_response)
        json_data = json.loads(api_response.get_data(as_text=True))

        num_incomplete = 0
        num_abandoned = 0
        num_active = 0
        num_open = 0

        for study in json_data:
            if study['protocol_builder_status'] == 'INCOMPLETE':  # One study in user_studies.json is not q_complete
                num_incomplete += 1
            if study['protocol_builder_status'] == 'ABANDONED':  # One study does not exist in user_studies.json
                num_abandoned += 1
            if study['protocol_builder_status'] == 'ACTIVE':  # One study is marked complete without HSR Number
                num_active += 1
            if study['protocol_builder_status'] == 'OPEN':  # One study is marked complete and has an HSR Number
                num_open += 1

        db_studies_after = session.query(StudyModel).all()
        num_db_studies_after = len(db_studies_after)
        self.assertGreater(num_db_studies_after, num_db_studies_before)
        self.assertEqual(num_abandoned, 1)
        self.assertEqual(num_open, 1)
        self.assertEqual(num_active, 1)
        self.assertEqual(num_incomplete, 1)
        self.assertEqual(len(json_data), num_db_studies_after)
        self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after)

    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_investigators
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')  # mock_docs
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies
    def test_get_single_study(self, mock_studies, mock_details, mock_docs, mock_investigators):

        # Mock Protocol Builder responses
        studies_response = self.protocol_builder_response('user_studies.json')
        mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
        details_response = self.protocol_builder_response('study_details.json')
        mock_details.return_value = json.loads(details_response)
        docs_response = self.protocol_builder_response('required_docs.json')
        mock_docs.return_value = json.loads(docs_response)
        investigators_response = self.protocol_builder_response('investigators.json')
        mock_investigators.return_value = json.loads(investigators_response)

        self.load_example_data()
        study = session.query(StudyModel).first()
        rv = self.app.get('/v1.0/study/%i' % study.id,
                          follow_redirects=True,
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        self.assertEqual(study.id, json_data['id'])
        self.assertEqual(study.title, json_data['title'])
        self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status'])
        self.assertEqual(study.primary_investigator_id, json_data['primary_investigator_id'])
        self.assertEqual(study.sponsor, json_data['sponsor'])
        self.assertEqual(study.ind_number, json_data['ind_number'])

    def test_delete_study(self):
        self.load_example_data()
        study = session.query(StudyModel).first()
        rv = self.app.delete('/v1.0/study/%i' % study.id, headers=self.logged_in_headers())
        self.assert_success(rv)

    def test_delete_study_with_workflow_and_status(self):
        self.load_example_data()
        workflow = session.query(WorkflowModel).first()
        stats2 = TaskEventModel(study_id=workflow.study_id, workflow_id=workflow.id, user_uid=self.users[0]['uid'])
        session.add(stats2)
        session.commit()
        rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)
        del_study = session.query(StudyModel).filter(StudyModel.id == workflow.study_id).first()
        self.assertIsNone(del_study)

    # """
    # Workflow Specs that have been made available (or not) to a particular study via the status.bpmn should be flagged
    # as available (or not) when the list of a study's workflows is retrieved.
    # """
    # @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
    # @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
    # @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
    # @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
    # def test_workflow_spec_status(self,
    #                               mock_details,
    #                               mock_required_docs,
    #                               mock_investigators,
    #                               mock_studies):
    #
    #     # Mock Protocol Builder response
    #     studies_response = self.protocol_builder_response('user_studies.json')
    #     mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
    #
    #     investigators_response = self.protocol_builder_response('investigators.json')
    #     mock_investigators.return_value = ProtocolBuilderInvestigatorSchema(many=True).loads(investigators_response)
    #
    #     required_docs_response = self.protocol_builder_response('required_docs.json')
    #     mock_required_docs.return_value = ProtocolBuilderRequiredDocumentSchema(many=True).loads(required_docs_response)
    #
    #     details_response = self.protocol_builder_response('study_details.json')
    #     mock_details.return_value = ProtocolBuilderStudyDetailsSchema().loads(details_response)
    #
    #     self.load_example_data()
    #     study = session.query(StudyModel).first()
    #     study_id = study.id
    #
    #     # Add status workflow
    #     self.load_test_spec('top_level_workflow')
    #
    #     # Assure the top_level_workflow is added to the study
    #     top_level_spec = session.query(WorkflowSpecModel).filter_by(is_master_spec=True).first()
    #     self.assertIsNotNone(top_level_spec)
    #
    #     for is_active in [False, True]:
    #         # Set all workflow specs to inactive|active
    #         update_status_response = self.app.put('/v1.0/workflow/%i/task/%s/data' % (status_workflow.id, status_task_id),
    #                                               headers=self.logged_in_headers(),
    #                                               content_type="application/json",
    #                                               data=json.dumps({'some_input': is_active}))
    #         self.assert_success(update_status_response)
    #         json_workflow_api = json.loads(update_status_response.get_data(as_text=True))
    #         updated_workflow_api: WorkflowApi = WorkflowApiSchema().load(json_workflow_api)
    #         self.assertIsNotNone(updated_workflow_api)
    #         self.assertEqual(updated_workflow_api.status, WorkflowStatus.complete)
    #         self.assertIsNotNone(updated_workflow_api.last_task)
    #         self.assertIsNotNone(updated_workflow_api.last_task['data'])
    #         self.assertIsNotNone(updated_workflow_api.last_task['data']['some_input'])
    #         self.assertEqual(updated_workflow_api.last_task['data']['some_input'], is_active)
    #
    #         # List workflows for study
    #         response_after = self.app.get('/v1.0/study/%i/workflows' % study.id,
    #                                       content_type="application/json",
    #                                       headers=self.logged_in_headers())
    #         self.assert_success(response_after)
    #
    #         json_data_after = json.loads(response_after.get_data(as_text=True))
    #         workflows_after = WorkflowApiSchema(many=True).load(json_data_after)
    #         self.assertEqual(len(specs), len(workflows_after))
    #
    #         # All workflows should be inactive|active
    #         for workflow in workflows_after:
    #             self.assertEqual(workflow.is_active, is_active)