import json
from datetime import datetime
from unittest.mock import patch

from crc import db
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus, \
    WorkflowSpecCategoryModel
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor

from example_data import ExampleDataLoader
from tests.base_test import BaseTest

class TestStudyService(BaseTest):
    """Largely tested via the test_study_api, and time is tight, but adding new tests here."""

def create_user_with_study_and_workflow(self):
|
2020-03-30 19:39:50 +00:00
|
|
|
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
# clear it all out.
|
|
|
|
from example_data import ExampleDataLoader
|
|
|
|
ExampleDataLoader.clean_db()
|
2020-04-15 15:13:32 +00:00
|
|
|
|
2020-03-30 19:39:50 +00:00
|
|
|
# Assure some basic models are in place, This is a damn mess. Our database models need an overhaul to make
|
|
|
|
# this easier - better relationship modeling is now critical.
|
|
|
|
self.load_test_spec("top_level_workflow", master_spec=True)
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
user = db.session.query(UserModel).filter(UserModel.uid == "dhf8r").first()
|
|
|
|
if not user:
|
|
|
|
user = UserModel(uid="dhf8r", email_address="whatever@stuff.com", display_name="Stayathome Smellalots")
|
|
|
|
db.session.add(user)
|
|
|
|
db.session.commit()
|
|
|
|
else:
|
|
|
|
for study in db.session.query(StudyModel).all():
|
|
|
|
StudyService().delete_study(study.id)
|
|
|
|
|
2020-04-21 21:13:30 +00:00
|
|
|
study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.ACTIVE, user_uid=user.uid)
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
db.session.add(study)
|
2020-04-23 18:40:05 +00:00
|
|
|
cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0)
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
db.session.add(cat)
|
2020-03-30 19:39:50 +00:00
|
|
|
db.session.commit()
|
2020-04-23 18:40:05 +00:00
|
|
|
|
|
|
|
self.assertIsNotNone(cat.id)
|
2020-03-30 19:39:50 +00:00
|
|
|
self.load_test_spec("random_fact", category_id=cat.id)
|
2020-04-23 18:40:05 +00:00
|
|
|
|
|
|
|
self.assertIsNotNone(study.id)
|
2020-05-04 14:57:09 +00:00
|
|
|
workflow = WorkflowModel(workflow_spec_id="random_fact", study_id=study.id,
|
|
|
|
status=WorkflowStatus.not_started, last_updated=datetime.now())
|
2020-03-30 19:39:50 +00:00
|
|
|
db.session.add(workflow)
|
|
|
|
db.session.commit()
|
|
|
|
# Assure there is a master specification, one standard spec, and lookup tables.
|
|
|
|
ExampleDataLoader().load_reference_documents()
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
return user
|
|
|
|
|
|
|
|
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
|
|
|
|
def test_total_tasks_updated(self, mock_docs):
|
|
|
|
"""Assure that as a users progress is available when getting a list of studies for that user."""
|
|
|
|
|
|
|
|
docs_response = self.protocol_builder_response('required_docs.json')
|
|
|
|
mock_docs.return_value = json.loads(docs_response)
|
|
|
|
|
|
|
|
user = self.create_user_with_study_and_workflow()
|
2020-03-30 18:01:57 +00:00
|
|
|
|
|
|
|
# The load example data script should set us up a user and at least one study, one category, and one workflow.
|
|
|
|
studies = StudyService.get_studies_for_user(user)
|
2020-03-30 19:39:50 +00:00
|
|
|
self.assertTrue(len(studies) == 1)
|
|
|
|
self.assertTrue(len(studies[0].categories) == 1)
|
|
|
|
self.assertTrue(len(studies[0].categories[0].workflows) == 1)
|
2020-03-30 18:01:57 +00:00
|
|
|
|
|
|
|
workflow = next(iter(studies[0].categories[0].workflows)) # Workflows is a set.
|
|
|
|
|
|
|
|
# workflow should not be started, and it should have 0 completed tasks, and 0 total tasks.
|
|
|
|
self.assertEqual(WorkflowStatus.not_started, workflow.status)
|
|
|
|
self.assertEqual(None, workflow.spec_version)
|
|
|
|
self.assertEqual(0, workflow.total_tasks)
|
|
|
|
self.assertEqual(0, workflow.completed_tasks)
|
|
|
|
|
|
|
|
# Initialize the Workflow with the workflow processor.
|
|
|
|
workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow.id).first()
|
|
|
|
processor = WorkflowProcessor(workflow_model)
|
|
|
|
|
|
|
|
# Assure the workflow is now started, and knows the total and completed tasks.
|
|
|
|
studies = StudyService.get_studies_for_user(user)
|
|
|
|
workflow = next(iter(studies[0].categories[0].workflows)) # Workflows is a set.
|
|
|
|
# self.assertEqual(WorkflowStatus.user_input_required, workflow.status)
|
|
|
|
self.assertTrue(workflow.total_tasks > 0)
|
|
|
|
self.assertEqual(0, workflow.completed_tasks)
|
|
|
|
self.assertIsNotNone(workflow.spec_version)
|
|
|
|
|
|
|
|
# Complete a task
|
|
|
|
task = processor.next_task()
|
|
|
|
processor.complete_task(task)
|
2020-05-04 14:57:09 +00:00
|
|
|
processor.save()
|
2020-03-30 18:01:57 +00:00
|
|
|
|
|
|
|
# Assure the workflow has moved on to the next task.
|
|
|
|
studies = StudyService.get_studies_for_user(user)
|
|
|
|
workflow = next(iter(studies[0].categories[0].workflows)) # Workflows is a set.
|
|
|
|
self.assertEqual(1, workflow.completed_tasks)
|
2020-04-23 18:40:05 +00:00
|
|
|
|
|
|
|
# Get approvals
|
|
|
|
approvals = StudyService.get_approvals(studies[0].id)
|
|
|
|
self.assertGreater(len(approvals), 0)
|
2020-04-24 12:54:14 +00:00
|
|
|
self.assertIsNotNone(approvals[0]['display_order'])
|
Refactor the document details scripts. Now there is one script, it returns data in a consistent format, and has all the details required. The script is located in StudyInfo, with the argument documents. Make note that it returns a dictionary of ALL the documents, with a field to mark which ones are required according to the protocol builder. Others may become required if a workflow determines such, in which case the workflow will enforce this, and the document will have a count > 0, and additional details in a list of files within the document. I modified the XLS file to use lower case variable names, because it disturbed me, and we have to reference them frequently. Removed devious "as_object" variable on get_required_docs, so it behaves like the other methods all the time, and returns a dictionary. All the core business logic for finding the documents list now resides in the StudyService.
Because this changes the endpoint for all existing document details, I've modified all the test and static bpmn files to use the new format.
Shorting up the SponsorsList.xls file makes for slightly faster tests. seems senseless to load 5000 everytime we reset the data.
Tried to test all of this carefully in the test_study_details_documents.py test.
2020-04-29 19:08:11 +00:00
|
|
|
|
|
|
|
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
|
|
|
|
def test_get_required_docs(self, mock_docs):
|
|
|
|
|
|
|
|
# mock out the protocol builder
|
|
|
|
docs_response = self.protocol_builder_response('required_docs.json')
|
|
|
|
mock_docs.return_value = json.loads(docs_response)
|
|
|
|
|
|
|
|
user = self.create_user_with_study_and_workflow()
|
|
|
|
studies = StudyService.get_studies_for_user(user)
|
|
|
|
study = studies[0]
|
|
|
|
|
|
|
|
|
|
|
|
study_service = StudyService()
|
|
|
|
documents = study_service.get_documents_status(study_id=study.id) # Mocked out, any random study id works.
|
|
|
|
self.assertIsNotNone(documents)
|
|
|
|
self.assertTrue("UVACompl_PRCAppr" in documents.keys())
|
|
|
|
self.assertEqual("UVACompl_PRCAppr", documents["UVACompl_PRCAppr"]['code'])
|
|
|
|
self.assertEqual("UVA Compliance / PRC Approval", documents["UVACompl_PRCAppr"]['display_name'])
|
|
|
|
self.assertEqual("Cancer Center's PRC Approval Form", documents["UVACompl_PRCAppr"]['description'])
|
|
|
|
self.assertEqual("UVA Compliance", documents["UVACompl_PRCAppr"]['category1'])
|
|
|
|
self.assertEqual("PRC Approval", documents["UVACompl_PRCAppr"]['category2'])
|
|
|
|
self.assertEqual("", documents["UVACompl_PRCAppr"]['category3'])
|
|
|
|
self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
|
|
|
|
self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
|
|
|
|
self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
|
|
|
|
self.assertEqual('6', documents["UVACompl_PRCAppr"]['id'])
|
|
|
|
|
|
|
|
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
|
|
|
|
def test_get_documents_has_file_details(self, mock_docs):
|
|
|
|
|
|
|
|
# mock out the protocol builder
|
|
|
|
docs_response = self.protocol_builder_response('required_docs.json')
|
|
|
|
mock_docs.return_value = json.loads(docs_response)
|
|
|
|
|
|
|
|
user = self.create_user_with_study_and_workflow()
|
|
|
|
|
|
|
|
# Add a document to the study with the correct code.
|
|
|
|
workflow = self.create_workflow('docx')
|
|
|
|
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
|
|
|
|
FileService.add_task_file(study_id=workflow.study_id, workflow_id=workflow.id,
|
|
|
|
workflow_spec_id=workflow.workflow_spec_id,
|
|
|
|
task_id="fakingthisout",
|
|
|
|
name="anything.png", content_type="text",
|
|
|
|
binary_data=b'1234', irb_doc_code=irb_code)
|
|
|
|
|
|
|
|
docs = StudyService().get_documents_status(workflow.study_id)
|
|
|
|
self.assertIsNotNone(docs)
|
|
|
|
self.assertEqual("not_started", docs["UVACompl_PRCAppr"]['status'])
|
|
|
|
self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
|
|
|
|
self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0])
|
|
|
|
self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0]['file_id'])
|
|
|
|
self.assertEquals(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
|
|
|
|
self.assertEquals(workflow.workflow_spec_id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_spec_id'])
|
|
|
|
|
|
|
|
# 'file_id': 123,
|
|
|
|
# 'task_id': 'abcdef14236890',
|
|
|
|
# 'workflow_id': 456,
|
|
|
|
# 'workflow_spec_id': 'irb_api_details',
|
|
|
|
# 'status': 'complete',
|
2020-05-07 17:57:24 +00:00
|
|
|
|
|
|
|
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_docs
|
|
|
|
def test_get_personnel(self, mock_docs):
|
|
|
|
self.load_example_data()
|
|
|
|
|
|
|
|
# mock out the protocol builder
|
|
|
|
docs_response = self.protocol_builder_response('investigators.json')
|
|
|
|
mock_docs.return_value = json.loads(docs_response)
|
|
|
|
|
|
|
|
workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case.
|
|
|
|
investigators = StudyService().get_investigators(workflow.study_id)
|
|
|
|
|
|
|
|
self.assertEquals(9, len(investigators))
|
|
|
|
|
|
|
|
# dhf8r is in the ldap mock data.
|
|
|
|
self.assertEquals("dhf8r", investigators['PI']['user_id'])
|
|
|
|
self.assertEquals("Dan Funk", investigators['PI']['display_name']) # Data from ldap
|
|
|
|
self.assertEquals("Primary Investigator", investigators['PI']['label']) # Data from xls file.
|
|
|
|
self.assertEquals("Always", investigators['PI']['display']) # Data from xls file.
|
|
|
|
|
|
|
|
# asd3v is not in ldap, so an error should be returned.
|
|
|
|
self.assertEquals("asd3v", investigators['DC']['user_id'])
|
|
|
|
self.assertEquals("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error']) # Data from ldap
|
|
|
|
|
|
|
|
# No value is provided for Department Chair
|
|
|
|
self.assertIsNone(investigators['DEPT_CH']['user_id'])
|