Merge pull request #38 from sartography/feature/investigators_reference_file

Adding a new reference file that provides greater details about the i…
Dan Funk 2020-05-12 10:01:48 -04:00 committed by GitHub
commit e043a5ff63
16 changed files with 393 additions and 130 deletions

View File

@@ -3,8 +3,7 @@ from typing import cast
from marshmallow_enum import EnumField
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func, Index, text
from sqlalchemy.dialects import postgresql
from sqlalchemy import func, Index
from sqlalchemy.dialects.postgresql import UUID
from crc import db

View File

@@ -52,7 +52,7 @@ Takes two arguments:
message="The CompleteTemplate script requires 2 arguments. The first argument is "
"the name of the docx template to use. The second "
"argument is a code for the document, as "
"set in the reference document %s. " % FileService.IRB_PRO_CATEGORIES_FILE)
"set in the reference document %s. " % FileService.DOCUMENT_LIST)
task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
file_name = args[0]
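For context, the two arguments this message describes arrive positionally; a minimal sketch of what the script receives (the invocation syntax and template name are illustrative assumptions, and 'UVACompl_PRCAppr' is one code from the reference file shown later in this diff):

```python
# Hypothetical values: a script task reading "CompleteTemplate <template> <code>"
# delivers its two arguments to the script as a tuple.
args = ("my_template.docx", "UVACompl_PRCAppr")
file_name = args[0]       # the docx template to render
document_code = args[1]   # must be a code defined in irb_documents.xlsx
```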

View File

@@ -1,39 +1,149 @@
from ldap3.core.exceptions import LDAPSocketOpenError
import json
from crc import session, app
from crc import session
from crc.api.common import ApiError
from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowStatus
from crc.scripts.script import Script, ScriptValidationError
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
class StudyInfo(Script):
"""Please see the detailed description that is provided below. """
"""Just your basic class that can pull in data from a few api endpoints and do a basic task."""
pb = ProtocolBuilderService()
type_options = ['info', 'investigators', 'details', 'approvals', 'documents', 'protocol']
# This is used for test/workflow validation, as well as documentation.
example_data = {
"StudyInfo": {
"info": {
"id": 12,
"title": "test",
"primary_investigator_id": 21,
"user_uid": "dif84",
"sponsor": "sponsor",
"ind_number": "1234",
"inactive": False
},
"investigators": {
'PI': {
'label': 'Primary Investigator',
'display': 'Always',
'unique': 'Yes',
'user_id': 'dhf8r',
'display_name': 'Dan Funk',
'given_name': 'Dan',
'email': 'dhf8r@virginia.edu',
'telephone_number': '+1 (434) 924-1723',
'title': "E42:He's a hoopy frood",
'department': 'E0:EN-Eng Study of Parallel Universes',
'affiliation': 'faculty',
'sponsor_type': 'Staff'},
'SC_I': {
'label': 'Study Coordinator I',
'display': 'Always',
'unique': 'Yes',
'user_id': None},
'DC': {
'label': 'Department Contact',
'display': 'Optional',
'unique': 'Yes',
'user_id': 'asd3v',
'error': 'Unable to locate a user with id asd3v in LDAP'}
},
"documents": {
'AD_CoCApp': {'category1': 'Ancillary Document', 'category2': 'CoC Application', 'category3': '',
'Who Uploads?': 'CRC', 'id': '12',
'description': 'Certificate of Confidentiality Application', 'required': False,
'study_id': 1, 'code': 'AD_CoCApp', 'display_name': 'Ancillary Document / CoC Application',
'count': 0, 'files': []},
'UVACompl_PRCAppr': {'category1': 'UVA Compliance', 'category2': 'PRC Approval', 'category3': '',
'Who Uploads?': 'CRC', 'id': '6', 'description': "Cancer Center's PRC Approval Form",
'required': True, 'study_id': 1, 'code': 'UVACompl_PRCAppr',
'display_name': 'UVA Compliance / PRC Approval', 'count': 1, 'files': [
{'file_id': 10,
'task_id': 'fakingthisout',
'workflow_id': 2,
'workflow_spec_id': 'docx'}],
'status': 'complete'}
},
"details":
{},
"approvals": {
"study_id": 12,
"workflow_id": 321,
"display_name": "IRB API Details",
"name": "irb_api_details",
"status": WorkflowStatus.not_started.value,
"workflow_spec_id": "irb_api_details",
},
'protocol': {
'id': 0,
}
}
}
def example_to_string(self, key):
return json.dumps(self.example_data['StudyInfo'][key], indent=2, separators=(',', ': '))
def get_description(self):
return """StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', or 'details', 'approvals',
return """
StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', 'details', 'approvals',
'documents' or 'protocol'.
Adds details about the current study to the Task Data. The type of information required should be
provided as an argument. 'info' returns the basic information such as the title. 'Investigators' provides
detailed information about each investigator in the study. 'Details' provides a large number
of details about the study, as gathered within the protocol builder, and 'documents',
lists all the documents that can be a part of the study, with documents from Protocol Builder
marked as required, and details about any files that were uploaded.
"""
provided as an argument. The following arguments are available:
### Info ###
Returns the basic information such as the id and title.
```
{info_example}
```
### Investigators ###
Returns detailed information about related personnel.
The order returned is guaranteed to match the order provided in the investigators.xlsx reference file.
If possible, detailed information from LDAP is added in for each person, based on their user_id.
```
{investigators_example}
```
### Details ###
Returns detailed information about variable keys read in from the Protocol Builder.
### Approvals ###
Returns data about the status of approvals related to a study.
```
{approvals_example}
```
### Documents ###
Returns a list of all documents that might be related to a study, reading all columns from the irb_documents.xlsx
file, including information about any files that were uploaded or generated that relate to a given document.
Please note these are just a few examples; ALL known document types are returned in an actual call.
```
{documents_example}
```
### Protocol ###
Returns information specific to the protocol.
""".format(info_example=self.example_to_string("info"),
investigators_example=self.example_to_string("investigators"),
approvals_example=self.example_to_string("approvals"),
documents_example=self.example_to_string("documents"),
)
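Because get_description() interpolates example_data through example_to_string, the rendered help text can be previewed directly; a minimal sketch, assuming StudyInfo can be instantiated with no arguments:

```python
# Preview the generated documentation; both methods are defined above.
info = StudyInfo()
print(info.example_to_string("investigators"))  # one example dict as indented JSON
print(info.get_description())                   # the full help text with all examples inlined
```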
def do_task_validate_only(self, task, study_id, *args, **kwargs):
"""For validation only, pretend no results come back from pb"""
self.check_args(args)
# Assure the reference files exist (a bit hacky, but we want to raise this error early and cleanly).
FileService.get_file_reference_dictionary()
FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST)
data = {
"study":{
"info": {
@@ -87,8 +197,7 @@ class StudyInfo(Script):
schema = StudySchema()
self.add_data_to_task(task, {cmd: schema.dump(study)})
if cmd == 'investigators':
pb_response = self.pb.get_investigators(study_id)
self.add_data_to_task(task, {cmd: self.organize_investigators_by_type(pb_response)})
self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id)})
if cmd == 'details':
self.add_data_to_task(task, {cmd: self.pb.get_study_details(study_id)})
if cmd == 'approvals':
@@ -106,22 +215,3 @@ class StudyInfo(Script):
"one of %s" % ",".join(StudyInfo.type_options))
def organize_investigators_by_type(self, pb_investigators):
"""Convert array of investigators from protocol builder into a dictionary keyed on the type"""
output = {}
for i in pb_investigators:
dict = {"user_id": i["NETBADGEID"], "type_full": i["INVESTIGATORTYPEFULL"]}
dict.update(self.get_ldap_dict_if_available(i["NETBADGEID"]))
output[i["INVESTIGATORTYPE"]] = dict
return output
def get_ldap_dict_if_available(self, user_id):
try:
ldap_service = LdapService()
return ldap_service.user_info(user_id).__dict__
except ApiError:
app.logger.info(str(ApiError))
return {}
except LDAPSocketOpenError:
app.logger.info("Failed to connect to LDAP Server.")
return {}

View File

@@ -16,7 +16,8 @@ import hashlib
class FileService(object):
"""Provides consistent management and rules for storing, retrieving and processing files."""
IRB_PRO_CATEGORIES_FILE = "irb_documents.xlsx"
DOCUMENT_LIST = "irb_documents.xlsx"
INVESTIGATOR_LIST = "investigators.xlsx"
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
@@ -31,12 +32,18 @@ class FileService(object):
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def is_allowed_document(code):
    data_model = FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
    xls = ExcelFile(data_model.data)
    df = xls.parse(xls.sheet_names[0])
    return code in df['code'].values
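A hedged usage sketch: the first code appears in the reference data shown earlier in this diff, the second is a deliberately unknown value:

```python
# True when the 'code' column of irb_documents.xlsx contains the value.
FileService.is_allowed_document('UVACompl_PRCAppr')  # True, per the reference file
FileService.is_allowed_document('NOT_A_REAL_CODE')   # False (hypothetical code)
```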
@staticmethod
def add_form_field_file(study_id, workflow_id, task_id, form_field_key, name, content_type, binary_data):
"""Create a new file and associate it with a user task form field within a workflow.
Please note that the form_field_key MUST be a known file in the irb_documents.xlsx reference document."""
if not FileService.irb_document_reference_exists(form_field_key):
if not FileService.is_allowed_document(form_field_key):
raise ApiError("invalid_form_field_key",
"When uploading files, the form field id must match a known document in the "
"irb_docunents.xslx reference file. This code is not found in that file '%s'" % form_field_key)
@@ -52,32 +59,21 @@ class FileService(object):
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def irb_document_reference_exists(code):
data_model = FileService.get_reference_file_data(FileService.IRB_PRO_CATEGORIES_FILE)
def get_reference_data(reference_file_name, index_column, int_columns=[]):
""" Opens a reference file (assumes that it is xls file) and returns the data as a
dictionary, each row keyed on the given index_column name. If there are columns
that should be represented as integers, pass these as an array of int_columns, lest
you get '1.0' rather than '1' """
data_model = FileService.get_reference_file_data(reference_file_name)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
return code in df['code'].values
@staticmethod
def get_file_reference_dictionary():
"""Loads up the xsl file that contains the IRB Pro Categories and converts it to
a Panda's data frame for processing."""
data_model = FileService.get_reference_file_data(FileService.IRB_PRO_CATEGORIES_FILE)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
df['id'] = df['id'].fillna(0)
df = df.astype({'id': 'Int64'})
for c in int_columns:
df[c] = df[c].fillna(0)
df = df.astype({c: 'Int64'})
df = df.fillna('')
df = df.applymap(str)
df = df.set_index('code')
# If we need to convert the column names to something more sensible.
# df.columns = [snakeCase(x) for x in df.columns]
df = df.set_index(index_column)
return json.loads(df.to_json(orient='index'))
# # Pandas is lovely, but weird. Here we drop records without an Id, and convert it to an integer.
# df = df.drop_duplicates(subset='Id').astype({'Id': 'Int64'})
# Now we index on the ID column and convert to a dictionary, where the key is the id, and the value
# is a dictionary with all the remaining data in it. It's kinda pretty really.
# all_dict = df.set_index('Id').to_dict('index')
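A sketch of how the two call sites added in this PR use get_reference_data (row contents are illustrative; every cell comes back as a string because of df.applymap(str)):

```python
# Documents, keyed on 'code', with 'id' coerced to an integer before stringifying.
docs = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
# docs['UVACompl_PRCAppr'] -> {'id': '6', 'category1': 'UVA Compliance', ...}

# Investigator types, also keyed on 'code', with no integer columns.
invs = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
# invs['PI'] -> {'label': 'Primary Investigator', 'display': 'Always', ...}
```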
@staticmethod
def add_task_file(study_id, workflow_id, workflow_spec_id, task_id, name, content_type, binary_data,
@@ -187,7 +183,6 @@ class FileService(object):
.filter(FileDataModel.version == file_model.latest_version) \
.first()
@staticmethod
def get_reference_file_data(file_name):
file_model = session.query(FileModel). \

View File

@@ -4,6 +4,7 @@ from typing import List
import requests
from SpiffWorkflow import WorkflowException
from ldap3.core.exceptions import LDAPSocketOpenError
from crc import db, session, app
from crc.api.common import ApiError
@@ -14,6 +15,7 @@ from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
@@ -116,7 +118,8 @@ class StudyService(object):
pb_docs = []
# Loop through all known document types, get the counts for those files, and use pb_docs to mark those required.
doc_dictionary = FileService.get_file_reference_dictionary()
doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
documents = {}
for code, doc in doc_dictionary.items():
@@ -154,6 +157,37 @@
return documents
@staticmethod
def get_investigators(study_id):
    """Convert the array of investigators from Protocol Builder into a dictionary keyed on the type."""
    # Loop through all known investigator types as set in the reference file.
    inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
    # Get the investigators listed in Protocol Builder for this study.
    pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id)
    for i_type in inv_dictionary:
        pb_data = next((item for item in pb_investigators if item['INVESTIGATORTYPE'] == i_type), None)
        if pb_data:
            inv_dictionary[i_type]['user_id'] = pb_data["NETBADGEID"]
            inv_dictionary[i_type].update(StudyService.get_ldap_dict_if_available(pb_data["NETBADGEID"]))
        else:
            inv_dictionary[i_type]['user_id'] = None
    return inv_dictionary
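The merge above yields one entry per row of investigators.xlsx; a sketch of the three possible outcomes, with values taken from the StudyInfo example data earlier in this diff (a real study will differ):

```python
investigators = StudyService.get_investigators(study_id=12)  # study id is illustrative
investigators['PI']['user_id']    # 'dhf8r': found in PB, LDAP details merged in
investigators['DC']['error']      # 'Unable to locate a user with id asd3v in LDAP'
investigators['SC_I']['user_id']  # None: no investigator of this type in PB
```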
@staticmethod
def get_ldap_dict_if_available(user_id):
    try:
        ldap_service = LdapService()
        return ldap_service.user_info(user_id).__dict__
    except ApiError as ae:
        app.logger.info(str(ae))
        return {"error": str(ae)}
    except LDAPSocketOpenError:
        app.logger.info("Failed to connect to LDAP Server.")
        return {}
@staticmethod
def get_protocol(study_id):

Binary file not shown.

View File

@@ -239,7 +239,14 @@ class ExampleDataLoader:
def load_reference_documents(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.IRB_PRO_CATEGORIES_FILE,
FileService.add_reference_file(FileService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.INVESTIGATOR_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
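The two blocks differ only in the file they load; a possible refactor (a hypothetical helper, not part of this commit) would remove the duplication:

```python
def _load_reference(self, reference_name, basename):
    # Hypothetical helper: the open/read/close pattern above, factored out.
    file_path = os.path.join(app.root_path, 'static', 'reference', basename)
    with open(file_path, "rb") as f:
        FileService.add_reference_file(reference_name,
                                       binary_data=f.read(),
                                       content_type=CONTENT_TYPES['xls'])
```

It would be called as self._load_reference(FileService.DOCUMENT_LIST, 'irb_documents.xlsx') and self._load_reference(FileService.INVESTIGATOR_LIST, 'investigators.xlsx').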

View File

@@ -207,14 +207,12 @@ class BaseTest(unittest.TestCase):
study = session.query(StudyModel).first()
spec = self.load_test_spec(workflow_name, category_id=category_id)
workflow_model = StudyService._create_workflow_model(study, spec)
#processor = WorkflowProcessor(workflow_model)
#workflow = session.query(WorkflowModel).filter_by(study_id=study.id, workflow_spec_id=workflow_name).first()
return workflow_model
def create_reference_document(self):
file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'reference', 'irb_documents.xlsx')
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.IRB_PRO_CATEGORIES_FILE,
FileService.add_reference_file(FileService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()

View File

@@ -74,6 +74,82 @@
"Staff"
]
}
}
},
{
"attributes": {
"cn": [
"Dan Funk (dhf8r)"
],
"displayName": "Dan Funk",
"givenName": [
"Dan"
],
"mail": [
"dhf8r@virginia.edu"
],
"objectClass": [
"top",
"person",
"organizationalPerson",
"inetOrgPerson",
"uvaPerson",
"uidObject"
],
"telephoneNumber": [
"+1 (434) 924-1723"
],
"title": [
"E42:He's a hoopy frood"
],
"uvaDisplayDepartment": [
"E0:EN-Eng Study of Parallel Universes"
],
"uvaPersonIAMAffiliation": [
"faculty"
],
"uvaPersonSponsoredType": [
"Staff"
]
},
"dn": "uid=dhf8r,ou=People,o=University of Virginia,c=US",
"raw": {
"cn": [
"Dan Funk (dhf84)"
],
"displayName": [
"Dan Funk"
],
"givenName": [
"Dan"
],
"mail": [
"dhf8r@virginia.edu"
],
"objectClass": [
"top",
"person",
"organizationalPerson",
"inetOrgPerson",
"uvaPerson",
"uidObject"
],
"telephoneNumber": [
"+1 (434) 924-1723"
],
"title": [
"E42:He's a hoopy frood"
],
"uvaDisplayDepartment": [
"E0:EN-Eng Study of Parallel Universes"
],
"uvaPersonIAMAffiliation": [
"faculty"
],
"uvaPersonSponsoredType": [
"Staff"
]
}
}
]
}

View File

@@ -1,12 +1,14 @@
import io
import json
from datetime import datetime
from unittest.mock import patch
from crc import session
from crc.models.file import FileModel, FileType, FileModelSchema, FileDataModel
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from example_data import ExampleDataLoader
from tests.base_test import BaseTest
@@ -102,7 +104,7 @@ class TestFilesApi(BaseTest):
self.assertEqual("application/vnd.ms-excel", file.content_type)
def test_set_reference_file_bad_extension(self):
file_name = FileService.IRB_PRO_CATEGORIES_FILE
file_name = FileService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.ppt")}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
@@ -119,7 +121,9 @@
self.assertEqual(b"abcdef", data_out)
def test_list_reference_files(self):
file_name = FileService.IRB_PRO_CATEGORIES_FILE
ExampleDataLoader.clean_db()
file_name = FileService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())

View File

@@ -24,7 +24,11 @@ class TestStudyDetailsDocumentsScript(BaseTest):
convention that we are implementing for the IRB.
"""
def test_validate_returns_error_if_reference_files_do_not_exist(self):
@patch('crc.services.protocol_builder.requests.get')
def test_validate_returns_error_if_reference_files_do_not_exist(self, mock_get):
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('required_docs.json')
self.load_example_data()
self.create_reference_document()
study = session.query(StudyModel).first()
@@ -36,7 +40,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):
# Remove the reference file.
file_model = db.session.query(FileModel). \
filter(FileModel.is_reference == True). \
filter(FileModel.name == FileService.IRB_PRO_CATEGORIES_FILE).first()
filter(FileModel.name == FileService.DOCUMENT_LIST).first()
if file_model:
db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
@@ -46,7 +50,12 @@
with self.assertRaises(ApiError):
StudyInfo().do_task_validate_only(task, study.id, "documents")
def test_no_validation_error_when_correct_file_exists(self):
@patch('crc.services.protocol_builder.requests.get')
def test_no_validation_error_when_correct_file_exists(self, mock_get):
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('required_docs.json')
self.load_example_data()
self.create_reference_document()
study = session.query(StudyModel).first()
@@ -58,7 +67,7 @@
def test_load_lookup_data(self):
self.create_reference_document()
dict = FileService.get_file_reference_dictionary()
dict = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
self.assertIsNotNone(dict)
def get_required_docs(self):

View File

@@ -163,3 +163,29 @@ class TestStudyService(BaseTest):
# 'workflow_id': 456,
# 'workflow_spec_id': 'irb_api_details',
# 'status': 'complete',
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_investigators
def test_get_personnel(self, mock_investigators):
self.load_example_data()
# mock out the protocol builder
investigators_response = self.protocol_builder_response('investigators.json')
mock_investigators.return_value = json.loads(investigators_response)
workflow = self.create_workflow('docx') # The workflow really doesn't matter in this case.
investigators = StudyService().get_investigators(workflow.study_id)
self.assertEquals(9, len(investigators))
# dhf8r is in the ldap mock data.
self.assertEquals("dhf8r", investigators['PI']['user_id'])
self.assertEquals("Dan Funk", investigators['PI']['display_name']) # Data from ldap
self.assertEquals("Primary Investigator", investigators['PI']['label']) # Data from xls file.
self.assertEquals("Always", investigators['PI']['display']) # Data from xls file.
# asd3v is not in ldap, so an error should be returned.
self.assertEquals("asd3v", investigators['DC']['user_id'])
self.assertEquals("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error']) # Data from ldap
# No value is provided for Department Chair
self.assertIsNone(investigators['DEPT_CH']['user_id'])

View File

@@ -1,5 +1,6 @@
import json
import os
import random
from unittest.mock import patch
from crc import session, app
@@ -297,7 +298,7 @@ class TestTasksApi(BaseTest):
self.assertEquals(1, len(tasks))
self.assertEquals("UserTask", tasks[0].type)
self.assertEquals(MultiInstanceType.sequential, tasks[0].mi_type)
self.assertEquals(3, tasks[0].mi_count)
self.assertEquals(9, tasks[0].mi_count)
def test_lookup_endpoint_for_task_field_enumerations(self):
@@ -383,7 +384,7 @@
@patch('crc.services.protocol_builder.requests.get')
def test_parallel_multi_instance(self, mock_get):
# Assure we get three investigators back from the API Call, as set in the investigators.json file.
# Assure we get nine investigators back from the API Call, as set in the investigators.json file.
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('investigators.json')
@@ -392,18 +393,13 @@
workflow = self.create_workflow('multi_instance_parallel')
tasks = self.get_workflow_api(workflow).user_tasks
self.assertEquals(3, len(tasks))
self.assertEquals(9, len(tasks))
self.assertEquals("UserTask", tasks[0].type)
self.assertEquals("MutiInstanceTask", tasks[0].name)
self.assertEquals("Gather more information", tasks[0].title)
self.complete_form(workflow, tasks[0], {"investigator":{"email": "dhf8r@virginia.edu"}})
tasks = self.get_workflow_api(workflow).user_tasks
self.complete_form(workflow, tasks[2], {"investigator":{"email": "abc@virginia.edu"}})
tasks = self.get_workflow_api(workflow).user_tasks
self.complete_form(workflow, tasks[1], {"investigator":{"email": "def@virginia.edu"}})
for i in random.sample(range(9), 9):
self.complete_form(workflow, tasks[i], {"investigator":{"email": "dhf8r@virginia.edu"}})
tasks = self.get_workflow_api(workflow).user_tasks
workflow = self.get_workflow_api(workflow)
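For reference, random.sample(range(9), 9) draws all nine indices without replacement, producing a random permutation, so the loop above completes every task exactly once in shuffled order:

```python
import random

order = random.sample(range(9), 9)      # e.g. [4, 0, 8, 2, 6, 1, 7, 5, 3]
assert sorted(order) == list(range(9))  # every index appears exactly once
```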

View File

@@ -13,6 +13,23 @@ from tests.base_test import BaseTest
class TestWorkflowProcessorMultiInstance(BaseTest):
"""Tests the Workflow Processor as it deals with a Multi-Instance task"""
mock_investigator_response = {'PI': {
'label': 'Primary Investigator',
'display': 'Always',
'unique': 'Yes',
'user_id': 'dhf8r',
'display_name': 'Dan Funk'},
'SC_I': {
'label': 'Study Coordinator I',
'display': 'Always',
'unique': 'Yes',
'user_id': None},
'DC': {
'label': 'Department Contact',
'display': 'Optional',
'unique': 'Yes',
'user_id': 'asd3v',
'error': 'Unable to locate a user with id asd3v in LDAP'}}
def _populate_form_with_random_data(self, task):
WorkflowProcessor.populate_form_with_random_data(task)
@@ -21,11 +38,10 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
workflow_model = StudyService._create_workflow_model(study_model, spec_model)
return WorkflowProcessor(workflow_model)
@patch('crc.services.protocol_builder.requests.get')
def test_create_and_complete_workflow(self, mock_get):
@patch('crc.services.study_service.StudyService.get_investigators')
def test_create_and_complete_workflow(self, mock_study_service):
# This depends on getting a list of investigators back, now mocked at the StudyService level.
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('investigators.json')
mock_study_service.return_value = self.mock_investigator_response
self.load_example_data()
workflow_spec_model = self.load_test_spec("multi_instance")
@@ -40,16 +56,8 @@
task = next_user_tasks[0]
self.assertEquals(
{
'DC': {'user_id': 'asd3v', 'type_full': 'Department Contact'},
'IRBC': {'user_id': 'asdf32', 'type_full': 'IRB Coordinator'},
'PI': {'user_id': 'dhf8r', 'type_full': 'Primary Investigator'}
},
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
self.assertEquals("asd3v", task.data["investigator"]["user_id"])
self.assertEquals("dhf8r", task.data["investigator"]["user_id"])
self.assertEqual("MutiInstanceTask", task.get_name())
api_task = WorkflowService.spiff_task_to_api_task(task)
@@ -79,23 +87,21 @@
processor.do_engine_steps()
task = processor.bpmn_workflow.last_task
self.assertEquals(
{
'DC': {'user_id': 'asd3v', 'type_full': 'Department Contact', 'email': 'asd3v@virginia.edu'},
'IRBC': {'user_id': 'asdf32', 'type_full': 'IRB Coordinator', "email": "asdf32@virginia.edu"},
'PI': {'user_id': 'dhf8r', 'type_full': 'Primary Investigator', "email": "dhf8r@virginia.edu"}
},
expected = self.mock_investigator_response
expected['PI']['email'] = "asd3v@virginia.edu"
expected['SC_I']['email'] = "asdf32@virginia.edu"
expected['DC']['email'] = "dhf8r@virginia.edu"
self.assertEquals(expected,
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.complete, processor.get_status())
@patch('crc.services.protocol_builder.requests.get')
def test_create_and_complete_workflow_parallel(self, mock_get):
@patch('crc.services.study_service.StudyService.get_investigators')
def test_create_and_complete_workflow_parallel(self, mock_study_service):
"""Unlike the test above, the parallel task allows us to complete the items in any order."""
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('investigators.json')
# This depends on getting a list of investigators back, now mocked at the StudyService level.
mock_study_service.return_value = self.mock_investigator_response
self.load_example_data()
workflow_spec_model = self.load_test_spec("multi_instance_parallel")
@@ -110,16 +116,8 @@
# We can complete the tasks out of order.
task = next_user_tasks[2]
self.assertEquals(
{
'DC': {'user_id': 'asd3v', 'type_full': 'Department Contact'},
'IRBC': {'user_id': 'asdf32', 'type_full': 'IRB Coordinator'},
'PI': {'user_id': 'dhf8r', 'type_full': 'Primary Investigator'}
},
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
self.assertEquals("dhf8r", task.data["investigator"]["user_id"]) # The last of the tasks
self.assertEquals("asd3v", task.data["investigator"]["user_id"]) # The last of the tasks
api_task = WorkflowService.spiff_task_to_api_task(task)
self.assertEquals(MultiInstanceType.parallel, api_task.mi_type)
@@ -142,12 +140,11 @@
processor.do_engine_steps()
# Completing the tasks out of order still provides the correct information.
self.assertEquals(
{
'DC': {'user_id': 'asd3v', 'type_full': 'Department Contact', 'email': 'asd3v@virginia.edu'},
'IRBC': {'user_id': 'asdf32', 'type_full': 'IRB Coordinator', "email": "asdf32@virginia.edu"},
'PI': {'user_id': 'dhf8r', 'type_full': 'Primary Investigator', "email": "dhf8r@virginia.edu"}
},
expected = self.mock_investigator_response
expected['PI']['email'] = "asd3v@virginia.edu"
expected['SC_I']['email'] = "asdf32@virginia.edu"
expected['DC']['email'] = "dhf8r@virginia.edu"
self.assertEquals(expected,
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.complete, processor.get_status())

View File

@@ -1,9 +1,11 @@
import json
import unittest
from unittest.mock import patch
from crc import session
from crc.api.common import ApiErrorSchema
from crc.models.file import FileModel
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
from tests.base_test import BaseTest
@@ -18,7 +20,22 @@ class TestWorkflowSpecValidation(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
return ApiErrorSchema(many=True).load(json_data)
def test_successful_validation_of_test_workflows(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_investigators
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies') # mock_studies
def test_successful_validation_of_test_workflows(self, mock_studies, mock_details, mock_docs, mock_investigators):
# Mock Protocol Builder responses
studies_response = self.protocol_builder_response('user_studies.json')
mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
investigators_response = self.protocol_builder_response('investigators.json')
mock_investigators.return_value = json.loads(investigators_response)
self.assertEqual(0, len(self.validate_workflow("parallel_tasks")))
self.assertEqual(0, len(self.validate_workflow("decision_table")))
self.assertEqual(0, len(self.validate_workflow("docx")))
@@ -28,7 +45,22 @@
self.assertEqual(0, len(self.validate_workflow("study_details")))
self.assertEqual(0, len(self.validate_workflow("two_forms")))
def test_successful_validation_of_auto_loaded_workflows(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_investigators
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies') # mock_studies
def test_successful_validation_of_auto_loaded_workflows(self, mock_studies, mock_details, mock_docs, mock_investigators):
# Mock Protocol Builder responses
studies_response = self.protocol_builder_response('user_studies.json')
mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
investigators_response = self.protocol_builder_response('investigators.json')
mock_investigators.return_value = json.loads(investigators_response)
self.load_example_data()
workflows = session.query(WorkflowSpecModel).all()
errors = []