diff --git a/crc/scripts/complete_template.py b/crc/scripts/complete_template.py
index 9d2a7458..a68bdd5f 100644
--- a/crc/scripts/complete_template.py
+++ b/crc/scripts/complete_template.py
@@ -5,7 +5,7 @@ from jinja2 import UndefinedError
from crc import session
from crc.api.common import ApiError
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowModel
from docxtpl import DocxTemplate
import jinja2
@@ -33,11 +33,12 @@ Takes two arguments:
def do_task(self, task, study_id, *args, **kwargs):
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
final_document_stream = self.process_template(task, study_id, *args, **kwargs)
-
+ workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
file_name = args[0]
irb_doc_code = args[1]
FileService.add_task_file(study_id=study_id,
workflow_id=workflow_id,
+ workflow_spec_id=workflow.workflow_spec_id,
task_id=task.id,
name=file_name,
content_type=CONTENT_TYPES['docx'],
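Note on the hunk above: `add_task_file` now requires a `workflow_spec_id`, so the script first loads the owning `WorkflowModel`. A minimal sketch of the lookup, assuming the session and model imports shown in the hunk:

```python
# Minimal sketch of the new lookup (session and WorkflowModel come from the imports above).
workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()

# The spec id is then forwarded to FileService.add_task_file along with the
# study, workflow, and task identifiers shown in the hunk above.
workflow_spec_id = workflow.workflow_spec_id
```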
diff --git a/crc/scripts/documents.py b/crc/scripts/documents.py
deleted file mode 100644
index 051b1c4d..00000000
--- a/crc/scripts/documents.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from crc.api.common import ApiError
-from crc.scripts.script import Script, ScriptValidationError
-from crc.services.file_service import FileService
-from crc.services.protocol_builder import ProtocolBuilderService
-
-
-class Documents(Script):
- """Provides information about the documents required by Protocol Builder."""
- pb = ProtocolBuilderService()
-
- def get_description(self):
- return """
-Provides detailed information about the documents loaded as a part of completing tasks.
-Makes an immediate call to the IRB Protocol Builder API to get a list of currently required
-documents. It then collects all the information in a reference file called 'irb_pro_categories.xls',
-if the Id from Protocol Builder matches an Id in this table, all data available in that row
-is also provided.
-
-This place a dictionary of values in the current task, where the key is the code in the lookup table.
-
-For example:
-``` "Documents" :
- {
- "UVACompliance_PRCApproval": {
- "name": "Cancer Center's PRC Approval Form",
- "category1": "UVA Compliance",
- "category2": "PRC Approval",
- "category3": "",
- "Who Uploads?": "CRC",
- "required": True,
- "Id": 6
- "count": 0
- },
- 24: { ...
- }
-```
-"""
- def do_task_validate_only(self, task, study_id, *args, **kwargs):
- """For validation only, pretend no results come back from pb"""
- pb_docs = []
- self.add_data_to_task(task, self.get_documents(study_id, pb_docs))
-
- def do_task(self, task, study_id, *args, **kwargs):
- """Takes data from the protocol builder, and merges it with data from the IRB Pro Categories
- spreadsheet to return pertinent details about the required documents."""
- pb_docs = self.pb.get_required_docs(study_id, as_objects=True)
- self.add_data_to_task(task, self.get_documents(study_id, pb_docs))
-
- def get_documents(self, study_id, pb_docs):
- """Takes data from the protocol builder, and merges it with data from the IRB Pro Categories spreadsheet to return
- pertinent details about the required documents."""
-
- doc_dictionary = FileService.get_file_reference_dictionary()
- required_docs = {}
- for code, required_doc in doc_dictionary.items():
- try:
- pb_data = next((item for item in pb_docs if int(item.AUXDOCID) == int(required_doc['Id'])), None)
- except:
- pb_data = None
- required_doc['count'] = self.get_count(study_id, code)
- required_doc['required'] = False
- if pb_data:
- required_doc['required'] = True
- required_docs[code] = required_doc
- return required_docs
-
- def get_count(self, study_id, irb_doc_code):
- """Returns the total number of documents that have been uploaded that match
- the given document id. """
- return(len(FileService.get_files(study_id=study_id, irb_doc_code=irb_doc_code)))
-
- # Verifies that information is available for this script task to function
- # correctly. Returns a list of validation errors.
- @staticmethod
- def validate():
- errors = []
- try:
- dict = FileService.get_file_reference_dictionary()
- except ApiError as ae:
- errors.append(ScriptValidationError.from_api_error(ae))
- return errors
diff --git a/crc/scripts/script.py b/crc/scripts/script.py
index 69c505a3..f7bba546 100644
--- a/crc/scripts/script.py
+++ b/crc/scripts/script.py
@@ -24,13 +24,6 @@ class Script(object):
"does must provide a validate_only option that mimics the do_task, " +
"but does not make external calls or database updates." )
- def validate(self):
- """Override this method to perform an early check that the script has access to
- everything it needs to properly process requests.
- Should return an array of ScriptValidationErrors.
- """
- return []
-
@staticmethod
def get_all_subclasses():
return Script._get_all_subclasses(Script)
diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py
index 3d42b656..040beb32 100644
--- a/crc/scripts/study_info.py
+++ b/crc/scripts/study_info.py
@@ -4,7 +4,8 @@ from crc import session, app
from crc.api.common import ApiError
from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowStatus
-from crc.scripts.script import Script
+from crc.scripts.script import Script, ScriptValidationError
+from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
@@ -15,21 +16,24 @@ class StudyInfo(Script):
"""Just your basic class that can pull in data from a few api endpoints and do a basic task."""
pb = ProtocolBuilderService()
- type_options = ['info', 'investigators', 'details', 'approvals', 'documents_status']
+ type_options = ['info', 'investigators', 'details', 'approvals', 'documents', 'protocol']
def get_description(self):
- return """StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', or 'details'
+ return """StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', or 'details', 'approvals',
+ 'documents' or 'protocol'.
Adds details about the current study to the Task Data. The type of information required should be
- provided as an argument. Basic returns the basic information such as the title. Investigators provides
- detailed information about each investigator in th study. Details provides a large number
- of details about the study, as gathered within the protocol builder, and 'required_docs',
- lists all the documents the Protocol Builder has determined will be required as a part of
- this study.
+ provided as an argument. 'info' returns the basic information such as the title. 'investigators' provides
+ detailed information about each investigator in the study. 'details' provides a large number of details
+ about the study, as gathered within the protocol builder. 'documents' lists all the documents that can be
+ a part of the study, with documents from Protocol Builder marked as required, along with details about any
+ files that were uploaded. 'protocol' returns the uploaded study protocol document, if one exists.
"""
def do_task_validate_only(self, task, study_id, *args, **kwargs):
"""For validation only, pretend no results come back from pb"""
self.check_args(args)
+ # Assure the reference file exists (a bit hacky, but we want to raise this error early and cleanly).
+ FileService.get_file_reference_dictionary()
data = {
"study":{
"info": {
@@ -57,7 +61,8 @@ class StudyInfo(Script):
"status": WorkflowStatus.not_started.value,
"workflow_spec_id": "irb_api_details",
},
- "documents_status": [
+ "documents": {
+ "AD_CoCApp":
{
'category1': 'Ancillary Document',
'category2': 'CoC Application',
@@ -75,7 +80,10 @@ class StudyInfo(Script):
'workflow_spec_id': 'irb_api_details',
'status': 'complete',
}
- ]
+ },
+ 'protocol': {
+ 'id': 0,
+ }
}
}
self.add_data_to_task(task=task, data=data["study"])
@@ -99,8 +107,10 @@ class StudyInfo(Script):
self.add_data_to_task(task, {cmd: self.pb.get_study_details(study_id)})
if cmd == 'approvals':
self.add_data_to_task(task, {cmd: StudyService().get_approvals(study_id)})
- if cmd == 'documents_status':
+ if cmd == 'documents':
self.add_data_to_task(task, {cmd: StudyService().get_documents_status(study_id)})
+ if cmd == 'protocol':
+ self.add_data_to_task(task, {cmd: StudyService().get_protocol(study_id)})
def check_args(self, args):
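For reference, a sketch of the task data that `StudyInfo documents` and `StudyInfo protocol` now place on the task, mirroring the validation sample above; the concrete values are illustrative only:

```python
# Illustrative task data after running "StudyInfo documents" and "StudyInfo protocol".
# Keys mirror the validation sample above; the values are examples.
study_info = {
    "documents": {
        "AD_CoCApp": {
            "category1": "Ancillary Document",
            "category2": "CoC Application",
            "required": True,
            "count": 1,
            "files": [
                {
                    "file_id": 123,
                    "task_id": "abcdef14236890",
                    "workflow_id": 456,
                    "workflow_spec_id": "irb_api_details",
                },
            ],
            "status": "complete",
        },
    },
    "protocol": {
        "id": 0,
    },
}

# BPMN documentation and DMN tables can then reference, for example,
# StudyInfo.documents.AD_CoCApp.required or StudyInfo.documents.AD_CoCApp.count.
```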
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index 9eba4dd9..7698b5a2 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -56,7 +56,7 @@ class FileService(object):
data_model = FileService.get_reference_file_data(FileService.IRB_PRO_CATEGORIES_FILE)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
- return code in df['Code'].values
+ return code in df['code'].values
@staticmethod
def get_file_reference_dictionary():
@@ -65,11 +65,11 @@ class FileService(object):
data_model = FileService.get_reference_file_data(FileService.IRB_PRO_CATEGORIES_FILE)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
- df['Id'] = df['Id'].fillna(0)
- df = df.astype({'Id': 'Int64'})
+ df['id'] = df['id'].fillna(0)
+ df = df.astype({'id': 'Int64'})
df = df.fillna('')
df = df.applymap(str)
- df = df.set_index('Code')
+ df = df.set_index('code')
# IF we need to convert the column names to something more sensible.
# df.columns = [snakeCase(x) for x in df.columns]
return json.loads(df.to_json(orient='index'))
@@ -80,12 +80,13 @@ class FileService(object):
# all_dict = df.set_index('Id').to_dict('index')
@staticmethod
- def add_task_file(study_id, workflow_id, task_id, name, content_type, binary_data,
+ def add_task_file(study_id, workflow_id, workflow_spec_id, task_id, name, content_type, binary_data,
irb_doc_code=None):
"""Create a new file and associate it with an executing task within a workflow."""
file_model = FileModel(
study_id=study_id,
workflow_id=workflow_id,
+ workflow_spec_id=workflow_spec_id,
task_id=task_id,
name=name,
irb_doc_code=irb_doc_code
@@ -168,7 +169,7 @@ class FileService(object):
if form_field_key:
query = query.filter_by(form_field_key=form_field_key)
if name:
- query = query.filter_by(name=form_field_key)
+ query = query.filter_by(name=name)
if irb_doc_code:
query = query.filter_by(irb_doc_code=irb_doc_code)
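The reference spreadsheet columns are now lowercase ('code', 'id'). A self-contained sketch of the same transformation applied to a small in-memory frame, assuming the new column names:

```python
import json
import pandas as pd

# Sketch of get_file_reference_dictionary's transformation, assuming the
# irb_documents.xlsx sheet now uses lowercase 'code' and 'id' headers.
df = pd.DataFrame({
    "code": ["UVACompl_PRCAppr", "AD_CoCApp"],
    "id": [6, None],
    "category1": ["UVA Compliance", "Ancillary Document"],
})

df["id"] = df["id"].fillna(0)
df = df.astype({"id": "Int64"})
df = df.fillna("")
df = df.applymap(str)          # every cell becomes a string, e.g. id 6 -> "6"
df = df.set_index("code")

reference = json.loads(df.to_json(orient="index"))
# reference["UVACompl_PRCAppr"] == {"id": "6", "category1": "UVA Compliance"}
```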
diff --git a/crc/services/protocol_builder.py b/crc/services/protocol_builder.py
index ad6b5ebc..118d871a 100644
--- a/crc/services/protocol_builder.py
+++ b/crc/services/protocol_builder.py
@@ -41,14 +41,11 @@ class ProtocolBuilderService(object):
(response.status_code, response.text))
@staticmethod
- def get_required_docs(study_id, as_objects=False) -> Optional[List[ProtocolBuilderRequiredDocument]]:
+ def get_required_docs(study_id) -> Optional[List[ProtocolBuilderRequiredDocument]]:
ProtocolBuilderService.check_args(study_id)
response = requests.get(ProtocolBuilderService.REQUIRED_DOCS_URL % study_id)
if response.ok and response.text:
- if as_objects:
- return ProtocolBuilderRequiredDocumentSchema(many=True).loads(response.text)
- else:
- return json.loads(response.text)
+ return json.loads(response.text)
else:
raise ApiError("protocol_builder_error",
"Received an invalid response from the protocol builder (status %s): %s" %
diff --git a/crc/services/study_service.py b/crc/services/study_service.py
index 02a2f6c7..52450d80 100644
--- a/crc/services/study_service.py
+++ b/crc/services/study_service.py
@@ -1,17 +1,17 @@
from datetime import datetime
+import json
from typing import List
from SpiffWorkflow import WorkflowException
from crc import db, session
from crc.api.common import ApiError
-from crc.models.file import FileModel
+from crc.models.file import FileModel, FileModelSchema
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus
-from crc.scripts.documents import Documents
from crc.services.file_service import FileService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
@@ -39,6 +39,9 @@ class StudyService(object):
study = Study.from_model(study_model)
study.categories = StudyService.get_categories()
workflow_metas = StudyService.__get_workflow_metas(study_id)
+
+ # Calling this repeatedly is very slow: it creates the master spec and runs it.
status = StudyService.__get_study_status(study_model)
study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status)
@@ -101,52 +104,63 @@ class StudyService(object):
@staticmethod
def get_documents_status(study_id):
- """Returns a list of required documents and related workflow status."""
- doc_service = Documents()
+ """Returns a list of documents related to the study, if they are required, and any file information
+ that is available.."""
# Get PB required docs
- pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id, as_objects=True)
+ pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
- # Get required docs for study
- study_docs = doc_service.get_documents(study_id=study_id, pb_docs=pb_docs)
-
- # Container for results
- documents = []
-
- # For each required doc, get file(s)
- for code, doc in study_docs.items():
- if not doc['required']:
- continue
+ # Loop through all known document types, get the counts for those files, and use pb_docs to mark those required.
+ doc_dictionary = FileService.get_file_reference_dictionary()
+ documents = {}
+ for code, doc in doc_dictionary.items():
+ pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
+ doc['required'] = False
+ if pb_data:
+ doc['required'] = True
doc['study_id'] = study_id
doc['code'] = code
- # Make a display name out of categories if none exists
- if 'Name' in doc and len(doc['Name']) > 0:
- doc['display_name'] = doc['Name']
- else:
- name_list = []
- for cat_key in ['category1', 'category2', 'category3']:
- if doc[cat_key] not in ['', 'NULL']:
- name_list.append(doc[cat_key])
- doc['display_name'] = ' '.join(name_list)
+ # Make a display name out of categories
+ name_list = []
+ for cat_key in ['category1', 'category2', 'category3']:
+ if doc[cat_key] not in ['', 'NULL']:
+ name_list.append(doc[cat_key])
+ doc['display_name'] = ' / '.join(name_list)
# For each file, get associated workflow status
doc_files = FileService.get_files(study_id=study_id, irb_doc_code=code)
+ doc['count'] = len(doc_files)
+ doc['files'] = []
for file in doc_files:
- doc['file_id'] = file.id
- doc['task_id'] = file.task_id
- doc['workflow_id'] = file.workflow_id
- doc['workflow_spec_id'] = file.workflow_spec_id
+ doc['files'].append({'file_id': file.id,
+ 'task_id': file.task_id,
+ 'workflow_id': file.workflow_id,
+ 'workflow_spec_id': file.workflow_spec_id})
- if doc['status'] is None:
+ # Update the document status to match the status of the workflow it is in.
+ if 'status' not in doc or doc['status'] is None:
workflow: WorkflowModel = session.query(WorkflowModel).filter_by(id=file.workflow_id).first()
doc['status'] = workflow.status.value
- documents.append(doc)
+ documents[code] = doc
return documents
+
+
+ @staticmethod
+ def get_protocol(study_id):
+ """Returns the study protocol, if it has been uploaded."""
+ file = db.session.query(FileModel)\
+ .filter_by(study_id=study_id)\
+ .filter_by(form_field_key='Study_Protocol_Document')\
+ .first()
+
+ return FileModelSchema().dump(file)
+
+
@staticmethod
def synch_all_studies_with_protocol_builder(user):
"""Assures that the studies we have locally for the given user are
diff --git a/crc/static/bpmn/core_info/SponsorList.xls b/crc/static/bpmn/core_info/SponsorList.xls
index 7bb3882c..30bb6c60 100644
Binary files a/crc/static/bpmn/core_info/SponsorList.xls and b/crc/static/bpmn/core_info/SponsorList.xls differ
diff --git a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn
index 05a865b6..42b6083c 100644
--- a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn
+++ b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn
@@ -13,23 +13,29 @@
# Documents & Approvals
> ## Protocol Document Management
-> [Upload Protocol Here](/)
+{% if StudyInfo.protocol is defined -%}
+{%- set p = StudyInfo.protocol -%}
+
+> [{{p.name}}](/study/{{p.study_id}}/workflow/{{p.workflow_id}}/task/{{p.task_id}})
+{%- else -%}
+> No protocol uploaded yet.
+{% endif %}
> ## Approvals
> | Name | Status | Help |
|:---- |:------ |:---- |
{% for approval in StudyInfo.approvals -%}
-| [{{approval.display_name}}](/study/{{approval.study_id}}/workflow/{{approval.workflow_id}}) | {{approval.status}} | [Context here](/help/{{approval.workflow_spec_id}}) |
+| [{{approval.display_name}}](/study/{{approval.study_id}}/workflow/{{approval.workflow_id}}) | {{approval.status}} | [?](/help/{{approval.workflow_spec_id}}) |
{% endfor %}
> ## Documents
> | Name | Status | Help | Download |
|:---- |:------ |:---- |:-------- |
-{% for doc in StudyInfo.documents_status -%}
+{% for doc in StudyInfo.documents -%}
{% if doc.file_id is defined -%}
- | [{{doc.display_name}}](/study/{{doc.study_id}}/workflow/{{doc.workflow_id}}/task/{{doc.task_id}}) | {{doc.status}} | [Context here](/help/documents/{{doc.code}}) | [Download](/file/{{doc.file_id}}) |
+ | [{{doc.display_name}}](/study/{{doc.study_id}}/workflow/{{doc.workflow_id}}/task/{{doc.task_id}}) | {{doc.status}} | [Context here](/help/documents/{{doc.code}}) | [Download](/file/{{doc.file_id}}/data) |
{%- else -%}
- | {{doc.display_name}} | Not started | [Context here](/help/documents/{{doc.code}}) | No file yet |
+ | {{doc.display_name}} | Not started | [?](/help/documents/{{doc.code}}) | No file yet |
{%- endif %}
{% endfor %}
@@ -46,47 +52,60 @@
StudyInfo approvals
- Flow_1k3su2q
+ Flow_0w20w9j
Flow_0c7ryff
StudyInfo documents_status
+        Flow_1k3su2q
+        Flow_0w20w9j
+        StudyInfo protocol
diff --git a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn
index eebbda42..719b3257 100644
--- a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn
+++ b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn
@@ -217,7 +217,7 @@ Protocol Owner: **(need to insert value here)**
SequenceFlow_1dexemq
Flow_1x9d2mo
- Documents
+ StudyInfo documents
diff --git a/crc/static/bpmn/ids_full_submission/investigators_brochure.dmn b/crc/static/bpmn/ids_full_submission/investigators_brochure.dmn
index c581f066..28221755 100644
--- a/crc/static/bpmn/ids_full_submission/investigators_brochure.dmn
+++ b/crc/static/bpmn/ids_full_submission/investigators_brochure.dmn
@@ -7,7 +7,7 @@
- Documents.DrugDevDoc_InvestBrochure.count
+ StudyInfo.documents.DrugDevDoc_InvestBrochure.count
diff --git a/crc/static/bpmn/ids_full_submission/ivrs_iwrs_ixrs.dmn b/crc/static/bpmn/ids_full_submission/ivrs_iwrs_ixrs.dmn
index 39f0d20a..3604d8c1 100644
--- a/crc/static/bpmn/ids_full_submission/ivrs_iwrs_ixrs.dmn
+++ b/crc/static/bpmn/ids_full_submission/ivrs_iwrs_ixrs.dmn
@@ -7,7 +7,7 @@
- Documents.DrugDevDoc_IVRSIWRSIXRSMan.count
+ StudyInfo.documents.DrugDevDoc_IVRSIWRSIXRSMan.count
diff --git a/crc/static/bpmn/ids_full_submission/pharmacy_manual.dmn b/crc/static/bpmn/ids_full_submission/pharmacy_manual.dmn
index 67936df3..03fbe6b5 100644
--- a/crc/static/bpmn/ids_full_submission/pharmacy_manual.dmn
+++ b/crc/static/bpmn/ids_full_submission/pharmacy_manual.dmn
@@ -7,7 +7,7 @@
- Documents["DrugDevDoc_PharmManual"]["count"]
+ StudyInfo.documents.DrugDevDoc_PharmManual.count
diff --git a/crc/static/bpmn/top_level_workflow/data_security_plan.dmn b/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
index 249b8cbd..2dd0bad9 100644
--- a/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
+++ b/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
@@ -7,7 +7,7 @@
- Documents['Study_DataSecurityPlan']['required']
+ StudyInfo.documents.Study_DataSecurityPlan.required
diff --git a/crc/static/bpmn/top_level_workflow/enter_core_info.dmn b/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
index 7a204622..570b4a54 100644
--- a/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
+++ b/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
@@ -7,7 +7,7 @@
- Documents['UVACompl_PRCAppr']['required']
+ StudyInfo.documents.UVACompl_PRCAppr.required
diff --git a/crc/static/bpmn/top_level_workflow/ids_full_submission.dmn b/crc/static/bpmn/top_level_workflow/ids_full_submission.dmn
index 0aee7a81..72be2464 100644
--- a/crc/static/bpmn/top_level_workflow/ids_full_submission.dmn
+++ b/crc/static/bpmn/top_level_workflow/ids_full_submission.dmn
@@ -7,7 +7,7 @@
- Documents.UVACompl_IDSWaiverApp.required
+ StudyInfo.documents.UVACompl_IDSWaiverApp.required
diff --git a/crc/static/bpmn/top_level_workflow/ids_waiver.dmn b/crc/static/bpmn/top_level_workflow/ids_waiver.dmn
index 2a22a68d..515467d7 100644
--- a/crc/static/bpmn/top_level_workflow/ids_waiver.dmn
+++ b/crc/static/bpmn/top_level_workflow/ids_waiver.dmn
@@ -7,7 +7,7 @@
- Documents.UVACompl_IDSWaiverApp.required
+ StudyInfo.documents.UVACompl_IDSWaiverApp.required
diff --git a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
index 522f41c5..06f97675 100644
--- a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
+++ b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
@@ -7,7 +7,7 @@
- Documents['AD_LabManual']['required']
+ StudyInfo.documents.AD_LabManual.required
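The DMN input expressions above now read the nested structure produced by `StudyInfo documents`. A small sketch of why the dotted access works, assuming task data is exposed through an attribute-style wrapper such as python-box (the exact wrapper used by the engine is not shown in this change):

```python
# Hypothetical illustration only: an attribute-access wrapper over task data,
# matching the dotted expressions used in the DMN tables above.
from box import Box

task_data = Box({
    "StudyInfo": {
        "documents": {
            "UVACompl_PRCAppr": {"required": True, "count": 0},
            "AD_LabManual": {"required": False, "count": 0},
        }
    }
})

assert task_data.StudyInfo.documents.UVACompl_PRCAppr.required is True
assert task_data.StudyInfo.documents.AD_LabManual.required is False
```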
diff --git a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
index b5414e48..f19e069d 100644
--- a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
+++ b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
@@ -11,7 +11,7 @@
SequenceFlow_1ees8ka
SequenceFlow_17ct47v
- Documents
+ StudyInfo documents
Flow_1m8285h
diff --git a/crc/static/reference/irb_documents.xlsx b/crc/static/reference/irb_documents.xlsx
index 7743f0bc..0fcc42f0 100644
Binary files a/crc/static/reference/irb_documents.xlsx and b/crc/static/reference/irb_documents.xlsx differ
diff --git a/tests/data/reference/irb_documents.xlsx b/tests/data/reference/irb_documents.xlsx
index 9f3b0cdd..0aaaae27 100644
Binary files a/tests/data/reference/irb_documents.xlsx and b/tests/data/reference/irb_documents.xlsx differ
diff --git a/tests/data/top_level_workflow/data_security_plan.dmn b/tests/data/top_level_workflow/data_security_plan.dmn
index d67da3b2..2d213d5f 100644
--- a/tests/data/top_level_workflow/data_security_plan.dmn
+++ b/tests/data/top_level_workflow/data_security_plan.dmn
@@ -7,7 +7,7 @@
- Documents['Study_DataSecurityPlan']['required']
+ StudyInfo.documents.Study_DataSecurityPlan.required
diff --git a/tests/data/top_level_workflow/enter_core_info.dmn b/tests/data/top_level_workflow/enter_core_info.dmn
index d4345af3..30e2aa06 100644
--- a/tests/data/top_level_workflow/enter_core_info.dmn
+++ b/tests/data/top_level_workflow/enter_core_info.dmn
@@ -7,7 +7,7 @@
- Documents['UVACompl_PRCAppr']['required']
+ StudyInfo.documents.UVACompl_PRCAppr.required
diff --git a/tests/data/top_level_workflow/sponsor_funding_source.dmn b/tests/data/top_level_workflow/sponsor_funding_source.dmn
index df7baf56..20ed4e14 100644
--- a/tests/data/top_level_workflow/sponsor_funding_source.dmn
+++ b/tests/data/top_level_workflow/sponsor_funding_source.dmn
@@ -7,7 +7,7 @@
- Documents['AD_LabManual']['required']
+ StudyInfo.documents.AD_LabManual.required
diff --git a/tests/data/top_level_workflow/top_level_workflow.bpmn b/tests/data/top_level_workflow/top_level_workflow.bpmn
index ea8f0c84..cc6e1c57 100644
--- a/tests/data/top_level_workflow/top_level_workflow.bpmn
+++ b/tests/data/top_level_workflow/top_level_workflow.bpmn
@@ -11,7 +11,7 @@
SequenceFlow_1ees8ka
SequenceFlow_17ct47v
- Documents
+ StudyInfo documents
Flow_1m8285h
diff --git a/tests/test_required_docs_script.py b/tests/test_required_docs_script.py
deleted file mode 100644
index 5601e98d..00000000
--- a/tests/test_required_docs_script.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import json
-from unittest.mock import patch
-
-from crc import db
-from crc.models.file import FileDataModel, FileModel
-from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema
-from crc.scripts.documents import Documents
-from crc.services.file_service import FileService
-from tests.base_test import BaseTest
-
-
-class TestRequiredDocsScript(BaseTest):
- test_uid = "dhf8r"
- test_study_id = 1
-
- """
- 1. get a list of only the required documents for the study.
- 2. For this study, is this document required accroding to the protocol builder?
- 3. For ALL uploaded documents, what the total number of files that were uploaded? per instance of this document naming
- convention that we are implementing for the IRB.
- """
-
- def test_validate_returns_error_if_reference_files_do_not_exist(self):
- file_model = db.session.query(FileModel). \
- filter(FileModel.is_reference == True). \
- filter(FileModel.name == FileService.IRB_PRO_CATEGORIES_FILE).first()
- if file_model:
- db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
- db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
- db.session.commit()
- db.session.flush()
- errors = Documents.validate()
- self.assertTrue(len(errors) > 0)
- self.assertEqual("file_not_found", errors[0].code)
-
- def test_no_validation_error_when_correct_file_exists(self):
- self.create_reference_document()
- errors = Documents.validate()
- self.assertTrue(len(errors) == 0)
-
- def test_load_lookup_data(self):
- self.create_reference_document()
- dict = FileService.get_file_reference_dictionary()
- self.assertIsNotNone(dict)
-
- def get_required_docs(self):
- string_data = self.protocol_builder_response('required_docs.json')
- return ProtocolBuilderRequiredDocumentSchema(many=True).loads(string_data)
-
- def test_get_required_docs(self):
- pb_docs = self.get_required_docs()
- self.create_reference_document()
- script = Documents()
- documents = script.get_documents(12, pb_docs) # Mocked out, any random study id works.
- self.assertIsNotNone(documents)
- self.assertTrue("UVACompl_PRCAppr" in documents.keys())
- self.assertEqual("Cancer Center's PRC Approval Form", documents["UVACompl_PRCAppr"]['Name'])
- self.assertEqual("UVA Compliance", documents["UVACompl_PRCAppr"]['category1'])
- self.assertEqual("PRC Approval", documents["UVACompl_PRCAppr"]['category2'])
- self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
- self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
- self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
- self.assertEqual('6', documents["UVACompl_PRCAppr"]['Id'])
-
- def test_get_required_docs_has_correct_count_when_a_file_exists(self):
- self.load_example_data()
- pb_docs = self.get_required_docs()
- # Make sure the xslt reference document is in place.
- self.create_reference_document()
- script = Documents()
-
- # Add a document to the study with the correct code.
- workflow = self.create_workflow('docx')
- irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
- FileService.add_task_file(study_id=workflow.study_id, workflow_id=workflow.id,
- task_id="fakingthisout",
- name="anything.png", content_type="text",
- binary_data=b'1234', irb_doc_code=irb_code)
-
- docs = script.get_documents(workflow.study_id, pb_docs)
- self.assertIsNotNone(docs)
- self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
diff --git a/tests/test_study_details_documents.py b/tests/test_study_details_documents.py
new file mode 100644
index 00000000..e85dc87d
--- /dev/null
+++ b/tests/test_study_details_documents.py
@@ -0,0 +1,67 @@
+import json
+from unittest.mock import patch
+
+from crc import db, session
+from crc.api.common import ApiError
+from crc.models.file import FileDataModel, FileModel
+from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema
+from crc.models.study import StudyModel
+from crc.scripts.study_info import StudyInfo
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_processor import WorkflowProcessor
+from tests.base_test import BaseTest
+
+
+class TestStudyDetailsDocumentsScript(BaseTest):
+ test_uid = "dhf8r"
+ test_study_id = 1
+
+ """
+ 1. Get a list of all documents related to the study.
+ 2. For this study, is this document required according to the Protocol Builder?
+ 3. For ALL uploaded documents, what is the total number of files that were uploaded per instance of the
+ document naming convention that we are implementing for the IRB?
+ """
+
+ def test_validate_returns_error_if_reference_files_do_not_exist(self):
+ self.load_example_data()
+ self.create_reference_document()
+ study = session.query(StudyModel).first()
+ workflow_spec_model = self.load_test_spec("two_forms")
+ workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
+ processor = WorkflowProcessor(workflow_model)
+ task = processor.next_task()
+
+ # Remove the reference file.
+ file_model = db.session.query(FileModel). \
+ filter(FileModel.is_reference == True). \
+ filter(FileModel.name == FileService.IRB_PRO_CATEGORIES_FILE).first()
+ if file_model:
+ db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
+ db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
+ db.session.commit()
+ db.session.flush()
+
+ with self.assertRaises(ApiError):
+ StudyInfo().do_task_validate_only(task, study.id, "documents")
+
+ def test_no_validation_error_when_correct_file_exists(self):
+ self.load_example_data()
+ self.create_reference_document()
+ study = session.query(StudyModel).first()
+ workflow_spec_model = self.load_test_spec("two_forms")
+ workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
+ processor = WorkflowProcessor(workflow_model)
+ task = processor.next_task()
+ StudyInfo().do_task_validate_only(task, study.id, "documents")
+
+ def test_load_lookup_data(self):
+ self.create_reference_document()
+ dict = FileService.get_file_reference_dictionary()
+ self.assertIsNotNone(dict)
+
+ def get_required_docs(self):
+ string_data = self.protocol_builder_response('required_docs.json')
+ return ProtocolBuilderRequiredDocumentSchema(many=True).loads(string_data)
+
diff --git a/tests/test_study_service.py b/tests/test_study_service.py
index fefd0eec..12837309 100644
--- a/tests/test_study_service.py
+++ b/tests/test_study_service.py
@@ -8,6 +8,7 @@ from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus, \
WorkflowSpecCategoryModel
+from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from example_data import ExampleDataLoader
@@ -17,22 +18,28 @@ from tests.base_test import BaseTest
class TestStudyService(BaseTest):
"""Largely tested via the test_study_api, and time is tight, but adding new tests here."""
- @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
- def test_total_tasks_updated(self, mock_docs):
- """Assure that as a users progress is available when getting a list of studies for that user."""
+ def create_user_with_study_and_workflow(self):
- docs_response = self.protocol_builder_response('required_docs.json')
- mock_docs.return_value = json.loads(docs_response)
+ # clear it all out.
+ from example_data import ExampleDataLoader
+ ExampleDataLoader.clean_db()
# Assure some basic models are in place, This is a damn mess. Our database models need an overhaul to make
# this easier - better relationship modeling is now critical.
self.load_test_spec("top_level_workflow", master_spec=True)
- user = UserModel(uid="dhf8r", email_address="whatever@stuff.com", display_name="Stayathome Smellalots")
- db.session.add(user)
- db.session.commit()
+ user = db.session.query(UserModel).filter(UserModel.uid == "dhf8r").first()
+ if not user:
+ user = UserModel(uid="dhf8r", email_address="whatever@stuff.com", display_name="Stayathome Smellalots")
+ db.session.add(user)
+ db.session.commit()
+ else:
+ for study in db.session.query(StudyModel).all():
+ StudyService().delete_study(study.id)
+
study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.ACTIVE, user_uid=user.uid)
+ db.session.add(study)
cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0)
- db.session.add_all([study, cat])
+ db.session.add(cat)
db.session.commit()
self.assertIsNotNone(cat.id)
@@ -45,6 +52,16 @@ class TestStudyService(BaseTest):
db.session.commit()
# Assure there is a master specification, one standard spec, and lookup tables.
ExampleDataLoader().load_reference_documents()
+ return user
+
+ @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
+ def test_total_tasks_updated(self, mock_docs):
+ """Assure that as a users progress is available when getting a list of studies for that user."""
+
+ docs_response = self.protocol_builder_response('required_docs.json')
+ mock_docs.return_value = json.loads(docs_response)
+
+ user = self.create_user_with_study_and_workflow()
# The load example data script should set us up a user and at least one study, one category, and one workflow.
studies = StudyService.get_studies_for_user(user)
@@ -86,3 +103,63 @@ class TestStudyService(BaseTest):
approvals = StudyService.get_approvals(studies[0].id)
self.assertGreater(len(approvals), 0)
self.assertIsNotNone(approvals[0]['display_order'])
+
+ @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
+ def test_get_required_docs(self, mock_docs):
+
+ # mock out the protocol builder
+ docs_response = self.protocol_builder_response('required_docs.json')
+ mock_docs.return_value = json.loads(docs_response)
+
+ user = self.create_user_with_study_and_workflow()
+ studies = StudyService.get_studies_for_user(user)
+ study = studies[0]
+
+
+ study_service = StudyService()
+ documents = study_service.get_documents_status(study_id=study.id) # Mocked out, any random study id works.
+ self.assertIsNotNone(documents)
+ self.assertTrue("UVACompl_PRCAppr" in documents.keys())
+ self.assertEqual("UVACompl_PRCAppr", documents["UVACompl_PRCAppr"]['code'])
+ self.assertEqual("UVA Compliance / PRC Approval", documents["UVACompl_PRCAppr"]['display_name'])
+ self.assertEqual("Cancer Center's PRC Approval Form", documents["UVACompl_PRCAppr"]['description'])
+ self.assertEqual("UVA Compliance", documents["UVACompl_PRCAppr"]['category1'])
+ self.assertEqual("PRC Approval", documents["UVACompl_PRCAppr"]['category2'])
+ self.assertEqual("", documents["UVACompl_PRCAppr"]['category3'])
+ self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
+ self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
+ self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
+ self.assertEqual('6', documents["UVACompl_PRCAppr"]['id'])
+
+ @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
+ def test_get_documents_has_file_details(self, mock_docs):
+
+ # mock out the protocol builder
+ docs_response = self.protocol_builder_response('required_docs.json')
+ mock_docs.return_value = json.loads(docs_response)
+
+ user = self.create_user_with_study_and_workflow()
+
+ # Add a document to the study with the correct code.
+ workflow = self.create_workflow('docx')
+ irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
+ FileService.add_task_file(study_id=workflow.study_id, workflow_id=workflow.id,
+ workflow_spec_id=workflow.workflow_spec_id,
+ task_id="fakingthisout",
+ name="anything.png", content_type="text",
+ binary_data=b'1234', irb_doc_code=irb_code)
+
+ docs = StudyService().get_documents_status(workflow.study_id)
+ self.assertIsNotNone(docs)
+ self.assertEqual("not_started", docs["UVACompl_PRCAppr"]['status'])
+ self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
+ self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0])
+ self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0]['file_id'])
+ self.assertEqual(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
+ self.assertEqual(workflow.workflow_spec_id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_spec_id'])
+
+ # 'file_id': 123,
+ # 'task_id': 'abcdef14236890',
+ # 'workflow_id': 456,
+ # 'workflow_spec_id': 'irb_api_details',
+ # 'status': 'complete',