diff --git a/crc/scripts/required_docs.py b/crc/scripts/documents.py
similarity index 75%
rename from crc/scripts/required_docs.py
rename to crc/scripts/documents.py
index f7f78d1e..5f198ebf 100644
--- a/crc/scripts/required_docs.py
+++ b/crc/scripts/documents.py
@@ -4,30 +4,31 @@ from crc.services.file_service import FileService
from crc.services.protocol_builder import ProtocolBuilderService
-class RequiredDocs(Script):
+class Documents(Script):
"""Provides information about the documents required by Protocol Builder."""
pb = ProtocolBuilderService()
def get_description(self):
return """
-Provides detailed information about the documents required by the Protocol Builder.
+Provides detailed information about the documents loaded as part of completing tasks.
Makes an immediate call to the IRB Protocol Builder API to get a list of currently required
documents. It then collects all the information in a reference file called 'irb_pro_categories.xls';
if the Id from Protocol Builder matches an Id in this table, all data available in that row
is also provided.
-This place a dictionary of values in the current task, where the key is the numeric id.
+This places a dictionary of values in the current task, where the key is the document code from the lookup table.
For example:
-``` "required_docs" :
+``` "documents" :
{
- 6: {
+ "UVACompliance_PRCApproval": {
"name": "Cancer Center's PRC Approval Form",
"category1": "UVA Compliance",
"category2": "PRC Approval",
"category3": "",
"Who Uploads?": "CRC",
"required": True,
+ "requirement_id": 6
"upload_count": 0
},
-    24: { ...
+    "AD_LabManual": { ...
@@ -37,30 +38,30 @@ For example:
def do_task_validate_only(self, task, study_id, *args, **kwargs):
"""For validation only, pretend no results come back from pb"""
pb_docs = []
- self.get_required_docs(study_id, pb_docs)
- task.data["required_docs"] = self.get_required_docs(study_id, pb_docs)
+ task.data["required_docs"] = self.get_documents(study_id, pb_docs)
def do_task(self, task, study_id, *args, **kwargs):
"""Takes data from the protocol builder, and merges it with data from the IRB Pro Categories
spreadsheet to return pertinent details about the required documents."""
pb_docs = self.pb.get_required_docs(study_id, as_objects=True)
- self.get_required_docs(study_id, pb_docs)
- task.data["required_docs"] = self.get_required_docs(study_id, pb_docs)
+ task.data["documents"] = self.get_documents(study_id, pb_docs)
- def get_required_docs(self, study_id, pb_docs):
+ def get_documents(self, study_id, pb_docs):
"""Takes data from the protocol builder, and merges it with data from the IRB Pro Categories spreadsheet to return
pertinent details about the required documents."""
doc_dictionary = FileService.get_file_reference_dictionary()
required_docs = {}
- for doc in pb_docs:
- id = int(doc.AUXDOCID)
- required_doc = {'id': id, 'name': doc.AUXDOC, 'required': True,
- 'count': 0}
- if id in doc_dictionary:
- required_doc = {**required_doc, **doc_dictionary[id]}
- required_doc['count'] = self.get_count(study_id, doc_dictionary[id]["Code"])
- required_docs[id] = required_doc
+ for code, required_doc in doc_dictionary.items():
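+            # Find the Protocol Builder record whose numeric AUXDOCID matches this document's Id, if any.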
+            try:
+                pb_data = next((item for item in pb_docs if int(item.AUXDOCID) == int(required_doc['Id'])), None)
+            except (AttributeError, KeyError, TypeError, ValueError):
+                # A malformed spreadsheet row or Protocol Builder record should not break the whole merge.
+                pb_data = None
+            required_doc['required'] = pb_data is not None
+            required_doc['count'] = self.get_count(study_id, code)
+            required_docs[code] = required_doc
return required_docs
def get_count(self, study_id, irb_doc_code):
diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py
index 2cc0da4d..d272ba14 100644
--- a/crc/scripts/study_info.py
+++ b/crc/scripts/study_info.py
@@ -24,7 +24,28 @@ class StudyInfo(Script):
def do_task_validate_only(self, task, study_id, *args, **kwargs):
"""For validation only, pretend no results come back from pb"""
self.check_args(args)
-
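+        # Hard-coded placeholder study data, so validation has a realistic structure to exercise downstream tasks.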
+        data = {
+            "study": {
+                "info": {
+                    "id": 12,
+                    "title": "test",
+                    "primary_investigator_id": 21,
+                    "user_uid": "dif84",
+                    "sponsor": "sponsor",
+                    "ind_number": "1234",
+                    "inactive": False
+                },
+                "investigators": {
+                    "INVESTIGATORTYPE": "PI",
+                    "INVESTIGATORTYPEFULL": "Primary Investigator",
+                    "NETBADGEID": "dhf8r"
+                },
+                "details": {}
+            }
+        }
+        task.data["study"] = data["study"]
def do_task(self, task, study_id, *args, **kwargs):
self.check_args(args)
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index 292012cb..e3ba27ff 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -67,12 +67,12 @@ class FileService(object):
data_model = FileService.get_reference_file_data(FileService.IRB_PRO_CATEGORIES_FILE)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
- # Pandas is lovely, but weird. Here we drop records without an Id, and convert it to an integer.
- df = df.drop_duplicates(subset='Id').astype({'Id': 'Int64'})
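+        # Index on the Code column so callers can look documents up by their short code.
+        # Illustrative shape only (actual columns come from the irb_documents.xlsx reference file):
+        #   {'UVACompl_PRCAppr': {'Id': 6, 'Name': "Cancer Center's PRC Approval Form", ...}, ...}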
+ return df.set_index('Code').to_dict('index')
-        # Now we index on the ID column and convert to a dictionary, where the key is the id, and the value
-        # is a dictionary with all the remaining data in it. It's kinda pretty really.
-        all_dict = df.set_index('Id').to_dict('index')
-        return all_dict
@staticmethod
def add_task_file(study_id, workflow_id, task_id, name, content_type, binary_data,
diff --git a/crc/static/bpmn/top_level_workflow/data_security_plan.dmn b/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
index 592e237e..bdb508ec 100644
--- a/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
+++ b/crc/static/bpmn/top_level_workflow/data_security_plan.dmn
@@ -6,20 +6,28 @@
-          <text>required_docs.keys()</text>
+          <text>documents['UVACompl_PRCAppr']['required']</text>
-          <text>contains(6)</text>
+          <text>true</text>
+          <text>"required"</text>
+          <text>false</text>
+          <text>"disabled"</text>
diff --git a/crc/static/bpmn/top_level_workflow/enter_core_info.dmn b/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
index da5eee72..6f129294 100644
--- a/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
+++ b/crc/static/bpmn/top_level_workflow/enter_core_info.dmn
@@ -6,8 +6,8 @@
-          <text>required_docs.keys()</text>
+          <text>documents['UVACompl_PRCAppr']['required']</text>
diff --git a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
index 9cbbf85d..5d204a42 100644
--- a/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
+++ b/crc/static/bpmn/top_level_workflow/sponsor_funding_source.dmn
@@ -5,15 +5,15 @@
-          <text>required_docs.keys()</text>
+          <text>documents['AD_LabManual']['required']</text>
-          <text>contains(12)</text>
+          <text>true</text>
+          <text>"required"</text>
@@ -21,7 +21,7 @@
-          <text>not contains(12)</text>
+          <text>false</text>
+          <text>"disabled"</text>
diff --git a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
index bbf6171e..ea8f0c84 100644
--- a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
+++ b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn
@@ -11,7 +11,7 @@
       <bpmn:incoming>SequenceFlow_1ees8ka</bpmn:incoming>
       <bpmn:outgoing>SequenceFlow_17ct47v</bpmn:outgoing>
-      <bpmn:script>RequiredDocs</bpmn:script>
+      <bpmn:script>Documents</bpmn:script>
diff --git a/crc/static/reference/irb_documents.xlsx b/crc/static/reference/irb_documents.xlsx
index 35feedf1..9f3b0cdd 100644
Binary files a/crc/static/reference/irb_documents.xlsx and b/crc/static/reference/irb_documents.xlsx differ
diff --git a/tests/data/file_upload_form/file_upload_form.bpmn b/tests/data/file_upload_form/file_upload_form.bpmn
index 11d8312f..402d0a9e 100644
--- a/tests/data/file_upload_form/file_upload_form.bpmn
+++ b/tests/data/file_upload_form/file_upload_form.bpmn
@@ -15,7 +15,7 @@
OGC will upload the Non-Funded Executed Agreement after it has been negotiated by OSP contract negotiator.
-
+
diff --git a/tests/data/reference/irb_documents.xlsx b/tests/data/reference/irb_documents.xlsx
index 35feedf1..9f3b0cdd 100644
Binary files a/tests/data/reference/irb_documents.xlsx and b/tests/data/reference/irb_documents.xlsx differ
diff --git a/tests/data/top_level_workflow/data_security_plan.dmn b/tests/data/top_level_workflow/data_security_plan.dmn
index 592e237e..582f1067 100644
--- a/tests/data/top_level_workflow/data_security_plan.dmn
+++ b/tests/data/top_level_workflow/data_security_plan.dmn
@@ -5,21 +5,29 @@
-          <text>required_docs.keys()</text>
+          <text>documents['Study_DataSecurityPlan']['required']</text>
-          <text>contains(6)</text>
+          <text>true</text>
+          <text>"required"</text>
+          <text>false</text>
+          <text>"disabled"</text>
diff --git a/tests/data/top_level_workflow/enter_core_info.dmn b/tests/data/top_level_workflow/enter_core_info.dmn
index da5eee72..6f129294 100644
--- a/tests/data/top_level_workflow/enter_core_info.dmn
+++ b/tests/data/top_level_workflow/enter_core_info.dmn
@@ -6,8 +6,8 @@
-          <text>required_docs.keys()</text>
+          <text>documents['UVACompl_PRCAppr']['required']</text>
diff --git a/tests/data/top_level_workflow/sponsor_funding_source.dmn b/tests/data/top_level_workflow/sponsor_funding_source.dmn
index 9cbbf85d..2c644ef3 100644
--- a/tests/data/top_level_workflow/sponsor_funding_source.dmn
+++ b/tests/data/top_level_workflow/sponsor_funding_source.dmn
@@ -5,15 +5,15 @@
-          <text>required_docs.keys()</text>
+          <text>documents['AD_LabManual']['required']</text>
-          <text>contains(12)</text>
+          <text>true</text>
+          <text>"required"</text>
@@ -21,7 +21,7 @@
-          <text>not contains(12)</text>
+          <text>false</text>
+          <text>"disabled"</text>
diff --git a/tests/data/top_level_workflow/top_level_workflow.bpmn b/tests/data/top_level_workflow/top_level_workflow.bpmn
index bbf6171e..ea8f0c84 100644
--- a/tests/data/top_level_workflow/top_level_workflow.bpmn
+++ b/tests/data/top_level_workflow/top_level_workflow.bpmn
@@ -11,7 +11,7 @@
       <bpmn:incoming>SequenceFlow_1ees8ka</bpmn:incoming>
       <bpmn:outgoing>SequenceFlow_17ct47v</bpmn:outgoing>
-      <bpmn:script>RequiredDocs</bpmn:script>
+      <bpmn:script>Documents</bpmn:script>
diff --git a/tests/test_required_docs_script.py b/tests/test_required_docs_script.py
index 1c41ac23..5b710db9 100644
--- a/tests/test_required_docs_script.py
+++ b/tests/test_required_docs_script.py
@@ -4,7 +4,7 @@ from unittest.mock import patch
from crc import db
from crc.models.file import FileDataModel, FileModel
from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema
-from crc.scripts.required_docs import RequiredDocs
+from crc.scripts.documents import Documents
from crc.services.file_service import FileService
from tests.base_test import BaseTest
@@ -29,13 +29,13 @@ class TestRequiredDocsScript(BaseTest):
db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
db.session.commit()
db.session.flush()
- errors = RequiredDocs.validate()
+ errors = Documents.validate()
self.assertTrue(len(errors) > 0)
self.assertEquals("file_not_found", errors[0].code)
def test_no_validation_error_when_correct_file_exists(self):
self.create_reference_document()
- errors = RequiredDocs.validate()
+ errors = Documents.validate()
self.assertTrue(len(errors) == 0)
def test_load_lookup_data(self):
@@ -50,31 +50,31 @@ class TestRequiredDocsScript(BaseTest):
def test_get_required_docs(self):
pb_docs = self.get_required_docs()
self.create_reference_document()
- script = RequiredDocs()
- required_docs = script.get_required_docs(12, pb_docs) # Mocked out, any random study id works.
- self.assertIsNotNone(required_docs)
- self.assertTrue(6 in required_docs.keys())
- self.assertEquals("Cancer Center's PRC Approval Form", required_docs[6]['name'])
- self.assertEquals("UVA Compliance", required_docs[6]['category1'])
- self.assertEquals("PRC Approval", required_docs[6]['category2'])
- self.assertEquals("CRC", required_docs[6]['Who Uploads?'])
- self.assertEquals(0, required_docs[6]['count'])
+ script = Documents()
+ documents = script.get_documents(12, pb_docs) # Mocked out, any random study id works.
+ self.assertIsNotNone(documents)
+        self.assertIn("UVACompl_PRCAppr", documents)
+        self.assertEqual("Cancer Center's PRC Approval Form", documents["UVACompl_PRCAppr"]['Name'])
+        self.assertEqual("UVA Compliance", documents["UVACompl_PRCAppr"]['category1'])
+        self.assertEqual("PRC Approval", documents["UVACompl_PRCAppr"]['category2'])
+        self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
+        self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
def test_get_required_docs_has_correct_count_when_a_file_exists(self):
self.load_example_data()
pb_docs = self.get_required_docs()
# Make sure the xslt reference document is in place.
self.create_reference_document()
- script = RequiredDocs()
+ script = Documents()
# Add a document to the study with the correct code.
workflow = self.create_workflow('docx')
- irb_code = "UVACompliance.PRCApproval" # The first file referenced in pb required docs.
+ irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_task_file(study_id=workflow.study_id, workflow_id=workflow.id,
task_id="fakingthisout",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
- required_docs = script.get_required_docs(workflow.study_id, pb_docs)
- self.assertIsNotNone(required_docs)
- self.assertEquals(1, required_docs[6]['count'])
+ docs = script.get_documents(workflow.study_id, pb_docs)
+ self.assertIsNotNone(docs)
+        self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
diff --git a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py
index 0cf4664a..18d51650 100644
--- a/tests/test_workflow_processor.py
+++ b/tests/test_workflow_processor.py
@@ -368,13 +368,13 @@ class TestWorkflowProcessor(BaseTest):
# It should mark Enter Core Data as required, because it is always required.
self.assertTrue("enter_core_info" in data)
- self.assertEquals("required", data["enter_core_info"])
+ self.assertEqual("required", data["enter_core_info"])
# It should mark the Data Security Plan as required, because InfoSec Approval (24) is included in required docs.
self.assertTrue("data_security_plan" in data)
- self.assertEquals("required", data["data_security_plan"])
+ self.assertEqual("required", data["data_security_plan"])
# It should mark the sponsor funding source as disabled since the funding required (12) is not included in the required docs.
self.assertTrue("sponsor_funding_source" in data)
- self.assertEquals("disabled", data["sponsor_funding_source"])
+ self.assertEqual("disabled", data["sponsor_funding_source"])