From 384da075efaf728b2fe43605d75e2157807ea379 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 20 May 2021 11:54:12 -0400 Subject: [PATCH 01/31] Removed references to `protocol` from the description --- crc/scripts/study_info.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index b5e49e84..e6becf8e 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -118,7 +118,7 @@ class StudyInfo(Script): def get_description(self): return """ -StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', 'details', 'documents' or 'protocol'. +StudyInfo [TYPE], where TYPE is one of 'info', 'investigators', 'details', or 'documents'. Adds details about the current study to the Task Data. The type of information required should be provided as an argument. The following arguments are available: @@ -157,9 +157,6 @@ Please note this is just a few examples, ALL known document types are returned i {documents_example} ``` -### Protocol ### -Returns information specific to the protocol. - """.format(info_example=self.example_to_string("info"), investigators_example=self.example_to_string("investigators"), From 1f67f5527542a4a5d9f0dde5a881457b400e684d Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 20 May 2021 13:10:04 -0400 Subject: [PATCH 02/31] Fixed definition for FRONTEND and BPMN. I was overwriting the environmental variables. 
--- config/default.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/default.py b/config/default.py index 464b4ef7..fe96d8f5 100644 --- a/config/default.py +++ b/config/default.py @@ -17,8 +17,8 @@ API_TOKEN = environ.get('API_TOKEN', default = 'af95596f327c9ecc007b60414fc84b61 NAME = "CR Connect Workflow" DEFAULT_PORT = "5000" FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default=DEFAULT_PORT) -FRONTEND = "localhost:4200" -BPMN = "localhost:5002" +FRONTEND = environ.get('FRONTEND', default="localhost:4200") +BPMN = environ.get('BPMN', default="localhost:5002") CORS_DEFAULT = f'{FRONTEND}, {BPMN}' CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default=CORS_DEFAULT)) TESTING = environ.get('TESTING', default="false") == "true" From acccf3b7047ab3c821a96b1d604eb4c697672d05 Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Wed, 26 May 2021 10:50:20 -0400 Subject: [PATCH 03/31] Make sure we are using the correct script engine --- crc/api/workflow.py | 1 + 1 file changed, 1 insertion(+) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index b51cdb2c..bddd1f8a 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -206,6 +206,7 @@ def update_task(workflow_id, task_id, body, terminate_loop=None, update_all=Fals processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) spiff_task = processor.bpmn_workflow.get_task(task_id) + spiff_task.workflow.script_engine = processor.bpmn_workflow.script_engine _verify_user_and_role(processor, spiff_task) user = UserService.current_user(allow_admin_impersonate=False) # Always log as the real user. From a5d67bb2453e50812fd2e63d424f7e2e69a5fb3d Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Thu, 27 May 2021 12:24:30 -0400 Subject: [PATCH 04/31] 337 partial fix if the user calls the file_data_set function for a valid file with the key 'irb_code' and a value of a valid IRB document code, then we should set the irb code on the file. 
--- crc/scripts/file_data_set.py | 14 ++++++- crc/services/file_service.py | 21 ++++++++++ tests/study/test_study_details_documents.py | 43 ++++++++++++++++++++- 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/crc/scripts/file_data_set.py b/crc/scripts/file_data_set.py index 99cbdd45..a852c17d 100644 --- a/crc/scripts/file_data_set.py +++ b/crc/scripts/file_data_set.py @@ -3,6 +3,7 @@ from flask import g from crc.api.common import ApiError from crc.scripts.data_store_base import DataStoreBase from crc.scripts.script import Script +from crc.services.file_service import FileService class FileDataSet(Script, DataStoreBase): @@ -34,8 +35,19 @@ class FileDataSet(Script, DataStoreBase): def do_task(self, task, study_id, workflow_id, *args, **kwargs): if self.validate_kw_args(**kwargs): myargs = [kwargs['key'],kwargs['value']] - fileid = kwargs['file_id'] + + try: + fileid = int(kwargs['file_id']) + except: + raise ApiError("invalid_file_id", + "Attempting to update DataStore for an invalid fileid '%s'" % kwargs['file_id']) + del(kwargs['file_id']) + if kwargs['key'] == 'irb_code': + irb_doc_code = kwargs['value'] + FileService.update_irb_code(fileid,irb_doc_code) + + return self.set_data_common(task.id, None, None, diff --git a/crc/services/file_service.py b/crc/services/file_service.py index 3d8a1e39..bed1c88f 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -97,6 +97,27 @@ class FileService(object): review = any([f.is_review for f in files]) return review + @staticmethod + def update_irb_code(file_id, irb_doc_code): + """Create a new file and associate it with the workflow + Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document.""" + if not FileService.is_allowed_document(irb_doc_code): + raise ApiError("invalid_form_field_key", + "When uploading files, the form field id must match a known document in the " + "irb_docunents.xslx reference file. 
This code is not found in that file '%s'" % irb_doc_code) + + """ """ + file_model = session.query(FileModel)\ + .filter(FileModel.id == file_id).first() + if file_model is None: + raise ApiError("invalid_file_id", + "When updating the irb_doc_code for a file, that file_id must already exist " + "This file_id is not found in the database '%d'" % file_id) + + file_model.irb_doc_code = irb_doc_code + session.commit() + return True + @staticmethod def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data): diff --git a/tests/study/test_study_details_documents.py b/tests/study/test_study_details_documents.py index 757ff938..3e14b166 100644 --- a/tests/study/test_study_details_documents.py +++ b/tests/study/test_study_details_documents.py @@ -113,4 +113,45 @@ class TestStudyDetailsDocumentsScript(BaseTest): docs = StudyInfo().do_task(task, study.id, workflow_model.id, "documents") self.assertTrue(isinstance(docs, Box)) self.assertEquals(1, len(docs.UVACompl_PRCAppr.files)) - self.assertEquals("doodle", docs.UVACompl_PRCAppr.files[0].data_store.ginger) \ No newline at end of file + self.assertEquals("doodle", docs.UVACompl_PRCAppr.files[0].data_store.ginger) + + @patch('crc.services.protocol_builder.requests.get') + def test_file_data_set_changes_irb_code(self, mock_get): + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('required_docs.json') + self.load_example_data() + self.create_reference_document() + study = session.query(StudyModel).first() + workflow_spec_model = self.load_test_spec("two_forms") + workflow_model = StudyService._create_workflow_model(study, workflow_spec_model) + irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs. 
+ file = FileService.add_workflow_file(workflow_id=workflow_model.id, + name="anything.png", content_type="text", + binary_data=b'1234', irb_doc_code=irb_code) + processor = WorkflowProcessor(workflow_model) + task = processor.next_task() + FileDataSet().do_task(task, study.id, workflow_model.id, key="irb_code", value="Study_App_Doc", file_id=file.id) + docs = StudyInfo().do_task(task, study.id, workflow_model.id, "documents") + self.assertTrue(isinstance(docs, Box)) + self.assertEquals(1, len(docs.Study_App_Doc.files)) + self.assertEquals("Study_App_Doc", docs.Study_App_Doc.files[0].data_store.irb_code) + + + @patch('crc.services.protocol_builder.requests.get') + def test_file_data_set_invalid_irb_code_fails(self, mock_get): + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('required_docs.json') + self.load_example_data() + self.create_reference_document() + study = session.query(StudyModel).first() + workflow_spec_model = self.load_test_spec("two_forms") + workflow_model = StudyService._create_workflow_model(study, workflow_spec_model) + irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs. 
+ file = FileService.add_workflow_file(workflow_id=workflow_model.id, + name="anything.png", content_type="text", + binary_data=b'1234', irb_doc_code=irb_code) + processor = WorkflowProcessor(workflow_model) + task = processor.next_task() + with self.assertRaises(ApiError): + FileDataSet().do_task(task, study.id, workflow_model.id, key="irb_code", value="My_Pretty_Pony", + file_id=file.id) From 8c04e228e9742f1a73c6dd6d216528a747f37a44 Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Tue, 1 Jun 2021 11:46:43 -0400 Subject: [PATCH 05/31] Add test and fix to make sure that an empty study associates list (or a blank list) will effectively clear the extra study associates --- crc/scripts/update_study_associates.py | 32 ++++---- .../study_sponsors_associates_delete.bpmn | 79 +++++++++++++++++++ tests/study/test_study_associate_script.py | 27 +++++++ 3 files changed, 124 insertions(+), 14 deletions(-) create mode 100644 tests/data/study_sponsors_associates_delete/study_sponsors_associates_delete.bpmn diff --git a/crc/scripts/update_study_associates.py b/crc/scripts/update_study_associates.py index a11830c7..8ee5b58b 100644 --- a/crc/scripts/update_study_associates.py +++ b/crc/scripts/update_study_associates.py @@ -4,13 +4,11 @@ from crc.services.study_service import StudyService class UpdateStudyAssociates(Script): - argument_error_message = "You must supply at least one argument to the " \ "update_study_associates task, an array of objects in the form " \ "{'uid':'someid', 'role': 'text', 'send_email: 'boolean', " \ "'access':'boolean'} " - def get_description(self): return """ Allows you to associate other users with a study - only 'uid' is required in the @@ -26,20 +24,26 @@ associations already in place. 
example : update_study_associates([{'uid':'sbp3ey','role':'Unicorn Herder', 'send_email': False, 'access':True}]) """ - def validate_arg(self,arg): - if not isinstance(arg,list): + + def validate_arg(self, arg): + if not isinstance(arg, list): raise ApiError("invalid parameter", "This function is expecting a list of dictionaries") - if not len(arg) > 0 and not isinstance(arg[0],dict): - raise ApiError("invalid paramemter","This function is expecting a list of dictionaries") + if len(arg[0]) > 0: + if not len(arg) > 0 and not isinstance(arg[0], dict): + raise ApiError("invalid paramemter", "This function is expecting a list of dictionaries") def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs): - items = args[0] - self.validate_arg(items) - return all([x.get('uid',False) for x in items]) - + if len(args) == 0: + items = [] + else: + items = args[0] + self.validate_arg(items) + return all([x.get('uid', False) for x in items]) def do_task(self, task, study_id, workflow_id, *args, **kwargs): - access_list = args[0] - self.validate_arg(access_list) - return StudyService.update_study_associates(study_id,access_list) - + if len(args) == 0: + access_list = [] + else: + access_list = args[0] + self.validate_arg(access_list) + return StudyService.update_study_associates(study_id, access_list) diff --git a/tests/data/study_sponsors_associates_delete/study_sponsors_associates_delete.bpmn b/tests/data/study_sponsors_associates_delete/study_sponsors_associates_delete.bpmn new file mode 100644 index 00000000..a4d96fe8 --- /dev/null +++ b/tests/data/study_sponsors_associates_delete/study_sponsors_associates_delete.bpmn @@ -0,0 +1,79 @@ + + + + + SequenceFlow_1nfe5m9 + + + + SequenceFlow_1nfe5m9 + SequenceFlow_1bqiin0 + sponsors = study_info('sponsors') + + + + + SequenceFlow_1bqiin0 + Flow_09cika8 + update_study_associate(uid='lb3dp',role='SuperGal',send_email=False,access=True) 
+update_study_associate(uid='lje5u',role='SuperGal2',send_email=False,access=False) + + + Flow_0axwrzg + + + This should just leave us a task to complete after the update_study_assocate script + Flow_1xi8k3i + Flow_0axwrzg + + + + + Flow_09cika8 + Flow_1xi8k3i + update_study_associates() + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/study/test_study_associate_script.py b/tests/study/test_study_associate_script.py index 75faf07f..07260822 100644 --- a/tests/study/test_study_associate_script.py +++ b/tests/study/test_study_associate_script.py @@ -161,3 +161,30 @@ class TestSudySponsorsScript(BaseTest): app.config['PB_ENABLED'] = False output = user_studies() self.assertEqual(len(output),0) + + + @patch('crc.services.protocol_builder.requests.get') + def test_study_sponsors_script_ensure_delete(self, mock_get): + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('sponsors.json') + flask.g.user = UserModel(uid='dhf8r') + app.config['PB_ENABLED'] = True + + self.load_example_data() + self.create_reference_document() + study = session.query(StudyModel).first() + workflow_spec_model = self.load_test_spec("study_sponsors_associates_delete") + workflow_model = StudyService._create_workflow_model(study, workflow_spec_model) + WorkflowService.test_spec("study_sponsors_associates_delete") + processor = WorkflowProcessor(workflow_model) + processor.do_engine_steps() + # change user and make sure we can access the study + flask.g.user = UserModel(uid='lb3dp') + flask.g.token = 'my spiffy token' + app.config['PB_ENABLED'] = False + output = user_studies() + self.assertEqual(len(output),0) + flask.g.token = 'my spiffy token' + app.config['PB_ENABLED'] = False + output = user_studies() + self.assertEqual(len(output),0) From e9fe555e05d33f359548b717d22efad6278c7dab Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 13:49:21 -0400 Subject: [PATCH 06/31] New 
`/datastore/file/{file_id}` endpoint definition --- crc/api.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/crc/api.yml b/crc/api.yml index 6501cebb..e032ccd5 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -1372,6 +1372,27 @@ paths: application/json: schema: $ref: "#/components/schemas/DataStore" + /datastore/file/{file_id}: + parameters: + - name: file_id + in: path + required: true + description: The file id we are concerned with + schema: + type: string + format: string + get: + operationId: crc.api.data_store.file_multi_get + summary: Gets all datastore items by file_id + tags: + - DataStore + responses: + '200': + description: Get all values from the data store for a file_id + content: + application/json: + schema: + $ref: "#/components/schemas/DataStore" components: securitySchemes: jwt: From 1ed144536fbf45db285565745a56ebc908412d1b Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 13:50:04 -0400 Subject: [PATCH 07/31] New method for file datastore api endpoint --- crc/api/data_store.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crc/api/data_store.py b/crc/api/data_store.py index a87ff938..b5e68a3a 100644 --- a/crc/api/data_store.py +++ b/crc/api/data_store.py @@ -5,7 +5,7 @@ from crc import session from crc.api.common import ApiError from crc.models.data_store import DataStoreModel, DataStoreSchema from crc.scripts.data_store_base import DataStoreBase - +from crc.models.file import FileModel def study_multi_get(study_id): """Get all data_store values for a given study_id study""" @@ -30,6 +30,16 @@ def user_multi_get(user_id): return results +def file_multi_get(file_id): + """Get all data values in the data store for a file_id""" + if file_id is None: + raise ApiError(code='unknown_file', message='Please provide a valid file id.') + dsb = DataStoreBase() + retval = dsb.get_multi_common(None, None, file_id=file_id) + results = DataStoreSchema(many=True).dump(retval) + return 
results + + def datastore_del(id): """Delete a data store item for a key""" session.query(DataStoreModel).filter_by(id=id).delete() From 2e3d8c73430af5ac9d4565a8941a01a7d774580f Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 13:51:10 -0400 Subject: [PATCH 08/31] Fix for DataStoreSchema missing the file_id column. The file_id column is a foreign key. --- crc/models/data_store.py | 1 + 1 file changed, 1 insertion(+) diff --git a/crc/models/data_store.py b/crc/models/data_store.py index 07017cee..a156e004 100644 --- a/crc/models/data_store.py +++ b/crc/models/data_store.py @@ -25,4 +25,5 @@ class DataStoreSchema(SQLAlchemyAutoSchema): class Meta: model = DataStoreModel load_instance = True + include_fk = True sqla_session = db.session From ec8353aaa61e8c9946a9e3415d34bfacded54a76 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 13:51:40 -0400 Subject: [PATCH 09/31] Test for new datastore file api endpoint --- tests/test_datastore_api.py | 52 ++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/tests/test_datastore_api.py b/tests/test_datastore_api.py index 8e8ca8ef..ad7ba27e 100644 --- a/tests/test_datastore_api.py +++ b/tests/test_datastore_api.py @@ -6,6 +6,7 @@ from tests.base_test import BaseTest from datetime import datetime, timezone from unittest.mock import patch from crc.models.data_store import DataStoreModel, DataStoreSchema +from crc.models.file import FileModel from crc import session, app @@ -20,6 +21,13 @@ class DataStoreTest(BaseTest): "spec_id": "My Spec Name", "value": "Some Value" } + TEST_FILE_ITEM = { + "key": "MyKey", + "workflow_id": 12, + "task_id": "MyTask", + "spec_id": "My Spec Name", + "value": "Some Value" + } def add_test_study_data(self): study_data = DataStoreSchema().dump(self.TEST_STUDY_ITEM) @@ -42,7 +50,17 @@ class DataStoreTest(BaseTest): self.assert_success(rv) return json.loads(rv.get_data(as_text=True)) - + def add_test_file_data(self): + 
file_data = DataStoreSchema().dump(self.TEST_FILE_ITEM) + test_file = session.query(FileModel).first() + file_data['file_id'] = test_file.id + file_data['value'] = 'Some File Data Value' + rv = self.app.post('/v1.0/datastore', + content_type="application/json", + headers=self.logged_in_headers(), + data=json.dumps(file_data)) + self.assert_success(rv) + return json.loads(rv.get_data(as_text=True)) def test_get_study_data(self): """Generic test, but pretty detailed, in that the study should return a categorized list of workflows @@ -112,3 +130,35 @@ class DataStoreTest(BaseTest): self.assert_success(api_response) d = json.loads(api_response.get_data(as_text=True)) self.assertEqual(d[0]['value'],'Some Value') + + def test_datastore_user(self): + self.load_example_data() + new_user = self.add_test_user_data() + api_response = self.app.get(f'/v1.0/datastore/user/{new_user["user_id"]}', + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response) + data = json.loads(api_response.get_data(as_text=True)) + + print('test_datastore_user') + + def test_datastore_study(self): + self.load_example_data() + new_study = self.add_test_study_data() + api_response = self.app.get(f'/v1.0/datastore/study/{new_study["study_id"]}', + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response) + data = json.loads(api_response.get_data(as_text=True)) + + print('test_datastore_study') + + def test_datastore_file(self): + self.load_example_data() + new_file = self.add_test_file_data() + api_response = self.app.get(f'/v1.0/datastore/file/{new_file["file_id"]}', + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response) + data = json.loads(api_response.get_data(as_text=True)) + self.assertEqual('MyKey', data[0]['key']) + self.assertEqual('Some File Data Value', data[0]['value']) + + print('test_datastore_file') From 200a5b3bf34c184a2e5e9b823ba90e34cff1bf81 Mon 
Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 16:27:55 -0400 Subject: [PATCH 10/31] Updated tests for file datastore endpoint Added test of 2 entries --- tests/test_datastore_api.py | 82 ++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/tests/test_datastore_api.py b/tests/test_datastore_api.py index ad7ba27e..1a3d8657 100644 --- a/tests/test_datastore_api.py +++ b/tests/test_datastore_api.py @@ -50,11 +50,10 @@ class DataStoreTest(BaseTest): self.assert_success(rv) return json.loads(rv.get_data(as_text=True)) - def add_test_file_data(self): + def add_test_file_data(self, file_id, value): file_data = DataStoreSchema().dump(self.TEST_FILE_ITEM) - test_file = session.query(FileModel).first() - file_data['file_id'] = test_file.id - file_data['value'] = 'Some File Data Value' + file_data['file_id'] = file_id + file_data['value'] = value rv = self.app.post('/v1.0/datastore', content_type="application/json", headers=self.logged_in_headers(), @@ -131,34 +130,61 @@ class DataStoreTest(BaseTest): d = json.loads(api_response.get_data(as_text=True)) self.assertEqual(d[0]['value'],'Some Value') - def test_datastore_user(self): - self.load_example_data() - new_user = self.add_test_user_data() - api_response = self.app.get(f'/v1.0/datastore/user/{new_user["user_id"]}', - headers=self.logged_in_headers(), content_type="application/json") - self.assert_success(api_response) - data = json.loads(api_response.get_data(as_text=True)) - - print('test_datastore_user') - - def test_datastore_study(self): - self.load_example_data() - new_study = self.add_test_study_data() - api_response = self.app.get(f'/v1.0/datastore/study/{new_study["study_id"]}', - headers=self.logged_in_headers(), content_type="application/json") - self.assert_success(api_response) - data = json.loads(api_response.get_data(as_text=True)) - - print('test_datastore_study') - def test_datastore_file(self): self.load_example_data() - new_file = 
self.add_test_file_data() - api_response = self.app.get(f'/v1.0/datastore/file/{new_file["file_id"]}', - headers=self.logged_in_headers(), content_type="application/json") + test_file = session.query(FileModel).first() + + # make sure we don't already have a datastore + api_response = self.app.get(f'/v1.0/datastore/file/{test_file.id}', + headers=self.logged_in_headers(), + content_type="application/json") self.assert_success(api_response) data = json.loads(api_response.get_data(as_text=True)) + self.assertEqual(0, len(data)) + + # add datastore + self.add_test_file_data(test_file.id, 'Some File Data Value') + + # make sure we can get the datastore + api_response = self.app.get(f'/v1.0/datastore/file/{test_file.id}', + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(api_response) + data = json.loads(api_response.get_data(as_text=True)) + + self.assertEqual(1, len(data)) self.assertEqual('MyKey', data[0]['key']) self.assertEqual('Some File Data Value', data[0]['value']) - print('test_datastore_file') + def test_datastore_files(self): + self.load_example_data() + test_file = session.query(FileModel).first() + + # add datastore + value_1 = 'Some File Data Value 1' + self.add_test_file_data(test_file.id, value_1) + + # make sure we have 1 item in the datastore + api_response_1 = self.app.get(f'/v1.0/datastore/file/{test_file.id}', + headers=self.logged_in_headers(), content_type="application/json") + self.assert_success(api_response_1) + data_1 = json.loads(api_response_1.get_data(as_text=True)) + + self.assertEqual(1, len(data_1)) + self.assertEqual('MyKey', data_1[0]['key']) + self.assertEqual(value_1, data_1[0]['value']) + + # add second datastore + value_2 = 'Some File Data Value 2' + self.add_test_file_data(test_file.id, value_2) + + # make sure we have 2 items in the datastore + api_response_2 = self.app.get(f'/v1.0/datastore/file/{test_file.id}', + headers=self.logged_in_headers(), content_type="application/json") + 
self.assert_success(api_response_2) + data_2 = json.loads(api_response_2.get_data(as_text=True)) + self.assertEqual(2, len(data_2)) + self.assertEqual(value_1, data_2[0]['value']) + self.assertEqual(value_2, data_2[1]['value']) + + print('test_datastore_files') From 162dee45d3fe0e306d0721d7e9aa7dc7c1404428 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Tue, 1 Jun 2021 16:34:21 -0400 Subject: [PATCH 11/31] Unused import --- crc/api/data_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/api/data_store.py b/crc/api/data_store.py index b5e68a3a..327ba939 100644 --- a/crc/api/data_store.py +++ b/crc/api/data_store.py @@ -5,7 +5,7 @@ from crc import session from crc.api.common import ApiError from crc.models.data_store import DataStoreModel, DataStoreSchema from crc.scripts.data_store_base import DataStoreBase -from crc.models.file import FileModel + def study_multi_get(study_id): """Get all data_store values for a given study_id study""" From e2e35b673d67594d914b42b3a635371e89e240a0 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Wed, 2 Jun 2021 09:59:41 -0400 Subject: [PATCH 12/31] Moved data store code from script to service --- .../data_store_base.py => services/data_store_service.py} | 3 --- 1 file changed, 3 deletions(-) rename crc/{scripts/data_store_base.py => services/data_store_service.py} (99%) diff --git a/crc/scripts/data_store_base.py b/crc/services/data_store_service.py similarity index 99% rename from crc/scripts/data_store_base.py rename to crc/services/data_store_service.py index f9694fde..8f6c0bed 100644 --- a/crc/scripts/data_store_base.py +++ b/crc/services/data_store_service.py @@ -1,6 +1,3 @@ -import importlib -import os -import pkgutil from crc import session from crc.api.common import ApiError from crc.models.data_store import DataStoreModel From 506b84a49adf5ea02f474ecd6a4e7b46d2b1fcd5 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Wed, 2 Jun 2021 10:00:18 -0400 Subject: [PATCH 13/31] Import 
DataStoreBase from service now --- crc/api/data_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/api/data_store.py b/crc/api/data_store.py index 327ba939..11f25685 100644 --- a/crc/api/data_store.py +++ b/crc/api/data_store.py @@ -4,7 +4,7 @@ from datetime import datetime from crc import session from crc.api.common import ApiError from crc.models.data_store import DataStoreModel, DataStoreSchema -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase def study_multi_get(study_id): From 7f0d8a131d2830f4529b9f7728ff4f8e5b0fc835 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Wed, 2 Jun 2021 10:00:55 -0400 Subject: [PATCH 14/31] Import DataStoreBase from service now --- crc/scripts/file_data_get.py | 2 +- crc/scripts/file_data_set.py | 2 +- crc/scripts/study_data_get.py | 2 +- crc/scripts/study_data_set.py | 2 +- crc/scripts/user_data_get.py | 2 +- crc/scripts/user_data_set.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crc/scripts/file_data_get.py b/crc/scripts/file_data_get.py index b630b2e4..5ce6eb14 100644 --- a/crc/scripts/file_data_get.py +++ b/crc/scripts/file_data_get.py @@ -1,7 +1,7 @@ from flask import g from crc.api.common import ApiError -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script diff --git a/crc/scripts/file_data_set.py b/crc/scripts/file_data_set.py index 99cbdd45..2ba8f722 100644 --- a/crc/scripts/file_data_set.py +++ b/crc/scripts/file_data_set.py @@ -1,7 +1,7 @@ from flask import g from crc.api.common import ApiError -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script diff --git a/crc/scripts/study_data_get.py b/crc/scripts/study_data_get.py index 6ef6be4d..fa162d1b 100644 --- a/crc/scripts/study_data_get.py +++ 
b/crc/scripts/study_data_get.py @@ -1,4 +1,4 @@ -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script diff --git a/crc/scripts/study_data_set.py b/crc/scripts/study_data_set.py index 9c4135ab..2d0c2e6b 100644 --- a/crc/scripts/study_data_set.py +++ b/crc/scripts/study_data_set.py @@ -1,4 +1,4 @@ -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script diff --git a/crc/scripts/user_data_get.py b/crc/scripts/user_data_get.py index 4f181c13..103475b1 100644 --- a/crc/scripts/user_data_get.py +++ b/crc/scripts/user_data_get.py @@ -1,6 +1,6 @@ from flask import g -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script diff --git a/crc/scripts/user_data_set.py b/crc/scripts/user_data_set.py index 906afab2..9baf77fd 100644 --- a/crc/scripts/user_data_set.py +++ b/crc/scripts/user_data_set.py @@ -1,6 +1,6 @@ from flask import g -from crc.scripts.data_store_base import DataStoreBase +from crc.services.data_store_service import DataStoreBase from crc.scripts.script import Script From a2e577829a831ae2341c176ec02e8f69abae9790 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Wed, 2 Jun 2021 10:02:26 -0400 Subject: [PATCH 15/31] Cleanup: - removed unused imports - removed testing print statement --- tests/test_datastore_api.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/test_datastore_api.py b/tests/test_datastore_api.py index 1a3d8657..ee042a4c 100644 --- a/tests/test_datastore_api.py +++ b/tests/test_datastore_api.py @@ -1,15 +1,10 @@ -import json -from profile import Profile - from tests.base_test import BaseTest -from datetime import datetime, timezone -from unittest.mock import patch from crc.models.data_store import DataStoreModel, DataStoreSchema 
from crc.models.file import FileModel -from crc import session, app - +from crc import session +import json class DataStoreTest(BaseTest): @@ -186,5 +181,3 @@ class DataStoreTest(BaseTest): self.assertEqual(2, len(data_2)) self.assertEqual(value_1, data_2[0]['value']) self.assertEqual(value_2, data_2[1]['value']) - - print('test_datastore_files') From d657744816f98d7ae89d5855dbb6a1157e4addaf Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 3 Jun 2021 14:17:12 -0400 Subject: [PATCH 16/31] Added optional `validate_study_id` parameter to the validation api endpoint. --- crc/api.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crc/api.yml b/crc/api.yml index 6501cebb..3209a594 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -512,6 +512,12 @@ paths: description: The unique id of an existing workflow specification to validate. schema: type: string + - name: validate_study_id + in: query + required: false + description: Optional id of study to test under different scenarios + schema: + type: string get: operationId: crc.api.workflow.validate_workflow_specification summary: Loads and attempts to execute a Workflow Specification, returning a list of errors encountered From 0dfc96d7f603d7fd60cbf93b1bc9914ca46289d2 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 3 Jun 2021 14:19:41 -0400 Subject: [PATCH 17/31] Modified `validate_workflow_specification` to accept new optional `validate_study_id` parameter. 
We then pass validate_study_id on to WorkflowService.test_spec --- crc/api/workflow.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index fcba4d26..789965c9 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -46,16 +46,16 @@ def get_workflow_specification(spec_id): return WorkflowSpecModelSchema().dump(spec) -def validate_workflow_specification(spec_id): +def validate_workflow_specification(spec_id, validate_study_id=None): errors = {} try: - WorkflowService.test_spec(spec_id) + WorkflowService.test_spec(spec_id, validate_study_id) except ApiError as ae: ae.message = "When populating all fields ... \n" + ae.message errors['all'] = ae try: # Run the validation twice, the second time, just populate the required fields. - WorkflowService.test_spec(spec_id, required_only=True) + WorkflowService.test_spec(spec_id, validate_study_id, required_only=True) except ApiError as ae: ae.message = "When populating only required fields ... \n" + ae.message errors['required'] = ae From c41657301ab03bbcef0a73f7d7269084710d80e2 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 3 Jun 2021 14:22:26 -0400 Subject: [PATCH 18/31] Modify `test_spec` and `make_test_workflow` to accept new optional `validate_study_id` parameter. Modify `make_test_workflow` to use study_id when creating test workflow for validation. --- crc/services/workflow_service.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index ee37c688..5b26296d 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -52,17 +52,27 @@ class WorkflowService(object): handles the testing of a workflow specification by completing it with random selections, attempting to mimic a front end as much as possible. 
""" + from crc.services.user_service import UserService @staticmethod - def make_test_workflow(spec_id): - user = db.session.query(UserModel).filter_by(uid="test").first() + def make_test_workflow(spec_id, validate_study_id=None): + try: + user = UserService.current_user() + except ApiError as e: + user = None + if not user: + user = db.session.query(UserModel).filter_by(uid="test").first() if not user: db.session.add(UserModel(uid="test")) db.session.commit() - study = db.session.query(StudyModel).filter_by(user_uid="test").first() + user = db.session.query(UserModel).filter_by(uid="test").first() + if validate_study_id: + study = db.session.query(StudyModel).filter_by(id=validate_study_id).first() + else: + study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first() if not study: - db.session.add(StudyModel(user_uid="test", title="test")) + db.session.add(StudyModel(user_uid=user.uid, title="test")) db.session.commit() - study = db.session.query(StudyModel).filter_by(user_uid="test").first() + study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first() workflow_model = WorkflowModel(status=WorkflowStatus.not_started, workflow_spec_id=spec_id, last_updated=datetime.utcnow(), @@ -80,7 +90,7 @@ class WorkflowService(object): db.session.delete(user) @staticmethod - def test_spec(spec_id, required_only=False): + def test_spec(spec_id, validate_study_id=None, required_only=False): """Runs a spec through it's paces to see if it results in any errors. Not fool-proof, but a good sanity check. Returns the final data output form the last task if successful. @@ -89,7 +99,7 @@ class WorkflowService(object): spec, only completing the required fields, rather than everything. 
""" - workflow_model = WorkflowService.make_test_workflow(spec_id) + workflow_model = WorkflowService.make_test_workflow(spec_id, validate_study_id) try: processor = WorkflowProcessor(workflow_model, validate_only=True) From 9a63ab9c4fe182aab6c72902acb9a98b2face2c5 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 3 Jun 2021 14:28:24 -0400 Subject: [PATCH 19/31] Change `do_task_validate_only` in study_info script to call the real do_task. We do this so we can seed settings into the workflow for testing different scenarios using the study from validate_study_id --- crc/scripts/study_info.py | 198 ++------------------------------------ 1 file changed, 6 insertions(+), 192 deletions(-) diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index b5e49e84..e5df40f8 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -4,9 +4,10 @@ from SpiffWorkflow.bpmn.PythonScriptEngine import Box from crc import session from crc.api.common import ApiError +from crc.api.workflow import get_workflow from crc.models.protocol_builder import ProtocolBuilderInvestigatorType from crc.models.study import StudyModel, StudySchema -from crc.models.workflow import WorkflowStatus +from crc.api import workflow as workflow_api from crc.scripts.script import Script from crc.services.cache_service import timeit from crc.services.file_service import FileService @@ -172,197 +173,10 @@ Returns information specific to the protocol. # Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.) 
FileService.get_reference_file_data(FileService.DOCUMENT_LIST) FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST) - data = { - "study": { - "info": { - "id": 12, - "title": "test", - "short_title": "tst", - "primary_investigator_id": 21, - "user_uid": "dif84", - "sponsor": "sponsor", - "ind_number": "1234", - "inactive": False - }, - "sponsors": [ - { - "COMMONRULEAGENCY": None, - "SPONSOR_ID": 2453, - "SP_NAME": "Abbott Ltd", - "SP_TYPE": "Private", - "SP_TYPE_GROUP_NAME": None, - "SS_STUDY": 2 - }, - { - "COMMONRULEAGENCY": None, - "SPONSOR_ID": 2387, - "SP_NAME": "Abbott-Price", - "SP_TYPE": "Incoming Sub Award", - "SP_TYPE_GROUP_NAME": "Government", - "SS_STUDY": 2 - }, - { - "COMMONRULEAGENCY": None, - "SPONSOR_ID": 1996, - "SP_NAME": "Abernathy-Heidenreich", - "SP_TYPE": "Foundation/Not for Profit", - "SP_TYPE_GROUP_NAME": "Other External Funding", - "SS_STUDY": 2 - } - ], - - "investigators": { - "PI": { - "label": ProtocolBuilderInvestigatorType.PI.value, - "display": "Always", - "unique": "Yes", - "user_id": "dhf8r", - "title": "", - "display_name": "Daniel Harold Funk", - "sponsor_type": "Contractor", - "telephone_number": "0000000000", - "department": "", - "email_address": "dhf8r@virginia.edu", - "given_name": "Daniel", - "uid": "dhf8r", - "affiliation": "", - "date_cached": "2020-08-04T19:32:08.006128+00:00" - }, - "SC_I": { - "label": ProtocolBuilderInvestigatorType.SC_I.value, - "display": "Always", - "unique": "Yes", - "user_id": "ajl2j", - "title": "", - "display_name": "Aaron Louie", - "sponsor_type": "Contractor", - "telephone_number": "0000000000", - "department": "", - "email_address": "ajl2j@virginia.edu", - "given_name": "Aaron", - "uid": "ajl2j", - "affiliation": "sponsored", - "date_cached": "2020-08-04T19:32:10.699666+00:00" - }, - "SC_II": { - "label": ProtocolBuilderInvestigatorType.SC_II.value, - "display": "Optional", - "unique": "Yes", - "user_id": "cah3us", - "title": "", - "display_name": "Alex Herron", - 
"sponsor_type": "Contractor", - "telephone_number": "0000000000", - "department": "", - "email_address": "cah3us@virginia.edu", - "given_name": "Alex", - "uid": "cah3us", - "affiliation": "sponsored", - "date_cached": "2020-08-04T19:32:10.075852+00:00" - }, - }, - "pi": { - "PI": { - "label": ProtocolBuilderInvestigatorType.PI.value, - "display": "Always", - "unique": "Yes", - "user_id": "dhf8r", - "title": "", - "display_name": "Daniel Harold Funk", - "sponsor_type": "Contractor", - "telephone_number": "0000000000", - "department": "", - "email_address": "dhf8r@virginia.edu", - "given_name": "Daniel", - "uid": "dhf8r", - "affiliation": "", - "date_cached": "2020-08-04T19:32:08.006128+00:00" - } - }, - "roles": - { - "INVESTIGATORTYPE": "PI", - "INVESTIGATORTYPEFULL": ProtocolBuilderInvestigatorType.PI.value, - "NETBADGEID": "dhf8r" - }, - "details": - { - "DSMB": None, - "DSMB_FREQUENCY": None, - "GCRC_NUMBER": None, - "IBC_NUMBER": None, - "IDE": None, - "IND_1": 1234, - "IND_2": None, - "IND_3": None, - "IRBREVIEWERADMIN": None, - "IS_ADULT_PARTICIPANT": None, - "IS_APPROVED_DEVICE": None, - "IS_AUX": None, - "IS_BIOMEDICAL": None, - "IS_CANCER_PATIENT": None, - "IS_CENTRAL_REG_DB": None, - "IS_CHART_REVIEW": None, - "IS_COMMITTEE_CONFLICT": None, - "IS_CONSENT_WAIVER": None, - "IS_DB": None, - "IS_ELDERLY_POP": None, - "IS_ENGAGED_RESEARCH": None, - "IS_FETUS_POP": None, - "IS_FINANCIAL_CONFLICT": None, - "IS_FOR_CANCER_CENTER": None, - "IS_FUNDING_SOURCE": None, - "IS_GCRC": None, - "IS_GENE_TRANSFER": None, - "IS_GRANT": None, - "IS_HGT": None, - "IS_IBC": None, - "IS_IDE": None, - "IS_IND": 1, - "IS_MENTAL_IMPAIRMENT_POP": None, - "IS_MINOR": None, - "IS_MINOR_PARTICIPANT": None, - "IS_MULTI_SITE": None, - "IS_NOT_CONSENT_WAIVER": None, - "IS_NOT_PRC_WAIVER": None, - "IS_OTHER_VULNERABLE_POP": None, - "IS_OUTSIDE_CONTRACT": None, - "IS_PI_INITIATED": None, - "IS_PI_SCHOOL": None, - "IS_PRC": None, - "IS_PRC_DSMP": None, - "IS_PREGNANT_POP": None, - 
"IS_PRISONERS_POP": None, - "IS_QUALITATIVE": None, - "IS_RADIATION": None, - "IS_REVIEW_BY_CENTRAL_IRB": None, - "IS_SPONSOR": None, - "IS_SPONSOR_MONITORING": None, - "IS_SURROGATE_CONSENT": None, - "IS_TISSUE_BANKING": None, - "IS_UVA_DB": None, - "IS_UVA_IDE": None, - "IS_UVA_IND": None, - "IS_UVA_LOCATION": None, - "IS_UVA_PI_MULTI": None, - "MULTI_SITE_LOCATIONS": None, - "NON_UVA_LOCATION": None, - "OTHER_VULNERABLE_DESC": None, - "PRC_NUMBER": None, - "SPONSORS_PROTOCOL_REVISION_DATE": None, - "UPLOAD_COMPLETE": None - }, - 'protocol': { - 'id': 0, - } - } - } - if args[0] == 'documents': - return self.box_it(StudyService().get_documents_status(study_id)) - return self.box_it(data['study'][args[0]]) - - # self.add_data_to_task(task=task, data=data["study"]) - # self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) + # we call the real do_task so we can + # seed workflow validations with settings from studies in PB Mock + # in order to test multiple paths thru the workflow + return self.do_task(task, study_id, workflow_id, args[0]) @timeit def do_task(self, task, study_id, workflow_id, *args, **kwargs): From 37ba46fb8f610bdab931262fec0b342db4a25792 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Thu, 3 Jun 2021 14:30:00 -0400 Subject: [PATCH 20/31] These tests now require PB Mock because of the validate_study_id changes --- tests/study/test_study_details.py | 3 ++- tests/test_verify_end_event.py | 3 ++- tests/workflow/test_workflow_infinite_loop.py | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/study/test_study_details.py b/tests/study/test_study_details.py index 7d142515..0b7538c9 100644 --- a/tests/study/test_study_details.py +++ b/tests/study/test_study_details.py @@ -5,7 +5,7 @@ from SpiffWorkflow.bpmn.PythonScriptEngine import Box from tests.base_test import BaseTest from unittest.mock import patch -from crc import db, session +from crc import app, session from crc.api.common import 
ApiError from crc.models.file import FileDataModel, FileModel from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema, ProtocolBuilderStudySchema @@ -30,6 +30,7 @@ class TestStudyDetailsScript(BaseTest): self.task = self.processor.next_task() def test_study_info_returns_a_box_object_for_all_validations(self): + app.config['PB_ENABLED'] = True for option in StudyInfo.type_options: data = StudyInfo().do_task_validate_only(self.task, self.study.id, self.workflow_model.id, option) if isinstance(data, list): diff --git a/tests/test_verify_end_event.py b/tests/test_verify_end_event.py index 18069f99..0d431b58 100644 --- a/tests/test_verify_end_event.py +++ b/tests/test_verify_end_event.py @@ -1,12 +1,13 @@ from tests.base_test import BaseTest +from crc import app from crc.services.workflow_service import WorkflowService from crc.api.common import ApiError -from jinja2.exceptions import TemplateSyntaxError class TestValidateEndEvent(BaseTest): def test_validate_end_event(self): + app.config['PB_ENABLED'] = True error_string = """Error processing template for task EndEvent_1qvyxg7: expected token 'end of statement block', got '='""" diff --git a/tests/workflow/test_workflow_infinite_loop.py b/tests/workflow/test_workflow_infinite_loop.py index a9846084..cd9fdd4a 100644 --- a/tests/workflow/test_workflow_infinite_loop.py +++ b/tests/workflow/test_workflow_infinite_loop.py @@ -6,6 +6,7 @@ import json class TestWorkflowInfiniteLoop(BaseTest): def test_workflow_infinite_loop(self): + app.config['PB_ENABLED'] = True self.load_example_data() spec_model = self.load_test_spec('infinite_loop') rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers()) From a3a485dd344ce5c7c2b8e7babb0401c676d0b137 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Fri, 4 Jun 2021 11:44:55 -0400 Subject: [PATCH 21/31] Changed do_task_validate_only so it returns a mocked value, instead of True/False. 
True/False caused problems for workflow validations --- crc/scripts/get_study_associate.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/crc/scripts/get_study_associate.py b/crc/scripts/get_study_associate.py index c5af0804..8c0fd5d8 100644 --- a/crc/scripts/get_study_associate.py +++ b/crc/scripts/get_study_associate.py @@ -16,16 +16,14 @@ example : get_study_associate('sbp3ey') => {'uid':'sbp3ey','role':'Unicorn Herde """ def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs): - if len(args)<1: - return False - return True + if len(args) < 1: + raise ApiError('no_user_id_specified', 'A uva uid is the sole argument to this function') + return {'uid': 'sbp3ey', 'role': 'Unicorn Herder', 'send_email': False, 'access': True} def do_task(self, task, study_id, workflow_id, *args, **kwargs): - if len(args)<1: + if len(args) < 1: raise ApiError('no_user_id_specified', 'A uva uid is the sole argument to this function') - if not isinstance(args[0],type('')): + if not isinstance(args[0], str): raise ApiError('argument_should_be_string', 'A uva uid is always a string, please check type') - return StudyService.get_study_associate(study_id=study_id,uid=args[0]) - - + return StudyService.get_study_associate(study_id=study_id, uid=args[0]) From 70651e2a6a5afbe9228c4194eb6611bd231a4fc8 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Fri, 4 Jun 2021 11:45:51 -0400 Subject: [PATCH 22/31] Test and BPMN files for testing --- .../get_study_associate.bpmn | 62 +++++++++++++++++++ .../test_get_study_associate_validation.py | 13 ++++ 2 files changed, 75 insertions(+) create mode 100644 tests/data/get_study_associate/get_study_associate.bpmn create mode 100644 tests/scripts/test_get_study_associate_validation.py diff --git a/tests/data/get_study_associate/get_study_associate.bpmn b/tests/data/get_study_associate/get_study_associate.bpmn new file mode 100644 index 00000000..5e79f176 --- /dev/null +++ 
b/tests/data/get_study_associate/get_study_associate.bpmn @@ -0,0 +1,62 @@ + + + + + Flow_1aycav1 + + + + Flow_1aycav1 + Flow_0wkyatv + pi = study_info('investigators').get('PI', False) +if pi: + try: + pi_assc = get_study_associate(pi.user_id) + except: + pi_assc_chk = False + else: + if pi_assc['role'] == "Primary Investigator": + pi_assc_chk = True + else: + pi_assc_chk = False + + + + pi_assc_chk is {{pi_assc_chk}} + Flow_0wkyatv + Flow_0784fc6 + + + Flow_0784fc6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/scripts/test_get_study_associate_validation.py b/tests/scripts/test_get_study_associate_validation.py new file mode 100644 index 00000000..15ae4804 --- /dev/null +++ b/tests/scripts/test_get_study_associate_validation.py @@ -0,0 +1,13 @@ +from tests.base_test import BaseTest +from crc import app + + +class TestGetStudyAssociateValidation(BaseTest): + + def test_get_study_associate_validation(self): + + self.load_example_data() + workflow = self.create_workflow('get_study_associate') + rv = self.app.get('/v1.0/workflow-specification/%s/validate' % workflow.workflow_spec_id, + headers=self.logged_in_headers()) + self.assertEqual(0, len(rv.json)) From 9dc587184888f76f7d9edae804652c72cc5d0a06 Mon Sep 17 00:00:00 2001 From: mike cullerton Date: Mon, 7 Jun 2021 10:08:44 -0400 Subject: [PATCH 23/31] Added mocked PB data for tests using study_info script. 
--- tests/study/test_study_associate_script.py | 6 +++++- tests/study/test_study_data_store_script.py | 6 +++++- tests/study/test_study_details.py | 16 +++++++++++++++- tests/study/test_study_sponsors_script.py | 7 ++++++- tests/test_verify_end_event.py | 6 +++++- tests/workflow/test_workflow_infinite_loop.py | 6 +++++- 6 files changed, 41 insertions(+), 6 deletions(-) diff --git a/tests/study/test_study_associate_script.py b/tests/study/test_study_associate_script.py index 75faf07f..cfd5e96b 100644 --- a/tests/study/test_study_associate_script.py +++ b/tests/study/test_study_associate_script.py @@ -19,7 +19,11 @@ class TestSudySponsorsScript(BaseTest): test_study_id = 1 - def test_study_sponsors_script_validation(self): + @patch('crc.services.protocol_builder.requests.get') + def test_study_sponsors_script_validation(self, mock_get): + app.config['PB_ENABLED'] = True + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('sponsors.json') flask.g.user = UserModel(uid='dhf8r') self.load_example_data() # study_info script complains if irb_documents.xls is not loaded # during the validate phase I'm going to assume that we will never diff --git a/tests/study/test_study_data_store_script.py b/tests/study/test_study_data_store_script.py index ab24f9a5..4f134945 100644 --- a/tests/study/test_study_data_store_script.py +++ b/tests/study/test_study_data_store_script.py @@ -16,7 +16,11 @@ class TestSudySponsorsScript(BaseTest): test_study_id = 1 - def test_study_sponsors_script_validation(self): + @patch('crc.services.protocol_builder.requests.get') + def test_study_sponsors_script_validation(self, mock_get): + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('sponsors.json') + app.config['PB_ENABLED'] = True flask.g.user = UserModel(uid='dhf8r') self.load_example_data() # study_info script complains if irb_documents.xls is not loaded # during the validate phase I'm going to assume that we 
will never diff --git a/tests/study/test_study_details.py b/tests/study/test_study_details.py index 0b7538c9..4829a5a2 100644 --- a/tests/study/test_study_details.py +++ b/tests/study/test_study_details.py @@ -29,9 +29,23 @@ class TestStudyDetailsScript(BaseTest): self.processor = WorkflowProcessor(self.workflow_model) self.task = self.processor.next_task() - def test_study_info_returns_a_box_object_for_all_validations(self): + @patch('crc.services.protocol_builder.requests.get') + def test_study_info_returns_a_box_object_for_all_validations(self, mock_get): app.config['PB_ENABLED'] = True + mock_get.return_value.ok = True for option in StudyInfo.type_options: + if option == 'info': + mock_get.return_value.text = self.protocol_builder_response('irb_info.json') + elif option == 'investigators': + mock_get.return_value.text = self.protocol_builder_response('investigators.json') + elif option == 'roles': + mock_get.return_value.text = self.protocol_builder_response('investigators.json') + elif option == 'details': + mock_get.return_value.text = self.protocol_builder_response('study_details.json') + elif option == 'documents': + mock_get.return_value.text = self.protocol_builder_response('required_docs.json') + elif option == 'sponsors': + mock_get.return_value.text = self.protocol_builder_response('sponsors.json') data = StudyInfo().do_task_validate_only(self.task, self.study.id, self.workflow_model.id, option) if isinstance(data, list): for x in data: diff --git a/tests/study/test_study_sponsors_script.py b/tests/study/test_study_sponsors_script.py index 3dff8c7c..b47927c4 100644 --- a/tests/study/test_study_sponsors_script.py +++ b/tests/study/test_study_sponsors_script.py @@ -13,7 +13,12 @@ class TestSudySponsorsScript(BaseTest): test_study_id = 1 - def test_study_sponsors_script_validation(self): + @patch('crc.services.protocol_builder.requests.get') + def test_study_sponsors_script_validation(self, mock_get): + mock_get.return_value.ok = True + 
mock_get.return_value.text = self.protocol_builder_response('sponsors.json') + app.config['PB_ENABLED'] = True + self.load_example_data() # study_info script complains if irb_documents.xls is not loaded # during the validate phase I'm going to assume that we will never # have a case where irb_documents.xls is not loaded ?? diff --git a/tests/test_verify_end_event.py b/tests/test_verify_end_event.py index 0d431b58..5954d545 100644 --- a/tests/test_verify_end_event.py +++ b/tests/test_verify_end_event.py @@ -2,12 +2,16 @@ from tests.base_test import BaseTest from crc import app from crc.services.workflow_service import WorkflowService from crc.api.common import ApiError +from unittest.mock import patch class TestValidateEndEvent(BaseTest): - def test_validate_end_event(self): + @patch('crc.services.protocol_builder.requests.get') + def test_validate_end_event(self, mock_get): app.config['PB_ENABLED'] = True + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('study_details.json') error_string = """Error processing template for task EndEvent_1qvyxg7: expected token 'end of statement block', got '='""" diff --git a/tests/workflow/test_workflow_infinite_loop.py b/tests/workflow/test_workflow_infinite_loop.py index cd9fdd4a..506f9eed 100644 --- a/tests/workflow/test_workflow_infinite_loop.py +++ b/tests/workflow/test_workflow_infinite_loop.py @@ -1,12 +1,16 @@ from tests.base_test import BaseTest from crc import app import json +from unittest.mock import patch class TestWorkflowInfiniteLoop(BaseTest): - def test_workflow_infinite_loop(self): + @patch('crc.services.protocol_builder.requests.get') + def test_workflow_infinite_loop(self, mock_get): app.config['PB_ENABLED'] = True + mock_get.return_value.ok = True + mock_get.return_value.text = self.protocol_builder_response('investigators.json') self.load_example_data() spec_model = self.load_test_spec('infinite_loop') rv = self.app.get('/v1.0/workflow-specification/%s/validate' % 
spec_model.id, headers=self.logged_in_headers()) From 59f605c3dfa9268eeab242da29a583c4a7906f09 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 08:03:14 -0400 Subject: [PATCH 24/31] Prefer tasks that share a parent over just the next available task when returning the next_task in the workflow processor. --- crc/api/file.py | 2 +- crc/api/workflow.py | 1 + crc/models/api_models.py | 7 ++- crc/models/file.py | 9 ++- crc/services/file_service.py | 10 ---- crc/services/workflow_service.py | 20 +++++++ .../file_upload_form/file_upload_form.bpmn | 56 ++++++++++++------- tests/files/test_files_api.py | 12 ++++ 8 files changed, 83 insertions(+), 34 deletions(-) diff --git a/crc/api/file.py b/crc/api/file.py index 911b2996..5d03bf2f 100644 --- a/crc/api/file.py +++ b/crc/api/file.py @@ -57,7 +57,7 @@ def get_document_directory(study_id, workflow_id=None): if file.irb_doc_code in doc_dict: doc_code = doc_dict[file.irb_doc_code] else: - doc_code = {'category1': "Unknown", 'category2': None, 'category3': None} + doc_code = {'category1': "Unknown", 'category2': '', 'category3': ''} if workflow_id: expand = file.workflow_id == int(workflow_id) else: diff --git a/crc/api/workflow.py b/crc/api/workflow.py index fcba4d26..4782f833 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -244,6 +244,7 @@ def __update_task(processor, task, data, user): here because we need to do it multiple times when completing all tasks in a multi-instance task""" task.update_data(data) + WorkflowService.post_process_form(task) # some properties may update the data store. 
processor.complete_task(task) processor.do_engine_steps() processor.save() diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 308823fc..90b56136 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -26,7 +26,8 @@ class Task(object): PROP_EXTENSIONS_TITLE = "display_name" - # Autocomplete field + # Field Types + FIELD_TYPE_FILE = "file" FIELD_TYPE_AUTO_COMPLETE = "autocomplete" FIELD_PROP_AUTO_COMPLETE_MAX = "autocomplete_num" # Not used directly, passed in from the front end. @@ -59,6 +60,10 @@ class Task(object): FIELD_PROP_REPLEAT_TITLE = "repeat_title" FIELD_PROP_REPLEAT_BUTTON = "repeat_button_label" + # File specific field properties + FIELD_PROP_DOC_CODE = "doc_code" # to associate a file upload field with a doc code + FIELD_PROP_FILE_DATA = "file_data" # to associate a bit of data with a specific file upload file. + # Additional properties FIELD_PROP_ENUM_TYPE = "enum_type" FIELD_PROP_TEXT_AREA_ROWS = "rows" diff --git a/crc/models/file.py b/crc/models/file.py index 9f3073e3..54ccd325 100644 --- a/crc/models/file.py +++ b/crc/models/file.py @@ -90,7 +90,7 @@ class FileModel(db.Model): # it instead, hide it in the interface. 
is_review = db.Column(db.Boolean, default=False, nullable=True) archived = db.Column(db.Boolean, default=False, nullable=False) - tags = relationship("DataStoreModel", cascade="all,delete", backref="file") + data_stores = relationship("DataStoreModel", cascade="all,delete", backref="file") class File(object): @classmethod @@ -123,6 +123,11 @@ class File(object): else: instance.last_modified = None instance.latest_version = None + + instance.data_store = {} + for ds in model.data_stores: + instance.data_store[ds.key] = ds.value + return instance @@ -142,7 +147,7 @@ class FileSchema(Schema): fields = ["id", "name", "is_status", "is_reference", "content_type", "primary", "primary_process_id", "workflow_spec_id", "workflow_id", "irb_doc_code", "last_modified", "latest_version", "type", "categories", - "description", "category", "description", "download_name", "size"] + "description", "category", "download_name", "size", "data_store"] unknown = INCLUDE type = EnumField(FileType) diff --git a/crc/services/file_service.py b/crc/services/file_service.py index 3d8a1e39..1efd93b9 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -100,16 +100,6 @@ class FileService(object): @staticmethod def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data): - """Create a new file and associate it with the workflow - Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document.""" - if not FileService.is_allowed_document(irb_doc_code): - raise ApiError("invalid_form_field_key", - "When uploading files, the form field id must match a known document in the " - "irb_docunents.xslx reference file. 
This code is not found in that file '%s'" % irb_doc_code) - - """Assure this is unique to the workflow, task, and document code AND the Name - Because we will allow users to upload multiple files for the same form field - in some cases """ file_model = session.query(FileModel)\ .filter(FileModel.workflow_id == workflow_id)\ .filter(FileModel.name == name)\ diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index ee37c688..7ae48ee3 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -22,6 +22,7 @@ from jinja2 import Template from crc import db, app from crc.api.common import ApiError from crc.models.api_models import Task, MultiInstanceType, WorkflowApi +from crc.models.data_store import DataStoreModel from crc.models.file import LookupDataModel, FileModel from crc.models.study import StudyModel from crc.models.task_event import TaskEventModel @@ -242,6 +243,25 @@ class WorkflowService(object): f'The field {field.id} contains an unsupported ' f'property: {name}', task=task) + + @staticmethod + def post_process_form(task): + """Looks through the fields in a submitted form, acting on any properties.""" + for field in task.task_spec.form.fields: + if field.has_property(Task.FIELD_PROP_DOC_CODE) and \ + field.type == Task.FIELD_TYPE_FILE: + file_id = task.data[field.id] + file = db.session.query(FileModel).filter(FileModel.id == file_id).first() + doc_code = WorkflowService.evaluate_property(Task.FIELD_PROP_DOC_CODE, field, task) + file.irb_doc_code = doc_code + db.session.commit() + # Set the doc code on the file. 
+ if field.has_property(Task.FIELD_PROP_FILE_DATA) and \ + field.get_property(Task.FIELD_PROP_FILE_DATA) in task.data: + file_id = task.data[field.get_property(Task.FIELD_PROP_FILE_DATA)] + data_store = DataStoreModel(file_id=file_id, key=field.id, value=task.data[field.id]) + db.session.add(data_store) + @staticmethod def evaluate_property(property_name, field, task): expression = field.get_property(property_name) diff --git a/tests/data/file_upload_form/file_upload_form.bpmn b/tests/data/file_upload_form/file_upload_form.bpmn index f76db85d..0a58a601 100644 --- a/tests/data/file_upload_form/file_upload_form.bpmn +++ b/tests/data/file_upload_form/file_upload_form.bpmn @@ -1,13 +1,14 @@ - + SequenceFlow_0ea9hvd - SequenceFlow_1h0d349 + Flow_0t55959 - + + #### Non-Funded Executed Agreement @@ -15,40 +16,55 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated by OSP contract negotiator. - + + + + + - - + + + + + + + + + + + + + + SequenceFlow_0ea9hvd - SequenceFlow_1h0d349 + Flow_0t55959 - - + + + + + + + + + - + - - + + - - - - - - - - diff --git a/tests/files/test_files_api.py b/tests/files/test_files_api.py index cce55fb5..74129ebb 100644 --- a/tests/files/test_files_api.py +++ b/tests/files/test_files_api.py @@ -8,6 +8,7 @@ from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema from crc.models.workflow import WorkflowSpecModel from crc.services.file_service import FileService from crc.services.workflow_processor import WorkflowProcessor +from crc.models.data_store import DataStoreModel from example_data import ExampleDataLoader @@ -232,6 +233,17 @@ class TestFilesApi(BaseTest): self.assertEqual("text/xml; charset=utf-8", rv.content_type) self.assertTrue(rv.content_length > 1) + def test_get_file_contains_data_store_elements(self): + self.load_example_data() + spec = session.query(WorkflowSpecModel).first() + file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first() + ds = 
DataStoreModel(key="my_key", value="my_value", file_id=file.id); + db.session.add(ds) + rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers()) + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + self.assertEqual("my_value", json_data['data_store']['my_key']) + def test_get_files_for_form_field_returns_only_those_files(self): self.create_reference_document() workflow = self.create_workflow('file_upload_form') From 8d79fe9d94f3328dd60387157745794f3eba1877 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 11:16:10 -0400 Subject: [PATCH 25/31] Fixing failing tests, and now asserting that we only perform the post_process_form if we actually have a form. --- crc/services/workflow_service.py | 1 + tests/files/test_files_api.py | 5 ----- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 7ae48ee3..c49f3b85 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -247,6 +247,7 @@ class WorkflowService(object): @staticmethod def post_process_form(task): """Looks through the fields in a submitted form, acting on any properties.""" + if not hasattr(task.task_spec, 'form'): return for field in task.task_spec.form.fields: if field.has_property(Task.FIELD_PROP_DOC_CODE) and \ field.type == Task.FIELD_TYPE_FILE: diff --git a/tests/files/test_files_api.py b/tests/files/test_files_api.py index 74129ebb..15693e3f 100644 --- a/tests/files/test_files_api.py +++ b/tests/files/test_files_api.py @@ -74,11 +74,6 @@ class TestFilesApi(BaseTest): data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')} correct_name = task.task_spec.form.fields[0].id - rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_id=%i&form_field_key=%s' % - (workflow.study_id, workflow.id, task.id, "not_a_known_file"), data=data, follow_redirects=True, - content_type='multipart/form-data', headers=self.logged_in_headers()) - 
self.assert_failure(rv, error_code="invalid_form_field_key") - data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')} rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_id=%i&form_field_key=%s' % (workflow.study_id, workflow.id, task.id, correct_name), data=data, follow_redirects=True, From 895e7867d28951e9aaeafc02c11992bff0b8ee6f Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 11:51:53 -0400 Subject: [PATCH 26/31] Updating spiffworkflow. --- Pipfile.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pipfile.lock b/Pipfile.lock index 22bb2e12..d19a0724 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -957,7 +957,7 @@ }, "spiffworkflow": { "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "834ca6fdfa4284262f6df5fe00c7b518353511ce" + "ref": "3f340c227579784be94807ce933c2866b36f6022" }, "sqlalchemy": { "hashes": [ From 076d198fc68878ba147880f8d8dba5272b075c42 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 12:18:16 -0400 Subject: [PATCH 27/31] fixing a failing test for study associate. 
--- tests/scripts/__init__.py | 0 tests/scripts/test_get_study_associate_validation.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 tests/scripts/__init__.py diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/scripts/test_get_study_associate_validation.py b/tests/scripts/test_get_study_associate_validation.py index 15ae4804..62a30124 100644 --- a/tests/scripts/test_get_study_associate_validation.py +++ b/tests/scripts/test_get_study_associate_validation.py @@ -5,7 +5,7 @@ from crc import app class TestGetStudyAssociateValidation(BaseTest): def test_get_study_associate_validation(self): - + app.config['PB_ENABLED'] = True self.load_example_data() workflow = self.create_workflow('get_study_associate') rv = self.app.get('/v1.0/workflow-specification/%s/validate' % workflow.workflow_spec_id, From 50ad42d3a84a107788f00226b6f888ed7221563b Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 12:36:47 -0400 Subject: [PATCH 28/31] You have to mock out the protocol builder in tests that rely on it. 
--- tests/scripts/test_get_study_associate_validation.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/scripts/test_get_study_associate_validation.py b/tests/scripts/test_get_study_associate_validation.py index 62a30124..73803c99 100644 --- a/tests/scripts/test_get_study_associate_validation.py +++ b/tests/scripts/test_get_study_associate_validation.py @@ -1,10 +1,16 @@ +import json +from unittest.mock import patch + from tests.base_test import BaseTest from crc import app class TestGetStudyAssociateValidation(BaseTest): + @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') + def test_get_study_associate_validation(self, mock): + response = self.protocol_builder_response('investigators.json') + mock.return_value = json.loads(response) - def test_get_study_associate_validation(self): app.config['PB_ENABLED'] = True self.load_example_data() workflow = self.create_workflow('get_study_associate') From 286803d10bb545858946c0e32a3270142499fd06 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 14:24:59 -0400 Subject: [PATCH 29/31] fixing a big stupid bug I created when merging all the code and making changes to the files. --- crc/api/workflow.py | 2 +- tests/test_user_roles.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 0466ff5d..bf180fef 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -216,7 +216,7 @@ def update_task(workflow_id, task_id, body, terminate_loop=None, update_all=Fals raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. 
" "Consider calling a token reset to make this task Ready.") - if terminate_loop: + if terminate_loop and spiff_task.is_looping(): spiff_task.terminate_loop() # Extract the details specific to the form submitted diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 7dadbefd..27ec4e32 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -269,3 +269,12 @@ class TestTasksApi(BaseTest): self.assertEqual(0, len(self.get_assignment_task_events(submitter.uid))) self.assertEqual(0, len(self.get_assignment_task_events(supervisor.uid))) + + def test_no_error_when_calling_end_loop_on_non_looping_task(self): + + workflow = self.create_workflow('hello_world') + workflow_api = self.get_workflow_api(workflow) + + data = workflow_api.next_task.data + data['name'] = "john" + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, terminate_loop=True) From 232b92b39a69b5b64cf2abeecadf3394db4e1c05 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 8 Jun 2021 14:32:47 -0400 Subject: [PATCH 30/31] upgrade Spiffworkflow. --- Pipfile.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pipfile.lock b/Pipfile.lock index d19a0724..76f7471c 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -957,7 +957,7 @@ }, "spiffworkflow": { "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "3f340c227579784be94807ce933c2866b36f6022" + "ref": "ce939de158246e9d10e7e154c92230669354bc64" }, "sqlalchemy": { "hashes": [ From fce9166c4ab4870aa7bb26daae0d8c4e219bdb7f Mon Sep 17 00:00:00 2001 From: Dan Date: Wed, 9 Jun 2021 09:51:30 -0400 Subject: [PATCH 31/31] Do not require users to be admins for viewing the document directory tab. 
--- crc/api.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index d91e5b0e..f02ef86e 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -83,8 +83,6 @@ paths: type : integer get: operationId: crc.api.file.get_document_directory - security: - - auth_admin: ['secret'] summary: Returns a directory of all files for study in a nested structure tags: - Document Categories