From a10ef9066d6a7f8dc638fa71bca9102c8ad15822 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 15 Jul 2020 07:00:25 -0600 Subject: [PATCH 01/60] Github integration with admin --- Pipfile | 1 + Pipfile.lock | 23 ++++++++++++++++++- crc/api/admin.py | 58 ++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 77 insertions(+), 5 deletions(-) diff --git a/Pipfile b/Pipfile index 0e5e21dd..67ee8473 100644 --- a/Pipfile +++ b/Pipfile @@ -44,6 +44,7 @@ webtest = "*" werkzeug = "*" xlrd = "*" xlsxwriter = "*" +pygithub = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 909cf764..42cee26b 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "97a15c4ade88db2b384d52436633889a4d9b0bdcaeea86b8a679ebda6f73fb59" + "sha256": "a4a720761a082a0ca31d2be17c2ea137e1d487ba2de538db334c8dc396770665" }, "pipfile-spec": 6, "requires": { @@ -235,6 +235,13 @@ "index": "pypi", "version": "==5.2" }, + "deprecated": { + "hashes": [ + "sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74", + "sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218" + ], + "version": "==1.2.10" + }, "docutils": { "hashes": [ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", @@ -669,6 +676,14 @@ ], "version": "==2.20" }, + "pygithub": { + "hashes": [ + "sha256:8375a058ec651cc0774244a3bc7395cf93617298735934cdd59e5bcd9a1df96e", + "sha256:d2d17d1e3f4474e070353f201164685a95b5a92f5ee0897442504e399c7bc249" + ], + "index": "pypi", + "version": "==1.51" + }, "pygments": { "hashes": [ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", @@ -988,6 +1003,12 @@ "index": "pypi", "version": "==1.0.1" }, + "wrapt": { + "hashes": [ + "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7" + ], + "version": "==1.12.1" + }, "wtforms": { "hashes": [ "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b", diff --git 
a/crc/api/admin.py b/crc/api/admin.py index 4e96fcd8..3943626f 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -3,16 +3,18 @@ import json from flask import url_for from flask_admin import Admin +from flask_admin.actions import action from flask_admin.contrib import sqla from flask_admin.contrib.sqla import ModelView +from github import Github, UnknownObjectException from werkzeug.utils import redirect from jinja2 import Markup from crc import db, app from crc.api.user import verify_token, verify_token_admin from crc.models.approval import ApprovalModel -from crc.models.file import FileModel -from crc.models.task_event import TaskEventModel +from crc.models.file import FileModel, FileDataModel +from crc.models.stats import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel from crc.models.workflow import WorkflowModel @@ -34,26 +36,73 @@ class AdminModelView(sqla.ModelView): # redirect to login page if user doesn't have access return redirect(url_for('home')) + class UserView(AdminModelView): column_filters = ['uid'] + class StudyView(AdminModelView): column_filters = ['id', 'primary_investigator_id'] column_searchable_list = ['title'] + class ApprovalView(AdminModelView): column_filters = ['study_id', 'approver_uid'] + class WorkflowView(AdminModelView): column_filters = ['study_id', 'id'] + class FileView(AdminModelView): - column_filters = ['workflow_id'] + column_filters = ['workflow_id', 'type'] + + @action('publish', 'Publish', 'Are you sure you want to publish this file(s)?') + def action_publish(self, ids): + # TODO: Move token to settings and replace docs repo + _github = Github('d082288d6192b45b2f8cefcefc1a0a2806554c9e') + repo = _github.get_user().get_repo('crispy-fiesta') + + for file_id in ids: + file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first() + try: + repo_file = repo.get_contents(file_data_model.file_model.name) + except UnknownObjectException: + repo.create_file( + 
path=file_data_model.file_model.name, + message=f'Creating {file_data_model.file_model.name}', + content=file_data_model.data + ) + else: + updated = repo.update_file( + path=repo_file.path, + message=f'Updating {file_data_model.file_model.name}', + content=file_data_model.data, + sha=repo_file.sha + ) + + @action('update', 'Update', 'Are you sure you want to update this file(s)?') + def action_update(self, ids): + _github = Github('d082288d6192b45b2f8cefcefc1a0a2806554c9e') + repo = _github.get_user().get_repo('crispy-fiesta') + + for file_id in ids: + file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first() + try: + repo_file = repo.get_contents(file_data_model.file_model.name) + except UnknownObjectException: + # Add message indicating file is not in the repo + pass + else: + file_data_model.data = repo_file.content + db.session.add(file_data_model) + db.session.commit() + def json_formatter(view, context, model, name): value = getattr(model, name) json_value = json.dumps(value, ensure_ascii=False, indent=2) - return Markup('
{}
'.format(json_value)) + return Markup(f'
{json_value}
') class TaskEventView(AdminModelView): column_filters = ['workflow_id', 'action'] @@ -62,6 +111,7 @@ class TaskEventView(AdminModelView): 'form_data': json_formatter, } + admin = Admin(app) admin.add_view(StudyView(StudyModel, db.session)) From 419d06c95b3c0c7fc88bfb611d848c3a5648f65b Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 09:49:47 -0600 Subject: [PATCH 02/60] Updating file by latest version --- config/default.py | 3 +++ crc/api/admin.py | 20 ++++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/config/default.py b/config/default.py index 5c8f8c51..357ad8bb 100644 --- a/config/default.py +++ b/config/default.py @@ -45,6 +45,9 @@ PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No trailing slash or http:// LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1)) +# Github token +GH_TOKEN = '6cbd5f3a1764a8d15b27d66f64ac80ae13b393a9' + # Email configuration DEFAULT_SENDER = 'askresearch@virginia.edu' FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com'] diff --git a/crc/api/admin.py b/crc/api/admin.py index 3943626f..4edce46e 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -7,6 +7,7 @@ from flask_admin.actions import action from flask_admin.contrib import sqla from flask_admin.contrib.sqla import ModelView from github import Github, UnknownObjectException +from sqlalchemy import desc from werkzeug.utils import redirect from jinja2 import Markup @@ -60,7 +61,8 @@ class FileView(AdminModelView): @action('publish', 'Publish', 'Are you sure you want to publish this file(s)?') def action_publish(self, ids): # TODO: Move token to settings and replace docs repo - _github = Github('d082288d6192b45b2f8cefcefc1a0a2806554c9e') + gh_token = app.config['GH_TOKEN'] + _github = Github(gh_token) repo = _github.get_user().get_repo('crispy-fiesta') for file_id in ids: @@ 
-83,20 +85,26 @@ class FileView(AdminModelView): @action('update', 'Update', 'Are you sure you want to update this file(s)?') def action_update(self, ids): - _github = Github('d082288d6192b45b2f8cefcefc1a0a2806554c9e') + gh_token = app.config['GH_TOKEN'] + _github = Github(gh_token) repo = _github.get_user().get_repo('crispy-fiesta') for file_id in ids: - file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first() + file_data_model = FileDataModel.query.filter_by( + file_model_id=file_id + ).order_by( + desc(FileDataModel.version) + ).first() try: repo_file = repo.get_contents(file_data_model.file_model.name) except UnknownObjectException: # Add message indicating file is not in the repo pass else: - file_data_model.data = repo_file.content - db.session.add(file_data_model) - db.session.commit() + import pdb; pdb.set_trace() + file_data_model.data = repo_file.decoded_content + self.session.add(file_data_model) + self.session.commit() def json_formatter(view, context, model, name): From f4eb592b87e0cedcb6520102ab217d0afb6b4f6c Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 12:10:55 -0600 Subject: [PATCH 03/60] Extracting token to env var --- config/default.py | 2 +- crc/api/admin.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/config/default.py b/config/default.py index 357ad8bb..f1afc810 100644 --- a/config/default.py +++ b/config/default.py @@ -46,7 +46,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1)) # Github token -GH_TOKEN = '6cbd5f3a1764a8d15b27d66f64ac80ae13b393a9' +GITHUB_TOKEN = environ.get('GITHUB_TOKEN', None) # Email configuration DEFAULT_SENDER = 'askresearch@virginia.edu' diff --git a/crc/api/admin.py b/crc/api/admin.py index 4edce46e..aa10ab24 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -60,8 +60,8 @@ class FileView(AdminModelView): @action('publish', 'Publish', 'Are you 
sure you want to publish this file(s)?') def action_publish(self, ids): - # TODO: Move token to settings and replace docs repo - gh_token = app.config['GH_TOKEN'] + # TODO: Replace docs repo + gh_token = app.config['GITHUB_TOKEN'] _github = Github(gh_token) repo = _github.get_user().get_repo('crispy-fiesta') @@ -85,7 +85,7 @@ class FileView(AdminModelView): @action('update', 'Update', 'Are you sure you want to update this file(s)?') def action_update(self, ids): - gh_token = app.config['GH_TOKEN'] + gh_token = app.config['GITHUB_TOKEN'] _github = Github(gh_token) repo = _github.get_user().get_repo('crispy-fiesta') @@ -101,7 +101,6 @@ class FileView(AdminModelView): # Add message indicating file is not in the repo pass else: - import pdb; pdb.set_trace() file_data_model.data = repo_file.decoded_content self.session.add(file_data_model) self.session.commit() From 331a6c0aebc045aaaa5a773d8809aabbd0a21296 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 12:52:09 -0600 Subject: [PATCH 04/60] Fixing tests --- crc/api/admin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/api/admin.py b/crc/api/admin.py index aa10ab24..3e1f560c 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -15,7 +15,7 @@ from crc import db, app from crc.api.user import verify_token, verify_token_admin from crc.models.approval import ApprovalModel from crc.models.file import FileModel, FileDataModel -from crc.models.stats import TaskEventModel +from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel from crc.models.workflow import WorkflowModel From d34d08b12106b789fc692259ad05225552ba2474 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 13:33:42 -0600 Subject: [PATCH 05/60] Trying to force re-run to clear SonarCloud --- crc/api/admin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/api/admin.py b/crc/api/admin.py index 3e1f560c..2af990a0 
100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -98,7 +98,7 @@ class FileView(AdminModelView): try: repo_file = repo.get_contents(file_data_model.file_model.name) except UnknownObjectException: - # Add message indicating file is not in the repo + # TODO: Add message indicating file is not in the repo pass else: file_data_model.data = repo_file.decoded_content From 73400ed6c73d5516631d875a8a49f680be69f566 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 16:59:25 -0600 Subject: [PATCH 06/60] Adding proper tests for files publishing --- crc/api/admin.py | 44 +-------------- crc/services/file_service.py | 49 ++++++++++++++++ tests/files/test_file_service.py | 95 ++++++++++++++++++++++++++++++++ tests/test_ldap_service.py | 2 +- 4 files changed, 147 insertions(+), 43 deletions(-) diff --git a/crc/api/admin.py b/crc/api/admin.py index 2af990a0..7e85eace 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -6,7 +6,6 @@ from flask_admin import Admin from flask_admin.actions import action from flask_admin.contrib import sqla from flask_admin.contrib.sqla import ModelView -from github import Github, UnknownObjectException from sqlalchemy import desc from werkzeug.utils import redirect from jinja2 import Markup @@ -60,50 +59,11 @@ class FileView(AdminModelView): @action('publish', 'Publish', 'Are you sure you want to publish this file(s)?') def action_publish(self, ids): - # TODO: Replace docs repo - gh_token = app.config['GITHUB_TOKEN'] - _github = Github(gh_token) - repo = _github.get_user().get_repo('crispy-fiesta') - - for file_id in ids: - file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first() - try: - repo_file = repo.get_contents(file_data_model.file_model.name) - except UnknownObjectException: - repo.create_file( - path=file_data_model.file_model.name, - message=f'Creating {file_data_model.file_model.name}', - content=file_data_model.data - ) - else: - updated = repo.update_file( - path=repo_file.path, - 
message=f'Updating {file_data_model.file_model.name}', - content=file_data_model.data, - sha=repo_file.sha - ) + FileService.publish_to_github(ids) @action('update', 'Update', 'Are you sure you want to update this file(s)?') def action_update(self, ids): - gh_token = app.config['GITHUB_TOKEN'] - _github = Github(gh_token) - repo = _github.get_user().get_repo('crispy-fiesta') - - for file_id in ids: - file_data_model = FileDataModel.query.filter_by( - file_model_id=file_id - ).order_by( - desc(FileDataModel.version) - ).first() - try: - repo_file = repo.get_contents(file_data_model.file_model.name) - except UnknownObjectException: - # TODO: Add message indicating file is not in the repo - pass - else: - file_data_model.data = repo_file.decoded_content - self.session.add(file_data_model) - self.session.commit() + FileService.update_from_github(ids) def json_formatter(view, context, model, name): diff --git a/crc/services/file_service.py b/crc/services/file_service.py index 6ba2e1ad..90b30e42 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -2,6 +2,7 @@ import hashlib import json import os from datetime import datetime +from github import Github, UnknownObjectException from uuid import UUID from lxml import etree @@ -332,3 +333,51 @@ class FileService(object): file_model.archived = True session.commit() app.logger.info("Failed to delete file, so archiving it instead. 
%i, due to %s" % (file_id, str(ie))) + + @staticmethod + def update_from_github(file_ids): + gh_token = app.config['GITHUB_TOKEN'] + _github = Github(gh_token) + repo = _github.get_user().get_repo('crispy-fiesta') + + for file_id in file_ids: + file_data_model = FileDataModel.query.filter_by( + file_model_id=file_id + ).order_by( + desc(FileDataModel.version) + ).first() + try: + repo_file = repo.get_contents(file_data_model.file_model.name) + except UnknownObjectException: + # TODO: Add message indicating file is not in the repo + pass + else: + file_data_model.data = repo_file.decoded_content + session.add(file_data_model) + session.commit() + + @staticmethod + def publish_to_github(file_ids): + gh_token = app.config['GITHUB_TOKEN'] + _github = Github(gh_token) + repo = _github.get_user().get_repo('crispy-fiesta') + + for file_id in file_ids: + file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first() + try: + repo_file = repo.get_contents(file_data_model.file_model.name) + except UnknownObjectException: + repo.create_file( + path=file_data_model.file_model.name, + message=f'Creating {file_data_model.file_model.name}', + content=file_data_model.data + ) + return {'created': True} + else: + updated = repo.update_file( + path=repo_file.path, + message=f'Updating {file_data_model.file_model.name}', + content=file_data_model.data, + sha=repo_file.sha + ) + return {'updated': True} diff --git a/tests/files/test_file_service.py b/tests/files/test_file_service.py index dd95e458..e32f6bfb 100644 --- a/tests/files/test_file_service.py +++ b/tests/files/test_file_service.py @@ -1,9 +1,45 @@ +from github import UnknownObjectException +from sqlalchemy import desc from tests.base_test import BaseTest +from unittest.mock import patch, Mock from crc import db +from crc.models.file import FileDataModel from crc.services.file_service import FileService from crc.services.workflow_processor import WorkflowProcessor + +class FakeGithubCreates(Mock): + def 
get_user(var): + class FakeUser(Mock): + def get_repo(var, name): + class FakeRepo(Mock): + def get_contents(var, filename): + raise UnknownObjectException(status='Failure', data='Failed data') + def update_file(var, path, message, content, sha): + pass + return FakeRepo() + return FakeUser() + + +class FakeGithub(Mock): + def get_user(var): + class FakeUser(Mock): + def get_repo(var, name): + class FakeRepo(Mock): + def get_contents(var, filename): + fake_file = Mock() + fake_file.decoded_content = b'Some bytes' + fake_file.path = '/el/path/' + fake_file.data = 'Serious data' + fake_file.sha = 'Sha' + return fake_file + def update_file(var, path, message, content, sha): + pass + return FakeRepo() + return FakeUser() + + class TestFileService(BaseTest): """Largely tested via the test_file_api, and time is tight, but adding new tests here.""" @@ -103,3 +139,62 @@ class TestFileService(BaseTest): binary_data=b'5678') file_models = FileService.get_workflow_files(workflow_id=workflow.id) self.assertEqual(2, len(file_models)) + + @patch('crc.services.file_service.Github') + def test_update_from_github(self, mock_github): + mock_github.return_value = FakeGithub() + + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('file_upload_form') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs. 
+ file_model = FileService.add_workflow_file(workflow_id=workflow.id, + irb_doc_code=irb_code, + name="anything.png", content_type="text", + binary_data=b'1234') + FileService.update_from_github([file_model.id]) + + file_model_data = FileDataModel.query.filter_by( + file_model_id=file_model.id + ).order_by( + desc(FileDataModel.version) + ).first() + self.assertEqual(file_model_data.data, b'Some bytes') + + @patch('crc.services.file_service.Github') + def test_publish_to_github_creates(self, mock_github): + mock_github.return_value = FakeGithubCreates() + + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('file_upload_form') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs. + file_model = FileService.add_workflow_file(workflow_id=workflow.id, + irb_doc_code=irb_code, + name="anything.png", content_type="text", + binary_data=b'1234') + result = FileService.publish_to_github([file_model.id]) + + self.assertEqual(result['created'], True) + + @patch('crc.services.file_service.Github') + def test_publish_to_github_updates(self, mock_github): + mock_github.return_value = FakeGithub() + + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('file_upload_form') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs. 
+ file_model = FileService.add_workflow_file(workflow_id=workflow.id, + irb_doc_code=irb_code, + name="anything.png", content_type="text", + binary_data=b'1234') + result = FileService.publish_to_github([file_model.id]) + + self.assertEqual(result['updated'], True) diff --git a/tests/test_ldap_service.py b/tests/test_ldap_service.py index a6c4b364..d1e0ee21 100644 --- a/tests/test_ldap_service.py +++ b/tests/test_ldap_service.py @@ -30,4 +30,4 @@ class TestLdapService(BaseTest): user_info = LdapService.user_info("nosuch") self.assertFalse(True, "An API error should be raised.") except ApiError as ae: - self.assertEqual("missing_ldap_record", ae.code) \ No newline at end of file + self.assertEqual("missing_ldap_record", ae.code) From 59d04feb2340691931807e0b9ec989947186c271 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 17 Jul 2020 17:08:44 -0600 Subject: [PATCH 07/60] Adding missing import --- crc/api/admin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/crc/api/admin.py b/crc/api/admin.py index 7e85eace..74ea3c37 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -18,6 +18,7 @@ from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel from crc.models.workflow import WorkflowModel +from crc.services.file_service import FileService class AdminModelView(sqla.ModelView): From a39cacdf00818ad87bc7e2d48ade9c33db0242a4 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Sun, 19 Jul 2020 21:53:18 -0600 Subject: [PATCH 08/60] Ldap lookup script --- crc/scripts/ldap_lookup.py | 78 +++++++++++++++++++++++++++ crc/scripts/request_approval.py | 2 +- tests/ldap/test_ldap_lookup_script.py | 51 ++++++++++++++++++ 3 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 crc/scripts/ldap_lookup.py create mode 100644 tests/ldap/test_ldap_lookup_script.py diff --git a/crc/scripts/ldap_lookup.py b/crc/scripts/ldap_lookup.py new file mode 100644 index 00000000..62bd287a --- /dev/null +++ 
b/crc/scripts/ldap_lookup.py @@ -0,0 +1,78 @@ +import copy + +from crc import app +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService + + +USER_DETAILS = { + "PIComputingID": { + "value": "", + "data": { + }, + "label": "invalid uid" + } +} + + +class LdapLookup(Script): + """This Script allows to be introduced as part of a workflow and called from there, taking + a UID as input and looking it up through LDAP to return the person's details """ + + def get_description(self): + return """ +Attempts to create a dictionary with person details, using the +provided argument (a UID) and look it up through LDAP. + +Example: +LdapLookup PIComputingID +""" + + def do_task_validate_only(self, task, *args, **kwargs): + self.get_user_info(task, args) + + def do_task(self, task, *args, **kwargs): + args = [arg for arg in args if type(arg) == str] + user_info = self.get_user_info(task, args) + + user_details = copy.deepcopy(USER_DETAILS) + user_details['PIComputingID']['value'] = user_info['uid'] + if len(user_info.keys()) > 1: + user_details['PIComputingID']['label'] = user_info.pop('label') + else: + user_info.pop('uid') + user_details['PIComputingID']['data'] = user_info + return user_details + + def get_user_info(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Ldap lookup script requires one argument. 
The " + "UID for the person we want to look up") + + arg = args.pop() # Extracting only one value for now + uid = task.workflow.script_engine.evaluate_expression(task, arg) + if not isinstance(uid, str): + raise ApiError(code="invalid_argument", + message="Ldap lookup script requires one 1 UID argument, of type string.") + user_info_dict = {} + try: + user_info = LdapService.user_info(uid) + user_info_dict = { + "display_name": user_info.display_name, + "given_name": user_info.given_name, + "email_address": user_info.email_address, + "telephone_number": user_info.telephone_number, + "title": user_info.title, + "department": user_info.department, + "affiliation": user_info.affiliation, + "sponsor_type": user_info.sponsor_type, + "uid": user_info.uid, + "label": user_info.proper_name() + } + except: + user_info_dict['uid'] = uid + app.logger.error(f'Ldap lookup failed for UID {uid}') + + return user_info_dict diff --git a/crc/scripts/request_approval.py b/crc/scripts/request_approval.py index 0a4c76ff..a82e17a0 100644 --- a/crc/scripts/request_approval.py +++ b/crc/scripts/request_approval.py @@ -11,7 +11,7 @@ class RequestApproval(Script): return """ Creates an approval request on this workflow, by the given approver_uid(s)," Takes multiple arguments, which should point to data located in current task -or be quoted strings. The order is important. Approvals will be processed +or be quoted strings. The order is important. Approvals will be processed in this order. 
Example: diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py new file mode 100644 index 00000000..7cb7ff55 --- /dev/null +++ b/tests/ldap/test_ldap_lookup_script.py @@ -0,0 +1,51 @@ +from tests.base_test import BaseTest + +from crc.services.workflow_processor import WorkflowProcessor +from crc.scripts.ldap_lookup import LdapLookup +from crc import db, mail + + +class TestLdapLookupScript(BaseTest): + + def test_get_existing_user_details(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + + task.data = { + 'PIComputingID': 'dhf8r' + } + + script = LdapLookup() + user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") + + self.assertEqual(user_details['PIComputingID']['label'], 'Dan Funk - (dhf8r)') + self.assertEqual(user_details['PIComputingID']['value'], 'dhf8r') + self.assertEqual(user_details['PIComputingID']['data']['display_name'], 'Dan Funk') + self.assertEqual(user_details['PIComputingID']['data']['given_name'], 'Dan') + self.assertEqual(user_details['PIComputingID']['data']['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(user_details['PIComputingID']['data']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(user_details['PIComputingID']['data']['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(user_details['PIComputingID']['data']['department'], 'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(user_details['PIComputingID']['data']['affiliation'], 'faculty') + self.assertEqual(user_details['PIComputingID']['data']['sponsor_type'], 'Staff') + self.assertEqual(user_details['PIComputingID']['data']['uid'], 'dhf8r') + + def test_get_invalid_user_details(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + processor = WorkflowProcessor(workflow) + 
task = processor.next_task() + + task.data = { + 'PIComputingID': 'rec3z' + } + + script = LdapLookup() + user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") + self.assertEqual(user_details['PIComputingID']['label'], 'invalid uid') + self.assertEqual(user_details['PIComputingID']['value'], 'rec3z') + self.assertEqual(user_details['PIComputingID']['data'], {}) From 3d9eeab502e2779999f2c68834d822fe73166d6f Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 21 Jul 2020 15:18:08 -0400 Subject: [PATCH 09/60] Add a few more details to the workflow metadata model. --- crc/models/study.py | 7 +++++-- crc/models/task_event.py | 1 - example_data.py | 1 - tests/base_test.py | 19 ++++++++++++++----- tests/study/test_study_service.py | 9 ++++----- tests/test_user_roles.py | 3 ++- 6 files changed, 25 insertions(+), 15 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index 47d4eb8f..7bb2db33 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -40,7 +40,7 @@ class StudyModel(db.Model): class WorkflowMetadata(object): - def __init__(self, id, name, display_name, description, spec_version, category_id, state: WorkflowState, status: WorkflowStatus, + def __init__(self, id, name, display_name, description, spec_version, category_id, category_display_name, state: WorkflowState, status: WorkflowStatus, total_tasks, completed_tasks, display_order): self.id = id self.name = name @@ -48,6 +48,7 @@ class WorkflowMetadata(object): self.description = description self.spec_version = spec_version self.category_id = category_id + self.category_display_name = category_display_name self.state = state self.status = status self.total_tasks = total_tasks @@ -64,6 +65,7 @@ class WorkflowMetadata(object): description=workflow.workflow_spec.description, spec_version=workflow.spec_version(), category_id=workflow.workflow_spec.category_id, + category_display_name=workflow.workflow_spec.category.display_name, state=WorkflowState.optional, 
status=workflow.status, total_tasks=workflow.total_tasks, @@ -79,7 +81,8 @@ class WorkflowMetadataSchema(ma.Schema): class Meta: model = WorkflowMetadata additional = ["id", "name", "display_name", "description", - "total_tasks", "completed_tasks", "display_order"] + "total_tasks", "completed_tasks", "display_order", + "category_id", "category_display_name"] unknown = INCLUDE diff --git a/crc/models/task_event.py b/crc/models/task_event.py index a6cb1a2d..e3914468 100644 --- a/crc/models/task_event.py +++ b/crc/models/task_event.py @@ -56,7 +56,6 @@ class TaskEventSchema(ma.Schema): study = fields.Nested(StudySchema, dump_only=True) workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True) - class Meta: model = TaskEvent additional = ["id", "user_uid", "action", "task_id", "task_title", diff --git a/example_data.py b/example_data.py index efdfe3b3..8b9b0c27 100644 --- a/example_data.py +++ b/example_data.py @@ -251,7 +251,6 @@ class ExampleDataLoader: master_spec=False, from_tests=True) - def create_spec(self, id, name, display_name="", description="", filepath=None, master_spec=False, category_id=None, display_order=None, from_tests=False): """Assumes that a directory exists in static/bpmn with the same name as the given id. 
diff --git a/tests/base_test.py b/tests/base_test.py index 6ea1966d..3f0b2405 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -19,7 +19,7 @@ from crc.models.protocol_builder import ProtocolBuilderStatus from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel +from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel from crc.services.file_service import FileService from crc.services.study_service import StudyService from crc.services.workflow_service import WorkflowService @@ -164,14 +164,21 @@ class BaseTest(unittest.TestCase): self.assertGreater(len(file_data), 0) @staticmethod - def load_test_spec(dir_name, master_spec=False, category_id=None): + def load_test_spec(dir_name, display_name=None, master_spec=False, category_id=None): """Loads a spec into the database based on a directory in /tests/data""" + if category_id is None: + category = WorkflowSpecCategoryModel(name="test", display_name="Test Workflows", display_order=0) + db.session.add(category) + db.session.commit() + category_id = category.id if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0: return session.query(WorkflowSpecModel).filter_by(id=dir_name).first() filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*") + if display_name is None: + display_name = dir_name return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath, master_spec=master_spec, - category_id=category_id) + display_name=display_name, category_id=category_id) @staticmethod def protocol_builder_response(file_name): @@ -263,11 +270,13 @@ class BaseTest(unittest.TestCase): return full_study - def create_workflow(self, workflow_name, study=None, category_id=None, as_user="dhf8r"): + def create_workflow(self, workflow_name, 
display_name=None, study=None, category_id=None, as_user="dhf8r"): db.session.flush() spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first() if spec is None: - spec = self.load_test_spec(workflow_name, category_id=category_id) + if display_name is None: + display_name = workflow_name + spec = self.load_test_spec(workflow_name, display_name, category_id=category_id) if study is None: study = self.create_study(uid=as_user) workflow_model = StudyService._create_workflow_model(study, spec) diff --git a/tests/study/test_study_service.py b/tests/study/test_study_service.py index b436835f..f1e43c8a 100644 --- a/tests/study/test_study_service.py +++ b/tests/study/test_study_service.py @@ -27,7 +27,10 @@ class TestStudyService(BaseTest): # Assure some basic models are in place, This is a damn mess. Our database models need an overhaul to make # this easier - better relationship modeling is now critical. - self.load_test_spec("top_level_workflow", master_spec=True) + cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0) + db.session.add(cat) + db.session.commit() + self.load_test_spec("top_level_workflow", master_spec=True, category_id=cat.id) user = db.session.query(UserModel).filter(UserModel.uid == "dhf8r").first() if not user: user = UserModel(uid="dhf8r", email_address="whatever@stuff.com", display_name="Stayathome Smellalots") @@ -39,11 +42,7 @@ class TestStudyService(BaseTest): study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.ACTIVE, user_uid=user.uid) db.session.add(study) - cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0) - db.session.add(cat) - db.session.commit() - self.assertIsNotNone(cat.id) self.load_test_spec("random_fact", category_id=cat.id) self.assertIsNotNone(study.id) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 6104641c..cc7ff613 100644 --- a/tests/test_user_roles.py 
+++ b/tests/test_user_roles.py @@ -68,7 +68,7 @@ class TestTasksApi(BaseTest): def test_get_outstanding_tasks_awaiting_current_user(self): submitter = self.create_user(uid='lje5u') supervisor = self.create_user(uid='lb3dp') - workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow = self.create_workflow('roles', display_name="Roles", as_user=submitter.uid) workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) # User lje5u can complete the first task, and set her supervisor @@ -94,6 +94,7 @@ class TestTasksApi(BaseTest): self.assertEquals(1, len(tasks)) self.assertEquals(workflow.id, tasks[0]['workflow']['id']) self.assertEquals(workflow.study.id, tasks[0]['study']['id']) + self.assertEquals("Test Workflows", tasks[0]['workflow']['category_display_name']) # Assure we can say something sensible like: # You have a task called "Approval" to be completed in the "Supervisor Approval" workflow From 313770d5389d37c5988f76d1a0edb14d7b8baa2a Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Sun, 19 Jul 2020 21:53:18 -0600 Subject: [PATCH 10/60] Ldap lookup script --- crc/scripts/ldap_lookup.py | 78 +++++++++++++++++++++++++++ crc/scripts/request_approval.py | 2 +- tests/ldap/test_ldap_lookup_script.py | 51 ++++++++++++++++++ 3 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 crc/scripts/ldap_lookup.py create mode 100644 tests/ldap/test_ldap_lookup_script.py diff --git a/crc/scripts/ldap_lookup.py b/crc/scripts/ldap_lookup.py new file mode 100644 index 00000000..62bd287a --- /dev/null +++ b/crc/scripts/ldap_lookup.py @@ -0,0 +1,78 @@ +import copy + +from crc import app +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService + + +USER_DETAILS = { + "PIComputingID": { + "value": "", + "data": { + }, + "label": "invalid uid" + } +} + + +class LdapLookup(Script): + """This Script allows to be introduced as part of a workflow and called from there, taking + 
a UID as input and looking it up through LDAP to return the person's details """ + + def get_description(self): + return """ +Attempts to create a dictionary with person details, using the +provided argument (a UID) and look it up through LDAP. + +Example: +LdapLookup PIComputingID +""" + + def do_task_validate_only(self, task, *args, **kwargs): + self.get_user_info(task, args) + + def do_task(self, task, *args, **kwargs): + args = [arg for arg in args if type(arg) == str] + user_info = self.get_user_info(task, args) + + user_details = copy.deepcopy(USER_DETAILS) + user_details['PIComputingID']['value'] = user_info['uid'] + if len(user_info.keys()) > 1: + user_details['PIComputingID']['label'] = user_info.pop('label') + else: + user_info.pop('uid') + user_details['PIComputingID']['data'] = user_info + return user_details + + def get_user_info(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Ldap lookup script requires one argument. The " + "UID for the person we want to look up") + + arg = args.pop() # Extracting only one value for now + uid = task.workflow.script_engine.evaluate_expression(task, arg) + if not isinstance(uid, str): + raise ApiError(code="invalid_argument", + message="Ldap lookup script requires one 1 UID argument, of type string.") + user_info_dict = {} + try: + user_info = LdapService.user_info(uid) + user_info_dict = { + "display_name": user_info.display_name, + "given_name": user_info.given_name, + "email_address": user_info.email_address, + "telephone_number": user_info.telephone_number, + "title": user_info.title, + "department": user_info.department, + "affiliation": user_info.affiliation, + "sponsor_type": user_info.sponsor_type, + "uid": user_info.uid, + "label": user_info.proper_name() + } + except: + user_info_dict['uid'] = uid + app.logger.error(f'Ldap lookup failed for UID {uid}') + + return user_info_dict diff --git a/crc/scripts/request_approval.py b/crc/scripts/request_approval.py index 
0a4c76ff..a82e17a0 100644 --- a/crc/scripts/request_approval.py +++ b/crc/scripts/request_approval.py @@ -11,7 +11,7 @@ class RequestApproval(Script): return """ Creates an approval request on this workflow, by the given approver_uid(s)," Takes multiple arguments, which should point to data located in current task -or be quoted strings. The order is important. Approvals will be processed +or be quoted strings. The order is important. Approvals will be processed in this order. Example: diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py new file mode 100644 index 00000000..7cb7ff55 --- /dev/null +++ b/tests/ldap/test_ldap_lookup_script.py @@ -0,0 +1,51 @@ +from tests.base_test import BaseTest + +from crc.services.workflow_processor import WorkflowProcessor +from crc.scripts.ldap_lookup import LdapLookup +from crc import db, mail + + +class TestLdapLookupScript(BaseTest): + + def test_get_existing_user_details(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + + task.data = { + 'PIComputingID': 'dhf8r' + } + + script = LdapLookup() + user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") + + self.assertEqual(user_details['PIComputingID']['label'], 'Dan Funk - (dhf8r)') + self.assertEqual(user_details['PIComputingID']['value'], 'dhf8r') + self.assertEqual(user_details['PIComputingID']['data']['display_name'], 'Dan Funk') + self.assertEqual(user_details['PIComputingID']['data']['given_name'], 'Dan') + self.assertEqual(user_details['PIComputingID']['data']['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(user_details['PIComputingID']['data']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(user_details['PIComputingID']['data']['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(user_details['PIComputingID']['data']['department'], 
'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(user_details['PIComputingID']['data']['affiliation'], 'faculty') + self.assertEqual(user_details['PIComputingID']['data']['sponsor_type'], 'Staff') + self.assertEqual(user_details['PIComputingID']['data']['uid'], 'dhf8r') + + def test_get_invalid_user_details(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + + task.data = { + 'PIComputingID': 'rec3z' + } + + script = LdapLookup() + user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") + self.assertEqual(user_details['PIComputingID']['label'], 'invalid uid') + self.assertEqual(user_details['PIComputingID']['value'], 'rec3z') + self.assertEqual(user_details['PIComputingID']['data'], {}) From 522f848682f296351ec8d849ab5ee07d37e70df6 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Tue, 21 Jul 2020 20:53:24 -0600 Subject: [PATCH 11/60] Addressing feedback --- crc/scripts/ldap_lookup.py | 88 +++++++++-------------- tests/data/ldap_replace/ldap_replace.bpmn | 69 ++++++++++++++++++ tests/ldap/test_ldap_lookup_script.py | 84 +++++++++++++++++----- 3 files changed, 171 insertions(+), 70 deletions(-) create mode 100644 tests/data/ldap_replace/ldap_replace.bpmn diff --git a/crc/scripts/ldap_lookup.py b/crc/scripts/ldap_lookup.py index 62bd287a..88e2986a 100644 --- a/crc/scripts/ldap_lookup.py +++ b/crc/scripts/ldap_lookup.py @@ -6,73 +6,55 @@ from crc.scripts.script import Script from crc.services.ldap_service import LdapService -USER_DETAILS = { - "PIComputingID": { - "value": "", - "data": { - }, - "label": "invalid uid" - } -} - - -class LdapLookup(Script): +class LdapReplace(Script): """This Script allows to be introduced as part of a workflow and called from there, taking - a UID as input and looking it up through LDAP to return the person's details """ + a UID (or several) as input and 
looking it up through LDAP to return the person's details """ def get_description(self): return """ Attempts to create a dictionary with person details, using the provided argument (a UID) and look it up through LDAP. -Example: -LdapLookup PIComputingID +Examples: +#! LdapReplace supervisor +#! LdapReplace supervisor collaborator +#! LdapReplace supervisor cosupervisor collaborator """ def do_task_validate_only(self, task, *args, **kwargs): - self.get_user_info(task, args) + self.set_users_info_in_task(task, args) def do_task(self, task, *args, **kwargs): args = [arg for arg in args if type(arg) == str] - user_info = self.get_user_info(task, args) + self.set_users_info_in_task(task, args) - user_details = copy.deepcopy(USER_DETAILS) - user_details['PIComputingID']['value'] = user_info['uid'] - if len(user_info.keys()) > 1: - user_details['PIComputingID']['label'] = user_info.pop('label') - else: - user_info.pop('uid') - user_details['PIComputingID']['data'] = user_info - return user_details - - def get_user_info(self, task, args): + def set_users_info_in_task(self, task, args): if len(args) < 1: raise ApiError(code="missing_argument", - message="Ldap lookup script requires one argument. The " - "UID for the person we want to look up") + message="Ldap replace script requires at least one argument. 
The " + "UID for the person(s) we want to look up") - arg = args.pop() # Extracting only one value for now - uid = task.workflow.script_engine.evaluate_expression(task, arg) - if not isinstance(uid, str): - raise ApiError(code="invalid_argument", - message="Ldap lookup script requires one 1 UID argument, of type string.") - user_info_dict = {} - try: - user_info = LdapService.user_info(uid) - user_info_dict = { - "display_name": user_info.display_name, - "given_name": user_info.given_name, - "email_address": user_info.email_address, - "telephone_number": user_info.telephone_number, - "title": user_info.title, - "department": user_info.department, - "affiliation": user_info.affiliation, - "sponsor_type": user_info.sponsor_type, - "uid": user_info.uid, - "label": user_info.proper_name() - } - except: - user_info_dict['uid'] = uid - app.logger.error(f'Ldap lookup failed for UID {uid}') - - return user_info_dict + users_info = {} + for arg in args: + uid = task.workflow.script_engine.evaluate_expression(task, arg) + if not isinstance(uid, str): + raise ApiError(code="invalid_argument", + message="Ldap replace script found an invalid argument, type string is required") + user_info_dict = {} + try: + user_info = LdapService.user_info(uid) + user_info_dict = { + "display_name": user_info.display_name, + "given_name": user_info.given_name, + "email_address": user_info.email_address, + "telephone_number": user_info.telephone_number, + "title": user_info.title, + "department": user_info.department, + "affiliation": user_info.affiliation, + "sponsor_type": user_info.sponsor_type, + "uid": user_info.uid, + "proper_name": user_info.proper_name() + } + except: + app.logger.error(f'Ldap replace failed for UID {uid}') + task.data[arg] = user_info_dict diff --git a/tests/data/ldap_replace/ldap_replace.bpmn b/tests/data/ldap_replace/ldap_replace.bpmn new file mode 100644 index 00000000..77f8c7ad --- /dev/null +++ b/tests/data/ldap_replace/ldap_replace.bpmn @@ -0,0 +1,69 @@ + + + + + 
Flow_1synsig + + + Flow_11e7jgz + + + Flow_08n2npe + Flow_1xlrgne + #! LdapReplace Supervisor Investigator + + + + + + + + + + + + Flow_1synsig + Flow_08n2npe + + + + Flow_1xlrgne + Flow_11e7jgz + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py index 7cb7ff55..d56126cc 100644 --- a/tests/ldap/test_ldap_lookup_script.py +++ b/tests/ldap/test_ldap_lookup_script.py @@ -1,7 +1,7 @@ from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor -from crc.scripts.ldap_lookup import LdapLookup +from crc.scripts.ldap_lookup import LdapReplace from crc import db, mail @@ -18,20 +18,56 @@ class TestLdapLookupScript(BaseTest): 'PIComputingID': 'dhf8r' } - script = LdapLookup() + script = LdapReplace() user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") - self.assertEqual(user_details['PIComputingID']['label'], 'Dan Funk - (dhf8r)') - self.assertEqual(user_details['PIComputingID']['value'], 'dhf8r') - self.assertEqual(user_details['PIComputingID']['data']['display_name'], 'Dan Funk') - self.assertEqual(user_details['PIComputingID']['data']['given_name'], 'Dan') - self.assertEqual(user_details['PIComputingID']['data']['email_address'], 'dhf8r@virginia.edu') - self.assertEqual(user_details['PIComputingID']['data']['telephone_number'], '+1 (434) 924-1723') - self.assertEqual(user_details['PIComputingID']['data']['title'], 'E42:He\'s a hoopy frood') - self.assertEqual(user_details['PIComputingID']['data']['department'], 'E0:EN-Eng Study of Parallel Universes') - self.assertEqual(user_details['PIComputingID']['data']['affiliation'], 'faculty') - self.assertEqual(user_details['PIComputingID']['data']['sponsor_type'], 'Staff') - self.assertEqual(user_details['PIComputingID']['data']['uid'], 'dhf8r') + self.assertEqual(task.data['PIComputingID']['display_name'], 'Dan Funk') + 
self.assertEqual(task.data['PIComputingID']['given_name'], 'Dan') + self.assertEqual(task.data['PIComputingID']['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(task.data['PIComputingID']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(task.data['PIComputingID']['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(task.data['PIComputingID']['department'], 'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(task.data['PIComputingID']['affiliation'], 'faculty') + self.assertEqual(task.data['PIComputingID']['sponsor_type'], 'Staff') + self.assertEqual(task.data['PIComputingID']['uid'], 'dhf8r') + self.assertEqual(task.data['PIComputingID']['proper_name'], 'Dan Funk - (dhf8r)') + + def test_get_existing_users_details(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + + task.data = { + 'supervisor': 'dhf8r', + 'investigator': 'lb3dp' + } + + script = LdapReplace() + user_details = script.do_task(task, workflow.study_id, workflow.id, "supervisor", "investigator") + + self.assertEqual(task.data['supervisor']['display_name'], 'Dan Funk') + self.assertEqual(task.data['supervisor']['given_name'], 'Dan') + self.assertEqual(task.data['supervisor']['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(task.data['supervisor']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(task.data['supervisor']['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(task.data['supervisor']['department'], 'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(task.data['supervisor']['affiliation'], 'faculty') + self.assertEqual(task.data['supervisor']['sponsor_type'], 'Staff') + self.assertEqual(task.data['supervisor']['uid'], 'dhf8r') + self.assertEqual(task.data['supervisor']['proper_name'], 'Dan Funk - (dhf8r)') + + self.assertEqual(task.data['investigator']['display_name'], 'Laura Barnes') + 
self.assertEqual(task.data['investigator']['given_name'], 'Laura') + self.assertEqual(task.data['investigator']['email_address'], 'lb3dp@virginia.edu') + self.assertEqual(task.data['investigator']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(task.data['investigator']['title'], 'E0:Associate Professor of Systems and Information Engineering') + self.assertEqual(task.data['investigator']['department'], 'E0:EN-Eng Sys and Environment') + self.assertEqual(task.data['investigator']['affiliation'], 'faculty') + self.assertEqual(task.data['investigator']['sponsor_type'], 'Staff') + self.assertEqual(task.data['investigator']['uid'], 'lb3dp') + self.assertEqual(task.data['investigator']['proper_name'], 'Laura Barnes - (lb3dp)') def test_get_invalid_user_details(self): self.load_example_data() @@ -44,8 +80,22 @@ class TestLdapLookupScript(BaseTest): 'PIComputingID': 'rec3z' } - script = LdapLookup() + script = LdapReplace() user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") - self.assertEqual(user_details['PIComputingID']['label'], 'invalid uid') - self.assertEqual(user_details['PIComputingID']['value'], 'rec3z') - self.assertEqual(user_details['PIComputingID']['data'], {}) + + self.assertEqual(task.data['PIComputingID'], {}) + + def test_bpmn_task_receives_user_details(self): + workflow = self.create_workflow('ldap_replace') + + task_data = { + 'Supervisor': 'dhf8r', + 'Investigator': 'lb3dp' + } + task = self.get_workflow_api(workflow).next_task + + self.complete_form(workflow, task, task_data) + + task = self.get_workflow_api(workflow).next_task + + self.assertEqual(task.data['Supervisor']['proper_name'], 'Dan Funk - (dhf8r)') From 41cbce8e01c054e589025bbab9964c31e30403bd Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Tue, 21 Jul 2020 21:08:08 -0600 Subject: [PATCH 12/60] Updating bpmn test --- crc/scripts/{ldap_lookup.py => ldap_replace.py} | 0 tests/ldap/test_ldap_lookup_script.py | 2 +- 2 files changed, 1 
insertion(+), 1 deletion(-) rename crc/scripts/{ldap_lookup.py => ldap_replace.py} (100%) diff --git a/crc/scripts/ldap_lookup.py b/crc/scripts/ldap_replace.py similarity index 100% rename from crc/scripts/ldap_lookup.py rename to crc/scripts/ldap_replace.py diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py index d56126cc..c19f0144 100644 --- a/tests/ldap/test_ldap_lookup_script.py +++ b/tests/ldap/test_ldap_lookup_script.py @@ -1,7 +1,7 @@ from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor -from crc.scripts.ldap_lookup import LdapReplace +from crc.scripts.ldap_replace import LdapReplace from crc import db, mail From 855f5544e51604b29ed91bffb7730da066f1c81e Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 09:35:08 -0400 Subject: [PATCH 13/60] Adds enrollment_date to study model --- crc/models/study.py | 7 +- crc/services/workflow_service.py | 5 +- .../bpmn/notifications/notifications.bpmn | 100 +++++++++++++++--- migrations/versions/c4ddb69e7ef4_.py | 28 +++++ 4 files changed, 122 insertions(+), 18 deletions(-) create mode 100644 migrations/versions/c4ddb69e7ef4_.py diff --git a/crc/models/study.py b/crc/models/study.py index 7bb2db33..854ce62f 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -25,6 +25,7 @@ class StudyModel(db.Model): investigator_uids = db.Column(db.ARRAY(db.String), nullable=True) requirements = db.Column(db.ARRAY(db.Integer), nullable=True) on_hold = db.Column(db.Boolean, default=False) + enrollment_date = db.Column(db.DateTime(timezone=True), nullable=True) def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy): self.hsr_number = pbs.HSRNUMBER @@ -108,7 +109,7 @@ class Study(object): id=None, protocol_builder_status=None, sponsor="", hsr_number="", ind_number="", categories=[], - files=[], approvals=[], **argsv): + files=[], approvals=[], enrollment_date=None, **argsv): self.id = id self.user_uid = user_uid 
self.title = title @@ -122,6 +123,7 @@ class Study(object): self.approvals = approvals self.warnings = [] self.files = files + self.enrollment_date = enrollment_date @classmethod def from_model(cls, study_model: StudyModel): @@ -154,11 +156,12 @@ class StudySchema(ma.Schema): ind_number = fields.String(allow_none=True) files = fields.List(fields.Nested(FileSchema), dump_only=True) approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True) + enrollment_date = fields.Date(allow_none=True) class Meta: model = Study additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid", - "sponsor", "ind_number", "approvals", "files"] + "sponsor", "ind_number", "approvals", "files", "enrollment_date"] unknown = INCLUDE @marshmallow.post_load diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 3205e800..65794037 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -154,10 +154,9 @@ class WorkflowService(object): if len(field.options) > 0: random_choice = random.choice(field.options) if isinstance(random_choice, dict): - choice = random.choice(field.options) return { - 'value': choice['id'], - 'label': choice['name'] + 'value': random_choice['id'], + 'label': random_choice['name'] } else: # fixme: why it is sometimes an EnumFormFieldOption, and other times not? 
diff --git a/crc/static/bpmn/notifications/notifications.bpmn b/crc/static/bpmn/notifications/notifications.bpmn index 4c01a711..cd73505f 100644 --- a/crc/static/bpmn/notifications/notifications.bpmn +++ b/crc/static/bpmn/notifications/notifications.bpmn @@ -1,42 +1,116 @@ - + + + + + + + StartEvent_1 + Activity_1qpy9ra + Event_1m9fnmv + + + Gateway_0ved0t9 + Activity_107ojvq + + Flow_0q51aiq - - + - + Flow_0q51aiq - Flow_0ai4j1x + Flow_11tnx3n + Flow_0d2snmk + + + + Flow_0apr3nj + Flow_0mhtlkt + Flow_11tnx3n + + + + is_study_approved == True + - Flow_0ai4j1x + Flow_0mhtlkt - + + is_study_approved == False + + + + + + + + Flow_0d2snmk + Flow_0apr3nj + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + + - + + + + diff --git a/migrations/versions/c4ddb69e7ef4_.py b/migrations/versions/c4ddb69e7ef4_.py new file mode 100644 index 00000000..533d2f86 --- /dev/null +++ b/migrations/versions/c4ddb69e7ef4_.py @@ -0,0 +1,28 @@ +"""empty message + +Revision ID: c4ddb69e7ef4 +Revises: ffef4661a37d +Create Date: 2020-07-22 09:04:09.769239 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'c4ddb69e7ef4' +down_revision = 'ffef4661a37d' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('study', sa.Column('enrollment_date', sa.DateTime(timezone=True), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('study', 'enrollment_date') + # ### end Alembic commands ### From 60f907f852c4bb5e872d3f92b97b2fbca2ea1ea3 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:47:25 -0400 Subject: [PATCH 14/60] Updates package versions. 
--- Pipfile.lock | 75 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 6 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index bd8581a5..f726450c 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -35,6 +35,7 @@ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -49,6 +50,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -56,6 +58,7 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -79,6 +82,7 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -107,6 +111,7 @@ "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.4.6" }, "certifi": { @@ -161,6 +166,7 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": 
"==7.1.2" }, "clickclick": { @@ -182,6 +188,7 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], + "markers": "python_version >= '3.6'", "version": "==5.0.0" }, "connexion": { @@ -240,6 +247,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -322,12 +330,14 @@ "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e", "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.4" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -350,6 +360,7 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "imagesize": { @@ -357,6 +368,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -372,6 +384,7 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], + "markers": "python_version >= '3.5'", "version": "==0.5.0" }, "itsdangerous": { @@ -379,6 +392,7 @@ 
"sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -393,6 +407,7 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -407,11 +422,16 @@ "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.6.11" }, "ldap3": { "hashes": [ + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -459,6 +479,7 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markdown": { @@ -505,6 +526,7 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "marshmallow": { @@ -560,6 +582,7 @@ 
"sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e", "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc" ], + "markers": "python_version >= '3.6'", "version": "==1.19.1" }, "openapi-spec-validator": { @@ -583,6 +606,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -645,8 +669,19 @@ }, "pyasn1": { "hashes": [ + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", + "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" ], "version": "==0.4.8" }, @@ -655,6 +690,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -662,6 
+698,7 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], + "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -677,6 +714,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -690,6 +728,7 @@ "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" ], + "markers": "python_version >= '3.6'", "version": "==5.0.1" }, "python-dateutil": { @@ -708,9 +747,11 @@ }, "python-editor": { "hashes": [ - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", + "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" ], "version": "==1.0.4" }, @@ -824,6 +865,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -838,6 +880,7 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], + "markers": "python_version >= '3.5'", "version": "==2.0.1" }, "sphinx": { @@ -853,6 +896,7 @@ 
"sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -860,6 +904,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -867,6 +912,7 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -874,6 +920,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -881,6 +928,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -888,6 +936,7 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { @@ -926,6 +975,7 @@ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.18" }, "swagger-ui-bundle": { @@ -938,16 +988,18 @@ }, "urllib3": { "hashes": [ - "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", - 
"sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" + "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", + "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], - "version": "==1.25.9" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", + "version": "==1.25.10" }, "vine": { "hashes": [ "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -955,6 +1007,7 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -962,6 +1015,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -1008,6 +1062,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -1017,6 +1072,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -1072,6 +1128,7 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", 
"sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], + "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -1079,6 +1136,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -1094,6 +1152,7 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -1101,6 +1160,7 @@ "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.9.0" }, "pyparsing": { @@ -1108,6 +1168,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1123,6 +1184,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { @@ -1137,6 +1199,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } } From b87f55fbd75df75c53b84541107afd053733cd54 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:47:53 
-0400 Subject: [PATCH 15/60] Exposes date in TaskEvent endpoint --- crc/models/task_event.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crc/models/task_event.py b/crc/models/task_event.py index e3914468..c696bc26 100644 --- a/crc/models/task_event.py +++ b/crc/models/task_event.py @@ -50,6 +50,7 @@ class TaskEvent(object): self.task_type = model.task_type self.task_state = model.task_state self.task_lane = model.task_lane + self.date = model.date class TaskEventSchema(ma.Schema): @@ -59,5 +60,5 @@ class TaskEventSchema(ma.Schema): class Meta: model = TaskEvent additional = ["id", "user_uid", "action", "task_id", "task_title", - "task_name", "task_type", "task_state", "task_lane"] + "task_name", "task_type", "task_state", "task_lane", "date"] unknown = INCLUDE From 6fae89b1fc841fff5d5bceb0dd28d925f20ac175 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:48:36 -0400 Subject: [PATCH 16/60] Adds manual task --- .../bpmn/notifications/notifications.bpmn | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/crc/static/bpmn/notifications/notifications.bpmn b/crc/static/bpmn/notifications/notifications.bpmn index cd73505f..a9fdedbf 100644 --- a/crc/static/bpmn/notifications/notifications.bpmn +++ b/crc/static/bpmn/notifications/notifications.bpmn @@ -9,6 +9,7 @@ StartEvent_1 Activity_1qpy9ra Event_1m9fnmv + Activity_0c5drp3 Gateway_0ved0t9 @@ -25,7 +26,7 @@ Flow_0q51aiq - Flow_11tnx3n + Flow_1ugh4wn Flow_0d2snmk @@ -42,7 +43,7 @@ Flow_0mhtlkt - + is_study_approved == False @@ -54,6 +55,12 @@ Flow_0d2snmk Flow_0apr3nj + + + Your request was not approved. Try again. 
+ Flow_11tnx3n + Flow_1ugh4wn + @@ -68,10 +75,9 @@ - - + - + @@ -94,6 +100,10 @@ + + + + @@ -112,6 +122,9 @@ + + + From 74e5e07114b09c6c824fc7b6afa4a1aedea0f909 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Thu, 23 Jul 2020 07:41:29 -0600 Subject: [PATCH 17/60] Testing for all values --- tests/ldap/test_ldap_lookup_script.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py index c19f0144..220ca9c8 100644 --- a/tests/ldap/test_ldap_lookup_script.py +++ b/tests/ldap/test_ldap_lookup_script.py @@ -98,4 +98,13 @@ class TestLdapLookupScript(BaseTest): task = self.get_workflow_api(workflow).next_task + self.assertEqual(task.data['Supervisor']['display_name'], 'Dan Funk') + self.assertEqual(task.data['Supervisor']['given_name'], 'Dan') + self.assertEqual(task.data['Supervisor']['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(task.data['Supervisor']['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(task.data['Supervisor']['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(task.data['Supervisor']['department'], 'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(task.data['Supervisor']['affiliation'], 'faculty') + self.assertEqual(task.data['Supervisor']['sponsor_type'], 'Staff') + self.assertEqual(task.data['Supervisor']['uid'], 'dhf8r') self.assertEqual(task.data['Supervisor']['proper_name'], 'Dan Funk - (dhf8r)') From 5ec5fcb4e425b723f804f8fa0ff0e7ec5f3530e9 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 23 Jul 2020 12:00:24 -0400 Subject: [PATCH 18/60] Correcting an issue with the Navigation where it did not correctly handle looking back to a previous task within the workflow. In some cases the session was not getting committed, leaving rogue assignments outstanding for a workflow. 
--- Pipfile.lock | 71 ++------------------------------ crc/services/workflow_service.py | 1 + tests/test_user_roles.py | 63 +++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 68 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index f726450c..8cee7118 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -35,7 +35,6 @@ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -50,7 +49,6 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -58,7 +56,6 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -82,7 +79,6 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -111,7 +107,6 @@ "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.4.6" }, "certifi": { @@ -166,7 +161,6 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], - "markers": 
"python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -188,7 +182,6 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], - "markers": "python_version >= '3.6'", "version": "==5.0.0" }, "connexion": { @@ -247,7 +240,6 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -330,14 +322,12 @@ "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e", "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.4" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -360,7 +350,6 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "imagesize": { @@ -368,7 +357,6 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -384,7 +372,6 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], - "markers": "python_version >= '3.5'", 
"version": "==0.5.0" }, "itsdangerous": { @@ -392,7 +379,6 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -407,7 +393,6 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -422,16 +407,11 @@ "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.6.11" }, "ldap3": { "hashes": [ - "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", - "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", - "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", - "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -479,7 +459,6 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markdown": { @@ -526,7 +505,6 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": 
"==1.1.1" }, "marshmallow": { @@ -582,7 +560,6 @@ "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e", "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc" ], - "markers": "python_version >= '3.6'", "version": "==1.19.1" }, "openapi-spec-validator": { @@ -606,7 +583,6 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -669,19 +645,8 @@ }, "pyasn1": { "hashes": [ - "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", - "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", - "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", - "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", - "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", - "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", - "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", - "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", - "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", - "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" ], "version": "==0.4.8" }, @@ -690,7 +655,6 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 
"version": "==2.20" }, "pygments": { @@ -698,7 +662,6 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], - "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -714,7 +677,6 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -728,7 +690,6 @@ "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" ], - "markers": "python_version >= '3.6'", "version": "==5.0.1" }, "python-dateutil": { @@ -747,11 +708,9 @@ }, "python-editor": { "hashes": [ - "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", - "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" ], "version": "==1.0.4" }, @@ -865,7 +824,6 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -880,7 +838,6 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], - "markers": "python_version >= '3.5'", "version": "==2.0.1" }, 
"sphinx": { @@ -896,7 +853,6 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], - "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -904,7 +860,6 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], - "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -912,7 +867,6 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], - "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -920,7 +874,6 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], - "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -928,7 +881,6 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], - "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -936,13 +888,12 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], - "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "74529738b4e16be5aadd846669a201560f81a6d4" + "ref": "5785d3cab99e319596e1bf0006df96f215febafd" }, "sqlalchemy": { "hashes": [ @@ -975,7 +926,6 @@ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], - "markers": 
"python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.18" }, "swagger-ui-bundle": { @@ -991,7 +941,6 @@ "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.10" }, "vine": { @@ -999,7 +948,6 @@ "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -1007,7 +955,6 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -1015,7 +962,6 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -1062,7 +1008,6 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], - "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -1072,7 +1017,6 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -1128,7 +1072,6 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", 
"sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], - "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -1136,7 +1079,6 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -1152,7 +1094,6 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -1160,7 +1101,6 @@ "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.9.0" }, "pyparsing": { @@ -1168,7 +1108,6 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1184,7 +1123,6 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { @@ -1199,7 +1137,6 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], - "markers": "python_version >= '3.6'", "version": "==3.1.0" } } diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 65794037..e078166b 100644 --- 
a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -470,6 +470,7 @@ class WorkflowService(object): db.session.query(TaskEventModel). \ filter(TaskEventModel.workflow_id == processor.workflow_model.id). \ filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).delete() + db.session.commit() for task in processor.get_current_user_tasks(): user_ids = WorkflowService.get_users_assigned_to_task(processor, task) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index cc7ff613..d1e85563 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -200,4 +200,65 @@ class TestTasksApi(BaseTest): workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) self.assertEquals('COMPLETED', workflow_api.next_task.state) self.assertEquals('EndEvent', workflow_api.next_task.type) # Are are at the end. - self.assertEquals(WorkflowStatus.complete, workflow_api.status) \ No newline at end of file + self.assertEquals(WorkflowStatus.complete, workflow_api.status) + + def get_assignment_task_events(self, uid): + return db.session.query(TaskEventModel). \ + filter(TaskEventModel.user_uid == uid). \ + filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).all() + + def test_workflow_reset_correctly_resets_the_task_events(self): + + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', display_name="Roles", as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # At this point there should be a task_log with an action of ASSIGNMENT on it for + # the supervisor. 
+ self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Resetting the workflow at this point should clear the event log. + workflow_api = self.get_workflow_api(workflow, hard_reset=True, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Re-complete first task, and awaiting tasks should shift to 0 for for submitter, and 1 for supervisor + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Complete the supervisor task with rejected approval, and the assignments should switch. + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + data = workflow_api.next_task.data + data["approval"] = False + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals(1, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Mark the return form review page as complete, and then recomplete the form, and assignments switch yet again. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Complete the supervisor task, accepting the approval, and the workflow is completed. + # When it is all done, there should be no outstanding assignments. 
+ workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + data = workflow_api.next_task.data + data["approval"] = True + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals(WorkflowStatus.complete, workflow_api.status) + self.assertEquals('EndEvent', workflow_api.next_task.type) # Are are at the end. + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Sending any subsequent complete forms does not result in a new task event + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) From 842d2ee100798ca020f262e05759d0a14294fc81 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Thu, 23 Jul 2020 10:58:24 -0600 Subject: [PATCH 19/60] Supporting study status update --- crc/api.yml | 12 +++++++----- crc/api/study.py | 6 ++++-- crc/models/protocol_builder.py | 8 ++++---- crc/models/study.py | 25 ++++++++++++++++++++++--- 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index 4c6ebd1b..a9261d08 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -1046,22 +1046,24 @@ components: example: dhf8r protocol_builder_status: type: string - enum: [INCOMPLETE, ACTIVE, HOLD, OPEN, ABANDONED] + enum: [incomplete, active, hold, open, abandoned] example: done sponsor: type: string + x-nullable: true example: "Sartography Pharmaceuticals" ind_number: type: string + x-nullable: true example: "27b-6-42" hsr_number: type: string x-nullable: true example: "27b-6-1212" - categories: - type: array - items: - $ref: "#/components/schemas/WorkflowSpecCategory" + # categories: + # type: array + # items: + # $ref: "#/components/schemas/WorkflowSpecCategory" WorkflowSpec: properties: 
id: diff --git a/crc/api/study.py b/crc/api/study.py index 8fdd1b4a..ba2b7c0d 100644 --- a/crc/api/study.py +++ b/crc/api/study.py @@ -6,7 +6,7 @@ from sqlalchemy.exc import IntegrityError from crc import session from crc.api.common import ApiError, ApiErrorSchema from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.study import StudySchema, StudyModel, Study +from crc.models.study import StudySchema, StudyForUpdateSchema, StudyModel, Study from crc.services.study_service import StudyService @@ -40,10 +40,12 @@ def update_study(study_id, body): if study_model is None: raise ApiError('unknown_study', 'The study "' + study_id + '" is not recognized.') - study: Study = StudySchema().load(body) + study: Study = StudyForUpdateSchema().load(body) study.update_model(study_model) session.add(study_model) session.commit() + # Need to reload the full study to return it to the frontend + study = StudyService.get_study(study_id) return StudySchema().dump(study) diff --git a/crc/models/protocol_builder.py b/crc/models/protocol_builder.py index 9ff1098f..a6bc02cf 100644 --- a/crc/models/protocol_builder.py +++ b/crc/models/protocol_builder.py @@ -23,10 +23,10 @@ class ProtocolBuilderStatus(enum.Enum): # • Open To Enrollment: has start date and HSR number? # • Abandoned: deleted in PB INCOMPLETE = 'incomplete' # Found in PB but not ready to start (not q_complete) - ACTIVE = 'active', # found in PB, marked as "q_complete" and no HSR number and not hold - HOLD = 'hold', # CR Connect side, if the Study ias marked as "hold". - OPEN = 'open', # Open To Enrollment: has start date and HSR number? - ABANDONED = 'Abandoned' # Not found in PB + ACTIVE = 'active' # found in PB, marked as "q_complete" and no HSR number and not hold + HOLD = 'hold' # CR Connect side, if the Study ias marked as "hold". + OPEN = 'open' # Open To Enrollment: has start date and HSR number? 
+ ABANDONED = 'abandoned' # Not found in PB #DRAFT = 'draft', # !Q_COMPLETE diff --git a/crc/models/study.py b/crc/models/study.py index 854ce62f..bc92e5e1 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -133,9 +133,7 @@ class Study(object): return instance def update_model(self, study_model: StudyModel): - for k,v in self.__dict__.items(): - if not k.startswith('_'): - study_model.__dict__[k] = v + study_model.protocol_builder_status = ProtocolBuilderStatus(self.protocol_builder_status) def model_args(self): """Arguments that can be passed into the Study Model to update it.""" @@ -145,6 +143,27 @@ class Study(object): return self_dict +class StudyForUpdateSchema(ma.Schema): + + id = fields.Integer(required=False, allow_none=True) + protocol_builder_status = EnumField(ProtocolBuilderStatus, by_value=True) + hsr_number = fields.String(allow_none=True) + sponsor = fields.String(allow_none=True) + ind_number = fields.String(allow_none=True) + enrollment_date = fields.Date(allow_none=True) + + class Meta: + model = Study + # additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid", + # "sponsor", "ind_number", "approvals", "files", "enrollment_date"] + unknown = INCLUDE + + @marshmallow.post_load + def make_study(self, data, **kwargs): + """Can load the basic study data for updates to the database, but categories are write only""" + return Study(**data) + + class StudySchema(ma.Schema): id = fields.Integer(required=False, allow_none=True) From ce8d7cad16c006082a29b1095279f863dc48d22e Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Thu, 23 Jul 2020 14:56:12 -0400 Subject: [PATCH 20/60] We resolved a problem with a test with some changes to Spiff, change the correct so that it is correct. 
--- tests/test_user_roles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 6104641c..212bd463 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -178,7 +178,7 @@ class TestTasksApi(BaseTest): workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) nav = workflow_api.navigation self.assertEquals(5, len(nav)) - self.assertEquals('COMPLETED', nav[0]['state']) # We still have some issues here, the navigation will be off when looping back. + self.assertEquals('READY', nav[0]['state']) # Issue resolved - KPM - was COMPLETED self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. self.assertEquals('READY', workflow_api.next_task.state) From f5ab2835380edd840627f7db9c2fb357ea53aad6 Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Fri, 24 Jul 2020 12:08:46 -0400 Subject: [PATCH 21/60] Test of adding in the ability of augmenting the workflow to include internal scripts like StudyInfo This is the first waypoint on a larger effort to make all of the 'special scripts' that currently require a shebang to be just another python function. 
--- crc/scripts/study_info.py | 40 ++++++++++++++++++--- crc/services/workflow_processor.py | 16 ++++++++- tests/data/study_details/study_details.bpmn | 21 ++++++++--- 3 files changed, 68 insertions(+), 9 deletions(-) diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index f274b899..a559a3fc 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -8,7 +8,7 @@ from crc.scripts.script import Script from crc.services.file_service import FileService from crc.services.protocol_builder import ProtocolBuilderService from crc.services.study_service import StudyService - +from box import Box class StudyInfo(Script): """Please see the detailed description that is provided below. """ @@ -199,9 +199,41 @@ Returns information specific to the protocol. self.add_data_to_task(task=task, data=data["study"]) self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) + def return_data(self, task, study_id, workflow_id, *args, **kwargs): + self.check_args(args,2) + prefix = None + if len(args) > 1: + prefix = args[1] + cmd = args[0] + study_info = {} + if self.__class__.__name__ in task.data: + study_info = task.data[self.__class__.__name__] + retval = None + if cmd == 'info': + study = session.query(StudyModel).filter_by(id=study_id).first() + schema = StudySchema() + retval = schema.dump(study) + if cmd == 'investigators': + retval = StudyService().get_investigators(study_id) + if cmd == 'roles': + retval = StudyService().get_investigators(study_id, all=True) + if cmd == 'details': + retval = self.pb.get_study_details(study_id) + if cmd == 'approvals': + retval = StudyService().get_approvals(study_id) + if cmd == 'documents': + retval = StudyService().get_documents_status(study_id) + if cmd == 'protocol': + retval = StudyService().get_protocol(study_id) + if isinstance(retval,dict) and prefix is not None: + return Box({x:retval[x] for x in retval.keys() if x[:len(prefix)] == prefix}) + elif isinstance(retval,dict): + 
return Box(retval) + else: + return retval + def do_task(self, task, study_id, workflow_id, *args, **kwargs): self.check_args(args) - cmd = args[0] study_info = {} if self.__class__.__name__ in task.data: @@ -225,8 +257,8 @@ Returns information specific to the protocol. self.add_data_to_task(task, {cmd: StudyService().get_protocol(study_id)}) - def check_args(self, args): - if len(args) != 1 or (args[0] not in StudyInfo.type_options): + def check_args(self, args, maxlen=1): + if len(args) < 1 or len(args) > maxlen or (args[0] not in StudyInfo.type_options): raise ApiError(code="missing_argument", message="The StudyInfo script requires a single argument which must be " "one of %s" % ",".join(StudyInfo.type_options)) diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 165d3313..cb39be44 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -17,6 +17,7 @@ from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser from SpiffWorkflow.exceptions import WorkflowTaskExecException from SpiffWorkflow.specs import WorkflowSpec +import crc from crc import session, app from crc.api.common import ApiError from crc.models.file import FileDataModel, FileModel, FileType @@ -41,9 +42,22 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): valid Python. 
""" # Shlex splits the whole string while respecting double quoted strings within + study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] + if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data: + workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] + else: + workflow_id = None + if not script.startswith('#!'): try: - super().execute(task, script, data) + augmentMethods = {'studyInfo':lambda *args: crc.scripts.study_info.StudyInfo.return_data( + crc.scripts.study_info.StudyInfo(), + task, + study_id, + workflow_id, + *args)} + + super().execute(task, script, data,externalMethods=augmentMethods) except SyntaxError as e: raise ApiError.from_task('syntax_error', f'If you are running a pre-defined script, please' diff --git a/tests/data/study_details/study_details.bpmn b/tests/data/study_details/study_details.bpmn index 2b46f935..888a28d9 100644 --- a/tests/data/study_details/study_details.bpmn +++ b/tests/data/study_details/study_details.bpmn @@ -10,10 +10,16 @@ SequenceFlow_1bqiin0 #! StudyInfo info - + - SequenceFlow_1bqiin0 + Flow_0ochvmi + + + SequenceFlow_1bqiin0 + Flow_0ochvmi + study = studyInfo('info','p') + @@ -29,10 +35,17 @@ - + - + + + + + + + + From a124e13c6ae2dc28510ae386334c85924b1449eb Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Fri, 24 Jul 2020 14:33:24 -0400 Subject: [PATCH 22/60] Replace all legacy style calls with new calls. Still having issues where we try to eval an empty definition, not quite sure why there is a difference from what we had before. I may need to revert some of it and determine what is going on. 
--- crc/scripts/fact_service.py | 2 +- crc/scripts/script.py | 33 ++++-- crc/scripts/study_info.py | 25 +---- crc/services/workflow_processor.py | 102 +++++++++--------- crc/static/bpmn/core_info/core_info.bpmn | 3 +- .../data_security_plan.bpmn | 2 +- .../documents_approvals.bpmn | 6 +- .../bpmn/ide_supplement/ide_supplement.bpmn | 3 +- .../ids_full_submission.bpmn | 3 +- crc/static/bpmn/ind_update/ind_update.bpmn | 3 +- .../bpmn/irb_api_details/irb_api_details.bpmn | 3 +- .../irb_api_personnel/irb_api_personnel.bpmn | 3 +- .../bpmn/research_rampup/research_rampup.bpmn | 4 +- .../top_level_workflow.bpmn | 9 +- tests/data/docx/docx.bpmn | 2 +- tests/data/email/email.bpmn | 2 +- tests/data/invalid_script/invalid_script.bpmn | 2 +- tests/data/multi_instance/multi_instance.bpmn | 3 +- .../multi_instance_parallel.bpmn | 3 +- tests/data/random_fact/random_fact.bpmn | 2 +- tests/data/study_details/study_details.bpmn | 5 +- .../top_level_workflow.bpmn | 3 +- 22 files changed, 115 insertions(+), 108 deletions(-) diff --git a/crc/scripts/fact_service.py b/crc/scripts/fact_service.py index b3701312..19e5cb3f 100644 --- a/crc/scripts/fact_service.py +++ b/crc/scripts/fact_service.py @@ -40,7 +40,7 @@ class FactService(Script): else: details = "unknown fact type." - self.add_data_to_task(task, details) + #self.add_data_to_task(task, details) print(details) return details diff --git a/crc/scripts/script.py b/crc/scripts/script.py index ac4ce38d..ba5af5b7 100644 --- a/crc/scripts/script.py +++ b/crc/scripts/script.py @@ -23,6 +23,27 @@ class Script(object): "This is an internal error. The script you are trying to execute '%s' " % self.__class__.__name__ + "does must provide a validate_only option that mimics the do_task, " + "but does not make external calls or database updates." ) + @staticmethod + def generate_augmented_list(task, study_id,workflow_id): + """ + this makes a dictionary of lambda functions that are closed over the class instance that + They represent. 
This is passed into PythonScriptParser as a list of helper functions that are + available for running. In general, they maintain the do_task call structure that they had, but + they always return a value rather than updating the task data. + + We may be able to remove the task for each of these calls if we are not using it other than potentially + updating the task data. + """ + def make_closure(subclass,task,study_id,workflow_id): + instance = subclass() + return lambda *a : subclass.do_task(instance,task,study_id,workflow_id,*a) + execlist = {} + subclasses = Script.get_all_subclasses() + for x in range(len(subclasses)): + subclass = subclasses[x] + execlist[subclass.__module__.split('.')[-1]] = make_closure(subclass,task,study_id, + workflow_id) + return execlist @staticmethod def get_all_subclasses(): @@ -46,12 +67,12 @@ class Script(object): return all_subclasses - def add_data_to_task(self, task, data): - key = self.__class__.__name__ - if key in task.data: - task.data[key].update(data) - else: - task.data[key] = data + # def add_data_to_task(self, task, data): + # key = self.__class__.__name__ + # if key in task.data: + # task.data[key].update(data) + # else: + # task.data[key] = data class ScriptValidationError: diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index a559a3fc..c392d40c 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -199,7 +199,7 @@ Returns information specific to the protocol. self.add_data_to_task(task=task, data=data["study"]) self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) - def return_data(self, task, study_id, workflow_id, *args, **kwargs): + def do_task(self, task, study_id, workflow_id, *args, **kwargs): self.check_args(args,2) prefix = None if len(args) > 1: @@ -232,29 +232,6 @@ Returns information specific to the protocol. 
else: return retval - def do_task(self, task, study_id, workflow_id, *args, **kwargs): - self.check_args(args) - cmd = args[0] - study_info = {} - if self.__class__.__name__ in task.data: - study_info = task.data[self.__class__.__name__] - - if cmd == 'info': - study = session.query(StudyModel).filter_by(id=study_id).first() - schema = StudySchema() - self.add_data_to_task(task, {cmd: schema.dump(study)}) - if cmd == 'investigators': - self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id)}) - if cmd == 'roles': - self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id, all=True)}) - if cmd == 'details': - self.add_data_to_task(task, {cmd: self.pb.get_study_details(study_id)}) - if cmd == 'approvals': - self.add_data_to_task(task, {cmd: StudyService().get_approvals(study_id)}) - if cmd == 'documents': - self.add_data_to_task(task, {cmd: StudyService().get_documents_status(study_id)}) - if cmd == 'protocol': - self.add_data_to_task(task, {cmd: StudyService().get_protocol(study_id)}) def check_args(self, args, maxlen=1): diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index cb39be44..d29c3a0e 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -48,58 +48,52 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): else: workflow_id = None - if not script.startswith('#!'): - try: - augmentMethods = {'studyInfo':lambda *args: crc.scripts.study_info.StudyInfo.return_data( - crc.scripts.study_info.StudyInfo(), - task, - study_id, - workflow_id, - *args)} - - super().execute(task, script, data,externalMethods=augmentMethods) - except SyntaxError as e: - raise ApiError.from_task('syntax_error', - f'If you are running a pre-defined script, please' - f' proceed the script with "#!", otherwise this is assumed to be' - f' pure python: {script}, {e.msg}', task=task) - else: - self.run_predefined_script(task, script[2:], data) # strip off the first two characters. 
- - def run_predefined_script(self, task: SpiffTask, script, data): - commands = shlex.split(script) - path_and_command = commands[0].rsplit(".", 1) - if len(path_and_command) == 1: - module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0]) - class_name = path_and_command[0] - else: - module_name = "crc.scripts." + path_and_command[0] + "." + self.camel_to_snake(path_and_command[1]) - class_name = path_and_command[1] try: - mod = __import__(module_name, fromlist=[class_name]) - klass = getattr(mod, class_name) - study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] - if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data: - workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] - else: - workflow_id = None + augmentMethods = Script.generate_augmented_list(task,study_id,workflow_id) - if not isinstance(klass(), Script): - raise ApiError.from_task("invalid_script", - "This is an internal error. The script '%s:%s' you called " % - (module_name, class_name) + - "does not properly implement the CRC Script class.", - task=task) - if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]: - """If this is running a validation, and not a normal process, then we want to - mimic running the script, but not make any external calls or database changes.""" - klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:]) - else: - klass().do_task(task, study_id, workflow_id, *commands[1:]) - except ModuleNotFoundError: - raise ApiError.from_task("invalid_script", - "Unable to locate Script: '%s:%s'" % (module_name, class_name), - task=task) + super().execute(task, script, data, externalMethods=augmentMethods) + except SyntaxError as e: + raise ApiError.from_task('syntax_error', + f'If you are running a pre-defined script, please' + f' proceed the script with "#!", otherwise this is assumed to be' + f' pure python: {script}, {e.msg}', task=task) + # else: + # self.run_predefined_script(task, script[2:], data) # strip off the 
first two characters. + + # def run_predefined_script(self, task: SpiffTask, script, data): + # commands = shlex.split(script) + # path_and_command = commands[0].rsplit(".", 1) + # if len(path_and_command) == 1: + # module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0]) + # class_name = path_and_command[0] + # else: + # module_name = "crc.scripts." + path_and_command[0] + "." + self.camel_to_snake(path_and_command[1]) + # class_name = path_and_command[1] + # try: + # mod = __import__(module_name, fromlist=[class_name]) + # klass = getattr(mod, class_name) + # study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] + # if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data: + # workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] + # else: + # workflow_id = None + # + # if not isinstance(klass(), Script): + # raise ApiError.from_task("invalid_script", + # "This is an internal error. The script '%s:%s' you called " % + # (module_name, class_name) + + # "does not properly implement the CRC Script class.", + # task=task) + # if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]: + # """If this is running a validation, and not a normal process, then we want to + # mimic running the script, but not make any external calls or database changes.""" + # klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:]) + # else: + # klass().do_task(task, study_id, workflow_id, *commands[1:]) + # except ModuleNotFoundError: + # raise ApiError.from_task("invalid_script", + # "Unable to locate Script: '%s:%s'" % (module_name, class_name), + # task=task) def evaluate_expression(self, task, expression): """ @@ -194,10 +188,10 @@ class WorkflowProcessor(object): bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine) bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only - try: - bpmn_workflow.do_engine_steps() 
- except WorkflowException as we: - raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) + #try: + bpmn_workflow.do_engine_steps() + # except WorkflowException as we: + # raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) return bpmn_workflow def save(self): diff --git a/crc/static/bpmn/core_info/core_info.bpmn b/crc/static/bpmn/core_info/core_info.bpmn index 8c69ffb3..9763ced6 100644 --- a/crc/static/bpmn/core_info/core_info.bpmn +++ b/crc/static/bpmn/core_info/core_info.bpmn @@ -212,7 +212,8 @@ SequenceFlow_1r3yrhy Flow_09h1imz - #! StudyInfo details + StudyInfo = {} +StudyInfo['details'] = study_info('details') Flow_09h1imz diff --git a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn index 86426d6d..3bf309b7 100644 --- a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn +++ b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn @@ -453,7 +453,7 @@ Indicate all the possible formats in which you will transmit your data outside o SequenceFlow_0k2r83n SequenceFlow_0t6xl9i SequenceFlow_16kyite - #! CompleteTemplate NEW_DSP_template.docx Study_DataSecurityPlan + complete_template('NEW_DSP_template.docx','Study_DataSecurityPlan')/bpmn:script> ##### Instructions diff --git a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn index 12e85e34..858e95d6 100644 --- a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn +++ b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn @@ -53,12 +53,14 @@ Flow_0c7ryff Flow_142jtxs - #! StudyInfo approvals + StudyInfo = {} +StudyInfo['approvals'] = study_info('approvals') Flow_1k3su2q Flow_0c7ryff - #! 
StudyInfo documents + StudyInfo = {} +StudyInfo['documents'] = study_info('documents') diff --git a/crc/static/bpmn/ide_supplement/ide_supplement.bpmn b/crc/static/bpmn/ide_supplement/ide_supplement.bpmn index 7a83643b..394a1d46 100644 --- a/crc/static/bpmn/ide_supplement/ide_supplement.bpmn +++ b/crc/static/bpmn/ide_supplement/ide_supplement.bpmn @@ -36,7 +36,8 @@ SequenceFlow_1dhb8f4 SequenceFlow_1uzcl1f - #! StudyInfo details + StudyInfo = {} +StudyInfo['details'] = study_info('details') diff --git a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn index 25a9ad6e..4e940cf4 100644 --- a/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn +++ b/crc/static/bpmn/ids_full_submission/ids_full_submission.bpmn @@ -217,7 +217,8 @@ Protocol Owner: **(need to insert value here)** SequenceFlow_1dexemq Flow_1x9d2mo - #! StudyInfo documents + StudyInfo = {} +StudyInfo['documents'] = study_info('documents') diff --git a/crc/static/bpmn/ind_update/ind_update.bpmn b/crc/static/bpmn/ind_update/ind_update.bpmn index 528a87ce..1d288dc6 100644 --- a/crc/static/bpmn/ind_update/ind_update.bpmn +++ b/crc/static/bpmn/ind_update/ind_update.bpmn @@ -12,7 +12,8 @@ SequenceFlow_1dhb8f4 SequenceFlow_1uzcl1f - #! StudyInfo details + StudyInfo = {} +StudyInfo['details'] = study_info('details') diff --git a/crc/static/bpmn/irb_api_details/irb_api_details.bpmn b/crc/static/bpmn/irb_api_details/irb_api_details.bpmn index b4f540f5..9e0b6271 100644 --- a/crc/static/bpmn/irb_api_details/irb_api_details.bpmn +++ b/crc/static/bpmn/irb_api_details/irb_api_details.bpmn @@ -8,7 +8,8 @@ SequenceFlow_1fmyo77 SequenceFlow_18nr0gf - #! 
StudyInfo details + StudyInfo = {} +StudyInfo['details'] = study_info('details') diff --git a/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn b/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn index a5258cbe..c347fd93 100644 --- a/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn +++ b/crc/static/bpmn/irb_api_personnel/irb_api_personnel.bpmn @@ -7,7 +7,8 @@ Flow_0kcrx5l Flow_1dcsioh - #! StudyInfo investigators + StudyInfo = {} +StudyInfo['investigators'] = study_info('investigators') ## The following information was gathered: diff --git a/crc/static/bpmn/research_rampup/research_rampup.bpmn b/crc/static/bpmn/research_rampup/research_rampup.bpmn index 4a04eb6d..5703daaf 100644 --- a/crc/static/bpmn/research_rampup/research_rampup.bpmn +++ b/crc/static/bpmn/research_rampup/research_rampup.bpmn @@ -598,7 +598,7 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur This step is internal to the system and do not require and user interaction Flow_11uqavk Flow_0aqgwvu - #! CompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP + complete_template('ResearchRampUpPlan.docx','RESEARCH_RAMPUP')/bpmn:script> @@ -764,7 +764,7 @@ This step is internal to the system and do not require and user interaction Flow_16y8glw Flow_0uc4o6c - #! UpdateStudy title:PIComputingID.label pi:PIComputingID.value + update_study('title:PIComputingID.label','pi:PIComputingID.value') #### Weekly Personnel Schedule(s) diff --git a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn index 23d6ff72..61e59156 100644 --- a/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn +++ b/crc/static/bpmn/top_level_workflow/top_level_workflow.bpmn @@ -11,7 +11,8 @@ SequenceFlow_1ees8ka SequenceFlow_17ct47v - #! StudyInfo documents + StudyInfo = {} +StudyInfo['documents'] = study_info('documents') Flow_1m8285h @@ -62,7 +63,8 @@ Flow_0pwtiqm Flow_0eq6px2 - #! 
StudyInfo details + StudyInfo = {} +StudyInfo['details'] = study_info('details') Flow_14ce1d7 @@ -91,7 +93,8 @@ Flow_1qyrmzn Flow_0vo6ul1 - #! StudyInfo investigators + StudyInfo = {} +StudyInfo['investigators'] = study_info('investigators') diff --git a/tests/data/docx/docx.bpmn b/tests/data/docx/docx.bpmn index 8c741114..e5b0cdcf 100644 --- a/tests/data/docx/docx.bpmn +++ b/tests/data/docx/docx.bpmn @@ -27,7 +27,7 @@ SequenceFlow_1i7hk1a SequenceFlow_11c35oq - #! CompleteTemplate Letter.docx AD_CoCApp + complete_template('Letter.docx AD_CoCApp')/bpmn:script> SequenceFlow_11c35oq diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn index 11ecec2e..3395e788 100644 --- a/tests/data/email/email.bpmn +++ b/tests/data/email/email.bpmn @@ -20,7 +20,7 @@ Email content to be delivered to {{ ApprvlApprvr1 }} --- Flow_08n2npe Flow_1xlrgne - #! Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID + email("Camunda Email Subject",'ApprvlApprvr1','PIComputingID') diff --git a/tests/data/invalid_script/invalid_script.bpmn b/tests/data/invalid_script/invalid_script.bpmn index b85e2bc4..af470aa1 100644 --- a/tests/data/invalid_script/invalid_script.bpmn +++ b/tests/data/invalid_script/invalid_script.bpmn @@ -11,7 +11,7 @@ SequenceFlow_1pnq3kg SequenceFlow_12pf6um - #! NoSuchScript withArg1 + no_such_script('withArg1') diff --git a/tests/data/multi_instance/multi_instance.bpmn b/tests/data/multi_instance/multi_instance.bpmn index c1e610a5..50674246 100644 --- a/tests/data/multi_instance/multi_instance.bpmn +++ b/tests/data/multi_instance/multi_instance.bpmn @@ -29,7 +29,8 @@ Flow_0t6p1sb SequenceFlow_1p568pp - #! 
StudyInfo investigators + StudyInfo = {} +StudyInfo['investigators'] = study_info('investigators') diff --git a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn index d20c8499..0c4ff40c 100644 --- a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn +++ b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn @@ -29,7 +29,8 @@ Flow_0t6p1sb SequenceFlow_1p568pp - #! StudyInfo investigators + StudyInfo = {} +StudyInfo['investigators'] = study_info('investigators') diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn index d5ffcbed..234dd160 100644 --- a/tests/data/random_fact/random_fact.bpmn +++ b/tests/data/random_fact/random_fact.bpmn @@ -132,7 +132,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see) SequenceFlow_0641sh6 SequenceFlow_0t29gjo - #! FactService + fact_service() # Great Job! diff --git a/tests/data/study_details/study_details.bpmn b/tests/data/study_details/study_details.bpmn index 888a28d9..62714433 100644 --- a/tests/data/study_details/study_details.bpmn +++ b/tests/data/study_details/study_details.bpmn @@ -8,7 +8,8 @@ SequenceFlow_1nfe5m9 SequenceFlow_1bqiin0 - #! StudyInfo info + StudyInfo = {} +StudyInfo['info'] = study_info('info') @@ -18,7 +19,7 @@ SequenceFlow_1bqiin0 Flow_0ochvmi - study = studyInfo('info','p') + study = study_info('info','p') diff --git a/tests/data/top_level_workflow/top_level_workflow.bpmn b/tests/data/top_level_workflow/top_level_workflow.bpmn index 8b1bb888..3cb74fd9 100644 --- a/tests/data/top_level_workflow/top_level_workflow.bpmn +++ b/tests/data/top_level_workflow/top_level_workflow.bpmn @@ -11,7 +11,8 @@ SequenceFlow_1ees8ka SequenceFlow_17ct47v - #! 
StudyInfo documents + StudyInfo = {} +StudyInfo['documents'] = study_info('documents') Flow_1m8285h From 824582dab19467355530f2595f9d506c50334edd Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 11:25:29 -0400 Subject: [PATCH 23/60] Hot fix to correct for a failing test due to updates in Spiffworkflow library, and modifying the token authorization so that we can log in as different users when not in production mode. --- crc/api/user.py | 12 +++++++----- tests/test_user_roles.py | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/crc/api/user.py b/crc/api/user.py index a298808d..fc86bd02 100644 --- a/crc/api/user.py +++ b/crc/api/user.py @@ -31,10 +31,6 @@ def verify_token(token=None): failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate", status_code=403) - if not _is_production() and (token is None or 'user' not in g): - g.user = UserModel.query.first() - token = g.user.encode_auth_token() - if token: try: token_info = UserModel.decode_auth_token(token) @@ -47,7 +43,7 @@ def verify_token(token=None): raise failure_error # If there's no token and we're in production, get the user from the SSO headers and return their token - if not token and _is_production(): + elif _is_production(): uid = _get_request_uid(request) if uid is not None: @@ -63,6 +59,12 @@ def verify_token(token=None): raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.", status_code=403) + else: + # Fall back to a default user if this is not production. 
+ g.user = UserModel.query.first() + token = g.user.encode_auth_token() + + def verify_token_admin(token=None): """ diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 6104641c..8a0ea8ae 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -178,7 +178,7 @@ class TestTasksApi(BaseTest): workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) nav = workflow_api.navigation self.assertEquals(5, len(nav)) - self.assertEquals('COMPLETED', nav[0]['state']) # We still have some issues here, the navigation will be off when looping back. + self.assertEquals('READY', nav[0]['state']) # When you loop back the task is again in the ready state. self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user. self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked. self.assertEquals('READY', workflow_api.next_task.state) From 70ad3872a7ca84b8dda943faec0b645d6939b216 Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Mon, 27 Jul 2020 12:02:34 -0400 Subject: [PATCH 24/60] Fix several bugs, most had an issue with the bpmn document --- crc/services/workflow_processor.py | 15 +++++++++++---- .../data_security_plan/data_security_plan.bpmn | 2 +- .../bpmn/research_rampup/research_rampup.bpmn | 4 ++-- tests/data/docx/docx.bpmn | 2 +- .../multi_instance_parallel.bpmn | 2 +- tests/data/random_fact/random_fact.bpmn | 2 +- .../workflow/test_workflow_spec_validation_api.py | 4 ++-- 7 files changed, 19 insertions(+), 12 deletions(-) diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index d29c3a0e..55100afd 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -53,10 +53,17 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): super().execute(task, script, data, externalMethods=augmentMethods) except SyntaxError as e: - raise 
ApiError.from_task('syntax_error', - f'If you are running a pre-defined script, please' - f' proceed the script with "#!", otherwise this is assumed to be' - f' pure python: {script}, {e.msg}', task=task) + del(task.data['task']) + raise ApiError('syntax_error', + f'Something is wrong with your python script ' + f'please correct the following:' + f' {script}, {e.msg}') + except NameError as e: + del(task.data['task']) + raise ApiError('name_error', + f'something you are referencing does not exist:' + f' {script}, {e.name}') + # else: # self.run_predefined_script(task, script[2:], data) # strip off the first two characters. diff --git a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn index 3bf309b7..87b931b9 100644 --- a/crc/static/bpmn/data_security_plan/data_security_plan.bpmn +++ b/crc/static/bpmn/data_security_plan/data_security_plan.bpmn @@ -453,7 +453,7 @@ Indicate all the possible formats in which you will transmit your data outside o SequenceFlow_0k2r83n SequenceFlow_0t6xl9i SequenceFlow_16kyite - complete_template('NEW_DSP_template.docx','Study_DataSecurityPlan')/bpmn:script> + complete_template('NEW_DSP_template.docx','Study_DataSecurityPlan') ##### Instructions diff --git a/crc/static/bpmn/research_rampup/research_rampup.bpmn b/crc/static/bpmn/research_rampup/research_rampup.bpmn index 5703daaf..eaa1dab7 100644 --- a/crc/static/bpmn/research_rampup/research_rampup.bpmn +++ b/crc/static/bpmn/research_rampup/research_rampup.bpmn @@ -598,7 +598,7 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur This step is internal to the system and do not require and user interaction Flow_11uqavk Flow_0aqgwvu - complete_template('ResearchRampUpPlan.docx','RESEARCH_RAMPUP')/bpmn:script> + complete_template('ResearchRampUpPlan.docx','RESEARCH_RAMPUP') @@ -755,7 +755,7 @@ Notify the Area Monitor for This step is internal to the system and do not require and user 
interaction Flow_0j4rs82 Flow_07ge8uf - #!RequestApproval ApprvlApprvr1 ApprvlApprvr2 + request_approval('ApprvlApprvr1','ApprvlApprvr2') #### Script Task diff --git a/tests/data/docx/docx.bpmn b/tests/data/docx/docx.bpmn index e5b0cdcf..fe11b7c5 100644 --- a/tests/data/docx/docx.bpmn +++ b/tests/data/docx/docx.bpmn @@ -27,7 +27,7 @@ SequenceFlow_1i7hk1a SequenceFlow_11c35oq - complete_template('Letter.docx AD_CoCApp')/bpmn:script> + complete_template('Letter.docx','AD_CoCApp') SequenceFlow_11c35oq diff --git a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn index 0c4ff40c..0c31670e 100644 --- a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn +++ b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn @@ -11,7 +11,7 @@ # Please provide addtional information about: -## Investigator ID: {{investigator.user_id}} +## Investigator ID: {{investigator.user_id}} ## Role: {{investigator.type_full}} diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn index 234dd160..6bee51b7 100644 --- a/tests/data/random_fact/random_fact.bpmn +++ b/tests/data/random_fact/random_fact.bpmn @@ -132,7 +132,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see) SequenceFlow_0641sh6 SequenceFlow_0t29gjo - fact_service() + FactService = fact_service() # Great Job! 
diff --git a/tests/workflow/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py index 0c17892e..da389168 100644 --- a/tests/workflow/test_workflow_spec_validation_api.py +++ b/tests/workflow/test_workflow_spec_validation_api.py @@ -89,7 +89,7 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script") self.assertEqual(2, len(errors)) - self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("workflow_validation_exception", errors[0]['code']) self.assertTrue("NoSuchScript" in errors[0]['message']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) @@ -99,7 +99,7 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script2") self.assertEqual(2, len(errors)) - self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("workflow_validation_exception", errors[0]['code']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) self.assertEqual("invalid_script2.bpmn", errors[0]['file_name']) From 2979a4ef5baaa46834aef088740aa44209d29aab Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 12:08:26 -0400 Subject: [PATCH 25/60] Don't use editable in the pipfile unless you are pointing to a local directory. 
--- Pipfile | 2 +- Pipfile.lock | 327 +++++++++++++++++---------------------------------- 2 files changed, 109 insertions(+), 220 deletions(-) diff --git a/Pipfile b/Pipfile index 6b28197a..a40f0e71 100644 --- a/Pipfile +++ b/Pipfile @@ -38,7 +38,7 @@ recommonmark = "*" requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} sphinx = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} +spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} #spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"} swagger-ui-bundle = "*" webtest = "*" diff --git a/Pipfile.lock b/Pipfile.lock index bd8581a5..72657ab7 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "97a15c4ade88db2b384d52436633889a4d9b0bdcaeea86b8a679ebda6f73fb59" + "sha256": "2057a84011229daa6b8a9491d729a0bae5225e6ce11c7ca45136d3c1fad85ec0" }, "pipfile-spec": 6, "requires": { @@ -30,13 +30,6 @@ "index": "pypi", "version": "==1.4.2" }, - "amqp": { - "hashes": [ - "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", - "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" - ], - "version": "==2.6.0" - }, "aniso8601": { "hashes": [ "sha256:529dcb1f5f26ee0df6c0a1ee84b7b27197c3c50fc3a6321d66c544689237d072", @@ -89,26 +82,12 @@ ], "version": "==4.9.1" }, - "billiard": { - "hashes": [ - "sha256:bff575450859a6e0fbc2f9877d9b715b0bbc07c3565bb7ed2280526a0cdf5ede", - "sha256:d91725ce6425f33a97dfa72fb6bfef0e47d4652acd98a032bd1a7fbf06d5fa6a" - ], - "version": "==3.6.3.0" - }, "blinker": { "hashes": [ "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6" ], "version": "==1.4" }, - "celery": { - "hashes": [ - "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", - "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" - ], - "version": "==4.4.6" - }, 
"certifi": { "hashes": [ "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", @@ -118,36 +97,36 @@ }, "cffi": { "hashes": [ - "sha256:001bf3242a1bb04d985d63e138230802c6c8d4db3668fb545fb5005ddf5bb5ff", - "sha256:00789914be39dffba161cfc5be31b55775de5ba2235fe49aa28c148236c4e06b", - "sha256:028a579fc9aed3af38f4892bdcc7390508adabc30c6af4a6e4f611b0c680e6ac", - "sha256:14491a910663bf9f13ddf2bc8f60562d6bc5315c1f09c704937ef17293fb85b0", - "sha256:1cae98a7054b5c9391eb3249b86e0e99ab1e02bb0cc0575da191aedadbdf4384", - "sha256:2089ed025da3919d2e75a4d963d008330c96751127dd6f73c8dc0c65041b4c26", - "sha256:2d384f4a127a15ba701207f7639d94106693b6cd64173d6c8988e2c25f3ac2b6", - "sha256:337d448e5a725bba2d8293c48d9353fc68d0e9e4088d62a9571def317797522b", - "sha256:399aed636c7d3749bbed55bc907c3288cb43c65c4389964ad5ff849b6370603e", - "sha256:3b911c2dbd4f423b4c4fcca138cadde747abdb20d196c4a48708b8a2d32b16dd", - "sha256:3d311bcc4a41408cf5854f06ef2c5cab88f9fded37a3b95936c9879c1640d4c2", - "sha256:62ae9af2d069ea2698bf536dcfe1e4eed9090211dbaafeeedf5cb6c41b352f66", - "sha256:66e41db66b47d0d8672d8ed2708ba91b2f2524ece3dee48b5dfb36be8c2f21dc", - "sha256:675686925a9fb403edba0114db74e741d8181683dcf216be697d208857e04ca8", - "sha256:7e63cbcf2429a8dbfe48dcc2322d5f2220b77b2e17b7ba023d6166d84655da55", - "sha256:8a6c688fefb4e1cd56feb6c511984a6c4f7ec7d2a1ff31a10254f3c817054ae4", - "sha256:8c0ffc886aea5df6a1762d0019e9cb05f825d0eec1f520c51be9d198701daee5", - "sha256:95cd16d3dee553f882540c1ffe331d085c9e629499ceadfbda4d4fde635f4b7d", - "sha256:99f748a7e71ff382613b4e1acc0ac83bf7ad167fb3802e35e90d9763daba4d78", - "sha256:b8c78301cefcf5fd914aad35d3c04c2b21ce8629b5e4f4e45ae6812e461910fa", - "sha256:c420917b188a5582a56d8b93bdd8e0f6eca08c84ff623a4c16e809152cd35793", - "sha256:c43866529f2f06fe0edc6246eb4faa34f03fe88b64a0a9a942561c8e22f4b71f", - "sha256:cab50b8c2250b46fe738c77dbd25ce017d5e6fb35d3407606e7a4180656a5a6a", - "sha256:cef128cb4d5e0b3493f058f10ce32365972c554572ff821e175dbc6f8ff6924f", - 
"sha256:cf16e3cf6c0a5fdd9bc10c21687e19d29ad1fe863372b5543deaec1039581a30", - "sha256:e56c744aa6ff427a607763346e4170629caf7e48ead6921745986db3692f987f", - "sha256:e577934fc5f8779c554639376beeaa5657d54349096ef24abe8c74c5d9c117c3", - "sha256:f2b0fa0c01d8a0c7483afd9f31d7ecf2d71760ca24499c8697aeb5ca37dc090c" + "sha256:267adcf6e68d77ba154334a3e4fc921b8e63cbb38ca00d33d40655d4228502bc", + "sha256:26f33e8f6a70c255767e3c3f957ccafc7f1f706b966e110b855bfe944511f1f9", + "sha256:3cd2c044517f38d1b577f05927fb9729d3396f1d44d0c659a445599e79519792", + "sha256:4a03416915b82b81af5502459a8a9dd62a3c299b295dcdf470877cb948d655f2", + "sha256:4ce1e995aeecf7cc32380bc11598bfdfa017d592259d5da00fc7ded11e61d022", + "sha256:4f53e4128c81ca3212ff4cf097c797ab44646a40b42ec02a891155cd7a2ba4d8", + "sha256:4fa72a52a906425416f41738728268072d5acfd48cbe7796af07a923236bcf96", + "sha256:66dd45eb9530e3dde8f7c009f84568bc7cac489b93d04ac86e3111fb46e470c2", + "sha256:6923d077d9ae9e8bacbdb1c07ae78405a9306c8fd1af13bfa06ca891095eb995", + "sha256:833401b15de1bb92791d7b6fb353d4af60dc688eaa521bd97203dcd2d124a7c1", + "sha256:8416ed88ddc057bab0526d4e4e9f3660f614ac2394b5e019a628cdfff3733849", + "sha256:892daa86384994fdf4856cb43c93f40cbe80f7f95bb5da94971b39c7f54b3a9c", + "sha256:98be759efdb5e5fa161e46d404f4e0ce388e72fbf7d9baf010aff16689e22abe", + "sha256:a6d28e7f14ecf3b2ad67c4f106841218c8ab12a0683b1528534a6c87d2307af3", + "sha256:b1d6ebc891607e71fd9da71688fcf332a6630b7f5b7f5549e6e631821c0e5d90", + "sha256:b2a2b0d276a136146e012154baefaea2758ef1f56ae9f4e01c612b0831e0bd2f", + "sha256:b87dfa9f10a470eee7f24234a37d1d5f51e5f5fa9eeffda7c282e2b8f5162eb1", + "sha256:bac0d6f7728a9cc3c1e06d4fcbac12aaa70e9379b3025b27ec1226f0e2d404cf", + "sha256:c991112622baee0ae4d55c008380c32ecfd0ad417bcd0417ba432e6ba7328caa", + "sha256:cda422d54ee7905bfc53ee6915ab68fe7b230cacf581110df4272ee10462aadc", + "sha256:d3148b6ba3923c5850ea197a91a42683f946dba7e8eb82dfa211ab7e708de939", + "sha256:d6033b4ffa34ef70f0b8086fd4c3df4bf801fee485a8a7d4519399818351aa8e", 
+ "sha256:ddff0b2bd7edcc8c82d1adde6dbbf5e60d57ce985402541cd2985c27f7bec2a0", + "sha256:e23cb7f1d8e0f93addf0cae3c5b6f00324cccb4a7949ee558d7b6ca973ab8ae9", + "sha256:effd2ba52cee4ceff1a77f20d2a9f9bf8d50353c854a282b8760ac15b9833168", + "sha256:f90c2267101010de42f7273c94a1f026e56cbc043f9330acd8a80e64300aba33", + "sha256:f960375e9823ae6a07072ff7f8a85954e5a6434f97869f50d0e41649a1c8144f", + "sha256:fcf32bf76dc25e30ed793145a57426064520890d7c02866eb93d3e4abe516948" ], - "version": "==1.14.0" + "version": "==1.14.1" }, "chardet": { "hashes": [ @@ -177,13 +156,6 @@ ], "version": "==0.9.1" }, - "configparser": { - "hashes": [ - "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", - "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" - ], - "version": "==5.0.0" - }, "connexion": { "extras": [ "swagger-ui" @@ -197,43 +169,43 @@ }, "coverage": { "hashes": [ - "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d", - "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2", - "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703", - "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404", - "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7", - "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405", - "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d", - "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c", - "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6", - "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70", - "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40", - "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4", - "sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613", - "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10", - 
"sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b", - "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0", - "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec", - "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1", - "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d", - "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913", - "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e", - "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62", - "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e", - "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a", - "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d", - "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f", - "sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e", - "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b", - "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c", - "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032", - "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a", - "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee", - "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c", - "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b" + "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb", + "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3", + "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716", + "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034", + "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3", + "sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8", 
+ "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0", + "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f", + "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4", + "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962", + "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d", + "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b", + "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4", + "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3", + "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258", + "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59", + "sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01", + "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd", + "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b", + "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d", + "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89", + "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd", + "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b", + "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d", + "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46", + "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546", + "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082", + "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b", + "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4", + "sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8", + "sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811", + 
"sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd", + "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651", + "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0" ], "index": "pypi", - "version": "==5.2" + "version": "==5.2.1" }, "docutils": { "hashes": [ @@ -324,12 +296,6 @@ ], "version": "==2.4.4" }, - "future": { - "hashes": [ - "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" - ], - "version": "==0.18.2" - }, "gunicorn": { "hashes": [ "sha256:1904bb2b8a43658807108d59c3f3d56c2b6121a701161de0ddf9ad140073c626", @@ -402,13 +368,6 @@ ], "version": "==3.2.0" }, - "kombu": { - "hashes": [ - "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", - "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" - ], - "version": "==4.6.11" - }, "ldap3": { "hashes": [ "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", @@ -564,11 +523,11 @@ }, "openapi-spec-validator": { "hashes": [ - "sha256:0caacd9829e9e3051e830165367bf58d436d9487b29a09220fa7edb9f47ff81b", - "sha256:d4da8aef72bf5be40cf0df444abd20009a41baf9048a8e03750c07a934f1bdd8", - "sha256:e489c7a273284bc78277ac22791482e8058d323b4a265015e9fcddf6a8045bcd" + "sha256:6dd75e50c94f1bb454d0e374a56418e7e06a07affb2c7f1df88564c5d728dac3", + "sha256:79381a69b33423ee400ae1624a461dae7725e450e2e306e32f2dd8d16a4d85cb", + "sha256:ec1b01a00e20955a527358886991ae34b4b791b253027ee9f7df5f84b59d91c7" ], - "version": "==0.2.8" + "version": "==0.2.9" }, "openpyxl": { "hashes": [ @@ -685,13 +644,6 @@ ], "version": "==0.16.0" }, - "python-box": { - "hashes": [ - "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", - "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" - ], - "version": "==5.0.1" - }, "python-dateutil": { "hashes": [ "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", @@ -714,61 +666,6 @@ ], "version": "==1.0.4" }, - 
"python-levenshtein-wheels": { - "hashes": [ - "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d", - "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92", - "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc", - "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb", - "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b", - "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc", - "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1", - "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40", - "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31", - "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d", - "sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10", - "sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103", - "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4", - "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6", - "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f", - "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0", - "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399", - "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0", - "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f", - "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5", - "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2", - "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c", - "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7", - "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3", - "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55", - 
"sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e", - "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362", - "sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662", - "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8", - "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1", - "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0", - "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884", - "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021", - "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2", - "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa", - "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f", - "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab", - "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263", - "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610", - "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa", - "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d", - "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4", - "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92", - "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a", - "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26", - "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99", - "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e", - "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8", - "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244", - "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c" - ], - "version": "==0.13.1" - }, "pytz": { "hashes": [ 
"sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", @@ -891,9 +788,8 @@ "version": "==1.1.4" }, "spiffworkflow": { - "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "74529738b4e16be5aadd846669a201560f81a6d4" + "ref": "11ad40bbcb0fbd3c5bc1078e4989dc38b749f7f3" }, "sqlalchemy": { "hashes": [ @@ -938,17 +834,10 @@ }, "urllib3": { "hashes": [ - "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", - "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" + "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", + "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], - "version": "==1.25.9" - }, - "vine": { - "hashes": [ - "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", - "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" - ], - "version": "==1.3.0" + "version": "==1.25.10" }, "waitress": { "hashes": [ @@ -1021,43 +910,43 @@ }, "coverage": { "hashes": [ - "sha256:0fc4e0d91350d6f43ef6a61f64a48e917637e1dcfcba4b4b7d543c628ef82c2d", - "sha256:10f2a618a6e75adf64329f828a6a5b40244c1c50f5ef4ce4109e904e69c71bd2", - "sha256:12eaccd86d9a373aea59869bc9cfa0ab6ba8b1477752110cb4c10d165474f703", - "sha256:1874bdc943654ba46d28f179c1846f5710eda3aeb265ff029e0ac2b52daae404", - "sha256:1dcebae667b73fd4aa69237e6afb39abc2f27520f2358590c1b13dd90e32abe7", - "sha256:1e58fca3d9ec1a423f1b7f2aa34af4f733cbfa9020c8fe39ca451b6071237405", - "sha256:214eb2110217f2636a9329bc766507ab71a3a06a8ea30cdeebb47c24dce5972d", - "sha256:25fe74b5b2f1b4abb11e103bb7984daca8f8292683957d0738cd692f6a7cc64c", - "sha256:32ecee61a43be509b91a526819717d5e5650e009a8d5eda8631a59c721d5f3b6", - "sha256:3740b796015b889e46c260ff18b84683fa2e30f0f75a171fb10d2bf9fb91fc70", - "sha256:3b2c34690f613525672697910894b60d15800ac7e779fbd0fccf532486c1ba40", - "sha256:41d88736c42f4a22c494c32cc48a05828236e37c991bd9760f8923415e3169e4", - 
"sha256:42fa45a29f1059eda4d3c7b509589cc0343cd6bbf083d6118216830cd1a51613", - "sha256:4bb385a747e6ae8a65290b3df60d6c8a692a5599dc66c9fa3520e667886f2e10", - "sha256:509294f3e76d3f26b35083973fbc952e01e1727656d979b11182f273f08aa80b", - "sha256:5c74c5b6045969b07c9fb36b665c9cac84d6c174a809fc1b21bdc06c7836d9a0", - "sha256:60a3d36297b65c7f78329b80120f72947140f45b5c7a017ea730f9112b40f2ec", - "sha256:6f91b4492c5cde83bfe462f5b2b997cdf96a138f7c58b1140f05de5751623cf1", - "sha256:7403675df5e27745571aba1c957c7da2dacb537c21e14007ec3a417bf31f7f3d", - "sha256:87bdc8135b8ee739840eee19b184804e5d57f518578ffc797f5afa2c3c297913", - "sha256:8a3decd12e7934d0254939e2bf434bf04a5890c5bf91a982685021786a08087e", - "sha256:9702e2cb1c6dec01fb8e1a64c015817c0800a6eca287552c47a5ee0ebddccf62", - "sha256:a4d511012beb967a39580ba7d2549edf1e6865a33e5fe51e4dce550522b3ac0e", - "sha256:bbb387811f7a18bdc61a2ea3d102be0c7e239b0db9c83be7bfa50f095db5b92a", - "sha256:bfcc811883699ed49afc58b1ed9f80428a18eb9166422bce3c31a53dba00fd1d", - "sha256:c32aa13cc3fe86b0f744dfe35a7f879ee33ac0a560684fef0f3e1580352b818f", - "sha256:ca63dae130a2e788f2b249200f01d7fa240f24da0596501d387a50e57aa7075e", - "sha256:d54d7ea74cc00482a2410d63bf10aa34ebe1c49ac50779652106c867f9986d6b", - "sha256:d67599521dff98ec8c34cd9652cbcfe16ed076a2209625fca9dc7419b6370e5c", - "sha256:d82db1b9a92cb5c67661ca6616bdca6ff931deceebb98eecbd328812dab52032", - "sha256:d9ad0a988ae20face62520785ec3595a5e64f35a21762a57d115dae0b8fb894a", - "sha256:ebf2431b2d457ae5217f3a1179533c456f3272ded16f8ed0b32961a6d90e38ee", - "sha256:ed9a21502e9223f563e071759f769c3d6a2e1ba5328c31e86830368e8d78bc9c", - "sha256:f50632ef2d749f541ca8e6c07c9928a37f87505ce3a9f20c8446ad310f1aa87b" + "sha256:098a703d913be6fbd146a8c50cc76513d726b022d170e5e98dc56d958fd592fb", + "sha256:16042dc7f8e632e0dcd5206a5095ebd18cb1d005f4c89694f7f8aafd96dd43a3", + "sha256:1adb6be0dcef0cf9434619d3b892772fdb48e793300f9d762e480e043bd8e716", + "sha256:27ca5a2bc04d68f0776f2cdcb8bbd508bbe430a7bf9c02315cd05fb1d86d0034", 
+ "sha256:28f42dc5172ebdc32622a2c3f7ead1b836cdbf253569ae5673f499e35db0bac3", + "sha256:2fcc8b58953d74d199a1a4d633df8146f0ac36c4e720b4a1997e9b6327af43a8", + "sha256:304fbe451698373dc6653772c72c5d5e883a4aadaf20343592a7abb2e643dae0", + "sha256:30bc103587e0d3df9e52cd9da1dd915265a22fad0b72afe54daf840c984b564f", + "sha256:40f70f81be4d34f8d491e55936904db5c527b0711b2a46513641a5729783c2e4", + "sha256:4186fc95c9febeab5681bc3248553d5ec8c2999b8424d4fc3a39c9cba5796962", + "sha256:46794c815e56f1431c66d81943fa90721bb858375fb36e5903697d5eef88627d", + "sha256:4869ab1c1ed33953bb2433ce7b894a28d724b7aa76c19b11e2878034a4e4680b", + "sha256:4f6428b55d2916a69f8d6453e48a505c07b2245653b0aa9f0dee38785939f5e4", + "sha256:52f185ffd3291196dc1aae506b42e178a592b0b60a8610b108e6ad892cfc1bb3", + "sha256:538f2fd5eb64366f37c97fdb3077d665fa946d2b6d95447622292f38407f9258", + "sha256:64c4f340338c68c463f1b56e3f2f0423f7b17ba6c3febae80b81f0e093077f59", + "sha256:675192fca634f0df69af3493a48224f211f8db4e84452b08d5fcebb9167adb01", + "sha256:700997b77cfab016533b3e7dbc03b71d33ee4df1d79f2463a318ca0263fc29dd", + "sha256:8505e614c983834239f865da2dd336dcf9d72776b951d5dfa5ac36b987726e1b", + "sha256:962c44070c281d86398aeb8f64e1bf37816a4dfc6f4c0f114756b14fc575621d", + "sha256:9e536783a5acee79a9b308be97d3952b662748c4037b6a24cbb339dc7ed8eb89", + "sha256:9ea749fd447ce7fb1ac71f7616371f04054d969d412d37611716721931e36efd", + "sha256:a34cb28e0747ea15e82d13e14de606747e9e484fb28d63c999483f5d5188e89b", + "sha256:a3ee9c793ffefe2944d3a2bd928a0e436cd0ac2d9e3723152d6fd5398838ce7d", + "sha256:aab75d99f3f2874733946a7648ce87a50019eb90baef931698f96b76b6769a46", + "sha256:b1ed2bdb27b4c9fc87058a1cb751c4df8752002143ed393899edb82b131e0546", + "sha256:b360d8fd88d2bad01cb953d81fd2edd4be539df7bfec41e8753fe9f4456a5082", + "sha256:b8f58c7db64d8f27078cbf2a4391af6aa4e4767cc08b37555c4ae064b8558d9b", + "sha256:c1bbb628ed5192124889b51204de27c575b3ffc05a5a91307e7640eff1d48da4", + 
"sha256:c2ff24df02a125b7b346c4c9078c8936da06964cc2d276292c357d64378158f8", + "sha256:c890728a93fffd0407d7d37c1e6083ff3f9f211c83b4316fae3778417eab9811", + "sha256:c96472b8ca5dc135fb0aa62f79b033f02aa434fb03a8b190600a5ae4102df1fd", + "sha256:ce7866f29d3025b5b34c2e944e66ebef0d92e4a4f2463f7266daa03a1332a651", + "sha256:e26c993bd4b220429d4ec8c1468eca445a4064a61c74ca08da7429af9bc53bb0" ], "index": "pypi", - "version": "==5.2" + "version": "==5.2.1" }, "importlib-metadata": { "hashes": [ From cc55aed89cc4ed1a078c80121ea0715172ac2f6d Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Mon, 27 Jul 2020 12:18:28 -0400 Subject: [PATCH 26/60] Change exception name --- crc/scripts/study_info.py | 4 ++-- tests/workflow/test_workflow_spec_validation_api.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index c392d40c..6daf91ec 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -196,8 +196,8 @@ Returns information specific to the protocol. 
} } } - self.add_data_to_task(task=task, data=data["study"]) - self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) + #self.add_data_to_task(task=task, data=data["study"]) + #self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) def do_task(self, task, study_id, workflow_id, *args, **kwargs): self.check_args(args,2) diff --git a/tests/workflow/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py index da389168..29fd5a14 100644 --- a/tests/workflow/test_workflow_spec_validation_api.py +++ b/tests/workflow/test_workflow_spec_validation_api.py @@ -90,7 +90,7 @@ class TestWorkflowSpecValidation(BaseTest): errors = self.validate_workflow("invalid_script") self.assertEqual(2, len(errors)) self.assertEqual("workflow_validation_exception", errors[0]['code']) - self.assertTrue("NoSuchScript" in errors[0]['message']) + #self.assertTrue("NoSuchScript" in errors[0]['message']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) self.assertEqual("invalid_script.bpmn", errors[0]['file_name']) From 6379b26a71d8534006c2ce15b9c90e9a5924f5a8 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 21 Jul 2020 15:18:08 -0400 Subject: [PATCH 27/60] Add a few more details to the workflow metadata model. 
--- crc/models/study.py | 7 +++++-- crc/models/task_event.py | 1 - example_data.py | 1 - tests/base_test.py | 19 ++++++++++++++----- tests/study/test_study_service.py | 9 ++++----- tests/test_user_roles.py | 3 ++- 6 files changed, 25 insertions(+), 15 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index 47d4eb8f..7bb2db33 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -40,7 +40,7 @@ class StudyModel(db.Model): class WorkflowMetadata(object): - def __init__(self, id, name, display_name, description, spec_version, category_id, state: WorkflowState, status: WorkflowStatus, + def __init__(self, id, name, display_name, description, spec_version, category_id, category_display_name, state: WorkflowState, status: WorkflowStatus, total_tasks, completed_tasks, display_order): self.id = id self.name = name @@ -48,6 +48,7 @@ class WorkflowMetadata(object): self.description = description self.spec_version = spec_version self.category_id = category_id + self.category_display_name = category_display_name self.state = state self.status = status self.total_tasks = total_tasks @@ -64,6 +65,7 @@ class WorkflowMetadata(object): description=workflow.workflow_spec.description, spec_version=workflow.spec_version(), category_id=workflow.workflow_spec.category_id, + category_display_name=workflow.workflow_spec.category.display_name, state=WorkflowState.optional, status=workflow.status, total_tasks=workflow.total_tasks, @@ -79,7 +81,8 @@ class WorkflowMetadataSchema(ma.Schema): class Meta: model = WorkflowMetadata additional = ["id", "name", "display_name", "description", - "total_tasks", "completed_tasks", "display_order"] + "total_tasks", "completed_tasks", "display_order", + "category_id", "category_display_name"] unknown = INCLUDE diff --git a/crc/models/task_event.py b/crc/models/task_event.py index a6cb1a2d..e3914468 100644 --- a/crc/models/task_event.py +++ b/crc/models/task_event.py @@ -56,7 +56,6 @@ class TaskEventSchema(ma.Schema): study = 
fields.Nested(StudySchema, dump_only=True) workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True) - class Meta: model = TaskEvent additional = ["id", "user_uid", "action", "task_id", "task_title", diff --git a/example_data.py b/example_data.py index efdfe3b3..8b9b0c27 100644 --- a/example_data.py +++ b/example_data.py @@ -251,7 +251,6 @@ class ExampleDataLoader: master_spec=False, from_tests=True) - def create_spec(self, id, name, display_name="", description="", filepath=None, master_spec=False, category_id=None, display_order=None, from_tests=False): """Assumes that a directory exists in static/bpmn with the same name as the given id. diff --git a/tests/base_test.py b/tests/base_test.py index 6ea1966d..3f0b2405 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -19,7 +19,7 @@ from crc.models.protocol_builder import ProtocolBuilderStatus from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel +from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel from crc.services.file_service import FileService from crc.services.study_service import StudyService from crc.services.workflow_service import WorkflowService @@ -164,14 +164,21 @@ class BaseTest(unittest.TestCase): self.assertGreater(len(file_data), 0) @staticmethod - def load_test_spec(dir_name, master_spec=False, category_id=None): + def load_test_spec(dir_name, display_name=None, master_spec=False, category_id=None): """Loads a spec into the database based on a directory in /tests/data""" + if category_id is None: + category = WorkflowSpecCategoryModel(name="test", display_name="Test Workflows", display_order=0) + db.session.add(category) + db.session.commit() + category_id = category.id if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0: return 
session.query(WorkflowSpecModel).filter_by(id=dir_name).first() filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*") + if display_name is None: + display_name = dir_name return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath, master_spec=master_spec, - category_id=category_id) + display_name=display_name, category_id=category_id) @staticmethod def protocol_builder_response(file_name): @@ -263,11 +270,13 @@ class BaseTest(unittest.TestCase): return full_study - def create_workflow(self, workflow_name, study=None, category_id=None, as_user="dhf8r"): + def create_workflow(self, workflow_name, display_name=None, study=None, category_id=None, as_user="dhf8r"): db.session.flush() spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first() if spec is None: - spec = self.load_test_spec(workflow_name, category_id=category_id) + if display_name is None: + display_name = workflow_name + spec = self.load_test_spec(workflow_name, display_name, category_id=category_id) if study is None: study = self.create_study(uid=as_user) workflow_model = StudyService._create_workflow_model(study, spec) diff --git a/tests/study/test_study_service.py b/tests/study/test_study_service.py index b436835f..f1e43c8a 100644 --- a/tests/study/test_study_service.py +++ b/tests/study/test_study_service.py @@ -27,7 +27,10 @@ class TestStudyService(BaseTest): # Assure some basic models are in place, This is a damn mess. Our database models need an overhaul to make # this easier - better relationship modeling is now critical. 
- self.load_test_spec("top_level_workflow", master_spec=True) + cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0) + db.session.add(cat) + db.session.commit() + self.load_test_spec("top_level_workflow", master_spec=True, category_id=cat.id) user = db.session.query(UserModel).filter(UserModel.uid == "dhf8r").first() if not user: user = UserModel(uid="dhf8r", email_address="whatever@stuff.com", display_name="Stayathome Smellalots") @@ -39,11 +42,7 @@ class TestStudyService(BaseTest): study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.ACTIVE, user_uid=user.uid) db.session.add(study) - cat = WorkflowSpecCategoryModel(name="approvals", display_name="Approvals", display_order=0) - db.session.add(cat) - db.session.commit() - self.assertIsNotNone(cat.id) self.load_test_spec("random_fact", category_id=cat.id) self.assertIsNotNone(study.id) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 8a0ea8ae..ed879d2e 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -68,7 +68,7 @@ class TestTasksApi(BaseTest): def test_get_outstanding_tasks_awaiting_current_user(self): submitter = self.create_user(uid='lje5u') supervisor = self.create_user(uid='lb3dp') - workflow = self.create_workflow('roles', as_user=submitter.uid) + workflow = self.create_workflow('roles', display_name="Roles", as_user=submitter.uid) workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) # User lje5u can complete the first task, and set her supervisor @@ -94,6 +94,7 @@ class TestTasksApi(BaseTest): self.assertEquals(1, len(tasks)) self.assertEquals(workflow.id, tasks[0]['workflow']['id']) self.assertEquals(workflow.study.id, tasks[0]['study']['id']) + self.assertEquals("Test Workflows", tasks[0]['workflow']['category_display_name']) # Assure we can say something sensible like: # You have a task called "Approval" to be completed in the "Supervisor Approval" workflow From 
4adb6a1b44ce131fe4eb91d844e90eae5dc1db17 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 09:35:08 -0400 Subject: [PATCH 28/60] Adds enrollment_date to study model --- crc/models/study.py | 7 +- crc/services/workflow_service.py | 5 +- .../bpmn/notifications/notifications.bpmn | 100 +++++++++++++++--- migrations/versions/c4ddb69e7ef4_.py | 28 +++++ 4 files changed, 122 insertions(+), 18 deletions(-) create mode 100644 migrations/versions/c4ddb69e7ef4_.py diff --git a/crc/models/study.py b/crc/models/study.py index 7bb2db33..854ce62f 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -25,6 +25,7 @@ class StudyModel(db.Model): investigator_uids = db.Column(db.ARRAY(db.String), nullable=True) requirements = db.Column(db.ARRAY(db.Integer), nullable=True) on_hold = db.Column(db.Boolean, default=False) + enrollment_date = db.Column(db.DateTime(timezone=True), nullable=True) def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy): self.hsr_number = pbs.HSRNUMBER @@ -108,7 +109,7 @@ class Study(object): id=None, protocol_builder_status=None, sponsor="", hsr_number="", ind_number="", categories=[], - files=[], approvals=[], **argsv): + files=[], approvals=[], enrollment_date=None, **argsv): self.id = id self.user_uid = user_uid self.title = title @@ -122,6 +123,7 @@ class Study(object): self.approvals = approvals self.warnings = [] self.files = files + self.enrollment_date = enrollment_date @classmethod def from_model(cls, study_model: StudyModel): @@ -154,11 +156,12 @@ class StudySchema(ma.Schema): ind_number = fields.String(allow_none=True) files = fields.List(fields.Nested(FileSchema), dump_only=True) approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True) + enrollment_date = fields.Date(allow_none=True) class Meta: model = Study additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid", - "sponsor", "ind_number", "approvals", "files"] + "sponsor", "ind_number", "approvals", "files", 
"enrollment_date"] unknown = INCLUDE @marshmallow.post_load diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 3205e800..65794037 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -154,10 +154,9 @@ class WorkflowService(object): if len(field.options) > 0: random_choice = random.choice(field.options) if isinstance(random_choice, dict): - choice = random.choice(field.options) return { - 'value': choice['id'], - 'label': choice['name'] + 'value': random_choice['id'], + 'label': random_choice['name'] } else: # fixme: why it is sometimes an EnumFormFieldOption, and other times not? diff --git a/crc/static/bpmn/notifications/notifications.bpmn b/crc/static/bpmn/notifications/notifications.bpmn index 4c01a711..cd73505f 100644 --- a/crc/static/bpmn/notifications/notifications.bpmn +++ b/crc/static/bpmn/notifications/notifications.bpmn @@ -1,42 +1,116 @@ - + + + + + + + StartEvent_1 + Activity_1qpy9ra + Event_1m9fnmv + + + Gateway_0ved0t9 + Activity_107ojvq + + Flow_0q51aiq - - + - + Flow_0q51aiq - Flow_0ai4j1x + Flow_11tnx3n + Flow_0d2snmk + + + + Flow_0apr3nj + Flow_0mhtlkt + Flow_11tnx3n + + + + is_study_approved == True + - Flow_0ai4j1x + Flow_0mhtlkt - + + is_study_approved == False + + + + + + + + Flow_0d2snmk + Flow_0apr3nj + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + + + - + + + + diff --git a/migrations/versions/c4ddb69e7ef4_.py b/migrations/versions/c4ddb69e7ef4_.py new file mode 100644 index 00000000..533d2f86 --- /dev/null +++ b/migrations/versions/c4ddb69e7ef4_.py @@ -0,0 +1,28 @@ +"""empty message + +Revision ID: c4ddb69e7ef4 +Revises: ffef4661a37d +Create Date: 2020-07-22 09:04:09.769239 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = 'c4ddb69e7ef4' +down_revision = 'ffef4661a37d' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('study', sa.Column('enrollment_date', sa.DateTime(timezone=True), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('study', 'enrollment_date') + # ### end Alembic commands ### From 91c835906cd768e54076e8d321a1b7942a8ee3f8 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:47:25 -0400 Subject: [PATCH 29/60] Updates package versions. --- Pipfile.lock | 117 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 114 insertions(+), 3 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index 72657ab7..c3fb647d 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -30,6 +30,14 @@ "index": "pypi", "version": "==1.4.2" }, + "amqp": { + "hashes": [ + "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", + "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==2.6.0" + }, "aniso8601": { "hashes": [ "sha256:529dcb1f5f26ee0df6c0a1ee84b7b27197c3c50fc3a6321d66c544689237d072", @@ -42,6 +50,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -49,6 +58,7 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -72,6 +82,7 @@ 
"sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -88,6 +99,14 @@ ], "version": "==1.4" }, + "celery": { + "hashes": [ + "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", + "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.4.6" + }, "certifi": { "hashes": [ "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", @@ -140,6 +159,7 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -156,6 +176,14 @@ ], "version": "==0.9.1" }, + "configparser": { + "hashes": [ + "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", + "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" + ], + "markers": "python_version >= '3.6'", + "version": "==5.0.0" + }, "connexion": { "extras": [ "swagger-ui" @@ -212,6 +240,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -294,8 +323,16 @@ "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e", "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.4" }, + "future": { + "hashes": [ + 
"sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.18.2" + }, "gunicorn": { "hashes": [ "sha256:1904bb2b8a43658807108d59c3f3d56c2b6121a701161de0ddf9ad140073c626", @@ -316,6 +353,7 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "imagesize": { @@ -323,6 +361,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -338,6 +377,7 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], + "markers": "python_version >= '3.5'", "version": "==0.5.0" }, "itsdangerous": { @@ -345,6 +385,7 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -359,6 +400,7 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -368,9 +410,21 @@ ], "version": "==3.2.0" }, + "kombu": { + "hashes": [ + "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", + "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" + ], + "markers": 
"python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.6.11" + }, "ldap3": { "hashes": [ + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -418,6 +472,7 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markdown": { @@ -464,6 +519,7 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "marshmallow": { @@ -519,6 +575,7 @@ "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e", "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc" ], + "markers": "python_version >= '3.6'", "version": "==1.19.1" }, "openapi-spec-validator": { @@ -542,6 +599,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -604,8 +662,19 @@ }, "pyasn1": { "hashes": [ + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + 
"sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", + "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" ], "version": "==0.4.8" }, @@ -614,6 +683,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -621,6 +691,7 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], + "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -636,6 +707,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -644,6 +716,14 @@ ], "version": "==0.16.0" }, + "python-box": { + "hashes": [ + "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", + 
"sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" + ], + "markers": "python_version >= '3.6'", + "version": "==5.0.1" + }, "python-dateutil": { "hashes": [ "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", @@ -660,9 +740,11 @@ }, "python-editor": { "hashes": [ - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", + "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" ], "version": "==1.0.4" }, @@ -721,6 +803,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -735,6 +818,7 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], + "markers": "python_version >= '3.5'", "version": "==2.0.1" }, "sphinx": { @@ -750,6 +834,7 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -757,6 +842,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -764,6 +850,7 @@ 
"sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -771,6 +858,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -778,6 +866,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -785,6 +874,7 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { @@ -822,6 +912,7 @@ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.18" }, "swagger-ui-bundle": { @@ -837,13 +928,23 @@ "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.10" }, + "vine": { + "hashes": [ + "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", + "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.3.0" + }, "waitress": { "hashes": [ 
"sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -851,6 +952,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -897,6 +999,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -906,6 +1009,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -961,6 +1065,7 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], + "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -968,6 +1073,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -983,6 +1089,7 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -990,6 +1097,7 @@ 
"sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.9.0" }, "pyparsing": { @@ -997,6 +1105,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1012,6 +1121,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { @@ -1026,6 +1136,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } } From 6cdb9c3b3a72819db445257b914d1f9cd9fa882c Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:47:53 -0400 Subject: [PATCH 30/60] Exposes date in TaskEvent endpoint --- crc/models/task_event.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crc/models/task_event.py b/crc/models/task_event.py index e3914468..c696bc26 100644 --- a/crc/models/task_event.py +++ b/crc/models/task_event.py @@ -50,6 +50,7 @@ class TaskEvent(object): self.task_type = model.task_type self.task_state = model.task_state self.task_lane = model.task_lane + self.date = model.date class TaskEventSchema(ma.Schema): @@ -59,5 +60,5 @@ class TaskEventSchema(ma.Schema): class Meta: model = TaskEvent additional = ["id", "user_uid", "action", "task_id", "task_title", - "task_name", "task_type", "task_state", "task_lane"] + "task_name", "task_type", "task_state", 
"task_lane", "date"] unknown = INCLUDE From 263ea4d00f76046f931c9e5a30cc1160373a3ed3 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 22 Jul 2020 14:48:36 -0400 Subject: [PATCH 31/60] Adds manual task --- .../bpmn/notifications/notifications.bpmn | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/crc/static/bpmn/notifications/notifications.bpmn b/crc/static/bpmn/notifications/notifications.bpmn index cd73505f..a9fdedbf 100644 --- a/crc/static/bpmn/notifications/notifications.bpmn +++ b/crc/static/bpmn/notifications/notifications.bpmn @@ -9,6 +9,7 @@ StartEvent_1 Activity_1qpy9ra Event_1m9fnmv + Activity_0c5drp3 Gateway_0ved0t9 @@ -25,7 +26,7 @@ Flow_0q51aiq - Flow_11tnx3n + Flow_1ugh4wn Flow_0d2snmk @@ -42,7 +43,7 @@ Flow_0mhtlkt - + is_study_approved == False @@ -54,6 +55,12 @@ Flow_0d2snmk Flow_0apr3nj + + + Your request was not approved. Try again. + Flow_11tnx3n + Flow_1ugh4wn + @@ -68,10 +75,9 @@ - - + - + @@ -94,6 +100,10 @@ + + + + @@ -112,6 +122,9 @@ + + + From 0d2cb8c1b8df579a0d884bb5358ff341ec4acff5 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 23 Jul 2020 12:00:24 -0400 Subject: [PATCH 32/60] Correcting an issue with the Navigation where it did not correctly handle looking back to a previous task within the workflow. In some cases the session was not getting committed, leaving rogue assignments outstanding for a workflow. 
--- Pipfile.lock | 71 ++------------------------------ crc/services/workflow_service.py | 1 + tests/test_user_roles.py | 63 +++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 68 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index c3fb647d..a1bb7222 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -35,7 +35,6 @@ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -50,7 +49,6 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -58,7 +56,6 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -82,7 +79,6 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -104,7 +100,6 @@ "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.4.6" }, "certifi": { @@ -159,7 +154,6 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], - "markers": 
"python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -181,7 +175,6 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], - "markers": "python_version >= '3.6'", "version": "==5.0.0" }, "connexion": { @@ -240,7 +233,6 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -323,14 +315,12 @@ "sha256:05b31d2034dd3f2a685cbbae4cfc4ed906b2a733cff7964ada450fd5e462b84e", "sha256:bfc7150eaf809b1c283879302f04c42791136060c6eeb12c0c6674fb1291fae5" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.4" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -353,7 +343,6 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "imagesize": { @@ -361,7 +350,6 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -377,7 +365,6 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], - "markers": "python_version >= '3.5'", 
"version": "==0.5.0" }, "itsdangerous": { @@ -385,7 +372,6 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -400,7 +386,6 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -415,16 +400,11 @@ "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.6.11" }, "ldap3": { "hashes": [ - "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", - "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", - "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", - "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -472,7 +452,6 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markdown": { @@ -519,7 +498,6 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": 
"==1.1.1" }, "marshmallow": { @@ -575,7 +553,6 @@ "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e", "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc" ], - "markers": "python_version >= '3.6'", "version": "==1.19.1" }, "openapi-spec-validator": { @@ -599,7 +576,6 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -662,19 +638,8 @@ }, "pyasn1": { "hashes": [ - "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", - "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", - "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", - "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", - "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", - "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", - "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", - "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", - "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", - "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" ], "version": "==0.4.8" }, @@ -683,7 +648,6 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 
"version": "==2.20" }, "pygments": { @@ -691,7 +655,6 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], - "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -707,7 +670,6 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -721,7 +683,6 @@ "sha256:2df0d0e0769b6d6e7daed8d5e0b10a38e0b5486ee75914c30f2a927f7a374111", "sha256:ddea019b4ee53fe3f822407b0b26ec54ff6233042c68b54244d3503ae4d6218f" ], - "markers": "python_version >= '3.6'", "version": "==5.0.1" }, "python-dateutil": { @@ -740,11 +701,9 @@ }, "python-editor": { "hashes": [ - "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", - "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" ], "version": "==1.0.4" }, @@ -803,7 +762,6 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -818,7 +776,6 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], - "markers": "python_version >= '3.5'", "version": "==2.0.1" }, 
"sphinx": { @@ -834,7 +791,6 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], - "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -842,7 +798,6 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], - "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -850,7 +805,6 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], - "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -858,7 +812,6 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], - "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -866,7 +819,6 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], - "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -874,12 +826,11 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], - "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "11ad40bbcb0fbd3c5bc1078e4989dc38b749f7f3" + "ref": "5785d3cab99e319596e1bf0006df96f215febafd" }, "sqlalchemy": { "hashes": [ @@ -912,7 +863,6 @@ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], - "markers": "python_version >= '2.7' and 
python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.18" }, "swagger-ui-bundle": { @@ -928,7 +878,6 @@ "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a", "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.10" }, "vine": { @@ -936,7 +885,6 @@ "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -944,7 +892,6 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -952,7 +899,6 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -999,7 +945,6 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], - "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -1009,7 +954,6 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -1065,7 +1009,6 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", 
"sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], - "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -1073,7 +1016,6 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -1089,7 +1031,6 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -1097,7 +1038,6 @@ "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.9.0" }, "pyparsing": { @@ -1105,7 +1045,6 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1121,7 +1060,6 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { @@ -1136,7 +1074,6 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], - "markers": "python_version >= '3.6'", "version": "==3.1.0" } } diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 65794037..e078166b 100644 --- 
a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -470,6 +470,7 @@ class WorkflowService(object): db.session.query(TaskEventModel). \ filter(TaskEventModel.workflow_id == processor.workflow_model.id). \ filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).delete() + db.session.commit() for task in processor.get_current_user_tasks(): user_ids = WorkflowService.get_users_assigned_to_task(processor, task) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index ed879d2e..084df85d 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -200,4 +200,65 @@ class TestTasksApi(BaseTest): workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) self.assertEquals('COMPLETED', workflow_api.next_task.state) self.assertEquals('EndEvent', workflow_api.next_task.type) # Are are at the end. - self.assertEquals(WorkflowStatus.complete, workflow_api.status) \ No newline at end of file + self.assertEquals(WorkflowStatus.complete, workflow_api.status) + + def get_assignment_task_events(self, uid): + return db.session.query(TaskEventModel). \ + filter(TaskEventModel.user_uid == uid). \ + filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).all() + + def test_workflow_reset_correctly_resets_the_task_events(self): + + submitter = self.create_user(uid='lje5u') + supervisor = self.create_user(uid='lb3dp') + workflow = self.create_workflow('roles', display_name="Roles", as_user=submitter.uid) + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + + # User lje5u can complete the first task, and set her supervisor + data = workflow_api.next_task.data + data['supervisor'] = supervisor.uid + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + + # At this point there should be a task_log with an action of ASSIGNMENT on it for + # the supervisor. 
+ self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Resetting the workflow at this point should clear the event log. + workflow_api = self.get_workflow_api(workflow, hard_reset=True, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Re-complete first task, and awaiting tasks should shift to 0 for for submitter, and 1 for supervisor + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Complete the supervisor task with rejected approval, and the assignments should switch. + workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + data = workflow_api.next_task.data + data["approval"] = False + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals(1, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Mark the return form review page as complete, and then recomplete the form, and assignments switch yet again. + workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid) + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(1, len(self.get_assignment_task_events(supervisor.uid))) + + # Complete the supervisor task, accepting the approval, and the workflow is completed. + # When it is all done, there should be no outstanding assignments. 
+ workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid) + data = workflow_api.next_task.data + data["approval"] = True + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + self.assertEquals(WorkflowStatus.complete, workflow_api.status) + self.assertEquals('EndEvent', workflow_api.next_task.type) # Are are at the end. + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) + + # Sending any subsequent complete forms does not result in a new task event + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) + self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) From 452f2c3723c98462dd3e9553d3ec90b4514c685d Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 14:38:57 -0400 Subject: [PATCH 33/60] Building out a user service for getting the current user, it will provide a number of functions, one of which will allow administrative users to impersonate other users in some circumstances (but will assure that we log events correctly when an impersonation occures) --- README.md | 3 +++ crc/api/common.py | 3 +++ crc/api/study.py | 8 ++++--- crc/api/user.py | 23 ++++++++------------ crc/api/workflow.py | 28 +++++++++++------------- crc/models/user.py | 5 ++++- crc/services/user_service.py | 37 ++++++++++++++++++++++++++++++++ crc/services/workflow_service.py | 5 +++-- tests/base_test.py | 5 ++++- 9 files changed, 80 insertions(+), 37 deletions(-) create mode 100644 crc/services/user_service.py diff --git a/README.md b/README.md index 6bd7dd67..e559f044 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,9 @@ Make sure all of the following are properly installed on your system: - [Install pipenv](https://pipenv-es.readthedocs.io/es/stable/) - [Add 
${HOME}/.local/bin to your PATH](https://github.com/pypa/pipenv/issues/2122#issue-319600584) +### Running Postgres + + ### Project Initialization 1. Clone this repository. 2. In PyCharm: diff --git a/crc/api/common.py b/crc/api/common.py index cb527c73..31a2c8df 100644 --- a/crc/api/common.py +++ b/crc/api/common.py @@ -1,5 +1,6 @@ from SpiffWorkflow import WorkflowException from SpiffWorkflow.exceptions import WorkflowTaskExecException +from flask import g from crc import ma, app @@ -60,3 +61,5 @@ class ApiErrorSchema(ma.Schema): def handle_invalid_usage(error): response = ApiErrorSchema().dump(error) return response, error.status_code + + diff --git a/crc/api/study.py b/crc/api/study.py index 8fdd1b4a..e288ee2f 100644 --- a/crc/api/study.py +++ b/crc/api/study.py @@ -8,6 +8,7 @@ from crc.api.common import ApiError, ApiErrorSchema from crc.models.protocol_builder import ProtocolBuilderStatus from crc.models.study import StudySchema, StudyModel, Study from crc.services.study_service import StudyService +from crc.services.user_service import UserService def add_study(body): @@ -17,7 +18,7 @@ def add_study(body): if 'title' not in body: raise ApiError("missing_title", "Can't create a new study without a title.") - study_model = StudyModel(user_uid=g.user.uid, + study_model = StudyModel(user_uid=UserService.current_user().uid, title=body['title'], primary_investigator_id=body['primary_investigator_id'], last_updated=datetime.now(), @@ -65,8 +66,9 @@ def delete_study(study_id): def user_studies(): """Returns all the studies associated with the current user. 
""" - StudyService.synch_with_protocol_builder_if_enabled(g.user) - studies = StudyService.get_studies_for_user(g.user) + user = UserService.current_user(allow_admin_impersonate=True) + StudyService.synch_with_protocol_builder_if_enabled(user) + studies = StudyService.get_studies_for_user(user) results = StudySchema(many=True).dump(studies) return results diff --git a/crc/api/user.py b/crc/api/user.py index fc86bd02..49b447ac 100644 --- a/crc/api/user.py +++ b/crc/api/user.py @@ -63,13 +63,15 @@ def verify_token(token=None): # Fall back to a default user if this is not production. g.user = UserModel.query.first() token = g.user.encode_auth_token() + token_info = UserModel.decode_auth_token(token) + return token_info def verify_token_admin(token=None): """ - Verifies the token for the user (if provided) in non-production environment. If in production environment, - checks that the user is in the list of authorized admins + Verifies the token for the user (if provided) in non-production environment. 
+ If in production environment, checks that the user is in the list of authorized admins Args: token: Optional[str] @@ -77,18 +79,11 @@ def verify_token_admin(token=None): Returns: token: str """ - - # If this is production, check that the user is in the list of admins - if _is_production(): - uid = _get_request_uid(request) - - if uid is not None and uid in app.config['ADMIN_UIDS']: - return verify_token() - - # If we're not in production, just use the normal verify_token method - else: - return verify_token(token) - + verify_token(token) + if "user" in g and g.user.is_admin(): + token = g.user.encode_auth_token() + token_info = UserModel.decode_auth_token(token) + return token_info def get_current_user(): return UserModelSchema().dump(g.user) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index a290d340..0279e6bf 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -13,6 +13,7 @@ from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, Workflow from crc.services.file_service import FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService +from crc.services.user_service import UserService from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService @@ -104,8 +105,10 @@ def get_workflow(workflow_id, soft_reset=False, hard_reset=False): def get_task_events(action): - """Provides a way to see a history of what has happened, or get a list of tasks that need your attention.""" - query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == g.user.uid) + """Provides a way to see a history of what has happened, or get a list of + tasks that need your attention.""" + user = UserService.current_user(allow_admin_impersonate=True) + query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == user.uid) if action: query = query.filter(TaskEventModel.action == action) events = query.all() @@ -130,7 +133,7 
@@ def set_current_task(workflow_id, task_id): task_id = uuid.UUID(task_id) spiff_task = processor.bpmn_workflow.get_task(task_id) _verify_user_and_role(processor, spiff_task) - user_uid = g.user.uid + user_uid = UserService.current_user(allow_admin_impersonate=True).uid if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not move the token to a task who's state is not " "currently set to COMPLETE or READY.") @@ -173,7 +176,8 @@ def update_task(workflow_id, task_id, body, terminate_loop=None): processor.save() # Log the action, and any pending task assignments in the event of lanes in the workflow. - WorkflowService.log_task_action(g.user.uid, processor, spiff_task, WorkflowService.TASK_ACTION_COMPLETE) + user = UserService.current_user(allow_admin_impersonate=False) # Always log as the real user. + WorkflowService.log_task_action(user.uid, processor, spiff_task, WorkflowService.TASK_ACTION_COMPLETE) WorkflowService.update_task_assignments(processor) workflow_api_model = WorkflowService.processor_to_workflow_api(processor) @@ -233,19 +237,11 @@ def lookup(workflow_id, field_id, query=None, value=None, limit=10): def _verify_user_and_role(processor, spiff_task): """Assures the currently logged in user can access the given workflow and task, or - raises an error. - Allow administrators to modify tasks, otherwise assure that the current user - is allowed to edit or update the task. Will raise the appropriate error if user - is not authorized. """ - - if 'user' not in g: - raise ApiError("logged_out", "You are no longer logged in.", status_code=401) - - if g.user.uid in app.config['ADMIN_UIDS']: - return g.user.uid + raises an error. 
""" + user = UserService.current_user(allow_admin_impersonate=True) allowed_users = WorkflowService.get_users_assigned_to_task(processor, spiff_task) - if g.user.uid not in allowed_users: + if user.uid not in allowed_users: raise ApiError.from_task("permission_denied", f"This task must be completed by '{allowed_users}', " - f"but you are {g.user.uid}", spiff_task) + f"but you are {user.uid}", spiff_task) diff --git a/crc/models/user.py b/crc/models/user.py index 221176bc..e621455b 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -18,9 +18,12 @@ class UserModel(db.Model): first_name = db.Column(db.String, nullable=True) last_name = db.Column(db.String, nullable=True) title = db.Column(db.String, nullable=True) - # TODO: Add Department and School + def is_admin(self): + # Currently admin abilities are set in the configuration, but this + # may change in the future. + return self.uid in app.config['ADMIN_UIDS'] def encode_auth_token(self): """ diff --git a/crc/services/user_service.py b/crc/services/user_service.py new file mode 100644 index 00000000..6b2887f5 --- /dev/null +++ b/crc/services/user_service.py @@ -0,0 +1,37 @@ +from flask import g + +from crc.api.common import ApiError + + +class UserService(object): + """Provides common tools for working with users""" + + @staticmethod + def has_user(): + if 'user' not in g or not g.user: + return False + else: + return True + + @staticmethod + def current_user(allow_admin_impersonate=False): + + if not UserService.has_user(): + raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + + # Admins can pretend to be different users and act on a users behalf in + # some circumstances. + if g.user.is_admin() and allow_admin_impersonate and "impersonate_user" in g: + return g.impersonate_user + else: + return g.user + + @staticmethod + def in_list(uids, allow_admin_impersonate=False): + """Returns true if the current user's id is in the given list of ids. 
False if there + is no user, or the user is not in the list.""" + if UserService.has_user(): # If someone is logged in, lock tasks that don't belong to them. + user = UserService.current_user(allow_admin_impersonate) + if user.uid in uids: + return True + return False diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 3205e800..d27fe223 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -30,6 +30,7 @@ from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel from crc.services.file_service import FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService +from crc.services.user_service import UserService from crc.services.workflow_processor import WorkflowProcessor @@ -239,7 +240,7 @@ class WorkflowService(object): nav_item['title'] = nav_item['task'].title # Prefer the task title. user_uids = WorkflowService.get_users_assigned_to_task(processor, spiff_task) - if 'user' not in g or not g.user or g.user.uid not in user_uids: + if not UserService.in_list(user_uids, allow_admin_impersonate=True): nav_item['state'] = WorkflowService.TASK_STATE_LOCKED else: @@ -272,7 +273,7 @@ class WorkflowService(object): workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) # Update the state of the task to locked if the current user does not own the task. user_uids = WorkflowService.get_users_assigned_to_task(processor, next_task) - if 'user' not in g or not g.user or g.user.uid not in user_uids: + if not UserService.in_list(user_uids, allow_admin_impersonate=True): workflow_api.next_task.state = WorkflowService.TASK_STATE_LOCKED return workflow_api diff --git a/tests/base_test.py b/tests/base_test.py index 6ea1966d..1ff1af6f 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -2,6 +2,8 @@ # IMPORTANT - Environment must be loaded before app, models, etc.... 
import os +from crc.services.user_service import UserService + os.environ["TESTING"] = "true" import json @@ -118,7 +120,8 @@ class BaseTest(unittest.TestCase): self.assertIsNotNone(user_model.display_name) self.assertEqual(user_model.uid, uid) self.assertTrue('user' in g, 'User should be in Flask globals') - self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid') + user = UserService.current_user(allow_admin_impersonate=True) + self.assertEqual(uid, user.uid, 'Logged in user should match given user uid') return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode()) From de0fe705c32a44ce0b82083f6c54e1893ad1a05a Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 27 Jul 2020 14:00:44 -0600 Subject: [PATCH 34/60] Wrapping LOCKED task update attempt into a try-catch block for tests --- tests/test_user_roles.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 084df85d..74871476 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -259,6 +259,8 @@ class TestTasksApi(BaseTest): self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) # Sending any subsequent complete forms does not result in a new task event - workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + with self.assertRaises(AssertionError) as _api_error: + workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid) + self.assertEquals(0, len(self.get_assignment_task_events(submitter.uid))) self.assertEquals(0, len(self.get_assignment_task_events(supervisor.uid))) From 8d42d520a019e6181b319829fe5d2eecc249c5ab Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 16:32:23 -0400 Subject: [PATCH 35/60] Adding events to the study that is returned via the api. 
--- crc/models/study.py | 7 +++++-- crc/services/study_service.py | 16 +++++++++++----- tests/study/test_study_api.py | 17 +++++++++++++++++ 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index bc92e5e1..f6c03736 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -41,8 +41,10 @@ class StudyModel(db.Model): class WorkflowMetadata(object): - def __init__(self, id, name, display_name, description, spec_version, category_id, category_display_name, state: WorkflowState, status: WorkflowStatus, - total_tasks, completed_tasks, display_order): + def __init__(self, id, name = None, display_name = None, description = None, spec_version = None, + category_id = None, category_display_name = None, state: WorkflowState = None, + status: WorkflowStatus = None, total_tasks = None, completed_tasks = None, + display_order = None): self.id = id self.name = name self.display_name = display_name @@ -176,6 +178,7 @@ class StudySchema(ma.Schema): files = fields.List(fields.Nested(FileSchema), dump_only=True) approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True) enrollment_date = fields.Date(allow_none=True) + events = fields.List(fields.Nested('TaskEventSchema'), dump_only=True) class Meta: model = Study diff --git a/crc/services/study_service.py b/crc/services/study_service.py index fbc62d01..cbf3434d 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -1,6 +1,5 @@ from copy import copy from datetime import datetime -import json from typing import List import requests @@ -13,16 +12,15 @@ from crc.api.common import ApiError from crc.models.file import FileModel, FileModelSchema, File from crc.models.ldap import LdapSchema from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus -from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel, Study, Category, WorkflowMetadata +from crc.models.task_event import 
TaskEventModel, TaskEvent from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \ WorkflowStatus +from crc.services.approval_service import ApprovalService from crc.services.file_service import FileService from crc.services.ldap_service import LdapService from crc.services.protocol_builder import ProtocolBuilderService from crc.services.workflow_processor import WorkflowProcessor -from crc.services.approval_service import ApprovalService -from crc.models.approval import Approval class StudyService(object): @@ -63,7 +61,7 @@ class StudyService(object): files = (File.from_models(model, FileService.get_file_data(model.id), FileService.get_doc_dictionary()) for model in files) study.files = list(files) - + study.events = StudyService.get_events(study_id) # Calling this line repeatedly is very very slow. It creates the # master spec and runs it. Don't execute this for Abandoned studies, as # we don't have the information to process them. @@ -77,6 +75,14 @@ class StudyService(object): return study + @staticmethod + def get_events(study_id): + event_models = db.session.query(TaskEventModel).filter(TaskEventModel.study_id == study_id).all() + events = [] + for event_model in event_models: + events.append(TaskEvent(event_model, None, WorkflowMetadata(id=event_model.workflow_id))) + return events + @staticmethod def delete_study(study_id): session.query(TaskEventModel).filter_by(study_id=study_id).delete() diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 3b781f50..9ed7bb2c 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -1,4 +1,5 @@ import json + from tests.base_test import BaseTest from datetime import datetime, timezone @@ -13,6 +14,7 @@ from crc.models.study import StudyModel, StudySchema from crc.models.workflow import WorkflowSpecModel, WorkflowModel from crc.services.file_service import FileService from crc.services.workflow_processor import 
WorkflowProcessor +from crc.services.workflow_service import WorkflowService class TestStudyApi(BaseTest): @@ -112,6 +114,21 @@ class TestStudyApi(BaseTest): for approval in study.approvals: self.assertEqual(full_study['study'].title, approval['title']) + def test_get_study_has_details_about_events(self): + # Set up the study and attach a file to it. + self.load_example_data() + workflow = self.create_workflow('file_upload_form') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + WorkflowService.log_task_action('dhf8r', processor, task, 'my_action') + api_response = self.app.get('/v1.0/study/%i' % workflow.study_id, + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(api_response) + study = json.loads(api_response.get_data(as_text=True)) + self.assertEqual(1, len(study['events'])) + self.assertEqual('my_action', study['events'][0]['action']) + def test_add_study(self): self.load_example_data() study = self.add_test_study() From 51d6d6a5fae8dc8fec3ecbb6628fbe36e40f2be6 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 16:54:46 -0400 Subject: [PATCH 36/60] Fixing failing tests around a notifications workflow. --- crc/static/bpmn/notifications/notifications.bpmn | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crc/static/bpmn/notifications/notifications.bpmn b/crc/static/bpmn/notifications/notifications.bpmn index a9fdedbf..3fdbbba6 100644 --- a/crc/static/bpmn/notifications/notifications.bpmn +++ b/crc/static/bpmn/notifications/notifications.bpmn @@ -22,7 +22,11 @@ - + + + + + Flow_0q51aiq @@ -49,7 +53,11 @@ - + + + + + Flow_0d2snmk From 0cb480801ba4ce434fce55ed11eb3e475158aa71 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 27 Jul 2020 17:05:01 -0400 Subject: [PATCH 37/60] Provide event data on the Study api endpoint. Speed up the tests a little, because that got out of hand. Need to dig into what is causing this problem. 
--- crc/models/study.py | 7 +++++-- crc/services/study_service.py | 16 +++++++++++----- tests/study/test_study_api.py | 17 +++++++++++++++++ tests/test_tasks_api.py | 20 -------------------- 4 files changed, 33 insertions(+), 27 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index 854ce62f..669ca535 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -41,8 +41,10 @@ class StudyModel(db.Model): class WorkflowMetadata(object): - def __init__(self, id, name, display_name, description, spec_version, category_id, category_display_name, state: WorkflowState, status: WorkflowStatus, - total_tasks, completed_tasks, display_order): + def __init__(self, id, name = None, display_name = None, description = None, spec_version = None, + category_id = None, category_display_name = None, state: WorkflowState = None, + status: WorkflowStatus = None, total_tasks = None, completed_tasks = None, + display_order = None): self.id = id self.name = name self.display_name = display_name @@ -157,6 +159,7 @@ class StudySchema(ma.Schema): files = fields.List(fields.Nested(FileSchema), dump_only=True) approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True) enrollment_date = fields.Date(allow_none=True) + events = fields.List(fields.Nested('TaskEventSchema'), dump_only=True) class Meta: model = Study diff --git a/crc/services/study_service.py b/crc/services/study_service.py index fbc62d01..cbf3434d 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -1,6 +1,5 @@ from copy import copy from datetime import datetime -import json from typing import List import requests @@ -13,16 +12,15 @@ from crc.api.common import ApiError from crc.models.file import FileModel, FileModelSchema, File from crc.models.ldap import LdapSchema from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus -from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel, Study, Category, 
WorkflowMetadata +from crc.models.task_event import TaskEventModel, TaskEvent from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \ WorkflowStatus +from crc.services.approval_service import ApprovalService from crc.services.file_service import FileService from crc.services.ldap_service import LdapService from crc.services.protocol_builder import ProtocolBuilderService from crc.services.workflow_processor import WorkflowProcessor -from crc.services.approval_service import ApprovalService -from crc.models.approval import Approval class StudyService(object): @@ -63,7 +61,7 @@ class StudyService(object): files = (File.from_models(model, FileService.get_file_data(model.id), FileService.get_doc_dictionary()) for model in files) study.files = list(files) - + study.events = StudyService.get_events(study_id) # Calling this line repeatedly is very very slow. It creates the # master spec and runs it. Don't execute this for Abandoned studies, as # we don't have the information to process them. 
@@ -77,6 +75,14 @@ class StudyService(object): return study + @staticmethod + def get_events(study_id): + event_models = db.session.query(TaskEventModel).filter(TaskEventModel.study_id == study_id).all() + events = [] + for event_model in event_models: + events.append(TaskEvent(event_model, None, WorkflowMetadata(id=event_model.workflow_id))) + return events + @staticmethod def delete_study(study_id): session.query(TaskEventModel).filter_by(study_id=study_id).delete() diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 3b781f50..9ed7bb2c 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -1,4 +1,5 @@ import json + from tests.base_test import BaseTest from datetime import datetime, timezone @@ -13,6 +14,7 @@ from crc.models.study import StudyModel, StudySchema from crc.models.workflow import WorkflowSpecModel, WorkflowModel from crc.services.file_service import FileService from crc.services.workflow_processor import WorkflowProcessor +from crc.services.workflow_service import WorkflowService class TestStudyApi(BaseTest): @@ -112,6 +114,21 @@ class TestStudyApi(BaseTest): for approval in study.approvals: self.assertEqual(full_study['study'].title, approval['title']) + def test_get_study_has_details_about_events(self): + # Set up the study and attach a file to it. 
+ self.load_example_data() + workflow = self.create_workflow('file_upload_form') + processor = WorkflowProcessor(workflow) + task = processor.next_task() + WorkflowService.log_task_action('dhf8r', processor, task, 'my_action') + api_response = self.app.get('/v1.0/study/%i' % workflow.study_id, + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(api_response) + study = json.loads(api_response.get_data(as_text=True)) + self.assertEqual(1, len(study['events'])) + self.assertEqual('my_action', study['events'][0]['action']) + def test_add_study(self): self.load_example_data() study = self.add_test_study() diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 8284313d..9b8b5d68 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -69,7 +69,6 @@ class TestTasksApi(BaseTest): self.assertIsNotNone(val) def test_error_message_on_bad_gateway_expression(self): - self.load_example_data() workflow = self.create_workflow('exclusive_gateway') # get the first form in the two form workflow. @@ -77,7 +76,6 @@ class TestTasksApi(BaseTest): self.complete_form(workflow, task, {"has_bananas": True}) def test_workflow_with_parallel_forms(self): - self.load_example_data() workflow = self.create_workflow('exclusive_gateway') # get the first form in the two form workflow. @@ -89,7 +87,6 @@ class TestTasksApi(BaseTest): self.assertEqual("Task_Num_Bananas", workflow_api.next_task.name) def test_navigation_with_parallel_forms(self): - self.load_example_data() workflow = self.create_workflow('exclusive_gateway') # get the first form in the two form workflow. @@ -107,7 +104,6 @@ class TestTasksApi(BaseTest): self.assertEqual("NOOP", nav[3]['state']) def test_navigation_with_exclusive_gateway(self): - self.load_example_data() workflow = self.create_workflow('exclusive_gateway_2') # get the first form in the two form workflow. 
@@ -124,7 +120,6 @@ class TestTasksApi(BaseTest): self.assertEqual("Task 3", nav[6]['title']) def test_document_added_to_workflow_shows_up_in_file_list(self): - self.load_example_data() self.create_reference_document() workflow = self.create_workflow('docx') @@ -153,7 +148,6 @@ class TestTasksApi(BaseTest): def test_get_documentation_populated_in_end(self): - self.load_example_data() workflow = self.create_workflow('random_fact') workflow_api = self.get_workflow_api(workflow) task = workflow_api.next_task @@ -167,9 +161,7 @@ class TestTasksApi(BaseTest): self.assertTrue("norris" in workflow_api.next_task.documentation) def test_load_workflow_from_outdated_spec(self): - # Start the basic two_forms workflow and complete a task. - self.load_example_data() workflow = self.create_workflow('two_forms') workflow_api = self.get_workflow_api(workflow) self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}) @@ -194,9 +186,7 @@ class TestTasksApi(BaseTest): self.assertTrue(workflow_api.is_latest_spec) def test_soft_reset_errors_out_and_next_result_is_on_original_version(self): - # Start the basic two_forms workflow and complete a task. - self.load_example_data() workflow = self.create_workflow('two_forms') workflow_api = self.get_workflow_api(workflow) self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}) @@ -221,7 +211,6 @@ class TestTasksApi(BaseTest): def test_manual_task_with_external_documentation(self): - self.load_example_data() workflow = self.create_workflow('manual_task_with_external_documentation') # get the first form in the two form workflow. @@ -235,7 +224,6 @@ class TestTasksApi(BaseTest): self.assertTrue('Dan' in workflow_api.next_task.documentation) def test_bpmn_extension_properties_are_populated(self): - self.load_example_data() workflow = self.create_workflow('manual_task_with_external_documentation') # get the first form in the two form workflow. 
@@ -268,9 +256,7 @@ class TestTasksApi(BaseTest): # Assure that the names for each task are properly updated, so they aren't all the same. self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name']) - def test_lookup_endpoint_for_task_field_enumerations(self): - self.load_example_data() workflow = self.create_workflow('enum_options_with_search') # get the first form in the two form workflow. workflow = self.get_workflow_api(workflow) @@ -286,7 +272,6 @@ class TestTasksApi(BaseTest): self.assert_options_populated(results, ['CUSTOMER_NUMBER', 'CUSTOMER_NAME', 'CUSTOMER_CLASS_MEANING']) def test_lookup_endpoint_for_task_field_using_lookup_entry_id(self): - self.load_example_data() workflow = self.create_workflow('enum_options_with_search') # get the first form in the two form workflow. workflow = self.get_workflow_api(workflow) @@ -316,7 +301,6 @@ class TestTasksApi(BaseTest): # the key/values from the spreadsheet are added directly to the form and it shows up as # a dropdown. This tests the case of wanting to get additional data when a user selects # something from a dropdown. - self.load_example_data() workflow = self.create_workflow('enum_options_from_file') # get the first form in the two form workflow. workflow = self.get_workflow_api(workflow) @@ -334,7 +318,6 @@ class TestTasksApi(BaseTest): self.assertIsInstance(results[0]['data'], dict) def test_enum_from_task_data(self): - self.load_example_data() workflow = self.create_workflow('enum_options_from_task_data') # get the first form in the two form workflow. 
workflow_api = self.get_workflow_api(workflow) @@ -359,7 +342,6 @@ class TestTasksApi(BaseTest): self.assertEqual('Chesterfield', options[2]['data']['first_name']) def test_lookup_endpoint_for_task_ldap_field_lookup(self): - self.load_example_data() workflow = self.create_workflow('ldap_lookup') # get the first form workflow = self.get_workflow_api(workflow) @@ -378,7 +360,6 @@ class TestTasksApi(BaseTest): self.assertEqual(1, len(results)) def test_sub_process(self): - self.load_example_data() workflow = self.create_workflow('subprocess') workflow_api = self.get_workflow_api(workflow) @@ -399,7 +380,6 @@ class TestTasksApi(BaseTest): self.assertEqual(WorkflowStatus.complete, workflow_api.status) def test_update_task_resets_token(self): - self.load_example_data() workflow = self.create_workflow('exclusive_gateway') # Start the workflow. From 300026cbc842d899682673535e4be198796a4ff8 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 28 Jul 2020 10:16:48 -0400 Subject: [PATCH 38/60] Expanding the task events endpoint to accept workflow and study ids as additional filters. Removing events from the study endpoint, too noisy. --- crc/api.yml | 12 ++++++++++ crc/api/workflow.py | 6 ++++- crc/models/study.py | 1 - crc/models/task_event.py | 1 + crc/services/file_service.py | 3 ++- crc/services/study_service.py | 9 -------- tests/study/test_study_api.py | 16 +------------ tests/test_events.py | 43 +++++++++++++++++++++++++++++++++++ tests/test_user_roles.py | 1 + 9 files changed, 65 insertions(+), 27 deletions(-) create mode 100644 tests/test_events.py diff --git a/crc/api.yml b/crc/api.yml index 4c6ebd1b..f23f0ace 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -572,6 +572,18 @@ paths: description: The type of action the event documents, options include "ASSIGNMENT" for tasks that are waiting on you, "COMPLETE" for things have completed. schema: type: string + - name: workflow + in: query + required: false + description: Restrict results to the given workflow. 
+ schema: + type: number + - name: study + in: query + required: false + description: Restrict results to the given study. + schema: + type: number get: operationId: crc.api.workflow.get_task_events summary: Returns a list of task events related to the current user. Can be filtered by type. diff --git a/crc/api/workflow.py b/crc/api/workflow.py index a290d340..3418d50a 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -103,11 +103,15 @@ def get_workflow(workflow_id, soft_reset=False, hard_reset=False): return WorkflowApiSchema().dump(workflow_api_model) -def get_task_events(action): +def get_task_events(action = None, workflow = None, study = None): """Provides a way to see a history of what has happened, or get a list of tasks that need your attention.""" query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == g.user.uid) if action: query = query.filter(TaskEventModel.action == action) + if workflow: + query = query.filter(TaskEventModel.workflow_id == workflow) + if study: + query = query.filter(TaskEventModel.study_id == study) events = query.all() # Turn the database records into something a little richer for the UI to use. 
diff --git a/crc/models/study.py b/crc/models/study.py index 669ca535..e14fe0a6 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -159,7 +159,6 @@ class StudySchema(ma.Schema): files = fields.List(fields.Nested(FileSchema), dump_only=True) approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True) enrollment_date = fields.Date(allow_none=True) - events = fields.List(fields.Nested('TaskEventSchema'), dump_only=True) class Meta: model = Study diff --git a/crc/models/task_event.py b/crc/models/task_event.py index c696bc26..aa05a4f7 100644 --- a/crc/models/task_event.py +++ b/crc/models/task_event.py @@ -57,6 +57,7 @@ class TaskEventSchema(ma.Schema): study = fields.Nested(StudySchema, dump_only=True) workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True) + task_lane = fields.String(allow_none=True, required=False) class Meta: model = TaskEvent additional = ["id", "user_uid", "action", "task_id", "task_title", diff --git a/crc/services/file_service.py b/crc/services/file_service.py index 6ba2e1ad..8b5665c6 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -78,7 +78,8 @@ class FileService(object): """ Opens a reference file (assumes that it is xls file) and returns the data as a dictionary, each row keyed on the given index_column name. If there are columns that should be represented as integers, pass these as an array of int_columns, lest - you get '1.0' rather than '1' """ + you get '1.0' rather than '1' + fixme: This is stupid stupid slow. 
Place it in the database and just check if it is up to date.""" data_model = FileService.get_reference_file_data(reference_file_name) xls = ExcelFile(data_model.data) df = xls.parse(xls.sheet_names[0]) diff --git a/crc/services/study_service.py b/crc/services/study_service.py index cbf3434d..4eb8dde7 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -61,7 +61,6 @@ class StudyService(object): files = (File.from_models(model, FileService.get_file_data(model.id), FileService.get_doc_dictionary()) for model in files) study.files = list(files) - study.events = StudyService.get_events(study_id) # Calling this line repeatedly is very very slow. It creates the # master spec and runs it. Don't execute this for Abandoned studies, as # we don't have the information to process them. @@ -75,14 +74,6 @@ class StudyService(object): return study - @staticmethod - def get_events(study_id): - event_models = db.session.query(TaskEventModel).filter(TaskEventModel.study_id == study_id).all() - events = [] - for event_model in event_models: - events.append(TaskEvent(event_model, None, WorkflowMetadata(id=event_model.workflow_id))) - return events - @staticmethod def delete_study(study_id): session.query(TaskEventModel).filter_by(study_id=study_id).delete() diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 9ed7bb2c..fb0a4dcf 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -1,4 +1,5 @@ import json +from profile import Profile from tests.base_test import BaseTest @@ -114,21 +115,6 @@ class TestStudyApi(BaseTest): for approval in study.approvals: self.assertEqual(full_study['study'].title, approval['title']) - def test_get_study_has_details_about_events(self): - # Set up the study and attach a file to it. 
- self.load_example_data() - workflow = self.create_workflow('file_upload_form') - processor = WorkflowProcessor(workflow) - task = processor.next_task() - WorkflowService.log_task_action('dhf8r', processor, task, 'my_action') - api_response = self.app.get('/v1.0/study/%i' % workflow.study_id, - headers=self.logged_in_headers(), - content_type="application/json") - self.assert_success(api_response) - study = json.loads(api_response.get_data(as_text=True)) - self.assertEqual(1, len(study['events'])) - self.assertEqual('my_action', study['events'][0]['action']) - def test_add_study(self): self.load_example_data() study = self.add_test_study() diff --git a/tests/test_events.py b/tests/test_events.py new file mode 100644 index 00000000..06005ee1 --- /dev/null +++ b/tests/test_events.py @@ -0,0 +1,43 @@ +import json + +from tests.base_test import BaseTest +from crc.models.workflow import WorkflowStatus +from crc import db +from crc.api.common import ApiError +from crc.models.task_event import TaskEventModel, TaskEventSchema +from crc.services.workflow_service import WorkflowService + + +class TestEvents(BaseTest): + + + def test_list_events_by_workflow(self): + workflow_one = self.create_workflow('exclusive_gateway') + + # Start a the workflow. 
+ first_task = self.get_workflow_api(workflow_one).next_task + self.complete_form(workflow_one, first_task, {"has_bananas": True}) + workflow_one = self.get_workflow_api(workflow_one) + self.assertEqual('Task_Num_Bananas', workflow_one.next_task.name) + + # Start a second workflow + workflow_two = self.create_workflow('subprocess') + workflow_api_two = self.get_workflow_api(workflow_two) + + # Get all action events across workflows + rv = self.app.get('/v1.0/task_events?action=ASSIGNMENT', + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + tasks = TaskEventSchema(many=True).load(json_data) + self.assertEqual(2, len(tasks)) + + # Get action events for a single workflow + rv = self.app.get(f'/v1.0/task_events?action=ASSIGNMENT&workflow={workflow_one.id}', + headers=self.logged_in_headers(), + content_type="application/json") + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + tasks = TaskEventSchema(many=True).load(json_data) + self.assertEqual(1, len(tasks)) diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py index 74871476..ce3b03b5 100644 --- a/tests/test_user_roles.py +++ b/tests/test_user_roles.py @@ -111,6 +111,7 @@ class TestTasksApi(BaseTest): data['approval'] = True self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid) + def test_navigation_and_current_task_updates_through_workflow(self): submitter = self.create_user(uid='lje5u') From d617af85659b50b97283cba2485a0edcfdeb85ca Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Tue, 28 Jul 2020 11:02:49 -0400 Subject: [PATCH 39/60] All tests are passing - may need to refactor a bit, / remove comments --- crc/scripts/script.py | 38 ++++++++++++++++--- crc/scripts/study_info.py | 15 +++++--- crc/services/workflow_processor.py | 7 ++-- .../documents_approvals.bpmn | 3 +- 4 files changed, 46 insertions(+), 17 deletions(-) diff --git 
a/crc/scripts/script.py b/crc/scripts/script.py index ba5af5b7..84c2ee05 100644 --- a/crc/scripts/script.py +++ b/crc/scripts/script.py @@ -45,6 +45,32 @@ class Script(object): workflow_id) return execlist + @staticmethod + def generate_augmented_validate_list(task, study_id, workflow_id): + """ + this makes a dictionary of lambda functions that are closed over the class instance that + They represent. This is passed into PythonScriptParser as a list of helper functions that are + available for running. In general, they maintain the do_task call structure that they had, but + they always return a value rather than updating the task data. + + We may be able to remove the task for each of these calls if we are not using it other than potentially + updating the task data. + """ + + def make_closure_validate(subclass,task,study_id,workflow_id): + instance = subclass() + return lambda *a : subclass.do_task_validate_only(instance,task,study_id,workflow_id,*a) + execlist = {} + subclasses = Script.get_all_subclasses() + for x in range(len(subclasses)): + subclass = subclasses[x] + execlist[subclass.__module__.split('.')[-1]] = make_closure_validate(subclass,task,study_id, + workflow_id) + return execlist + + + + @staticmethod def get_all_subclasses(): return Script._get_all_subclasses(Script) @@ -67,12 +93,12 @@ class Script(object): return all_subclasses - # def add_data_to_task(self, task, data): - # key = self.__class__.__name__ - # if key in task.data: - # task.data[key].update(data) - # else: - # task.data[key] = data + def add_data_to_task(self, task, data): + key = self.__class__.__name__ + if key in task.data: + task.data[key].update(data) + else: + task.data[key] = data class ScriptValidationError: diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py index 6daf91ec..6b55e0fd 100644 --- a/crc/scripts/study_info.py +++ b/crc/scripts/study_info.py @@ -149,11 +149,11 @@ Returns information specific to the protocol. 
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs): """For validation only, pretend no results come back from pb""" - self.check_args(args) + self.check_args(args,2) # Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.) FileService.get_reference_file_data(FileService.DOCUMENT_LIST) FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST) - data = { + data = Box({ "study":{ "info": { "id": 12, @@ -195,7 +195,10 @@ Returns information specific to the protocol. 'id': 0, } } - } + }) + if args[0]=='documents': + return StudyService().get_documents_status(study_id) + return data['study'][args[0]] #self.add_data_to_task(task=task, data=data["study"]) #self.add_data_to_task(task, {"documents": StudyService().get_documents_status(study_id)}) @@ -205,9 +208,9 @@ Returns information specific to the protocol. if len(args) > 1: prefix = args[1] cmd = args[0] - study_info = {} - if self.__class__.__name__ in task.data: - study_info = task.data[self.__class__.__name__] + # study_info = {} + # if self.__class__.__name__ in task.data: + # study_info = task.data[self.__class__.__name__] retval = None if cmd == 'info': study = session.query(StudyModel).filter_by(id=study_id).first() diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 55100afd..f9243e68 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -49,17 +49,18 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): workflow_id = None try: - augmentMethods = Script.generate_augmented_list(task,study_id,workflow_id) + if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]: + augmentMethods = Script.generate_augmented_validate_list(task, study_id, workflow_id) + else: + augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id) super().execute(task, script, data, externalMethods=augmentMethods) except SyntaxError as e: - del(task.data['task']) 
raise ApiError('syntax_error', f'Something is wrong with your python script ' f'please correct the following:' f' {script}, {e.msg}') except NameError as e: - del(task.data['task']) raise ApiError('name_error', f'something you are referencing does not exist:' f' {script}, {e.name}') diff --git a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn index 858e95d6..c7130ee4 100644 --- a/crc/static/bpmn/documents_approvals/documents_approvals.bpmn +++ b/crc/static/bpmn/documents_approvals/documents_approvals.bpmn @@ -53,8 +53,7 @@ Flow_0c7ryff Flow_142jtxs - StudyInfo = {} -StudyInfo['approvals'] = study_info('approvals') + StudyInfo['approvals'] = study_info('approvals') Flow_1k3su2q From f15626033d95803c95bbd14e8d7d6ec4c7ae3c66 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 28 Jul 2020 13:33:38 -0400 Subject: [PATCH 40/60] Allow the workflow to be requested without making changes to the workflow - requires that you specify a read_only flag of true, otherwise it assumes that you want a fully prepared workflow with the next ready task set to run. 
--- crc/api.yml | 6 ++++++ crc/api/common.py | 5 +++++ crc/api/workflow.py | 13 +++++++++--- crc/models/api_models.py | 8 ++++--- crc/services/workflow_processor.py | 11 +++++----- crc/services/workflow_service.py | 5 +++-- tests/base_test.py | 9 ++++---- tests/files/test_files_api.py | 3 ++- tests/study/test_study_service.py | 1 + tests/test_tasks_api.py | 21 +++++++++++++++++++ tests/workflow/test_workflow_processor.py | 6 ++++++ .../test_workflow_processor_multi_instance.py | 1 + .../test_workflow_spec_validation_api.py | 4 ++-- 13 files changed, 73 insertions(+), 20 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index f23f0ace..6304d513 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -622,6 +622,12 @@ paths: description: Set this to true to reset the workflow schema: type: boolean + - name: read_only + in: query + required: false + description: Does not run any automatic or script tasks and should not be used for updates. + schema: + type: boolean tags: - Workflows and Tasks responses: diff --git a/crc/api/common.py b/crc/api/common.py index cb527c73..f200401d 100644 --- a/crc/api/common.py +++ b/crc/api/common.py @@ -24,6 +24,11 @@ class ApiError(Exception): instance.task_id = task.task_spec.name or "" instance.task_name = task.task_spec.description or "" instance.file_name = task.workflow.spec.file or "" + + # Fixme: spiffworkflow is doing something weird where task ends up referenced in the data in some cases. 
+ if "task" in task.data: + task.data.pop("task") + instance.task_data = task.data app.logger.error(message, exc_info=True) return instance diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 3418d50a..5a9f28e6 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -95,11 +95,18 @@ def delete_workflow_specification(spec_id): session.commit() -def get_workflow(workflow_id, soft_reset=False, hard_reset=False): +def get_workflow(workflow_id, soft_reset=False, hard_reset=False, read_only=False): + """Soft reset will attempt to update to the latest spec without starting over, + Hard reset will update to the latest spec and start from the beginning. + Read Only will return the workflow in a read only state, without running any + engine tasks or logging any events. """ workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - workflow_api_model = WorkflowService.processor_to_workflow_api(processor) - WorkflowService.update_task_assignments(processor) + if not read_only: + processor.do_engine_steps() + processor.save() + WorkflowService.update_task_assignments(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor, read_only=read_only) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 843609e0..6b8d17db 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -143,7 +143,8 @@ class NavigationItemSchema(ma.Schema): class WorkflowApi(object): def __init__(self, id, status, next_task, navigation, - spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title): + spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, + last_updated, title, read_only): self.id = id self.status = status self.next_task = next_task # The next task that requires user input. 
@@ -155,13 +156,14 @@ class WorkflowApi(object): self.completed_tasks = completed_tasks self.last_updated = last_updated self.title = title + self.read_only = read_only class WorkflowApiSchema(ma.Schema): class Meta: model = WorkflowApi fields = ["id", "status", "next_task", "navigation", "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks", - "last_updated", "title"] + "last_updated", "title", "read_only"] unknown = INCLUDE status = EnumField(WorkflowStatus) @@ -172,7 +174,7 @@ class WorkflowApiSchema(ma.Schema): def make_workflow(self, data, **kwargs): keys = ['id', 'status', 'next_task', 'navigation', 'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks", - "last_updated", "title"] + "last_updated", "title", "read_only"] filtered_fields = {key: data[key] for key in keys} filtered_fields['next_task'] = TaskSchema().make_task(data['next_task']) return WorkflowApi(**filtered_fields) diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 165d3313..535fb3eb 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -117,7 +117,8 @@ class WorkflowProcessor(object): STUDY_ID_KEY = "study_id" VALIDATION_PROCESS_KEY = "validate_only" - def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False): + def __init__(self, workflow_model: WorkflowModel, + soft_reset=False, hard_reset=False, validate_only=False): """Create a Workflow Processor based on the serialized information available in the workflow model. If soft_reset is set to true, it will try to use the latest version of the workflow specification without resetting to the beginning of the workflow. This will work for some minor changes to the spec. 
@@ -180,10 +181,10 @@ class WorkflowProcessor(object): bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine) bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only - try: - bpmn_workflow.do_engine_steps() - except WorkflowException as we: - raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) +# try: +# bpmn_workflow.do_engine_steps() +# except WorkflowException as we: +# raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) return bpmn_workflow def save(self): diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index e078166b..9adbbd3c 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -216,7 +216,7 @@ class WorkflowService(object): return ''.join(random.choice(letters) for i in range(string_length)) @staticmethod - def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None): + def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None, read_only=False): """Returns an API model representing the state of the current workflow, if requested, and possible, next_task is set to the current_task.""" @@ -260,7 +260,8 @@ class WorkflowService(object): total_tasks=len(navigation), completed_tasks=processor.workflow_model.completed_tasks, last_updated=processor.workflow_model.last_updated, - title=spec.display_name + title=spec.display_name, + read_only=read_only ) if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. # This may or may not work, sometimes there is no next task to complete. 
diff --git a/tests/base_test.py b/tests/base_test.py index 3f0b2405..d627fb9f 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -308,12 +308,13 @@ class BaseTest(unittest.TestCase): db.session.commit() return approval - def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"): + def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, read_only=False, user_uid="dhf8r"): user = session.query(UserModel).filter_by(uid=user_uid).first() self.assertIsNotNone(user) - - rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' % - (workflow.id, str(soft_reset), str(hard_reset)), + rv = self.app.get(f'/v1.0/workflow/{workflow.id}' + f'?soft_reset={str(soft_reset)}' + f'&hard_reset={str(hard_reset)}' + f'&read_only={str(read_only)}', headers=self.logged_in_headers(user), content_type="application/json") self.assert_success(rv) diff --git a/tests/files/test_files_api.py b/tests/files/test_files_api.py index 59e6c1f6..02feb8d0 100644 --- a/tests/files/test_files_api.py +++ b/tests/files/test_files_api.py @@ -72,10 +72,10 @@ class TestFilesApi(BaseTest): self.assertEqual(file, file2) def test_add_file_from_task_and_form_errors_on_invalid_form_field_name(self): - self.load_example_data() self.create_reference_document() workflow = self.create_workflow('file_upload_form') processor = WorkflowProcessor(workflow) + processor.do_engine_steps() task = processor.next_task() data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')} correct_name = task.task_spec.form.fields[0].id @@ -96,6 +96,7 @@ class TestFilesApi(BaseTest): self.create_reference_document() workflow = self.create_workflow('file_upload_form') processor = WorkflowProcessor(workflow) + processor.do_engine_steps() task = processor.next_task() data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')} correct_name = task.task_spec.form.fields[0].id diff --git a/tests/study/test_study_service.py b/tests/study/test_study_service.py index 
f1e43c8a..7ba5f568 100644 --- a/tests/study/test_study_service.py +++ b/tests/study/test_study_service.py @@ -79,6 +79,7 @@ class TestStudyService(BaseTest): # Initialize the Workflow with the workflow processor. workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow.id).first() processor = WorkflowProcessor(workflow_model) + processor.do_engine_steps() # Assure the workflow is now started, and knows the total and completed tasks. studies = StudyService.get_studies_for_user(user) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 9b8b5d68..c1be63b6 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -9,6 +9,7 @@ from crc import session, app from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema from crc.models.file import FileModelSchema from crc.models.workflow import WorkflowStatus +from crc.models.task_event import TaskEventModel class TestTasksApi(BaseTest): @@ -42,6 +43,24 @@ class TestTasksApi(BaseTest): """ self.assertTrue(str.startswith(task.documentation, expected_docs)) + def test_get_read_only_workflow(self): + # Set up a new workflow + workflow = self.create_workflow('two_forms') + # get the first form in the two form workflow. + workflow_api = self.get_workflow_api(workflow, read_only=True) + + # There should be no task event logs related to the workflow at this point. + task_events = session.query(TaskEventModel).filter(TaskEventModel.workflow_id == workflow.id).all() + self.assertEqual(0, len(task_events)) + + # Since the workflow was not started, the call to read-only should not execute any engine steps the + # current task should be the start event. 
+ self.assertEqual("Start", workflow_api.next_task.name) + + # the workflow_api should have a read_only attribute set to true + self.assertEquals(True, workflow_api.read_only) + + def test_two_forms_task(self): # Set up a new workflow self.load_example_data() @@ -457,3 +476,5 @@ class TestTasksApi(BaseTest): workflow = self.get_workflow_api(workflow) self.assertEqual(WorkflowStatus.complete, workflow.status) + + diff --git a/tests/workflow/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py index a51f029d..8b75dfb3 100644 --- a/tests/workflow/test_workflow_processor.py +++ b/tests/workflow/test_workflow_processor.py @@ -36,6 +36,7 @@ class TestWorkflowProcessor(BaseTest): workflow_spec_model = self.load_test_spec("random_fact") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(study.id, processor.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY]) self.assertIsNotNone(processor) self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) @@ -62,6 +63,7 @@ class TestWorkflowProcessor(BaseTest): files = session.query(FileModel).filter_by(workflow_spec_id='decision_table').all() self.assertEqual(2, len(files)) processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) next_user_tasks = processor.next_user_tasks() self.assertEqual(1, len(next_user_tasks)) @@ -86,6 +88,7 @@ class TestWorkflowProcessor(BaseTest): workflow_spec_model = self.load_test_spec("parallel_tasks") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) # Complete the first steps of the 4 parallel tasks @@ -127,6 +130,7 @@ class TestWorkflowProcessor(BaseTest): study = session.query(StudyModel).first() 
workflow_spec_model = self.load_test_spec("parallel_tasks") processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) next_user_tasks = processor.next_user_tasks() self.assertEqual(4, len(next_user_tasks)) @@ -215,6 +219,7 @@ class TestWorkflowProcessor(BaseTest): self.assertEqual(2, len(files)) workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id="docx").first() processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) next_user_tasks = processor.next_user_tasks() self.assertEqual(1, len(next_user_tasks)) @@ -278,6 +283,7 @@ class TestWorkflowProcessor(BaseTest): study = session.query(StudyModel).first() workflow_spec_model = self.load_test_spec("two_forms") processor = self.get_processor(study, workflow_spec_model) + processor.do_engine_steps() self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) task = processor.next_task() task.data = {"color": "blue"} diff --git a/tests/workflow/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py index a67cae7f..1473ed3a 100644 --- a/tests/workflow/test_workflow_processor_multi_instance.py +++ b/tests/workflow/test_workflow_processor_multi_instance.py @@ -47,6 +47,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): workflow_spec_model = self.load_test_spec("multi_instance") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor.bpmn_workflow.do_engine_steps() self.assertEqual(study.id, processor.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY]) self.assertIsNotNone(processor) self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) diff --git a/tests/workflow/test_workflow_spec_validation_api.py 
b/tests/workflow/test_workflow_spec_validation_api.py index 0c17892e..da389168 100644 --- a/tests/workflow/test_workflow_spec_validation_api.py +++ b/tests/workflow/test_workflow_spec_validation_api.py @@ -89,7 +89,7 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script") self.assertEqual(2, len(errors)) - self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("workflow_validation_exception", errors[0]['code']) self.assertTrue("NoSuchScript" in errors[0]['message']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) @@ -99,7 +99,7 @@ class TestWorkflowSpecValidation(BaseTest): self.load_example_data() errors = self.validate_workflow("invalid_script2") self.assertEqual(2, len(errors)) - self.assertEqual("error_loading_workflow", errors[0]['code']) + self.assertEqual("workflow_validation_exception", errors[0]['code']) self.assertEqual("Invalid_Script_Task", errors[0]['task_id']) self.assertEqual("An Invalid Script Reference", errors[0]['task_name']) self.assertEqual("invalid_script2.bpmn", errors[0]['file_name']) From 4d11fc04a014e40baa456ec4986e537063bb0931 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 28 Jul 2020 13:51:29 -0400 Subject: [PATCH 41/60] dropping the "read_only" flag in favor of a "do_engine_steps" flag, which more clearly defines what is happening. 
--- crc/api.yml | 4 ++-- crc/api/workflow.py | 6 +++--- crc/models/api_models.py | 7 +++---- crc/services/workflow_service.py | 5 ++--- tests/base_test.py | 4 ++-- tests/test_tasks_api.py | 8 ++++---- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index 6304d513..922c96af 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -622,10 +622,10 @@ paths: description: Set this to true to reset the workflow schema: type: boolean - - name: read_only + - name: do_engine_steps in: query required: false - description: Does not run any automatic or script tasks and should not be used for updates. + description: Defaults to true; can be set to false if you are just looking at the workflow, not completing it. schema: type: boolean tags: diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 5a9f28e6..5d185ae7 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -95,18 +95,18 @@ def delete_workflow_specification(spec_id): session.commit() -def get_workflow(workflow_id, soft_reset=False, hard_reset=False, read_only=False): +def get_workflow(workflow_id, soft_reset=False, hard_reset=False, do_engine_steps=True): """Soft reset will attempt to update to the latest spec without starting over, Hard reset will update to the latest spec and start from the beginning. Read Only will return the workflow in a read only state, without running any engine tasks or logging any events.
""" workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - if not read_only: + if do_engine_steps: processor.do_engine_steps() processor.save() WorkflowService.update_task_assignments(processor) - workflow_api_model = WorkflowService.processor_to_workflow_api(processor, read_only=read_only) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 6b8d17db..7d1088e9 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -144,7 +144,7 @@ class NavigationItemSchema(ma.Schema): class WorkflowApi(object): def __init__(self, id, status, next_task, navigation, spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, - last_updated, title, read_only): + last_updated, title): self.id = id self.status = status self.next_task = next_task # The next task that requires user input. 
@@ -156,14 +156,13 @@ class WorkflowApi(object): self.completed_tasks = completed_tasks self.last_updated = last_updated self.title = title - self.read_only = read_only class WorkflowApiSchema(ma.Schema): class Meta: model = WorkflowApi fields = ["id", "status", "next_task", "navigation", "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks", - "last_updated", "title", "read_only"] + "last_updated", "title"] unknown = INCLUDE status = EnumField(WorkflowStatus) @@ -174,7 +173,7 @@ class WorkflowApiSchema(ma.Schema): def make_workflow(self, data, **kwargs): keys = ['id', 'status', 'next_task', 'navigation', 'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks", - "last_updated", "title", "read_only"] + "last_updated", "title"] filtered_fields = {key: data[key] for key in keys} filtered_fields['next_task'] = TaskSchema().make_task(data['next_task']) return WorkflowApi(**filtered_fields) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 9adbbd3c..e078166b 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -216,7 +216,7 @@ class WorkflowService(object): return ''.join(random.choice(letters) for i in range(string_length)) @staticmethod - def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None, read_only=False): + def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None): """Returns an API model representing the state of the current workflow, if requested, and possible, next_task is set to the current_task.""" @@ -260,8 +260,7 @@ class WorkflowService(object): total_tasks=len(navigation), completed_tasks=processor.workflow_model.completed_tasks, last_updated=processor.workflow_model.last_updated, - title=spec.display_name, - read_only=read_only + title=spec.display_name ) if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. 
# This may or may not work, sometimes there is no next task to complete. diff --git a/tests/base_test.py b/tests/base_test.py index d627fb9f..056ce090 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -308,13 +308,13 @@ class BaseTest(unittest.TestCase): db.session.commit() return approval - def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, read_only=False, user_uid="dhf8r"): + def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, do_engine_steps=True, user_uid="dhf8r"): user = session.query(UserModel).filter_by(uid=user_uid).first() self.assertIsNotNone(user) rv = self.app.get(f'/v1.0/workflow/{workflow.id}' f'?soft_reset={str(soft_reset)}' f'&hard_reset={str(hard_reset)}' - f'&read_only={str(read_only)}', + f'&do_engine_steps={str(do_engine_steps)}', headers=self.logged_in_headers(user), content_type="application/json") self.assert_success(rv) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index c1be63b6..02ad65ca 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -43,11 +43,11 @@ class TestTasksApi(BaseTest): """ self.assertTrue(str.startswith(task.documentation, expected_docs)) - def test_get_read_only_workflow(self): + def test_get_workflow_without_running_engine_steps(self): # Set up a new workflow workflow = self.create_workflow('two_forms') # get the first form in the two form workflow. - workflow_api = self.get_workflow_api(workflow, read_only=True) + workflow_api = self.get_workflow_api(workflow, do_engine_steps=False) # There should be no task event logs related to the workflow at this point. task_events = session.query(TaskEventModel).filter(TaskEventModel.workflow_id == workflow.id).all() @@ -57,8 +57,8 @@ class TestTasksApi(BaseTest): # current task should be the start event. 
self.assertEqual("Start", workflow_api.next_task.name) - # the workflow_api should have a read_only attribute set to true - self.assertEquals(True, workflow_api.read_only) + def test_get_form_for_previously_completed_task(self): + """Assure we can look at previously completed steps without moving the token for the workflow.""" def test_two_forms_task(self): From 0ea4c13d09b46b085d20b53faa6bc05e5037f42c Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 28 Jul 2020 17:16:48 -0400 Subject: [PATCH 42/60] Convert protocol builder status to always be in lower case in order to better match the front end. And also fixing an issue with the multi_instance that is oddly broken suddenly, and I don't know why. --- crc/api.yml | 2 +- crc/api/study.py | 2 +- crc/models/protocol_builder.py | 10 +++--- crc/models/study.py | 6 ++-- crc/services/study_service.py | 4 +-- migrations/versions/2e7b377cbc7b_.py | 32 +++++++++++++++++++ tests/base_test.py | 6 ++-- tests/data/multi_instance/multi_instance.bpmn | 4 +-- tests/study/test_study_api.py | 10 +++--- tests/study/test_study_service.py | 2 +- tests/test_authentication.py | 2 +- 11 files changed, 56 insertions(+), 24 deletions(-) create mode 100644 migrations/versions/2e7b377cbc7b_.py diff --git a/crc/api.yml b/crc/api.yml index 922c96af..b3d61fc1 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -1064,7 +1064,7 @@ components: example: dhf8r protocol_builder_status: type: string - enum: [INCOMPLETE, ACTIVE, HOLD, OPEN, ABANDONED] + enum: ['incomplete', 'active', 'hold', 'open', 'abandoned'] example: done sponsor: type: string diff --git a/crc/api/study.py b/crc/api/study.py index 8fdd1b4a..b5572527 100644 --- a/crc/api/study.py +++ b/crc/api/study.py @@ -21,7 +21,7 @@ def add_study(body): title=body['title'], primary_investigator_id=body['primary_investigator_id'], last_updated=datetime.now(), - protocol_builder_status=ProtocolBuilderStatus.ACTIVE) + protocol_builder_status=ProtocolBuilderStatus.active) session.add(study_model) errors = 
StudyService._add_all_workflow_specs_to_study(study_model) diff --git a/crc/models/protocol_builder.py b/crc/models/protocol_builder.py index 9ff1098f..a91ae84b 100644 --- a/crc/models/protocol_builder.py +++ b/crc/models/protocol_builder.py @@ -22,11 +22,11 @@ class ProtocolBuilderStatus(enum.Enum): # • Hold: store boolean value in CR Connect (add to Study Model) # • Open To Enrollment: has start date and HSR number? # • Abandoned: deleted in PB - INCOMPLETE = 'incomplete' # Found in PB but not ready to start (not q_complete) - ACTIVE = 'active', # found in PB, marked as "q_complete" and no HSR number and not hold - HOLD = 'hold', # CR Connect side, if the Study ias marked as "hold". - OPEN = 'open', # Open To Enrollment: has start date and HSR number? - ABANDONED = 'Abandoned' # Not found in PB + incomplete = 'incomplete' # Found in PB but not ready to start (not q_complete) + active = 'active' # found in PB, marked as "q_complete" and no HSR number and not hold + hold = 'hold' # CR Connect side, if the Study is marked as "hold". + open = 'open' # Open To Enrollment: has start date and HSR number?
+ abandoned = 'abandoned' # Not found in PB #DRAFT = 'draft', # !Q_COMPLETE diff --git a/crc/models/study.py b/crc/models/study.py index e14fe0a6..f1ad0099 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -33,11 +33,11 @@ class StudyModel(db.Model): self.user_uid = pbs.NETBADGEID self.last_updated = pbs.DATE_MODIFIED - self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE + self.protocol_builder_status = ProtocolBuilderStatus.active if pbs.HSRNUMBER: - self.protocol_builder_status = ProtocolBuilderStatus.OPEN + self.protocol_builder_status = ProtocolBuilderStatus.open if self.on_hold: - self.protocol_builder_status = ProtocolBuilderStatus.HOLD + self.protocol_builder_status = ProtocolBuilderStatus.hold class WorkflowMetadata(object): diff --git a/crc/services/study_service.py b/crc/services/study_service.py index 4eb8dde7..1d15d361 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -64,7 +64,7 @@ class StudyService(object): # Calling this line repeatedly is very very slow. It creates the # master spec and runs it. Don't execute this for Abandoned studies, as # we don't have the information to process them. 
- if study.protocol_builder_status != ProtocolBuilderStatus.ABANDONED: + if study.protocol_builder_status != ProtocolBuilderStatus.abandoned: status = StudyService.__get_study_status(study_model) study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status) @@ -265,7 +265,7 @@ class StudyService(object): for study in db_studies: pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None) if not pb_study: - study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED + study.protocol_builder_status = ProtocolBuilderStatus.abandoned db.session.commit() diff --git a/migrations/versions/2e7b377cbc7b_.py b/migrations/versions/2e7b377cbc7b_.py new file mode 100644 index 00000000..c0eb5250 --- /dev/null +++ b/migrations/versions/2e7b377cbc7b_.py @@ -0,0 +1,32 @@ +"""empty message + +Revision ID: 2e7b377cbc7b +Revises: c4ddb69e7ef4 +Create Date: 2020-07-28 17:03:23.586828 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '2e7b377cbc7b' +down_revision = 'c4ddb69e7ef4' +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute('update study set protocol_builder_status = NULL;') + op.execute('ALTER TYPE protocolbuilderstatus RENAME TO pbs_old;') + op.execute("CREATE TYPE protocolbuilderstatus AS ENUM('incomplete', 'active', 'hold', 'open', 'abandoned')") + op.execute("ALTER TABLE study ALTER COLUMN protocol_builder_status TYPE protocolbuilderstatus USING protocol_builder_status::text::protocolbuilderstatus;") + op.execute('DROP TYPE pbs_old;') + op.execute("update study set protocol_builder_status = 'incomplete';") + +def downgrade(): + op.execute('update study set protocol_builder_status = NULL;') + op.execute('ALTER TYPE protocolbuilderstatus RENAME TO pbs_old;') + op.execute("CREATE TYPE protocolbuilderstatus AS ENUM('INCOMPLETE', 'ACTIVE', 'HOLD', 'OPEN', 'ABANDONED')") + op.execute("ALTER TABLE study ALTER COLUMN protocol_builder_status TYPE protocolbuilderstatus USING protocol_builder_status::text::protocolbuilderstatus;") + op.execute('DROP TYPE pbs_old;') diff --git a/tests/base_test.py b/tests/base_test.py index 056ce090..af0b1a20 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -60,7 +60,7 @@ class BaseTest(unittest.TestCase): 'id':0, 'title':'The impact of fried pickles on beer consumption in bipedal software developers.', 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.ACTIVE, + 'protocol_builder_status':ProtocolBuilderStatus.active, 'primary_investigator_id':'dhf8r', 'sponsor':'Sartography Pharmaceuticals', 'ind_number':'1234', @@ -70,7 +70,7 @@ class BaseTest(unittest.TestCase): 'id':1, 'title':'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels', 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.ACTIVE, + 'protocol_builder_status':ProtocolBuilderStatus.active, 'primary_investigator_id':'dhf8r', 'sponsor':'Makerspace & 
Co.', 'ind_number':'5678', @@ -241,7 +241,7 @@ class BaseTest(unittest.TestCase): study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first() if study is None: user = self.create_user(uid=uid) - study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE, + study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.active, user_uid=user.uid, primary_investigator_id=primary_investigator_id) db.session.add(study) db.session.commit() diff --git a/tests/data/multi_instance/multi_instance.bpmn b/tests/data/multi_instance/multi_instance.bpmn index 600bea80..1e0d9255 100644 --- a/tests/data/multi_instance/multi_instance.bpmn +++ b/tests/data/multi_instance/multi_instance.bpmn @@ -1,5 +1,5 @@ - + Flow_0t6p1sb @@ -18,7 +18,7 @@ - + SequenceFlow_1p568pp diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index fb0a4dcf..697e90f6 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -24,7 +24,7 @@ class TestStudyApi(BaseTest): "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents " "for Interstellar Spacecraft", "last_updated": datetime.now(tz=timezone.utc), - "protocol_builder_status": ProtocolBuilderStatus.ACTIVE, + "protocol_builder_status": ProtocolBuilderStatus.active, "primary_investigator_id": "tmm2x", "user_uid": "dhf8r", } @@ -135,7 +135,7 @@ class TestStudyApi(BaseTest): self.load_example_data() study: StudyModel = session.query(StudyModel).first() study.title = "Pilot Study of Fjord Placement for Single Fraction Outcomes to Cortisol Susceptibility" - study.protocol_builder_status = ProtocolBuilderStatus.ACTIVE + study.protocol_builder_status = ProtocolBuilderStatus.active rv = self.app.put('/v1.0/study/%i' % study.id, content_type="application/json", headers=self.logged_in_headers(), @@ -185,11 +185,11 @@ class TestStudyApi(BaseTest): num_open = 0 for study in json_data: - if 
study['protocol_builder_status'] == 'ABANDONED': # One study does not exist in user_studies.json + if study['protocol_builder_status'] == 'abandoned': # One study does not exist in user_studies.json num_abandoned += 1 - if study['protocol_builder_status'] == 'ACTIVE': # One study is marked complete without HSR Number + if study['protocol_builder_status'] == 'active': # One study is marked complete without HSR Number num_active += 1 - if study['protocol_builder_status'] == 'OPEN': # One study is marked complete and has an HSR Number + if study['protocol_builder_status'] == 'open': # One study is marked complete and has an HSR Number num_open += 1 db_studies_after = session.query(StudyModel).all() diff --git a/tests/study/test_study_service.py b/tests/study/test_study_service.py index 7ba5f568..e9711362 100644 --- a/tests/study/test_study_service.py +++ b/tests/study/test_study_service.py @@ -40,7 +40,7 @@ class TestStudyService(BaseTest): for study in db.session.query(StudyModel).all(): StudyService().delete_study(study.id) - study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.ACTIVE, user_uid=user.uid) + study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.active, user_uid=user.uid) db.session.add(study) self.load_test_spec("random_fact", category_id=cat.id) diff --git a/tests/test_authentication.py b/tests/test_authentication.py index 7d706949..829d71e3 100644 --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -220,7 +220,7 @@ class TestAuthentication(BaseTest): return { "title": "blah", "last_updated": datetime.now(tz=timezone.utc), - "protocol_builder_status": ProtocolBuilderStatus.ACTIVE, + "protocol_builder_status": ProtocolBuilderStatus.active, "primary_investigator_id": uid, "user_uid": uid, } From 6aa21638dea19e2413fab354066491d216a7556d Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 27 Jul 2020 22:39:19 -0600 Subject: [PATCH 43/60] Updating properly study status & 
fixing tests --- crc/models/study.py | 50 ++++++++++++++++++++++++++-- migrations/versions/369d65dcb269_.py | 28 ++++++++++++++++ tests/study/test_study_api.py | 10 +++--- 3 files changed, 81 insertions(+), 7 deletions(-) create mode 100644 migrations/versions/369d65dcb269_.py diff --git a/crc/models/study.py b/crc/models/study.py index f1ad0099..5e4684ee 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -1,3 +1,6 @@ +import datetime +import json + import marshmallow from marshmallow import INCLUDE, fields from marshmallow_enum import EnumField @@ -26,6 +29,7 @@ class StudyModel(db.Model): requirements = db.Column(db.ARRAY(db.Integer), nullable=True) on_hold = db.Column(db.Boolean, default=False) enrollment_date = db.Column(db.DateTime(timezone=True), nullable=True) + changes_history = db.Column(db.JSON, nullable=True) def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy): self.hsr_number = pbs.HSRNUMBER @@ -135,9 +139,29 @@ class Study(object): return instance def update_model(self, study_model: StudyModel): - for k,v in self.__dict__.items(): - if not k.startswith('_'): - study_model.__dict__[k] = v + """As the case for update was very reduced, it's mostly and specifically + updating only the study status and generating a history record + """ + pb_status = ProtocolBuilderStatus(self.protocol_builder_status) + study_model.last_updated = datetime.datetime.now() + study_model.protocol_builder_status = pb_status + + if pb_status == ProtocolBuilderStatus.OPEN: + study_model.enrollment_date = self.enrollment_date + + change = { + 'status': ProtocolBuilderStatus(self.protocol_builder_status).value, + 'comment': '' if not hasattr(self, 'comment') else self.comment, + 'date': str(datetime.datetime.now()) + } + + if study_model.changes_history: + changes_history = json.loads(study_model.changes_history) + changes_history.append(change) + else: + changes_history = [change] + study_model.changes_history = json.dumps(changes_history) + def 
model_args(self): """Arguments that can be passed into the Study Model to update it.""" @@ -147,6 +171,26 @@ class Study(object): return self_dict +class StudyForUpdateSchema(ma.Schema): + + id = fields.Integer(required=False, allow_none=True) + protocol_builder_status = EnumField(ProtocolBuilderStatus, by_value=True) + hsr_number = fields.String(allow_none=True) + sponsor = fields.String(allow_none=True) + ind_number = fields.String(allow_none=True) + enrollment_date = fields.DateTime(allow_none=True) + comment = fields.String(allow_none=True) + + class Meta: + model = Study + unknown = INCLUDE + + @marshmallow.post_load + def make_study(self, data, **kwargs): + """Can load the basic study data for updates to the database, but categories are write only""" + return Study(**data) + + class StudySchema(ma.Schema): id = fields.Integer(required=False, allow_none=True) diff --git a/migrations/versions/369d65dcb269_.py b/migrations/versions/369d65dcb269_.py new file mode 100644 index 00000000..d13d7736 --- /dev/null +++ b/migrations/versions/369d65dcb269_.py @@ -0,0 +1,28 @@ +"""empty message + +Revision ID: 369d65dcb269 +Revises: c4ddb69e7ef4 +Create Date: 2020-07-27 20:05:29.524553 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '369d65dcb269' +down_revision = 'c4ddb69e7ef4' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('study', sa.Column('changes_history', sa.JSON(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column('study', 'changes_history') + # ### end Alembic commands ### diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 697e90f6..5e714e53 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -24,16 +24,17 @@ class TestStudyApi(BaseTest): "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents " "for Interstellar Spacecraft", "last_updated": datetime.now(tz=timezone.utc), - "protocol_builder_status": ProtocolBuilderStatus.active, "primary_investigator_id": "tmm2x", "user_uid": "dhf8r", } def add_test_study(self): + study_schema = StudySchema().dump(self.TEST_STUDY) + study_schema['protocol_builder_status'] = ProtocolBuilderStatus.ACTIVE.value rv = self.app.post('/v1.0/study', content_type="application/json", headers=self.logged_in_headers(), - data=json.dumps(StudySchema().dump(self.TEST_STUDY))) + data=json.dumps(study_schema)) self.assert_success(rv) return json.loads(rv.get_data(as_text=True)) @@ -135,11 +136,12 @@ class TestStudyApi(BaseTest): self.load_example_data() study: StudyModel = session.query(StudyModel).first() study.title = "Pilot Study of Fjord Placement for Single Fraction Outcomes to Cortisol Susceptibility" - study.protocol_builder_status = ProtocolBuilderStatus.active + study_schema = StudySchema().dump(study) + study_schema['protocol_builder_status'] = ProtocolBuilderStatus.ACTIVE.value rv = self.app.put('/v1.0/study/%i' % study.id, content_type="application/json", headers=self.logged_in_headers(), - data=json.dumps(StudySchema().dump(study))) + data=json.dumps(study_schema)) self.assert_success(rv) json_data = json.loads(rv.get_data(as_text=True)) self.assertEqual(study.title, json_data['title']) From 73a6b7adf1052947bf07db2b21d8bfdc13771581 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Tue, 28 Jul 2020 13:39:52 -0600 Subject: [PATCH 44/60] Fixing tests --- crc/models/study.py | 2 +- tests/study/test_study_api.py | 4 ++-- 
2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index 5e4684ee..51f3be2a 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -196,7 +196,7 @@ class StudySchema(ma.Schema): id = fields.Integer(required=False, allow_none=True) categories = fields.List(fields.Nested(CategorySchema), dump_only=True) warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True) - protocol_builder_status = EnumField(ProtocolBuilderStatus) + protocol_builder_status = EnumField(ProtocolBuilderStatus, by_value=True) hsr_number = fields.String(allow_none=True) sponsor = fields.String(allow_none=True) ind_number = fields.String(allow_none=True) diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 5e714e53..50e984f9 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -145,7 +145,7 @@ class TestStudyApi(BaseTest): self.assert_success(rv) json_data = json.loads(rv.get_data(as_text=True)) self.assertEqual(study.title, json_data['title']) - self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status']) + self.assertEqual(study.protocol_builder_status.value, json_data['protocol_builder_status']) @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_studies @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs @@ -230,7 +230,7 @@ class TestStudyApi(BaseTest): json_data = json.loads(rv.get_data(as_text=True)) self.assertEqual(study.id, json_data['id']) self.assertEqual(study.title, json_data['title']) - self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status']) + self.assertEqual(study.protocol_builder_status.value, json_data['protocol_builder_status']) self.assertEqual(study.primary_investigator_id, json_data['primary_investigator_id']) self.assertEqual(study.sponsor, json_data['sponsor']) self.assertEqual(study.ind_number, 
json_data['ind_number']) From de49397549e500d02ab4eb1aa22ecae537e2fc17 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 29 Jul 2020 10:51:34 -0600 Subject: [PATCH 45/60] Adjustings tests for protocol builder status changes --- crc/models/study.py | 2 +- tests/study/test_study_api.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index 51f3be2a..32697896 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -146,7 +146,7 @@ class Study(object): study_model.last_updated = datetime.datetime.now() study_model.protocol_builder_status = pb_status - if pb_status == ProtocolBuilderStatus.OPEN: + if pb_status == ProtocolBuilderStatus.open: study_model.enrollment_date = self.enrollment_date change = { diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 50e984f9..5e93245e 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -30,7 +30,7 @@ class TestStudyApi(BaseTest): def add_test_study(self): study_schema = StudySchema().dump(self.TEST_STUDY) - study_schema['protocol_builder_status'] = ProtocolBuilderStatus.ACTIVE.value + study_schema['protocol_builder_status'] = ProtocolBuilderStatus.active.value rv = self.app.post('/v1.0/study', content_type="application/json", headers=self.logged_in_headers(), @@ -137,7 +137,7 @@ class TestStudyApi(BaseTest): study: StudyModel = session.query(StudyModel).first() study.title = "Pilot Study of Fjord Placement for Single Fraction Outcomes to Cortisol Susceptibility" study_schema = StudySchema().dump(study) - study_schema['protocol_builder_status'] = ProtocolBuilderStatus.ACTIVE.value + study_schema['protocol_builder_status'] = ProtocolBuilderStatus.active.value rv = self.app.put('/v1.0/study/%i' % study.id, content_type="application/json", headers=self.logged_in_headers(), From 63537d7765f885c6621d64e21ef203179f4fb586 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 29 Jul 2020 22:45:56 -0400 Subject: 
[PATCH 46/60] Adds is_admin boolean flag to user schema --- crc/models/user.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crc/models/user.py b/crc/models/user.py index e621455b..eb431c95 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -1,6 +1,7 @@ import datetime import jwt +from marshmallow import fields from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from crc import db, app @@ -18,6 +19,7 @@ class UserModel(db.Model): first_name = db.Column(db.String, nullable=True) last_name = db.Column(db.String, nullable=True) title = db.Column(db.String, nullable=True) + # TODO: Add Department and School def is_admin(self): @@ -64,3 +66,4 @@ class UserModelSchema(SQLAlchemyAutoSchema): load_instance = True include_relationships = True + is_admin = fields.Function(lambda obj: obj.is_admin()) From d9a91c891f91443dee0db7b9891837882f993f21 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 29 Jul 2020 22:46:22 -0400 Subject: [PATCH 47/60] Updates package hashes --- Pipfile.lock | 54 +++++++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index 5f5042bf..dd3e80bb 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -611,25 +611,25 @@ }, "pandas": { "hashes": [ - "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096", - "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0", - "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453", - "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc", - "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7", - "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b", - "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8", - "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8", - "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef", - 
"sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d", - "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705", - "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9", - "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91", - "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3", - "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107", - "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e" + "sha256:0210f8fe19c2667a3817adb6de2c4fd92b1b78e1975ca60c0efa908e0985cbdb", + "sha256:0227e3a6e3a22c0e283a5041f1e3064d78fbde811217668bb966ed05386d8a7e", + "sha256:0bc440493cf9dc5b36d5d46bbd5508f6547ba68b02a28234cd8e81fdce42744d", + "sha256:16504f915f1ae424052f1e9b7cd2d01786f098fbb00fa4e0f69d42b22952d798", + "sha256:182a5aeae319df391c3df4740bb17d5300dcd78034b17732c12e62e6dd79e4a4", + "sha256:35db623487f00d9392d8af44a24516d6cb9f274afaf73cfcfe180b9c54e007d2", + "sha256:40ec0a7f611a3d00d3c666c4cceb9aa3f5bf9fbd81392948a93663064f527203", + "sha256:47a03bfef80d6812c91ed6fae43f04f2fa80a4e1b82b35aa4d9002e39529e0b8", + "sha256:4b21d46728f8a6be537716035b445e7ef3a75dbd30bd31aa1b251323219d853e", + "sha256:4d1a806252001c5db7caecbe1a26e49a6c23421d85a700960f6ba093112f54a1", + "sha256:60e20a4ab4d4fec253557d0fc9a4e4095c37b664f78c72af24860c8adcd07088", + "sha256:9f61cca5262840ff46ef857d4f5f65679b82188709d0e5e086a9123791f721c8", + "sha256:a15835c8409d5edc50b4af93be3377b5dd3eb53517e7f785060df1f06f6da0e2", + "sha256:b39508562ad0bb3f384b0db24da7d68a2608b9ddc85b1d931ccaaa92d5e45273", + "sha256:ed60848caadeacecefd0b1de81b91beff23960032cded0ac1449242b506a3b3f", + "sha256:fc714895b6de6803ac9f661abb316853d0cd657f5d23985222255ad76ccedc25" ], "index": "pypi", - "version": "==1.0.5" + "version": "==1.1.0" }, "psycopg2-binary": { "hashes": [ @@ -1122,6 +1122,12 @@ "markers": "python_version < '3.8'", "version": "==1.7.0" }, + "iniconfig": { + "hashes": [ + 
"sha256:aa0b40f50a00e72323cb5d41302f9c6165728fd764ac8822aa3fff00a40d56b4" + ], + "version": "==1.0.0" + }, "more-itertools": { "hashes": [ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", @@ -1172,11 +1178,11 @@ }, "pytest": { "hashes": [ - "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1", - "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8" + "sha256:869ec27f9b89964ccfe4fbdd5ccb8d3f285aaa3e9aa16a8491b9c8829148c230", + "sha256:a64d8fb4c15cdc70dae047352e980a197d855747cc885eb332cb73ddcc769168" ], "index": "pypi", - "version": "==5.4.3" + "version": "==6.0.0" }, "six": { "hashes": [ @@ -1186,12 +1192,12 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, - "wcwidth": { + "toml": { "hashes": [ - "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", - "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" + "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f", + "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" ], - "version": "==0.2.5" + "version": "==0.10.1" }, "zipp": { "hashes": [ From d301e9e6fa3efb709eda65d3cb4aee73f20883d4 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Wed, 29 Jul 2020 22:47:47 -0400 Subject: [PATCH 48/60] Adds list_users endpoint. Adds admin impersonate uid parameter to user endpoint. Adds some utility methods to user service. Refactors authentication tests. 
--- crc/api.yml | 26 ++++- crc/api/user.py | 24 +++-- crc/services/user_service.py | 43 ++++++-- tests/base_test.py | 114 ++++++++++++--------- tests/test_authentication.py | 191 ++++++++++++++++++++++++++--------- 5 files changed, 290 insertions(+), 108 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index b3d61fc1..68f2f12a 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -31,6 +31,13 @@ paths: '304': description: Redirection to the hosted frontend with an auth_token header. /user: + parameters: + - name: admin_impersonate_uid + in: query + required: false + description: For admins, the unique uid of an existing user to impersonate. + schema: + type: string get: operationId: crc.api.user.get_current_user summary: Returns the current user. @@ -38,11 +45,27 @@ paths: - Users responses: '200': - description: The currently authenticated user. + description: The currently-authenticated user, or, if the current user is an admin and admin_impersonate_uid is provided, this will be the user with the given uid. content: application/json: schema: $ref: "#/components/schemas/User" + /list_users: + get: + operationId: crc.api.user.get_all_users + security: + - auth_admin: ['secret'] + summary: Returns a list of all users in the database. + tags: + - Users + responses: + '200': + description: All users in the database. + content: + application/json: + schema: + type: array + $ref: "#/components/schemas/User" # /v1.0/study /study: get: @@ -56,6 +79,7 @@ paths: content: application/json: schema: + type: array $ref: "#/components/schemas/Study" post: operationId: crc.api.study.add_study diff --git a/crc/api/user.py b/crc/api/user.py index 49b447ac..483edd65 100644 --- a/crc/api/user.py +++ b/crc/api/user.py @@ -5,6 +5,7 @@ from crc import app, db from crc.api.common import ApiError from crc.models.user import UserModel, UserModelSchema from crc.services.ldap_service import LdapService, LdapModel +from crc.services.user_service import UserService """ .. 
module:: crc.api.user @@ -56,8 +57,9 @@ def verify_token(token=None): return token_info else: - raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.", - status_code=403) + raise ApiError("no_user", + "User not found. Please login via the frontend app before accessing this feature.", + status_code=403) else: # Fall back to a default user if this is not production. @@ -67,7 +69,6 @@ def verify_token(token=None): return token_info - def verify_token_admin(token=None): """ Verifies the token for the user (if provided) in non-production environment. @@ -85,8 +86,20 @@ def verify_token_admin(token=None): token_info = UserModel.decode_auth_token(token) return token_info -def get_current_user(): - return UserModelSchema().dump(g.user) + +def get_current_user(admin_impersonate_uid=None): + if UserService.has_user(): + if admin_impersonate_uid is not None and UserService.user_is_admin(): + UserService.impersonate(admin_impersonate_uid) + + user = UserService.current_user(UserService.admin_is_impersonating()) + return UserModelSchema().dump(user) + + +def get_all_users(): + if "user" in g and g.user.is_admin(): + all_users = db.session.query(UserModel).all() + return UserModelSchema(many=True).dump(all_users) def login( @@ -129,7 +142,6 @@ def login( # X-Forwarded-Server: dev.crconnect.uvadcos.io # Connection: Keep-Alive - # If we're in production, override any uid with the uid from the SSO request headers if _is_production(): uid = _get_request_uid(request) diff --git a/crc/services/user_service.py b/crc/services/user_service.py index 6b2887f5..5d12601a 100644 --- a/crc/services/user_service.py +++ b/crc/services/user_service.py @@ -1,31 +1,62 @@ from flask import g +from crc import db from crc.api.common import ApiError +from crc.models.user import UserModel class UserService(object): """Provides common tools for working with users""" + # Returns true if the current user is logged in. 
@staticmethod def has_user(): - if 'user' not in g or not g.user: - return False - else: - return True + return 'user' in g and bool(g.user) + + # Returns true if the current user is an admin. + @staticmethod + def user_is_admin(): + return UserService.has_user() and g.user.is_admin() + + # Returns true if the current admin user is impersonating another user. + @staticmethod + def admin_is_impersonating(): + return UserService.user_is_admin() and \ + "impersonate_user" in g and \ + g.impersonate_user is not None + + # Returns true if the given user uid is different from the current user's uid. + @staticmethod + def is_different_user(uid): + return UserService.has_user() and uid is not None and uid is not g.user.uid @staticmethod def current_user(allow_admin_impersonate=False): - if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) # Admins can pretend to be different users and act on a users behalf in # some circumstances. - if g.user.is_admin() and allow_admin_impersonate and "impersonate_user" in g: + if allow_admin_impersonate and UserService.admin_is_impersonating(): return g.impersonate_user else: return g.user + # Admins can pretend to be different users and act on a users behalf in some circumstances. + # This method allows an admin user to start impersonating another user with the given uid. + # Stops impersonating if the uid is None or invalid. + @staticmethod + def impersonate(uid=None): + # Clear out the current impersonating user. + g.impersonate_user = None + + if not UserService.has_user(): + raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + + if not UserService.admin_is_impersonating() and UserService.is_different_user(uid): + # Impersonate the user if the given uid is valid. 
+ g.impersonate_user = db.session.query(UserModel).filter(UserModel.uid == uid).first() + @staticmethod def in_list(uids, allow_admin_impersonate=False): """Returns true if the current user's id is in the given list of ids. False if there diff --git a/tests/base_test.py b/tests/base_test.py index 81ccc7bb..f5b66aa9 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -2,8 +2,6 @@ # IMPORTANT - Environment must be loaded before app, models, etc.... import os -from crc.services.user_service import UserService - os.environ["TESTING"] = "true" import json @@ -18,17 +16,19 @@ from crc.models.api_models import WorkflowApiSchema, MultiInstanceType from crc.models.approval import ApprovalModel, ApprovalStatus from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.task_event import TaskEventModel from crc.models.study import StudyModel +from crc.models.task_event import TaskEventModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel +from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel from crc.services.file_service import FileService from crc.services.study_service import StudyService +from crc.services.user_service import UserService from crc.services.workflow_service import WorkflowService from example_data import ExampleDataLoader -#UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES +# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES import logging + logging.basicConfig() @@ -39,48 +39,57 @@ class BaseTest(unittest.TestCase): if not app.config['TESTING']: raise (Exception("INVALID TEST CONFIGURATION. This is almost always in import order issue." 
- "The first class to import in each test should be the base_test.py file.")) + "The first class to import in each test should be the base_test.py file.")) auths = {} test_uid = "dhf8r" users = [ { - 'uid':'dhf8r', - 'email_address':'dhf8r@virginia.EDU', - 'display_name':'Daniel Harold Funk', - 'affiliation':'staff@virginia.edu;member@virginia.edu', - 'eppn':'dhf8r@virginia.edu', - 'first_name':'Daniel', - 'last_name':'Funk', - 'title':'SOFTWARE ENGINEER V' - } + 'uid': 'dhf8r', + 'email_address': 'dhf8r@virginia.EDU', + 'display_name': 'Daniel Harold Funk', + 'affiliation': 'staff@virginia.edu;member@virginia.edu', + 'eppn': 'dhf8r@virginia.edu', + 'first_name': 'Daniel', + 'last_name': 'Funk', + 'title': 'SOFTWARE ENGINEER V' + }, + { + 'uid': 'lbd3p', + 'email_address': 'lbd3p@virginia.EDU', + 'display_name': 'Laura Barnes', + 'affiliation': 'staff@virginia.edu;member@virginia.edu', + 'eppn': 'lbd3p@virginia.edu', + 'first_name': 'Laura', + 'last_name': 'Barnes', + 'title': 'Associate Professor of Systems and Information Engineering' + }, ] studies = [ { - 'id':0, - 'title':'The impact of fried pickles on beer consumption in bipedal software developers.', - 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.active, - 'primary_investigator_id':'dhf8r', - 'sponsor':'Sartography Pharmaceuticals', - 'ind_number':'1234', - 'user_uid':'dhf8r' + 'id': 0, + 'title': 'The impact of fried pickles on beer consumption in bipedal software developers.', + 'last_updated': datetime.datetime.now(), + 'protocol_builder_status': ProtocolBuilderStatus.active, + 'primary_investigator_id': 'dhf8r', + 'sponsor': 'Sartography Pharmaceuticals', + 'ind_number': '1234', + 'user_uid': 'dhf8r' }, { - 'id':1, - 'title':'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels', - 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.active, - 'primary_investigator_id':'dhf8r', - 
'sponsor':'Makerspace & Co.', - 'ind_number':'5678', - 'user_uid':'dhf8r' + 'id': 1, + 'title': 'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels', + 'last_updated': datetime.datetime.now(), + 'protocol_builder_status': ProtocolBuilderStatus.active, + 'primary_investigator_id': 'dhf8r', + 'sponsor': 'Makerspace & Co.', + 'ind_number': '5678', + 'user_uid': 'dhf8r' } ] - @classmethod def setUpClass(cls): app.config.from_object('config.testing') @@ -100,7 +109,11 @@ class BaseTest(unittest.TestCase): def tearDown(self): ExampleDataLoader.clean_db() - g.user = None + self.logout() + + if 'impersonate_user' in g: + g.impersonate_user = None + self.auths = {} def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'): @@ -138,8 +151,13 @@ class BaseTest(unittest.TestCase): else: ExampleDataLoader().load_test_data() - for user_json in self.users: - db.session.add(UserModel(**user_json)) + # If in production mode, only add the first user. 
+ if app.config['PRODUCTION']: + db.session.add(UserModel(**self.users[0])) + else: + for user_json in self.users: + db.session.add(UserModel(**user_json)) + db.session.commit() for study_json in self.studies: study_model = StudyModel(**study_json) @@ -220,7 +238,6 @@ class BaseTest(unittest.TestCase): return '?%s' % '&'.join(query_string_list) - def replace_file(self, name, file_path): """Replaces a stored file with the given name with the contents of the file at the given path.""" file_service = FileService() @@ -240,7 +257,8 @@ class BaseTest(unittest.TestCase): db.session.commit() return user - def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer", primary_investigator_id="lb3dp"): + def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer", + primary_investigator_id="lb3dp"): study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first() if study is None: user = self.create_user(uid=uid) @@ -294,19 +312,20 @@ class BaseTest(unittest.TestCase): file.close() def create_approval( - self, - study=None, - workflow=None, - approver_uid=None, - status=None, - version=None, + self, + study=None, + workflow=None, + approver_uid=None, + status=None, + version=None, ): study = study or self.create_study() workflow = workflow or self.create_workflow() approver_uid = approver_uid or self.test_uid status = status or ApprovalStatus.PENDING.value version = version or 1 - approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version) + approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, + version=version) db.session.add(approval) db.session.commit() return approval @@ -326,7 +345,6 @@ class BaseTest(unittest.TestCase): self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id) return workflow_api - def complete_form(self, workflow_in, task_in, dict_data, 
error_code=None, terminate_loop=None, user_uid="dhf8r"): prev_completed_task_count = workflow_in.completed_tasks if isinstance(task_in, dict): @@ -391,12 +409,14 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.multi_instance_count, event.mi_count) if task_in.multi_instance_type == 'looping' and not terminate_loop: - self.assertEqual(task_in.multi_instance_index+1, event.mi_index) + self.assertEqual(task_in.multi_instance_index + 1, event.mi_index) else: self.assertEqual(task_in.multi_instance_index, event.mi_index) self.assertEqual(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) - workflow = WorkflowApiSchema().load(json_data) return workflow + + def logout(self): + g.user = None diff --git a/tests/test_authentication.py b/tests/test_authentication.py index 829d71e3..61f578a0 100644 --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -13,6 +13,8 @@ from crc.models.user import UserModel class TestAuthentication(BaseTest): + admin_uid = 'dhf8r' + non_admin_uid = 'lb3dp' def tearDown(self): # Assure we set the production flag back to false. @@ -58,7 +60,7 @@ class TestAuthentication(BaseTest): self.assertTrue(expected_exp_3 - 1000 <= actual_exp_3 <= expected_exp_3 + 1000) def test_non_production_auth_creates_user(self): - new_uid = 'lb3dp' ## Assure this user id is in the fake responses from ldap. + new_uid = self.non_admin_uid ## Assure this user id is in the fake responses from ldap. self.load_example_data() user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first() self.assertIsNone(user) @@ -88,21 +90,20 @@ class TestAuthentication(BaseTest): self.load_example_data() - new_uid = 'lb3dp' # This user is in the test ldap system. - user = db.session.query(UserModel).filter_by(uid=new_uid).first() + # User should not be in the system yet. 
+ user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() self.assertIsNone(user) - redirect_url = 'http://worlds.best.website/admin' - headers = dict(Uid=new_uid) - db.session.flush() - rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers) - self.assert_success(rv) - user = db.session.query(UserModel).filter_by(uid=new_uid).first() - self.assertIsNotNone(user) - self.assertEqual(new_uid, user.uid) - self.assertEqual("Laura Barnes", user.display_name) - self.assertEqual("lb3dp@virginia.edu", user.email_address) - self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title) + # Log in + non_admin_user = self._login_as_non_admin() + + # User should be in the system now. + redirect_url = 'http://worlds.best.website/admin' + rv_user = self.app.get('/v1.0/user', headers=self.logged_in_headers(non_admin_user, redirect_url=redirect_url)) + self.assert_success(rv_user) + user_data = json.loads(rv_user.get_data(as_text=True)) + self.assertEqual(self.non_admin_uid, user_data['uid']) + self.assertFalse(user_data['is_admin']) # Switch production mode back off app.config['PRODUCTION'] = False @@ -119,6 +120,8 @@ class TestAuthentication(BaseTest): user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu') rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut')) self.assert_success(rv) + user_data = json.loads(rv.get_data(as_text=True)) + self.assertTrue(user_data['is_admin']) def test_admin_can_access_admin_only_endpoints(self): # Switch production mode on @@ -126,21 +129,8 @@ class TestAuthentication(BaseTest): self.load_example_data() - admin_uids = app.config['ADMIN_UIDS'] - self.assertGreater(len(admin_uids), 0) - admin_uid = admin_uids[0] - self.assertEqual(admin_uid, 'dhf8r') # This user is in the test ldap system. 
- admin_headers = dict(Uid=admin_uid) - - rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers) - self.assert_success(rv) - - admin_user = db.session.query(UserModel).filter(UserModel.uid == admin_uid).first() - self.assertIsNotNone(admin_user) - self.assertEqual(admin_uid, admin_user.uid) - - admin_study = self._make_fake_study(admin_uid) - + admin_user = self._login_as_admin() + admin_study = self._make_fake_study(admin_user.uid) admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode()) rv_add_study = self.app.post( @@ -173,26 +163,9 @@ class TestAuthentication(BaseTest): self.load_example_data() # Non-admin user should not be able to delete a study - non_admin_uid = 'lb3dp' - admin_uids = app.config['ADMIN_UIDS'] - self.assertGreater(len(admin_uids), 0) - self.assertNotIn(non_admin_uid, admin_uids) - - non_admin_headers = dict(Uid=non_admin_uid) - - rv = self.app.get( - 'v1.0/login', - follow_redirects=False, - headers=non_admin_headers - ) - self.assert_success(rv) - - non_admin_user = db.session.query(UserModel).filter_by(uid=non_admin_uid).first() - self.assertIsNotNone(non_admin_user) - + non_admin_user = self._login_as_non_admin() non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode()) - - non_admin_study = self._make_fake_study(non_admin_uid) + non_admin_study = self._make_fake_study(non_admin_user.uid) rv_add_study = self.app.post( '/v1.0/study', @@ -216,6 +189,89 @@ class TestAuthentication(BaseTest): # Switch production mode back off app.config['PRODUCTION'] = False + def test_list_all_users(self): + self.load_example_data() + rv = self.app.get('/v1.0/user') + self.assert_failure(rv, 401) + + rv = self.app.get('/v1.0/user', headers=self.logged_in_headers()) + self.assert_success(rv) + + all_users = db.session.query(UserModel).all() + + rv = self.app.get('/v1.0/list_users', headers=self.logged_in_headers()) + self.assert_success(rv) + user_data = 
json.loads(rv.get_data(as_text=True)) + self.assertEqual(len(user_data), len(all_users)) + + def test_admin_can_impersonate_another_user(self): + # Switch production mode on + app.config['PRODUCTION'] = True + + self.load_example_data() + + admin_user = self._login_as_admin() + admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode()) + + # User should not be in the system yet. + non_admin_user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() + self.assertIsNone(non_admin_user) + + # Admin should not be able to impersonate non-existent user + rv_1 = self.app.get( + '/v1.0/user?admin_impersonate_uid=' + self.non_admin_uid, + content_type="application/json", + headers=admin_token_headers, + follow_redirects=False + ) + self.assert_success(rv_1) + user_data_1 = json.loads(rv_1.get_data(as_text=True)) + self.assertEqual(user_data_1['uid'], self.admin_uid, 'Admin user should be logged in as themselves') + + # Add the non-admin user now + self.logout() + non_admin_user = self._login_as_non_admin() + self.assertEqual(non_admin_user.uid, self.non_admin_uid) + non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode()) + + # Add a study for the non-admin user + non_admin_study = self._make_fake_study(self.non_admin_uid) + rv_add_study = self.app.post( + '/v1.0/study', + content_type="application/json", + headers=non_admin_token_headers, + data=json.dumps(StudySchema().dump(non_admin_study)) + ) + self.assert_success(rv_add_study, 'Non-admin user should be able to add a study') + self.logout() + + # Admin should be able to impersonate user now + admin_user = self._login_as_admin() + rv_2 = self.app.get( + '/v1.0/user?admin_impersonate_uid=' + self.non_admin_uid, + content_type="application/json", + headers=admin_token_headers, + follow_redirects=False + ) + self.assert_success(rv_2) + user_data_2 = json.loads(rv_2.get_data(as_text=True)) + 
self.assertEqual(user_data_2['uid'], self.non_admin_uid, 'Admin user should impersonate non-admin user') + + # Study endpoint should return non-admin user's studies + rv_study = self.app.get( + '/v1.0/study', + content_type="application/json", + headers=admin_token_headers, + follow_redirects=False + ) + self.assert_success(rv_study, 'Admin user should be able to get impersonated user studies') + study_data = json.loads(rv_study.get_data(as_text=True)) + self.assertGreaterEqual(len(study_data), 1) + self.assertEqual(study_data[0]['user_uid'], self.non_admin_uid) + + # Switch production mode back off + app.config['PRODUCTION'] = False + def _make_fake_study(self, uid): return { "title": "blah", @@ -224,3 +280,42 @@ class TestAuthentication(BaseTest): "primary_investigator_id": uid, "user_uid": uid, } + + def _login_as_admin(self): + admin_uids = app.config['ADMIN_UIDS'] + self.assertGreater(len(admin_uids), 0) + self.assertIn(self.admin_uid, admin_uids) + admin_headers = dict(Uid=self.admin_uid) + + rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers) + self.assert_success(rv) + + admin_user = db.session.query(UserModel).filter(UserModel.uid == self.admin_uid).first() + self.assertIsNotNone(admin_user) + self.assertEqual(self.admin_uid, admin_user.uid) + self.assertTrue(admin_user.is_admin()) + return admin_user + + def _login_as_non_admin(self): + admin_uids = app.config['ADMIN_UIDS'] + self.assertGreater(len(admin_uids), 0) + self.assertNotIn(self.non_admin_uid, admin_uids) + + non_admin_headers = dict(Uid=self.non_admin_uid) + + rv = self.app.get( + 'v1.0/login?uid=' + self.non_admin_uid, + follow_redirects=False, + headers=non_admin_headers + ) + self.assert_success(rv) + + user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() + self.assertIsNotNone(user) + self.assertFalse(user.is_admin()) + self.assertIsNotNone(user) + self.assertEqual(self.non_admin_uid, user.uid) + self.assertEqual("Laura Barnes", 
user.display_name) + self.assertEqual("lb3dp@virginia.edu", user.email_address) + self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title) + return user From 1b0ebecbf4e0ee19726a09f20072be05a3f15dc3 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 30 Jul 2020 10:17:02 -0400 Subject: [PATCH 49/60] Uses Flask session to store impersonation state. --- crc/models/user.py | 4 +++- crc/services/user_service.py | 15 ++++++++++----- tests/base_test.py | 6 +----- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/crc/models/user.py b/crc/models/user.py index eb431c95..f047761a 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -65,5 +65,7 @@ class UserModelSchema(SQLAlchemyAutoSchema): model = UserModel load_instance = True include_relationships = True + is_admin = fields.Method('get_is_admin', dump_only=True) - is_admin = fields.Function(lambda obj: obj.is_admin()) + def get_is_admin(self, obj): + return obj.is_admin() diff --git a/crc/services/user_service.py b/crc/services/user_service.py index 5d12601a..c4362a65 100644 --- a/crc/services/user_service.py +++ b/crc/services/user_service.py @@ -1,4 +1,4 @@ -from flask import g +from flask import g, session from crc import db from crc.api.common import ApiError @@ -22,8 +22,8 @@ class UserService(object): @staticmethod def admin_is_impersonating(): return UserService.user_is_admin() and \ - "impersonate_user" in g and \ - g.impersonate_user is not None + "admin_impersonate_uid" in session and \ + session.get('admin_impersonate_uid') is not None # Returns true if the given user uid is different from the current user's uid. @staticmethod @@ -35,20 +35,21 @@ class UserService(object): if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) - # Admins can pretend to be different users and act on a users behalf in + # Admins can pretend to be different users and act on a user's behalf in # some circumstances. 
if allow_admin_impersonate and UserService.admin_is_impersonating(): return g.impersonate_user else: return g.user - # Admins can pretend to be different users and act on a users behalf in some circumstances. + # Admins can pretend to be different users and act on a user's behalf in some circumstances. # This method allows an admin user to start impersonating another user with the given uid. # Stops impersonating if the uid is None or invalid. @staticmethod def impersonate(uid=None): # Clear out the current impersonating user. g.impersonate_user = None + session.pop('admin_impersonate_uid', None) if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) @@ -57,6 +58,10 @@ class UserService(object): # Impersonate the user if the given uid is valid. g.impersonate_user = db.session.query(UserModel).filter(UserModel.uid == uid).first() + # Store the uid in the session. + if g.impersonate_user: + session['admin_impersonate_uid'] = uid + @staticmethod def in_list(uids, allow_admin_impersonate=False): """Returns true if the current user's id is in the given list of ids. 
False if there diff --git a/tests/base_test.py b/tests/base_test.py index f5b66aa9..d569af6f 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -8,7 +8,7 @@ import json import unittest import urllib.parse import datetime -from flask import g +from flask import g, session as flask_session from sqlalchemy import Sequence from crc import app, db, session @@ -110,10 +110,6 @@ class BaseTest(unittest.TestCase): def tearDown(self): ExampleDataLoader.clean_db() self.logout() - - if 'impersonate_user' in g: - g.impersonate_user = None - self.auths = {} def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'): From aa0f24bd33f9b14464542752f06612518abbde8f Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 30 Jul 2020 10:40:06 -0400 Subject: [PATCH 50/60] Fully deletes users from g in test tearDown --- crc/services/user_service.py | 10 ++++++---- tests/base_test.py | 15 ++++++++++++++- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/crc/services/user_service.py b/crc/services/user_service.py index c4362a65..d29ea5f8 100644 --- a/crc/services/user_service.py +++ b/crc/services/user_service.py @@ -21,6 +21,7 @@ class UserService(object): # Returns true if the current admin user is impersonating another user. @staticmethod def admin_is_impersonating(): + print("session.get('admin_impersonate_uid')", session.get('admin_impersonate_uid')) return UserService.user_is_admin() and \ "admin_impersonate_uid" in session and \ session.get('admin_impersonate_uid') is not None @@ -37,7 +38,7 @@ class UserService(object): # Admins can pretend to be different users and act on a user's behalf in # some circumstances. - if allow_admin_impersonate and UserService.admin_is_impersonating(): + if allow_admin_impersonate and UserService.admin_is_impersonating() and 'impersonate_user' in g: return g.impersonate_user else: return g.user @@ -47,9 +48,10 @@ class UserService(object): # Stops impersonating if the uid is None or invalid. 
@staticmethod def impersonate(uid=None): - # Clear out the current impersonating user. - g.impersonate_user = None - session.pop('admin_impersonate_uid', None) + # if uid is None: + # # Clear out the current impersonating user. + # g.impersonate_user = None + # session.pop('admin_impersonate_uid', None) if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) diff --git a/tests/base_test.py b/tests/base_test.py index d569af6f..747949f3 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -415,4 +415,17 @@ class BaseTest(unittest.TestCase): return workflow def logout(self): - g.user = None + print("logout before 'user' in g", 'user' in g) + print('logout before flask_session', flask_session) + print("logout before 'impersonate_user' in g", 'impersonate_user' in g) + + if 'user' in g: + del g.user + + flask_session.clear() + if 'impersonate_user' in g: + del g.impersonate_user + + print("logout after 'user' in g", 'user' in g) + print('logout after flask_session', flask_session) + print("logout after 'impersonate_user' in g", 'impersonate_user' in g) From faba0f55ab31435e1e8824981f17c5713c315434 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 30 Jul 2020 12:40:53 -0400 Subject: [PATCH 51/60] Adds AdminSession model and refactors impersonation methods to use it. 
--- crc/api/user.py | 48 ++++++++++++---- crc/models/user.py | 7 +++ crc/services/user_service.py | 84 +++++++++++++++++++++------- migrations/versions/ab06a94e5d4c_.py | 34 +++++++++++ tests/base_test.py | 47 +++++++--------- tests/test_authentication.py | 24 ++++---- 6 files changed, 171 insertions(+), 73 deletions(-) create mode 100644 migrations/versions/ab06a94e5d4c_.py diff --git a/crc/api/user.py b/crc/api/user.py index 483edd65..5992626b 100644 --- a/crc/api/user.py +++ b/crc/api/user.py @@ -1,7 +1,7 @@ import flask from flask import g, request -from crc import app, db +from crc import app, session from crc.api.common import ApiError from crc.models.user import UserModel, UserModelSchema from crc.services.ldap_service import LdapService, LdapModel @@ -36,6 +36,10 @@ def verify_token(token=None): try: token_info = UserModel.decode_auth_token(token) g.user = UserModel.query.filter_by(uid=token_info['sub']).first() + + # If the user is valid, store the token for this session + if g.user: + g.token = token except: raise failure_error if g.user is not None: @@ -50,9 +54,11 @@ def verify_token(token=None): if uid is not None: db_user = UserModel.query.filter_by(uid=uid).first() + # If the user is valid, store the user and token for this session if db_user is not None: g.user = db_user token = g.user.encode_auth_token().decode() + g.token = token token_info = UserModel.decode_auth_token(token) return token_info @@ -87,18 +93,36 @@ def verify_token_admin(token=None): return token_info -def get_current_user(admin_impersonate_uid=None): - if UserService.has_user(): - if admin_impersonate_uid is not None and UserService.user_is_admin(): - UserService.impersonate(admin_impersonate_uid) +def start_impersonating(uid): + if uid is not None and UserService.user_is_admin(): + UserService.start_impersonating(uid) - user = UserService.current_user(UserService.admin_is_impersonating()) + user = UserService.current_user(allow_admin_impersonate=True) + return 
UserModelSchema().dump(user) + + +def stop_impersonating(): + if UserService.user_is_admin(): + UserService.stop_impersonating() + + user = UserService.current_user(allow_admin_impersonate=False) + return UserModelSchema().dump(user) + + +def get_current_user(admin_impersonate_uid=None): + if UserService.user_is_admin(): + if admin_impersonate_uid is not None: + UserService.start_impersonating(admin_impersonate_uid) + else: + UserService.stop_impersonating() + + user = UserService.current_user(UserService.user_is_admin() and UserService.admin_is_impersonating()) return UserModelSchema().dump(user) def get_all_users(): if "user" in g and g.user.is_admin(): - all_users = db.session.query(UserModel).all() + all_users = session.query(UserModel).all() return UserModelSchema(many=True).dump(all_users) @@ -189,6 +213,8 @@ def _handle_login(user_info: LdapModel, redirect_url=None): # Return the frontend auth callback URL, with auth token appended. auth_token = user.encode_auth_token().decode() + g.token = auth_token + if redirect_url is not None: if redirect_url.find("http://") != 0 and redirect_url.find("https://") != 0: redirect_url = "http://" + redirect_url @@ -201,13 +227,13 @@ def _handle_login(user_info: LdapModel, redirect_url=None): def _upsert_user(user_info): - user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first() + user = session.query(UserModel).filter(UserModel.uid == user_info.uid).first() if user is None: # Add new user user = UserModel() else: - user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first() + user = session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first() user.uid = user_info.uid user.display_name = user_info.display_name @@ -215,8 +241,8 @@ def _upsert_user(user_info): user.affiliation = user_info.affiliation user.title = user_info.title - db.session.add(user) - db.session.commit() + session.add(user) + session.commit() return user diff --git 
a/crc/models/user.py b/crc/models/user.py index f047761a..5b6c5dbb 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -69,3 +69,10 @@ class UserModelSchema(SQLAlchemyAutoSchema): def get_is_admin(self, obj): return obj.is_admin() + + +class AdminSessionModel(db.Model): + __tablename__ = 'admin_session' + id = db.Column(db.Integer, primary_key=True) + token = db.Column(db.String, unique=True) + admin_impersonate_uid = db.Column(db.String) diff --git a/crc/services/user_service.py b/crc/services/user_service.py index d29ea5f8..56a360ee 100644 --- a/crc/services/user_service.py +++ b/crc/services/user_service.py @@ -1,8 +1,8 @@ -from flask import g, session +from flask import g -from crc import db +from crc import session from crc.api.common import ApiError -from crc.models.user import UserModel +from crc.models.user import UserModel, AdminSessionModel class UserService(object): @@ -11,7 +11,10 @@ class UserService(object): # Returns true if the current user is logged in. @staticmethod def has_user(): - return 'user' in g and bool(g.user) + return 'token' in g and \ + bool(g.token) and \ + 'user' in g and \ + bool(g.user) # Returns true if the current user is an admin. @staticmethod @@ -21,10 +24,12 @@ class UserService(object): # Returns true if the current admin user is impersonating another user. @staticmethod def admin_is_impersonating(): - print("session.get('admin_impersonate_uid')", session.get('admin_impersonate_uid')) - return UserService.user_is_admin() and \ - "admin_impersonate_uid" in session and \ - session.get('admin_impersonate_uid') is not None + if UserService.user_is_admin(): + adminSession: AdminSessionModel = UserService.get_admin_session() + return adminSession is not None + + else: + raise ApiError("unauthorized", "You do not have permissions to do this.", status_code=403) # Returns true if the given user uid is different from the current user's uid. 
@staticmethod @@ -32,14 +37,14 @@ class UserService(object): return UserService.has_user() and uid is not None and uid is not g.user.uid @staticmethod - def current_user(allow_admin_impersonate=False): + def current_user(allow_admin_impersonate=False) -> UserModel: if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) # Admins can pretend to be different users and act on a user's behalf in # some circumstances. - if allow_admin_impersonate and UserService.admin_is_impersonating() and 'impersonate_user' in g: - return g.impersonate_user + if UserService.user_is_admin() and allow_admin_impersonate and UserService.admin_is_impersonating(): + return UserService.get_admin_session_user() else: return g.user @@ -47,22 +52,42 @@ class UserService(object): # This method allows an admin user to start impersonating another user with the given uid. # Stops impersonating if the uid is None or invalid. @staticmethod - def impersonate(uid=None): - # if uid is None: - # # Clear out the current impersonating user. - # g.impersonate_user = None - # session.pop('admin_impersonate_uid', None) - + def start_impersonating(uid=None): if not UserService.has_user(): raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + if not UserService.user_is_admin(): + raise ApiError("unauthorized", "You do not have permissions to do this.", status_code=403) + + if uid is None: + raise ApiError("invalid_uid", "Please provide a valid user uid.") + if not UserService.admin_is_impersonating() and UserService.is_different_user(uid): # Impersonate the user if the given uid is valid. - g.impersonate_user = db.session.query(UserModel).filter(UserModel.uid == uid).first() + impersonate_user = session.query(UserModel).filter(UserModel.uid == uid).first() - # Store the uid in the session. 
- if g.impersonate_user: - session['admin_impersonate_uid'] = uid + if impersonate_user is not None: + g.impersonate_user = impersonate_user + + # Store the uid and user session token. + session.add(AdminSessionModel(token=g.token, admin_impersonate_uid=uid)) + session.commit() + else: + raise ApiError("invalid_uid", "The uid provided is not valid.") + + @staticmethod + def stop_impersonating(): + if not UserService.has_user(): + raise ApiError("logged_out", "You are no longer logged in.", status_code=401) + + # Clear out the current impersonating user. + if 'impersonate_user' in g: + del g.impersonate_user + + admin_session: AdminSessionModel = UserService.get_admin_session() + if admin_session: + session.delete(admin_session) + session.commit() @staticmethod def in_list(uids, allow_admin_impersonate=False): @@ -73,3 +98,20 @@ class UserService(object): if user.uid in uids: return True return False + + @staticmethod + def get_admin_session() -> AdminSessionModel: + if UserService.user_is_admin(): + return session.query(AdminSessionModel).filter(AdminSessionModel.token == g.token).first() + else: + raise ApiError("unauthorized", "You do not have permissions to do this.", status_code=403) + + @staticmethod + def get_admin_session_user() -> UserModel: + if UserService.user_is_admin(): + admin_session = UserService.get_admin_session() + + if admin_session is not None: + return session.query(UserModel).filter(UserModel.uid == admin_session.admin_impersonate_uid).first() + else: + raise ApiError("unauthorized", "You do not have permissions to do this.", status_code=403) \ No newline at end of file diff --git a/migrations/versions/ab06a94e5d4c_.py b/migrations/versions/ab06a94e5d4c_.py new file mode 100644 index 00000000..5d9335dc --- /dev/null +++ b/migrations/versions/ab06a94e5d4c_.py @@ -0,0 +1,34 @@ +"""empty message + +Revision ID: ab06a94e5d4c +Revises: 2e7b377cbc7b +Create Date: 2020-07-30 11:23:46.601338 + +""" +from alembic import op +import sqlalchemy as sa + + 
+# revision identifiers, used by Alembic. +revision = 'ab06a94e5d4c' +down_revision = '2e7b377cbc7b' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('admin_session', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('token', sa.String(), nullable=True), + sa.Column('admin_impersonate_uid', sa.String(), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('token') + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('admin_session') + # ### end Alembic commands ### diff --git a/tests/base_test.py b/tests/base_test.py index 747949f3..af1cace4 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -8,7 +8,7 @@ import json import unittest import urllib.parse import datetime -from flask import g, session as flask_session +from flask import g from sqlalchemy import Sequence from crc import app, db, session @@ -149,19 +149,19 @@ class BaseTest(unittest.TestCase): # If in production mode, only add the first user. 
if app.config['PRODUCTION']: - db.session.add(UserModel(**self.users[0])) + session.add(UserModel(**self.users[0])) else: for user_json in self.users: - db.session.add(UserModel(**user_json)) + session.add(UserModel(**user_json)) - db.session.commit() + session.commit() for study_json in self.studies: study_model = StudyModel(**study_json) - db.session.add(study_model) + session.add(study_model) StudyService._add_all_workflow_specs_to_study(study_model) - db.session.execute(Sequence(StudyModel.__tablename__ + '_id_seq')) - db.session.commit() - db.session.flush() + session.execute(Sequence(StudyModel.__tablename__ + '_id_seq')) + session.commit() + session.flush() specs = session.query(WorkflowSpecModel).all() self.assertIsNotNone(specs) @@ -185,8 +185,8 @@ class BaseTest(unittest.TestCase): """Loads a spec into the database based on a directory in /tests/data""" if category_id is None: category = WorkflowSpecCategoryModel(name="test", display_name="Test Workflows", display_order=0) - db.session.add(category) - db.session.commit() + session.add(category) + session.commit() category_id = category.id if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0: @@ -240,7 +240,7 @@ class BaseTest(unittest.TestCase): file = open(file_path, "rb") data = file.read() - file_model = db.session.query(FileModel).filter(FileModel.name == name).first() + file_model = session.query(FileModel).filter(FileModel.name == name).first() noise, file_extension = os.path.splitext(file_path) content_type = CONTENT_TYPES[file_extension[1:]] file_service.update_file(file_model, data, content_type) @@ -249,8 +249,8 @@ class BaseTest(unittest.TestCase): user = session.query(UserModel).filter(UserModel.uid == uid).first() if user is None: user = UserModel(uid=uid, email_address=email, display_name=display_name) - db.session.add(user) - db.session.commit() + session.add(user) + session.commit() return user def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal 
software engineer", @@ -260,8 +260,8 @@ class BaseTest(unittest.TestCase): user = self.create_user(uid=uid) study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.active, user_uid=user.uid, primary_investigator_id=primary_investigator_id) - db.session.add(study) - db.session.commit() + session.add(study) + session.commit() return study def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses, @@ -288,8 +288,8 @@ class BaseTest(unittest.TestCase): return full_study def create_workflow(self, workflow_name, display_name=None, study=None, category_id=None, as_user="dhf8r"): - db.session.flush() - spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first() + session.flush() + spec = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first() if spec is None: if display_name is None: display_name = workflow_name @@ -322,8 +322,8 @@ class BaseTest(unittest.TestCase): version = version or 1 approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version) - db.session.add(approval) - db.session.commit() + session.add(approval) + session.commit() return approval def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, do_engine_steps=True, user_uid="dhf8r"): @@ -415,17 +415,8 @@ class BaseTest(unittest.TestCase): return workflow def logout(self): - print("logout before 'user' in g", 'user' in g) - print('logout before flask_session', flask_session) - print("logout before 'impersonate_user' in g", 'impersonate_user' in g) - if 'user' in g: del g.user - flask_session.clear() if 'impersonate_user' in g: del g.impersonate_user - - print("logout after 'user' in g", 'user' in g) - print('logout after flask_session', flask_session) - print("logout after 'impersonate_user' in g", 'impersonate_user' in g) diff --git a/tests/test_authentication.py b/tests/test_authentication.py 
index 61f578a0..469885f4 100644 --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -5,7 +5,7 @@ from datetime import timezone, datetime, timedelta import jwt from tests.base_test import BaseTest -from crc import db, app +from crc import app, session from crc.api.common import ApiError from crc.models.protocol_builder import ProtocolBuilderStatus from crc.models.study import StudySchema, StudyModel @@ -62,7 +62,7 @@ class TestAuthentication(BaseTest): def test_non_production_auth_creates_user(self): new_uid = self.non_admin_uid ## Assure this user id is in the fake responses from ldap. self.load_example_data() - user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first() + user = session.query(UserModel).filter(UserModel.uid == new_uid).first() self.assertIsNone(user) user_info = {'uid': new_uid, 'first_name': 'Cordi', 'last_name': 'Nator', @@ -74,7 +74,7 @@ class TestAuthentication(BaseTest): self.assertTrue(rv_1.status_code == 302) self.assertTrue(str.startswith(rv_1.location, redirect_url)) - user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first() + user = session.query(UserModel).filter(UserModel.uid == new_uid).first() self.assertIsNotNone(user) self.assertIsNotNone(user.display_name) self.assertIsNotNone(user.email_address) @@ -91,7 +91,7 @@ class TestAuthentication(BaseTest): self.load_example_data() # User should not be in the system yet. 
- user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() + user = session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() self.assertIsNone(user) # Log in @@ -143,7 +143,7 @@ class TestAuthentication(BaseTest): self.assert_success(rv_add_study, 'Admin user should be able to add a study') new_admin_study = json.loads(rv_add_study.get_data(as_text=True)) - db_admin_study = db.session.query(StudyModel).filter_by(id=new_admin_study['id']).first() + db_admin_study = session.query(StudyModel).filter_by(id=new_admin_study['id']).first() self.assertIsNotNone(db_admin_study) rv_del_study = self.app.delete( @@ -176,7 +176,7 @@ class TestAuthentication(BaseTest): self.assert_success(rv_add_study, 'Non-admin user should be able to add a study') new_non_admin_study = json.loads(rv_add_study.get_data(as_text=True)) - db_non_admin_study = db.session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first() + db_non_admin_study = session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first() self.assertIsNotNone(db_non_admin_study) rv_non_admin_del_study = self.app.delete( @@ -197,7 +197,7 @@ class TestAuthentication(BaseTest): rv = self.app.get('/v1.0/user', headers=self.logged_in_headers()) self.assert_success(rv) - all_users = db.session.query(UserModel).all() + all_users = session.query(UserModel).all() rv = self.app.get('/v1.0/list_users', headers=self.logged_in_headers()) self.assert_success(rv) @@ -214,7 +214,7 @@ class TestAuthentication(BaseTest): admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode()) # User should not be in the system yet. 
- non_admin_user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() + non_admin_user = session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() self.assertIsNone(non_admin_user) # Admin should not be able to impersonate non-existent user @@ -224,9 +224,7 @@ class TestAuthentication(BaseTest): headers=admin_token_headers, follow_redirects=False ) - self.assert_success(rv_1) - user_data_1 = json.loads(rv_1.get_data(as_text=True)) - self.assertEqual(user_data_1['uid'], self.admin_uid, 'Admin user should be logged in as themselves') + self.assert_failure(rv_1, 400) # Add the non-admin user now self.logout() @@ -290,7 +288,7 @@ class TestAuthentication(BaseTest): rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers) self.assert_success(rv) - admin_user = db.session.query(UserModel).filter(UserModel.uid == self.admin_uid).first() + admin_user = session.query(UserModel).filter(UserModel.uid == self.admin_uid).first() self.assertIsNotNone(admin_user) self.assertEqual(self.admin_uid, admin_user.uid) self.assertTrue(admin_user.is_admin()) @@ -310,7 +308,7 @@ class TestAuthentication(BaseTest): ) self.assert_success(rv) - user = db.session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() + user = session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first() self.assertIsNotNone(user) self.assertFalse(user.is_admin()) self.assertIsNotNone(user) From 28d3f835e87065c3529613e25b359fb58124fcea Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 30 Jul 2020 13:21:50 -0400 Subject: [PATCH 52/60] Updates package hashes --- Pipfile.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Pipfile.lock b/Pipfile.lock index f86d6e78..bb4a7d1d 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -247,6 +247,7 @@ "sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74", "sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218" ], + "markers": 
"python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.10" }, "docutils": { From 9704cbcb26c8d11edbe40d81c507ad01e66c771b Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 30 Jul 2020 13:35:20 -0400 Subject: [PATCH 53/60] Modifications to the ldap scripts to bring them back in line with what Kelly is doing with the evaluation process. --- Pipfile | 2 +- Pipfile.lock | 4 +- crc/scripts/ldap.py | 50 +++++++++++++++ crc/scripts/ldap_lookup.py | 78 ----------------------- crc/scripts/ldap_replace.py | 60 ----------------- crc/services/workflow_processor.py | 14 +--- tests/data/ldap_replace/ldap_replace.bpmn | 17 ++--- tests/ldap/test_ldap_lookup_script.py | 74 +++++---------------- 8 files changed, 82 insertions(+), 217 deletions(-) create mode 100644 crc/scripts/ldap.py delete mode 100644 crc/scripts/ldap_lookup.py delete mode 100644 crc/scripts/ldap_replace.py diff --git a/Pipfile b/Pipfile index 56f3bc26..009e370c 100644 --- a/Pipfile +++ b/Pipfile @@ -38,7 +38,7 @@ recommonmark = "*" requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} sphinx = "*" -spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} +spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "cr-connect-106-augment-eval"} #spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"} swagger-ui-bundle = "*" webtest = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 8aabe194..36b98856 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "381d29428eb328ad6167774b510b9d818bd1505b95f50454a19f1564782326cc" + "sha256": "afb6a541d1a9f33155f91529ad961492dceded89466aa1e02fed9901ac5eb146" }, "pipfile-spec": 6, "requires": { @@ -804,7 +804,7 @@ }, "spiffworkflow": { "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "11ad40bbcb0fbd3c5bc1078e4989dc38b749f7f3" + "ref": "7712830665b4419df019413ac095cb0749adb346" }, 
"sqlalchemy": { "hashes": [ diff --git a/crc/scripts/ldap.py b/crc/scripts/ldap.py new file mode 100644 index 00000000..577d4a75 --- /dev/null +++ b/crc/scripts/ldap.py @@ -0,0 +1,50 @@ +import copy + +from crc import app +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService + + +class Ldap(Script): + """This Script allows to be introduced as part of a workflow and called from there, taking + a UID (or several) as input and looking it up through LDAP to return the person's details """ + + def get_description(self): + return """ +Attempts to create a dictionary with person details, using the +provided argument (a UID) and look it up through LDAP. + +Examples: +supervisor_info = ldap(supervisor_uid) // Sets the supervisor information to ldap details for the given uid. +""" + + def do_task_validate_only(self, task, *args, **kwargs): + return self.set_users_info_in_task(task, args) + + def do_task(self, task, study_id, workflow_id, *args, **kwargs): + return self.set_users_info_in_task(task, args) + + def set_users_info_in_task(self, task, args): + if len(args) != 1: + raise ApiError(code="missing_argument", + message="Ldap takes a single argument, the " + "UID for the person we want to look up") + uid = args[0] + user_info_dict = {} + + user_info = LdapService.user_info(uid) + user_info_dict = { + "display_name": user_info.display_name, + "given_name": user_info.given_name, + "email_address": user_info.email_address, + "telephone_number": user_info.telephone_number, + "title": user_info.title, + "department": user_info.department, + "affiliation": user_info.affiliation, + "sponsor_type": user_info.sponsor_type, + "uid": user_info.uid, + "proper_name": user_info.proper_name() + } + + return user_info_dict diff --git a/crc/scripts/ldap_lookup.py b/crc/scripts/ldap_lookup.py deleted file mode 100644 index 62bd287a..00000000 --- a/crc/scripts/ldap_lookup.py +++ /dev/null @@ -1,78 +0,0 @@ -import copy 
- -from crc import app -from crc.api.common import ApiError -from crc.scripts.script import Script -from crc.services.ldap_service import LdapService - - -USER_DETAILS = { - "PIComputingID": { - "value": "", - "data": { - }, - "label": "invalid uid" - } -} - - -class LdapLookup(Script): - """This Script allows to be introduced as part of a workflow and called from there, taking - a UID as input and looking it up through LDAP to return the person's details """ - - def get_description(self): - return """ -Attempts to create a dictionary with person details, using the -provided argument (a UID) and look it up through LDAP. - -Example: -LdapLookup PIComputingID -""" - - def do_task_validate_only(self, task, *args, **kwargs): - self.get_user_info(task, args) - - def do_task(self, task, *args, **kwargs): - args = [arg for arg in args if type(arg) == str] - user_info = self.get_user_info(task, args) - - user_details = copy.deepcopy(USER_DETAILS) - user_details['PIComputingID']['value'] = user_info['uid'] - if len(user_info.keys()) > 1: - user_details['PIComputingID']['label'] = user_info.pop('label') - else: - user_info.pop('uid') - user_details['PIComputingID']['data'] = user_info - return user_details - - def get_user_info(self, task, args): - if len(args) < 1: - raise ApiError(code="missing_argument", - message="Ldap lookup script requires one argument. 
The " - "UID for the person we want to look up") - - arg = args.pop() # Extracting only one value for now - uid = task.workflow.script_engine.evaluate_expression(task, arg) - if not isinstance(uid, str): - raise ApiError(code="invalid_argument", - message="Ldap lookup script requires one 1 UID argument, of type string.") - user_info_dict = {} - try: - user_info = LdapService.user_info(uid) - user_info_dict = { - "display_name": user_info.display_name, - "given_name": user_info.given_name, - "email_address": user_info.email_address, - "telephone_number": user_info.telephone_number, - "title": user_info.title, - "department": user_info.department, - "affiliation": user_info.affiliation, - "sponsor_type": user_info.sponsor_type, - "uid": user_info.uid, - "label": user_info.proper_name() - } - except: - user_info_dict['uid'] = uid - app.logger.error(f'Ldap lookup failed for UID {uid}') - - return user_info_dict diff --git a/crc/scripts/ldap_replace.py b/crc/scripts/ldap_replace.py deleted file mode 100644 index 88e2986a..00000000 --- a/crc/scripts/ldap_replace.py +++ /dev/null @@ -1,60 +0,0 @@ -import copy - -from crc import app -from crc.api.common import ApiError -from crc.scripts.script import Script -from crc.services.ldap_service import LdapService - - -class LdapReplace(Script): - """This Script allows to be introduced as part of a workflow and called from there, taking - a UID (or several) as input and looking it up through LDAP to return the person's details """ - - def get_description(self): - return """ -Attempts to create a dictionary with person details, using the -provided argument (a UID) and look it up through LDAP. - -Examples: -#! LdapReplace supervisor -#! LdapReplace supervisor collaborator -#! 
LdapReplace supervisor cosupervisor collaborator -""" - - def do_task_validate_only(self, task, *args, **kwargs): - self.set_users_info_in_task(task, args) - - def do_task(self, task, *args, **kwargs): - args = [arg for arg in args if type(arg) == str] - self.set_users_info_in_task(task, args) - - def set_users_info_in_task(self, task, args): - if len(args) < 1: - raise ApiError(code="missing_argument", - message="Ldap replace script requires at least one argument. The " - "UID for the person(s) we want to look up") - - users_info = {} - for arg in args: - uid = task.workflow.script_engine.evaluate_expression(task, arg) - if not isinstance(uid, str): - raise ApiError(code="invalid_argument", - message="Ldap replace script found an invalid argument, type string is required") - user_info_dict = {} - try: - user_info = LdapService.user_info(uid) - user_info_dict = { - "display_name": user_info.display_name, - "given_name": user_info.given_name, - "email_address": user_info.email_address, - "telephone_number": user_info.telephone_number, - "title": user_info.title, - "department": user_info.department, - "affiliation": user_info.affiliation, - "sponsor_type": user_info.sponsor_type, - "uid": user_info.uid, - "proper_name": user_info.proper_name() - } - except: - app.logger.error(f'Ldap replace failed for UID {uid}') - task.data[arg] = user_info_dict diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index f9243e68..2ec13702 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -29,19 +29,11 @@ from crc import app class CustomBpmnScriptEngine(BpmnScriptEngine): """This is a custom script processor that can be easily injected into Spiff Workflow. - Rather than execute arbitrary code, this assumes the script references a fully qualified python class - such as myapp.RandomFact. """ + It will execute python code read in from the bpmn. 
It will also make any scripts in the + scripts directory available for execution. """ def execute(self, task: SpiffTask, script, data): - """ - Functions in two modes. - 1. If the command is proceeded by #! then this is assumed to be a python script, and will - attempt to load that python module and execute the do_task method on that script. Scripts - must be located in the scripts package and they must extend the script.py class. - 2. If not proceeded by the #! this will attempt to execute the script directly and assumes it is - valid Python. - """ - # Shlex splits the whole string while respecting double quoted strings within + study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] if WorkflowProcessor.WORKFLOW_ID_KEY in task.workflow.data: workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] diff --git a/tests/data/ldap_replace/ldap_replace.bpmn b/tests/data/ldap_replace/ldap_replace.bpmn index 77f8c7ad..64389299 100644 --- a/tests/data/ldap_replace/ldap_replace.bpmn +++ b/tests/data/ldap_replace/ldap_replace.bpmn @@ -10,7 +10,8 @@ Flow_08n2npe Flow_1xlrgne - #! 
LdapReplace Supervisor Investigator + Supervisor = ldap(Supervisor) +Investigator = ldap(Investigator) @@ -33,6 +34,10 @@ + + + + @@ -45,22 +50,18 @@ - - - - + + + - - - diff --git a/tests/ldap/test_ldap_lookup_script.py b/tests/ldap/test_ldap_lookup_script.py index 220ca9c8..0a88a899 100644 --- a/tests/ldap/test_ldap_lookup_script.py +++ b/tests/ldap/test_ldap_lookup_script.py @@ -1,7 +1,8 @@ from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor -from crc.scripts.ldap_replace import LdapReplace +from crc.scripts.ldap import Ldap +from crc.api.common import ApiError from crc import db, mail @@ -14,60 +15,19 @@ class TestLdapLookupScript(BaseTest): processor = WorkflowProcessor(workflow) task = processor.next_task() - task.data = { - 'PIComputingID': 'dhf8r' - } + script = Ldap() + user_details = script.do_task(task, workflow.study_id, workflow.id, "dhf8r") - script = LdapReplace() - user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") - - self.assertEqual(task.data['PIComputingID']['display_name'], 'Dan Funk') - self.assertEqual(task.data['PIComputingID']['given_name'], 'Dan') - self.assertEqual(task.data['PIComputingID']['email_address'], 'dhf8r@virginia.edu') - self.assertEqual(task.data['PIComputingID']['telephone_number'], '+1 (434) 924-1723') - self.assertEqual(task.data['PIComputingID']['title'], 'E42:He\'s a hoopy frood') - self.assertEqual(task.data['PIComputingID']['department'], 'E0:EN-Eng Study of Parallel Universes') - self.assertEqual(task.data['PIComputingID']['affiliation'], 'faculty') - self.assertEqual(task.data['PIComputingID']['sponsor_type'], 'Staff') - self.assertEqual(task.data['PIComputingID']['uid'], 'dhf8r') - self.assertEqual(task.data['PIComputingID']['proper_name'], 'Dan Funk - (dhf8r)') - - def test_get_existing_users_details(self): - self.load_example_data() - self.create_reference_document() - workflow = self.create_workflow('empty_workflow') - processor = 
WorkflowProcessor(workflow) - task = processor.next_task() - - task.data = { - 'supervisor': 'dhf8r', - 'investigator': 'lb3dp' - } - - script = LdapReplace() - user_details = script.do_task(task, workflow.study_id, workflow.id, "supervisor", "investigator") - - self.assertEqual(task.data['supervisor']['display_name'], 'Dan Funk') - self.assertEqual(task.data['supervisor']['given_name'], 'Dan') - self.assertEqual(task.data['supervisor']['email_address'], 'dhf8r@virginia.edu') - self.assertEqual(task.data['supervisor']['telephone_number'], '+1 (434) 924-1723') - self.assertEqual(task.data['supervisor']['title'], 'E42:He\'s a hoopy frood') - self.assertEqual(task.data['supervisor']['department'], 'E0:EN-Eng Study of Parallel Universes') - self.assertEqual(task.data['supervisor']['affiliation'], 'faculty') - self.assertEqual(task.data['supervisor']['sponsor_type'], 'Staff') - self.assertEqual(task.data['supervisor']['uid'], 'dhf8r') - self.assertEqual(task.data['supervisor']['proper_name'], 'Dan Funk - (dhf8r)') - - self.assertEqual(task.data['investigator']['display_name'], 'Laura Barnes') - self.assertEqual(task.data['investigator']['given_name'], 'Laura') - self.assertEqual(task.data['investigator']['email_address'], 'lb3dp@virginia.edu') - self.assertEqual(task.data['investigator']['telephone_number'], '+1 (434) 924-1723') - self.assertEqual(task.data['investigator']['title'], 'E0:Associate Professor of Systems and Information Engineering') - self.assertEqual(task.data['investigator']['department'], 'E0:EN-Eng Sys and Environment') - self.assertEqual(task.data['investigator']['affiliation'], 'faculty') - self.assertEqual(task.data['investigator']['sponsor_type'], 'Staff') - self.assertEqual(task.data['investigator']['uid'], 'lb3dp') - self.assertEqual(task.data['investigator']['proper_name'], 'Laura Barnes - (lb3dp)') + self.assertEqual(user_details['display_name'], 'Dan Funk') + self.assertEqual(user_details['given_name'], 'Dan') + 
self.assertEqual(user_details['email_address'], 'dhf8r@virginia.edu') + self.assertEqual(user_details['telephone_number'], '+1 (434) 924-1723') + self.assertEqual(user_details['title'], 'E42:He\'s a hoopy frood') + self.assertEqual(user_details['department'], 'E0:EN-Eng Study of Parallel Universes') + self.assertEqual(user_details['affiliation'], 'faculty') + self.assertEqual(user_details['sponsor_type'], 'Staff') + self.assertEqual(user_details['uid'], 'dhf8r') + self.assertEqual(user_details['proper_name'], 'Dan Funk - (dhf8r)') def test_get_invalid_user_details(self): self.load_example_data() @@ -80,10 +40,10 @@ class TestLdapLookupScript(BaseTest): 'PIComputingID': 'rec3z' } - script = LdapReplace() - user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") + script = Ldap() + with(self.assertRaises(ApiError)): + user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID") - self.assertEqual(task.data['PIComputingID'], {}) def test_bpmn_task_receives_user_details(self): workflow = self.create_workflow('ldap_replace') From 7004d9ba888908a3d588a0412a9b9243373cfc15 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 30 Jul 2020 13:56:57 -0400 Subject: [PATCH 54/60] Bumping spiffworkflow version. 
--- Pipfile | 2 +- Pipfile.lock | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Pipfile b/Pipfile index 009e370c..56f3bc26 100644 --- a/Pipfile +++ b/Pipfile @@ -38,7 +38,7 @@ recommonmark = "*" requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} sphinx = "*" -spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "cr-connect-106-augment-eval"} +spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} #spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"} swagger-ui-bundle = "*" webtest = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 36b98856..8ce6175a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "afb6a541d1a9f33155f91529ad961492dceded89466aa1e02fed9901ac5eb146" + "sha256": "381d29428eb328ad6167774b510b9d818bd1505b95f50454a19f1564782326cc" }, "pipfile-spec": 6, "requires": { @@ -804,7 +804,7 @@ }, "spiffworkflow": { "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "7712830665b4419df019413ac095cb0749adb346" + "ref": "7c8d59e7b9a978795bc8d1f354002fdc89540672" }, "sqlalchemy": { "hashes": [ @@ -892,10 +892,10 @@ }, "wtforms": { "hashes": [ - "sha256:43f19879b2a9b8dfd81d2e4e427ce44d3e5c09dbe08f2af8f4be9586b7dfc33d", - "sha256:715ebd303f47384bf6468fd9dfff52c6acc400e71204df8acfa6ef7bf40e1c27" + "sha256:7b504fc724d0d1d4d5d5c114e778ec88c37ea53144683e084215eed5155ada4c", + "sha256:81195de0ac94fbc8368abbaf9197b88c4f3ffd6c2719b5bf5fc9da744f3d829c" ], - "version": "==2.3.2" + "version": "==2.3.3" }, "xlrd": { "hashes": [ From 9112be548de0c401412f4a8bce02686ba6807f4c Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 30 Jul 2020 15:04:09 -0400 Subject: [PATCH 55/60] Adding box as a direct dependency. Really uncertain how this is working everwhere but in the actual deployment. 
--- Pipfile | 1 + Pipfile.lock | 10 +++++++++- setup.py | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Pipfile b/Pipfile index 56f3bc26..f16e89fa 100644 --- a/Pipfile +++ b/Pipfile @@ -46,6 +46,7 @@ werkzeug = "*" xlrd = "*" xlsxwriter = "*" pygithub = "*" +python-box = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 8ce6175a..7ca0d3d7 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "381d29428eb328ad6167774b510b9d818bd1505b95f50454a19f1564782326cc" + "sha256": "45dc348da1f583da4a7c76113456b3f0225736e79a5da05ba2af9ede7f8089e0" }, "pipfile-spec": 6, "requires": { @@ -659,6 +659,14 @@ ], "version": "==0.16.0" }, + "python-box": { + "hashes": [ + "sha256:bcb057e8960f4d888a4caf8f668eeca3c5c61ad349d8d81c4339414984fa9454", + "sha256:f02e059a299cac0515687aafec7543d401b12759d6578e53fae74154e0cbaa79" + ], + "index": "pypi", + "version": "==5.1.0" + }, "python-dateutil": { "hashes": [ "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", diff --git a/setup.py b/setup.py index 159a3d35..74c022d1 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,3 @@ from setuptools import setup -setup(setup_requires=["pbr"], pbr=True) +setup(setup_requires=["pbr"], pbr=True, install_requires=['box']) From 5d23223e519eff9128a3e1de9901a1a919b00208 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Thu, 30 Jul 2020 21:03:11 -0600 Subject: [PATCH 56/60] New study status update --- crc/api.yml | 4 +- crc/api/study.py | 4 +- crc/models/protocol_builder.py | 1 + crc/models/study.py | 65 ++++++++++++++++++---------- crc/services/study_service.py | 6 +-- migrations/versions/1c3f88dbccc3_.py | 36 +++++++++++++++ migrations/versions/369d65dcb269_.py | 28 ------------ tests/base_test.py | 9 ++-- tests/study/test_study_api.py | 24 +++++----- tests/study/test_study_service.py | 4 +- tests/test_authentication.py | 4 +- 11 files changed, 105 insertions(+), 80 deletions(-) create mode 
100644 migrations/versions/1c3f88dbccc3_.py delete mode 100644 migrations/versions/369d65dcb269_.py diff --git a/crc/api.yml b/crc/api.yml index b3d61fc1..4bb63af3 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -1062,9 +1062,9 @@ components: user_uid: type: string example: dhf8r - protocol_builder_status: + status: type: string - enum: ['incomplete', 'active', 'hold', 'open', 'abandoned'] + enum: ['in progress', 'hold', 'open for enrollment', 'abandoned'] example: done sponsor: type: string diff --git a/crc/api/study.py b/crc/api/study.py index b5572527..3247f47d 100644 --- a/crc/api/study.py +++ b/crc/api/study.py @@ -6,7 +6,7 @@ from sqlalchemy.exc import IntegrityError from crc import session from crc.api.common import ApiError, ApiErrorSchema from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.study import StudySchema, StudyModel, Study +from crc.models.study import Study, StudyModel, StudySchema, StudyStatus from crc.services.study_service import StudyService @@ -21,7 +21,7 @@ def add_study(body): title=body['title'], primary_investigator_id=body['primary_investigator_id'], last_updated=datetime.now(), - protocol_builder_status=ProtocolBuilderStatus.active) + status=StudyStatus.in_progress) session.add(study_model) errors = StudyService._add_all_workflow_specs_to_study(study_model) diff --git a/crc/models/protocol_builder.py b/crc/models/protocol_builder.py index a91ae84b..2706cefe 100644 --- a/crc/models/protocol_builder.py +++ b/crc/models/protocol_builder.py @@ -17,6 +17,7 @@ class ProtocolBuilderInvestigatorType(enum.Enum): SCI = "Scientific Contact" +# Deprecated: Marked for removal class ProtocolBuilderStatus(enum.Enum): # • Active: found in PB and no HSR number and not hold # • Hold: store boolean value in CR Connect (add to Study Model) diff --git a/crc/models/study.py b/crc/models/study.py index 32697896..d1114349 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -1,4 +1,5 @@ import datetime +import enum import 
json import marshmallow @@ -14,12 +15,26 @@ from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, Workfl WorkflowModel +class StudyStatus(enum.Enum): + in_progress = 'in progress' + hold = 'hold' + open_for_enrollment = 'open for enrollment' + abandoned = 'abandoned' + + +class IrbStatus(enum.Enum): + incomplete_in_protocol_builder = 'incomplete in protocol builder' + completed_in_protocol_builder = 'completed in protocol builder' + hsr_assigned = 'hsr number assigned' + + class StudyModel(db.Model): __tablename__ = 'study' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String) last_updated = db.Column(db.DateTime(timezone=True), default=func.now()) - protocol_builder_status = db.Column(db.Enum(ProtocolBuilderStatus)) + status = db.Column(db.Enum(StudyStatus)) + irb_status = db.Column(db.Enum(IrbStatus)) primary_investigator_id = db.Column(db.String, nullable=True) sponsor = db.Column(db.String, nullable=True) hsr_number = db.Column(db.String, nullable=True) @@ -29,7 +44,6 @@ class StudyModel(db.Model): requirements = db.Column(db.ARRAY(db.Integer), nullable=True) on_hold = db.Column(db.Boolean, default=False) enrollment_date = db.Column(db.DateTime(timezone=True), nullable=True) - changes_history = db.Column(db.JSON, nullable=True) def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy): self.hsr_number = pbs.HSRNUMBER @@ -37,11 +51,13 @@ class StudyModel(db.Model): self.user_uid = pbs.NETBADGEID self.last_updated = pbs.DATE_MODIFIED - self.protocol_builder_status = ProtocolBuilderStatus.active + self.irb_status = IrbStatus.incomplete_in_protocol_builder + self.status = StudyStatus.in_progress if pbs.HSRNUMBER: - self.protocol_builder_status = ProtocolBuilderStatus.open + self.irb_status = IrbStatus.hsr_assigned + self.status = StudyStatus.open_for_enrollment if self.on_hold: - self.protocol_builder_status = ProtocolBuilderStatus.hold + self.status = StudyStatus.hold class WorkflowMetadata(object): @@ -112,15 
+128,15 @@ class CategorySchema(ma.Schema): class Study(object): def __init__(self, title, last_updated, primary_investigator_id, user_uid, - id=None, - protocol_builder_status=None, + id=None, status=None, irb_status=None, sponsor="", hsr_number="", ind_number="", categories=[], files=[], approvals=[], enrollment_date=None, **argsv): self.id = id self.user_uid = user_uid self.title = title self.last_updated = last_updated - self.protocol_builder_status = protocol_builder_status + self.status = status + self.irb_status = irb_status self.primary_investigator_id = primary_investigator_id self.sponsor = sponsor self.hsr_number = hsr_number @@ -142,25 +158,25 @@ class Study(object): """As the case for update was very reduced, it's mostly and specifically updating only the study status and generating a history record """ - pb_status = ProtocolBuilderStatus(self.protocol_builder_status) + status = StudyStatus(self.status) study_model.last_updated = datetime.datetime.now() - study_model.protocol_builder_status = pb_status + study_model.status = status - if pb_status == ProtocolBuilderStatus.open: + if status == StudyStatus.open_for_enrollment: study_model.enrollment_date = self.enrollment_date - change = { - 'status': ProtocolBuilderStatus(self.protocol_builder_status).value, - 'comment': '' if not hasattr(self, 'comment') else self.comment, - 'date': str(datetime.datetime.now()) - } + # change = { + # 'status': ProtocolBuilderStatus(self.protocol_builder_status).value, + # 'comment': '' if not hasattr(self, 'comment') else self.comment, + # 'date': str(datetime.datetime.now()) + # } - if study_model.changes_history: - changes_history = json.loads(study_model.changes_history) - changes_history.append(change) - else: - changes_history = [change] - study_model.changes_history = json.dumps(changes_history) + # if study_model.changes_history: + # changes_history = json.loads(study_model.changes_history) + # changes_history.append(change) + # else: + # changes_history = 
[change] + # study_model.changes_history = json.dumps(changes_history) def model_args(self): @@ -174,7 +190,7 @@ class Study(object): class StudyForUpdateSchema(ma.Schema): id = fields.Integer(required=False, allow_none=True) - protocol_builder_status = EnumField(ProtocolBuilderStatus, by_value=True) + status = EnumField(StudyStatus, by_value=True) hsr_number = fields.String(allow_none=True) sponsor = fields.String(allow_none=True) ind_number = fields.String(allow_none=True) @@ -196,7 +212,8 @@ class StudySchema(ma.Schema): id = fields.Integer(required=False, allow_none=True) categories = fields.List(fields.Nested(CategorySchema), dump_only=True) warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True) - protocol_builder_status = EnumField(ProtocolBuilderStatus, by_value=True) + protocol_builder_status = EnumField(StudyStatus, by_value=True) + status = EnumField(StudyStatus, by_value=True) hsr_number = fields.String(allow_none=True) sponsor = fields.String(allow_none=True) ind_number = fields.String(allow_none=True) diff --git a/crc/services/study_service.py b/crc/services/study_service.py index 1d15d361..9d94dc60 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -12,7 +12,7 @@ from crc.api.common import ApiError from crc.models.file import FileModel, FileModelSchema, File from crc.models.ldap import LdapSchema from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus -from crc.models.study import StudyModel, Study, Category, WorkflowMetadata +from crc.models.study import StudyModel, Study, StudyStatus, Category, WorkflowMetadata from crc.models.task_event import TaskEventModel, TaskEvent from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \ WorkflowStatus @@ -64,7 +64,7 @@ class StudyService(object): # Calling this line repeatedly is very very slow. It creates the # master spec and runs it. 
Don't execute this for Abandoned studies, as # we don't have the information to process them. - if study.protocol_builder_status != ProtocolBuilderStatus.abandoned: + if study.status != StudyStatus.abandoned: status = StudyService.__get_study_status(study_model) study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status) @@ -265,7 +265,7 @@ class StudyService(object): for study in db_studies: pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None) if not pb_study: - study.protocol_builder_status = ProtocolBuilderStatus.abandoned + study.status = StudyStatus.abandoned db.session.commit() diff --git a/migrations/versions/1c3f88dbccc3_.py b/migrations/versions/1c3f88dbccc3_.py new file mode 100644 index 00000000..3801b76c --- /dev/null +++ b/migrations/versions/1c3f88dbccc3_.py @@ -0,0 +1,36 @@ +"""empty message + +Revision ID: 1c3f88dbccc3 +Revises: 2e7b377cbc7b +Create Date: 2020-07-30 18:51:01.816284 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '1c3f88dbccc3' +down_revision = '2e7b377cbc7b' +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute("CREATE TYPE irbstatus AS ENUM('incomplete_in_protocol_builder', 'completed_in_protocol_builder', 'hsr_assigned')") + op.execute("CREATE TYPE studystatus AS ENUM('in_progress', 'hold', 'open_for_enrollment', 'abandoned')") + # ### commands auto generated by Alembic - please adjust! 
### + op.add_column('study', sa.Column('irb_status', sa.Enum('incomplete_in_protocol_builder', 'completed_in_protocol_builder', 'hsr_assigned', name='irbstatus'), nullable=True)) + op.add_column('study', sa.Column('status', sa.Enum('in_progress', 'hold', 'open_for_enrollment', 'abandoned', name='studystatus'), nullable=True)) + op.drop_column('study', 'protocol_builder_status') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('study', sa.Column('protocol_builder_status', postgresql.ENUM('incomplete', 'active', 'hold', 'open', 'abandoned', name='protocolbuilderstatus'), autoincrement=False, nullable=True)) + op.drop_column('study', 'status') + op.drop_column('study', 'irb_status') + # ### end Alembic commands ### + op.execute('DROP TYPE studystatus') + op.execute('DROP TYPE irbstatus') diff --git a/migrations/versions/369d65dcb269_.py b/migrations/versions/369d65dcb269_.py deleted file mode 100644 index d13d7736..00000000 --- a/migrations/versions/369d65dcb269_.py +++ /dev/null @@ -1,28 +0,0 @@ -"""empty message - -Revision ID: 369d65dcb269 -Revises: c4ddb69e7ef4 -Create Date: 2020-07-27 20:05:29.524553 - -""" -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '369d65dcb269' -down_revision = 'c4ddb69e7ef4' -branch_labels = None -depends_on = None - - -def upgrade(): - # ### commands auto generated by Alembic - please adjust! ### - op.add_column('study', sa.Column('changes_history', sa.JSON(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! 
### - op.drop_column('study', 'changes_history') - # ### end Alembic commands ### diff --git a/tests/base_test.py b/tests/base_test.py index af0b1a20..07554378 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -15,9 +15,8 @@ from crc import app, db, session from crc.models.api_models import WorkflowApiSchema, MultiInstanceType from crc.models.approval import ApprovalModel, ApprovalStatus from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES -from crc.models.protocol_builder import ProtocolBuilderStatus from crc.models.task_event import TaskEventModel -from crc.models.study import StudyModel +from crc.models.study import StudyModel, StudyStatus from crc.models.user import UserModel from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel from crc.services.file_service import FileService @@ -60,7 +59,7 @@ class BaseTest(unittest.TestCase): 'id':0, 'title':'The impact of fried pickles on beer consumption in bipedal software developers.', 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.active, + 'status':StudyStatus.in_progress, 'primary_investigator_id':'dhf8r', 'sponsor':'Sartography Pharmaceuticals', 'ind_number':'1234', @@ -70,7 +69,7 @@ class BaseTest(unittest.TestCase): 'id':1, 'title':'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels', 'last_updated':datetime.datetime.now(), - 'protocol_builder_status':ProtocolBuilderStatus.active, + 'status':StudyStatus.in_progress, 'primary_investigator_id':'dhf8r', 'sponsor':'Makerspace & Co.', 'ind_number':'5678', @@ -241,7 +240,7 @@ class BaseTest(unittest.TestCase): study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first() if study is None: user = self.create_user(uid=uid) - study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.active, + study = StudyModel(title=title, status=StudyStatus.in_progress, 
user_uid=user.uid, primary_investigator_id=primary_investigator_id) db.session.add(study) db.session.commit() diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 5e93245e..62860cde 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -11,7 +11,7 @@ from crc.models.protocol_builder import ProtocolBuilderStatus, \ ProtocolBuilderStudySchema from crc.models.approval import ApprovalStatus from crc.models.task_event import TaskEventModel -from crc.models.study import StudyModel, StudySchema +from crc.models.study import StudyModel, StudySchema, StudyStatus from crc.models.workflow import WorkflowSpecModel, WorkflowModel from crc.services.file_service import FileService from crc.services.workflow_processor import WorkflowProcessor @@ -30,7 +30,7 @@ class TestStudyApi(BaseTest): def add_test_study(self): study_schema = StudySchema().dump(self.TEST_STUDY) - study_schema['protocol_builder_status'] = ProtocolBuilderStatus.active.value + study_schema['status'] = StudyStatus.in_progress.value rv = self.app.post('/v1.0/study', content_type="application/json", headers=self.logged_in_headers(), @@ -137,7 +137,7 @@ class TestStudyApi(BaseTest): study: StudyModel = session.query(StudyModel).first() study.title = "Pilot Study of Fjord Placement for Single Fraction Outcomes to Cortisol Susceptibility" study_schema = StudySchema().dump(study) - study_schema['protocol_builder_status'] = ProtocolBuilderStatus.active.value + study_schema['status'] = StudyStatus.in_progress.value rv = self.app.put('/v1.0/study/%i' % study.id, content_type="application/json", headers=self.logged_in_headers(), @@ -145,7 +145,7 @@ class TestStudyApi(BaseTest): self.assert_success(rv) json_data = json.loads(rv.get_data(as_text=True)) self.assertEqual(study.title, json_data['title']) - self.assertEqual(study.protocol_builder_status.value, json_data['protocol_builder_status']) + self.assertEqual(study.status.value, json_data['status']) 
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_studies @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs @@ -183,15 +183,15 @@ class TestStudyApi(BaseTest): num_incomplete = 0 num_abandoned = 0 - num_active = 0 + num_in_progress = 0 num_open = 0 for study in json_data: - if study['protocol_builder_status'] == 'abandoned': # One study does not exist in user_studies.json + if study['status'] == 'abandoned': # One study does not exist in user_studies.json num_abandoned += 1 - if study['protocol_builder_status'] == 'active': # One study is marked complete without HSR Number - num_active += 1 - if study['protocol_builder_status'] == 'open': # One study is marked complete and has an HSR Number + if study['status'] == 'in progress': # One study is marked complete without HSR Number + num_in_progress += 1 + if study['status'] == 'open for enrollment': # One study is marked complete and has an HSR Number num_open += 1 db_studies_after = session.query(StudyModel).all() @@ -199,10 +199,10 @@ class TestStudyApi(BaseTest): self.assertGreater(num_db_studies_after, num_db_studies_before) self.assertEqual(num_abandoned, 1) self.assertEqual(num_open, 1) - self.assertEqual(num_active, 2) + self.assertEqual(num_in_progress, 2) self.assertEqual(num_incomplete, 0) self.assertEqual(len(json_data), num_db_studies_after) - self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after) + self.assertEqual(num_open + num_in_progress + num_incomplete + num_abandoned, num_db_studies_after) @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators') # mock_studies @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs @@ -230,7 +230,7 @@ class TestStudyApi(BaseTest): json_data = json.loads(rv.get_data(as_text=True)) self.assertEqual(study.id, json_data['id']) self.assertEqual(study.title, json_data['title']) - 
self.assertEqual(study.protocol_builder_status.value, json_data['protocol_builder_status']) + self.assertEqual(study.status.value, json_data['status']) self.assertEqual(study.primary_investigator_id, json_data['primary_investigator_id']) self.assertEqual(study.sponsor, json_data['sponsor']) self.assertEqual(study.ind_number, json_data['ind_number']) diff --git a/tests/study/test_study_service.py b/tests/study/test_study_service.py index e9711362..11de32cd 100644 --- a/tests/study/test_study_service.py +++ b/tests/study/test_study_service.py @@ -6,7 +6,7 @@ from tests.base_test import BaseTest from crc import db, app from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.study import StudyModel +from crc.models.study import StudyModel, StudyStatus from crc.models.user import UserModel from crc.models.workflow import WorkflowModel, WorkflowStatus, \ WorkflowSpecCategoryModel @@ -40,7 +40,7 @@ class TestStudyService(BaseTest): for study in db.session.query(StudyModel).all(): StudyService().delete_study(study.id) - study = StudyModel(title="My title", protocol_builder_status=ProtocolBuilderStatus.active, user_uid=user.uid) + study = StudyModel(title="My title", status=StudyStatus.in_progress, user_uid=user.uid) db.session.add(study) self.load_test_spec("random_fact", category_id=cat.id) diff --git a/tests/test_authentication.py b/tests/test_authentication.py index 829d71e3..66053065 100644 --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -8,7 +8,7 @@ from tests.base_test import BaseTest from crc import db, app from crc.api.common import ApiError from crc.models.protocol_builder import ProtocolBuilderStatus -from crc.models.study import StudySchema, StudyModel +from crc.models.study import StudySchema, StudyModel, StudyStatus from crc.models.user import UserModel @@ -220,7 +220,7 @@ class TestAuthentication(BaseTest): return { "title": "blah", "last_updated": datetime.now(tz=timezone.utc), - "protocol_builder_status": 
ProtocolBuilderStatus.active, + "status": StudyStatus.in_progress, "primary_investigator_id": uid, "user_uid": uid, } From 438a31c9ece37e57997f693d859eef7eb14fde96 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 31 Jul 2020 13:19:26 -0400 Subject: [PATCH 57/60] Updates packages --- Pipfile | 3 +-- Pipfile.lock | 47 +++++++++-------------------------------------- 2 files changed, 10 insertions(+), 40 deletions(-) diff --git a/Pipfile b/Pipfile index f16e89fa..2cc41e39 100644 --- a/Pipfile +++ b/Pipfile @@ -38,9 +38,8 @@ recommonmark = "*" requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} sphinx = "*" -spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow.git",ref = "master"} -#spiffworkflow = {editable = true,path="/home/kelly/sartography/SpiffWorkflow/"} swagger-ui-bundle = "*" +spiffworkflow = {editable = true, git = "https://github.com/sartography/SpiffWorkflow.git", ref = "master"} webtest = "*" werkzeug = "*" xlrd = "*" diff --git a/Pipfile.lock b/Pipfile.lock index c955e731..634fea80 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "45dc348da1f583da4a7c76113456b3f0225736e79a5da05ba2af9ede7f8089e0" + "sha256": "096abf7ce152358489282a004ed634ca64730cb98276f3a513ed2d5b8a6635c6" }, "pipfile-spec": 6, "requires": { @@ -32,11 +32,11 @@ }, "amqp": { "hashes": [ - "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", - "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" + "sha256:70cdb10628468ff14e57ec2f751c7aa9e48e7e3651cfd62d431213c0c4e58f21", + "sha256:aa7f313fb887c91f15474c1229907a04dac0b8135822d6603437803424c0aa59" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==2.6.0" + "version": "==2.6.1" }, "aniso8601": { "hashes": [ @@ -247,6 +247,7 @@ "sha256:525ba66fb5f90b07169fdd48b6373c18f1ee12728ca277ca44567a367d9d7f74", 
"sha256:a766c1dccb30c5f6eb2b203f87edd1d8588847709c78589e1521d769addc8218" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.10" }, "docutils": { @@ -378,14 +379,6 @@ "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, - "importlib-metadata": { - "hashes": [ - "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83", - "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070" - ], - "markers": "python_version < '3.8'", - "version": "==1.7.0" - }, "inflection": { "hashes": [ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", @@ -955,6 +948,7 @@ "version": "==1.1.4" }, "spiffworkflow": { + "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", "ref": "7c8d59e7b9a978795bc8d1f354002fdc89540672" }, @@ -1076,14 +1070,6 @@ ], "index": "pypi", "version": "==1.3.0" - }, - "zipp": { - "hashes": [ - "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", - "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" - ], - "markers": "python_version >= '3.6'", - "version": "==3.1.0" } }, "develop": { @@ -1135,19 +1121,12 @@ "index": "pypi", "version": "==5.2.1" }, - "importlib-metadata": { - "hashes": [ - "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83", - "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070" - ], - "markers": "python_version < '3.8'", - "version": "==1.7.0" - }, "iniconfig": { "hashes": [ - "sha256:aa0b40f50a00e72323cb5d41302f9c6165728fd764ac8822aa3fff00a40d56b4" + "sha256:80cf40c597eb564e86346103f609d74efce0f6b4d4f30ec8ce9e2c26411ba437", + "sha256:e5f92f89355a67de0595932a6c6c02ab4afddc6fcdc0bfc5becd0d60884d3f69" ], - "version": "==1.0.0" + "version": "==1.0.1" }, "more-itertools": { "hashes": [ @@ -1219,14 +1198,6 @@ "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88" ], 
"version": "==0.10.1" - }, - "zipp": { - "hashes": [ - "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", - "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" - ], - "markers": "python_version >= '3.6'", - "version": "==3.1.0" } } } From ca9ef332baece1b861c98c786dff8c9a24d2e153 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 31 Jul 2020 13:19:37 -0400 Subject: [PATCH 58/60] Fixes failing test. --- crc/services/workflow_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 29d2bf51..c58fa098 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -190,7 +190,7 @@ class WorkflowProcessor(object): bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only #try: - bpmn_workflow.do_engine_steps() + # bpmn_workflow.do_engine_steps() # except WorkflowException as we: # raise ApiError.from_task_spec("error_loading_workflow", str(we), we.sender) return bpmn_workflow From 2c2967e633a6c7006afbfdc95a2de8116e50386a Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 31 Jul 2020 11:19:50 -0600 Subject: [PATCH 59/60] Normalizing enum values --- crc/models/study.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crc/models/study.py b/crc/models/study.py index d1114349..956723e0 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -16,9 +16,9 @@ from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, Workfl class StudyStatus(enum.Enum): - in_progress = 'in progress' + in_progress = 'in_progress' hold = 'hold' - open_for_enrollment = 'open for enrollment' + open_for_enrollment = 'open_for_enrollment' abandoned = 'abandoned' From 4fcba113b876907f05342a1acfad35c26810003b Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 31 Jul 2020 11:49:04 -0600 
Subject: [PATCH 60/60] Fixing tests --- crc/api.yml | 2 +- tests/study/test_study_api.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index 4bb63af3..5107a60f 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -1064,7 +1064,7 @@ components: example: dhf8r status: type: string - enum: ['in progress', 'hold', 'open for enrollment', 'abandoned'] + enum: ['in_progress', 'hold', 'open_for_enrollment', 'abandoned'] example: done sponsor: type: string diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py index 62860cde..6bb601ce 100644 --- a/tests/study/test_study_api.py +++ b/tests/study/test_study_api.py @@ -189,9 +189,9 @@ class TestStudyApi(BaseTest): for study in json_data: if study['status'] == 'abandoned': # One study does not exist in user_studies.json num_abandoned += 1 - if study['status'] == 'in progress': # One study is marked complete without HSR Number + if study['status'] == 'in_progress': # One study is marked complete without HSR Number num_in_progress += 1 - if study['status'] == 'open for enrollment': # One study is marked complete and has an HSR Number + if study['status'] == 'open_for_enrollment': # One study is marked complete and has an HSR Number num_open += 1 db_studies_after = session.query(StudyModel).all()