From 1db940116613001faeada34903243741b6c790f8 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Mon, 1 Jun 2020 17:42:28 -0400 Subject: [PATCH 01/44] Don't put all the data into Spiff Tasks on a reload or backtrack, just store the data that gets submitted each time in the task log, and use that. This should correct issues with parallel tasks and other complex areas - so we don't have tasks seeing data that isn't along their path. --- crc/api/workflow.py | 44 +++++++++++++++++++------- crc/models/api_models.py | 7 ++--- crc/models/stats.py | 1 + crc/services/workflow_processor.py | 2 +- crc/services/workflow_service.py | 9 +++--- migrations/versions/3876e130664e_.py | 28 +++++++++++++++++ tests/test_tasks_api.py | 4 ++- tests/test_workflow_processor.py | 47 ---------------------------- 8 files changed, 73 insertions(+), 69 deletions(-) create mode 100644 migrations/versions/3876e130664e_.py diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 81252056..46befa20 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -1,5 +1,7 @@ import uuid +from SpiffWorkflow.util.deep_merge import DeepMerge + from crc import session from crc.api.common import ApiError, ApiErrorSchema from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema @@ -132,12 +134,19 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None): total_tasks=processor.workflow_model.total_tasks, completed_tasks=processor.workflow_model.completed_tasks, last_updated=processor.workflow_model.last_updated, - title=spec.display_name ) if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. # This may or may not work, sometimes there is no next task to complete. 
next_task = processor.next_task() if next_task: + latest_event = session.query(TaskEventModel) \ + .filter_by(workflow_id=processor.workflow_model.id) \ + .filter_by(task_name=next_task.task_spec.name) \ + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ + .order_by(TaskEventModel.date.desc()).first() + if latest_event: + next_task.data = DeepMerge.merge(next_task.data, latest_event.task_data) + workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) return workflow_api @@ -158,17 +167,22 @@ def set_current_task(workflow_id, task_id): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.COMPLETED and task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not move the token to a task who's state is not " "currently set to COMPLETE or READY.") # Only reset the token if the task doesn't already have it. - if task.state == task.COMPLETED: - task.reset_token(reset_data=False) # we could optionally clear the previous data. + if spiff_task.state == spiff_task.COMPLETED: + spiff_task.reset_token(reset_data=True) # Don't try to copy the existing data back into this task. 
+ processor.save() - WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET) - workflow_api_model = __get_workflow_api_model(processor, task) + task_api = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=True) + WorkflowService.log_task_action(workflow_model, task_api, + WorkflowService.TASK_ACTION_TOKEN_RESET, + version = processor.get_version_string()) + + workflow_api_model = __get_workflow_api_model(processor, spiff_task) return WorkflowApiSchema().dump(workflow_api_model) @@ -176,15 +190,21 @@ def update_task(workflow_id, task_id, body): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model) task_id = uuid.UUID(task_id) - task = processor.bpmn_workflow.get_task(task_id) - if task.state != task.READY: + spiff_task = processor.bpmn_workflow.get_task(task_id) + if spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. 
" "Consider calling a token reset to make this task Ready.") - task.update_data(body) - processor.complete_task(task) + spiff_task.update_data(body) + processor.complete_task(spiff_task) processor.do_engine_steps() processor.save() - WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE) + + task_api = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=True) + WorkflowService.log_task_action(workflow_model, + task_api, + WorkflowService.TASK_ACTION_COMPLETE, + version = processor.get_version_string(), + updated_data = spiff_task.data) workflow_api_model = __get_workflow_api_model(processor) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index b8b535a7..eee6d5f5 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -119,7 +119,7 @@ class NavigationItemSchema(ma.Schema): class WorkflowApi(object): def __init__(self, id, status, next_task, navigation, - spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title): + spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated): self.id = id self.status = status self.next_task = next_task # The next task that requires user input. 
@@ -130,14 +130,13 @@ class WorkflowApi(object): self.total_tasks = total_tasks self.completed_tasks = completed_tasks self.last_updated = last_updated - self.title = title class WorkflowApiSchema(ma.Schema): class Meta: model = WorkflowApi fields = ["id", "status", "next_task", "navigation", "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks", - "last_updated", "title"] + "last_updated"] unknown = INCLUDE status = EnumField(WorkflowStatus) @@ -148,7 +147,7 @@ class WorkflowApiSchema(ma.Schema): def make_workflow(self, data, **kwargs): keys = ['id', 'status', 'next_task', 'navigation', 'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks", - "last_updated", "title"] + "last_updated"] filtered_fields = {key: data[key] for key in keys} filtered_fields['next_task'] = TaskSchema().make_task(data['next_task']) return WorkflowApi(**filtered_fields) diff --git a/crc/models/stats.py b/crc/models/stats.py index c72df7d4..8912b1d1 100644 --- a/crc/models/stats.py +++ b/crc/models/stats.py @@ -17,6 +17,7 @@ class TaskEventModel(db.Model): task_title = db.Column(db.String) task_type = db.Column(db.String) task_state = db.Column(db.String) + task_data = db.Column(db.JSON) mi_type = db.Column(db.String) mi_count = db.Column(db.Integer) mi_index = db.Column(db.Integer) diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 93590d94..e5cbe0a3 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -315,7 +315,7 @@ class WorkflowProcessor(object): # Reset the current workflow to the beginning - which we will consider to be the first task after the root # element. This feels a little sketchy, but I think it is safe to assume root will have one child. first_task = self.bpmn_workflow.task_tree.children[0] - first_task.reset_token(reset_data=False) + first_task.reset_token(reset_data=True) # Clear out the data. 
for task in new_bpmn_workflow.get_tasks(SpiffTask.READY): task.data = first_task.data new_bpmn_workflow.do_engine_steps() diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 312dee3c..1b34bd56 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -9,6 +9,7 @@ from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask from SpiffWorkflow.specs import CancelTask, StartTask +from SpiffWorkflow.util.deep_merge import DeepMerge from flask import g from jinja2 import Template @@ -316,21 +317,21 @@ class WorkflowService(object): field.options.append({"id": d.value, "name": d.label}) @staticmethod - def log_task_action(processor, spiff_task, action): - task = WorkflowService.spiff_task_to_api_task(spiff_task) - workflow_model = processor.workflow_model + def log_task_action(workflow_model: WorkflowModel, task: Task, + action: string, version, updated_data=None): task_event = TaskEventModel( study_id=workflow_model.study_id, user_uid=g.user.uid, workflow_id=workflow_model.id, workflow_spec_id=workflow_model.workflow_spec_id, - spec_version=processor.get_version_string(), + spec_version=version, action=action, task_id=task.id, task_name=task.name, task_title=task.title, task_type=str(task.type), task_state=task.state, + task_data=updated_data, mi_type=task.multi_instance_type.value, # Some tasks have a repeat behavior. mi_count=task.multi_instance_count, # This is the number of times the task could repeat. mi_index=task.multi_instance_index, # And the index of the currently repeating task. 
diff --git a/migrations/versions/3876e130664e_.py b/migrations/versions/3876e130664e_.py new file mode 100644 index 00000000..31e7ce13 --- /dev/null +++ b/migrations/versions/3876e130664e_.py @@ -0,0 +1,28 @@ +"""empty message + +Revision ID: 3876e130664e +Revises: 5064b72284b7 +Create Date: 2020-06-01 15:39:53.937591 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '3876e130664e' +down_revision = '5064b72284b7' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('task_data', sa.JSON(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column('task_event', 'task_data') + # ### end Alembic commands ### diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 67a644ef..41fd1a3b 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -10,7 +10,6 @@ from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSche from crc.models.file import FileModelSchema from crc.models.stats import TaskEventModel from crc.models.workflow import WorkflowStatus -from crc.services.protocol_builder import ProtocolBuilderService from crc.services.workflow_service import WorkflowService @@ -79,6 +78,9 @@ class TestTasksApi(BaseTest): self.assertEquals(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) + # Assure that the data provided occurs in the task data log. 
+ for key in dict_data.keys(): + self.assertIn(key, event.task_data) workflow = WorkflowApiSchema().load(json_data) return workflow diff --git a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py index b3f6c374..1f8beebf 100644 --- a/tests/test_workflow_processor.py +++ b/tests/test_workflow_processor.py @@ -270,53 +270,6 @@ class TestWorkflowProcessor(BaseTest): processor = self.get_processor(study, workflow_spec_model) self.assertTrue(processor.get_version_string().startswith('v2.1.1')) - def test_restart_workflow(self): - self.load_example_data() - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"key": "Value"} - processor.complete_task(task) - task_before_restart = processor.next_task() - processor.hard_reset() - task_after_restart = processor.next_task() - - self.assertNotEqual(task.get_name(), task_before_restart.get_name()) - self.assertEqual(task.get_name(), task_after_restart.get_name()) - self.assertEqual(task.data, task_after_restart.data) - - def test_soft_reset(self): - self.load_example_data() - - # Start the two_forms workflow, and enter some data in the first form. - study = session.query(StudyModel).first() - workflow_spec_model = self.load_test_spec("two_forms") - processor = self.get_processor(study, workflow_spec_model) - self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id) - task = processor.next_task() - task.data = {"color": "blue"} - processor.complete_task(task) - - # Modify the specification, with a minor text change. 
- file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn') - self.replace_file("two_forms.bpmn", file_path) - - # Setting up another processor should not error out, but doesn't pick up the update. - processor.workflow_model.bpmn_workflow_json = processor.serialize() - processor2 = WorkflowProcessor(processor.workflow_model) - self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description) - self.assertNotEqual("# This is some documentation I wanted to add.", - processor2.bpmn_workflow.last_task.task_spec.documentation) - - # You can do a soft update and get the right response. - processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True) - self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description) - self.assertEqual("# This is some documentation I wanted to add.", - processor3.bpmn_workflow.last_task.task_spec.documentation) - - def test_hard_reset(self): self.load_example_data() From 4cf52b527cde4805ffb9f7cad17d23783fa32756 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 8 Jun 2020 07:14:31 -0600 Subject: [PATCH 02/44] Adding admin dashboard --- Pipfile | 1 + Pipfile.lock | 34 ++++++++++++++++++++++++---------- crc/__init__.py | 10 ++++++++++ 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/Pipfile b/Pipfile index 0079962c..a9d95c3d 100644 --- a/Pipfile +++ b/Pipfile @@ -40,6 +40,7 @@ gunicorn = "*" werkzeug = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} flask-mail = "*" +flask-admin = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index f8ab746b..f31d2457 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee" + "sha256": "282ec41cafca86628782987347085a494c52318c94e56d36d4bbd6a44092b110" }, "pipfile-spec": 6, "requires": { @@ -111,10 +111,10 @@ }, "certifi": { "hashes": [ - 
"sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304", - "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519" + "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", + "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" ], - "version": "==2020.4.5.1" + "version": "==2020.4.5.2" }, "cffi": { "hashes": [ @@ -261,6 +261,13 @@ "index": "pypi", "version": "==1.1.2" }, + "flask-admin": { + "hashes": [ + "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8" + ], + "index": "pypi", + "version": "==1.5.6" + }, "flask-bcrypt": { "hashes": [ "sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f" @@ -285,11 +292,11 @@ }, "flask-marshmallow": { "hashes": [ - "sha256:6e6aec171b8e092e0eafaf035ff5b8637bf3a58ab46f568c4c1bab02f2a3c196", - "sha256:a1685536e7ab5abdc712bbc1ac1a6b0b50951a368502f7985e7d1c27b3c21e59" + "sha256:1da1e6454a56a3e15107b987121729f152325bdef23f3df2f9b52bbd074af38e", + "sha256:aefc1f1d96256c430a409f08241bab75ffe97e5d14ac5d1f000764e39bf4873a" ], "index": "pypi", - "version": "==0.12.0" + "version": "==0.13.0" }, "flask-migrate": { "hashes": [ @@ -359,10 +366,10 @@ }, "inflection": { "hashes": [ - "sha256:32a5c3341d9583ec319548b9015b7fbdf8c429cbcb575d326c33ae3a0e90d52c", - "sha256:9a15d3598f01220e93f2207c432cfede50daff53137ce660fb8be838ef1ca6cc" + "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", + "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], - "version": "==0.4.0" + "version": "==0.5.0" }, "itsdangerous": { "hashes": [ @@ -890,6 +897,13 @@ "index": "pypi", "version": "==1.0.1" }, + "wtforms": { + "hashes": [ + "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b", + "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972" + ], + "version": "==2.3.1" + }, "xlrd": { "hashes": [ "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2", 
diff --git a/crc/__init__.py b/crc/__init__.py index a1dd95f6..2bcc17ef 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -4,6 +4,8 @@ import sentry_sdk import connexion from jinja2 import Environment, FileSystemLoader +from flask_admin import Admin +from flask_admin.contrib.sqla import ModelView from flask_cors import CORS from flask_marshmallow import Marshmallow from flask_mail import Mail @@ -40,10 +42,18 @@ from crc import api connexion_app.add_api('api.yml', base_path='/v1.0') +# Admin app +admin = Admin(app) +admin.add_view(ModelView(models.study.StudyModel, db.session)) +admin.add_view(ModelView(models.approval.ApprovalModel, db.session)) +admin.add_view(ModelView(models.user.UserModel, db.session)) +admin.add_view(ModelView(models.workflow.WorkflowModel, db.session)) + # Convert list of allowed origins to list of regexes origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']] cors = CORS(connexion_app.app, origins=origins_re) +# Sentry error handling if app.config['ENABLE_SENTRY']: sentry_sdk.init( dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915", From 5c1c0f685eba5ba95d99ce2f6b376fa7e0b865d6 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 8 Jun 2020 16:17:41 -0600 Subject: [PATCH 03/44] Tests update --- crc/static/templates/mails/ramp_up_denied.txt | 2 +- tests/test_mails.py | 77 ++++++++++++++----- 2 files changed, 58 insertions(+), 21 deletions(-) diff --git a/crc/static/templates/mails/ramp_up_denied.txt b/crc/static/templates/mails/ramp_up_denied.txt index 5fbaefda..120522b8 100644 --- a/crc/static/templates/mails/ramp_up_denied.txt +++ b/crc/static/templates/mails/ramp_up_denied.txt @@ -1 +1 @@ - Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. 
Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and res-submit for approval. \ No newline at end of file + Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval. \ No newline at end of file diff --git a/tests/test_mails.py b/tests/test_mails.py index 15a01583..48752358 100644 --- a/tests/test_mails.py +++ b/tests/test_mails.py @@ -1,6 +1,7 @@ from tests.base_test import BaseTest +from crc import mail from crc.services.mails import ( send_ramp_up_submission_email, send_ramp_up_approval_request_email, @@ -21,35 +22,71 @@ class TestMails(BaseTest): self.approver_2 = 'Close Reviewer' def test_send_ramp_up_submission_email(self): - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) + with mail.record_messages() as outbox: - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) + + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) def test_send_ramp_up_approval_request_email(self): - 
send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) - self.assertTrue(True) + with mail.record_messages() as outbox: + send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) def test_send_ramp_up_approval_request_first_review_email(self): - send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.primary_investigator - ) - self.assertTrue(True) + with mail.record_messages() as outbox: + send_ramp_up_approval_request_first_review_email( + self.sender, self.recipients, self.primary_investigator + ) + + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) def test_send_ramp_up_approved_email(self): - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) + with mail.record_messages() as outbox: + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) - self.assertTrue(True) + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) + self.assertEqual(len(outbox), 2) + self.assertIn(self.approver_1, outbox[1].body) + self.assertIn(self.approver_1, outbox[1].html) + self.assertIn(self.approver_2, outbox[1].body) + self.assertIn(self.approver_2, outbox[1].html) def 
test_send_ramp_up_denied_email(self): - send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) - self.assertTrue(True) + with mail.record_messages() as outbox: + send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.approver_1, outbox[0].body) + self.assertIn(self.approver_1, outbox[0].html) def test_send_send_ramp_up_denied_email_to_approver(self): - send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.primary_investigator, self.approver_2 - ) - self.assertTrue(True) + with mail.record_messages() as outbox: + send_ramp_up_denied_email_to_approver( + self.sender, self.recipients, self.primary_investigator, self.approver_2 + ) + + self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') + self.assertIn(self.primary_investigator, outbox[0].body) + self.assertIn(self.primary_investigator, outbox[0].html) + self.assertIn(self.approver_2, outbox[0].body) + self.assertIn(self.approver_2, outbox[0].html) From e9e805b2c96e36f634b58e8b88b64e5725bd48dc Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Tue, 9 Jun 2020 22:57:56 -0600 Subject: [PATCH 04/44] Storing emails in database --- crc/models/email.py | 18 +++++++++ crc/services/approval_service.py | 5 +++ crc/services/email_service.py | 27 ++++++++++++++ crc/services/file_service.py | 2 +- crc/services/mails.py | 55 ++++++++++++++++++++-------- crc/services/study_service.py | 3 -- crc/services/workflow_service.py | 3 +- migrations/versions/62a11a335778_.py | 38 +++++++++++++++++++ tests/test_mails.py | 37 ++++++++++++++----- 9 files changed, 158 insertions(+), 30 deletions(-) create mode 100644 crc/models/email.py create mode 100644 crc/services/email_service.py create mode 100644 migrations/versions/62a11a335778_.py diff --git a/crc/models/email.py b/crc/models/email.py new file mode 100644 index 00000000..c3180a27 --- /dev/null +++ b/crc/models/email.py @@ 
-0,0 +1,18 @@ +from flask_marshmallow.sqla import SQLAlchemyAutoSchema +from marshmallow import EXCLUDE +from sqlalchemy import func + +from crc import db +from crc.models.approval import ApprovalModel + + +class EmailModel(db.Model): + __tablename__ = 'email' + id = db.Column(db.Integer, primary_key=True) + subject = db.Column(db.String) + sender = db.Column(db.String) + recipients = db.Column(db.String) + content = db.Column(db.String) + content_html = db.Column(db.String) + approval_id = db.Column(db.Integer, db.ForeignKey(ApprovalModel.id), nullable=False) + approval = db.relationship(ApprovalModel) diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 1f6f56b3..dbeed829 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -132,6 +132,7 @@ class ApprovalService(object): mail_result = send_ramp_up_approved_email( 'askresearch@virginia.edu', [pi_user_info.email_address], + approval_id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -144,6 +145,7 @@ class ApprovalService(object): mail_result = send_ramp_up_denied_email( 'askresearch@virginia.edu', [pi_user_info.email_address], + approval_id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -159,6 +161,7 @@ class ApprovalService(object): mail_result = send_ramp_up_denied_email_to_approver( 'askresearch@virginia.edu', approver_email, + approval_id, f'{pi_user_info.display_name} - ({pi_user_info.uid})', f'{approver_info.display_name} - ({approver_info.uid})' ) @@ -231,6 +234,7 @@ class ApprovalService(object): mail_result = send_ramp_up_submission_email( 'askresearch@virginia.edu', [pi_user_info.email_address], + model.id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -241,6 +245,7 @@ class ApprovalService(object): mail_result = send_ramp_up_approval_request_first_review_email( 'askresearch@virginia.edu', approver_email, + model.id, f'{pi_user_info.display_name} 
- ({pi_user_info.uid})' ) if mail_result: diff --git a/crc/services/email_service.py b/crc/services/email_service.py new file mode 100644 index 00000000..036ea1c9 --- /dev/null +++ b/crc/services/email_service.py @@ -0,0 +1,27 @@ +from datetime import datetime + +from sqlalchemy import desc + +from crc import app, db, session +from crc.api.common import ApiError + +from crc.models.approval import ApprovalModel +from crc.models.email import EmailModel + + +class EmailService(object): + """Provides common tools for working with an Email""" + + @staticmethod + def add_email(subject, sender, recipients, content, content_html, approval_id): + """We will receive all data related to an email and store it""" + + # Find corresponding approval + approval = db.session.query(ApprovalModel).get(approval_id) + + # Create EmailModel + email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients), + content=content, content_html=content_html, approval=approval) + + db.session.add(email_model) + db.session.commit() diff --git a/crc/services/file_service.py b/crc/services/file_service.py index ff234a79..ef4e8935 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -58,7 +58,7 @@ class FileService(object): "irb_docunents.xslx reference file. 
This code is not found in that file '%s'" % irb_doc_code) """Assure this is unique to the workflow, task, and document code AND the Name - Because we will allow users to upload multiple files for the same form field + Because we will allow users to upload multiple files for the same form field in some cases """ file_model = session.query(FileModel)\ .filter(FileModel.workflow_id == workflow_id)\ diff --git a/crc/services/mails.py b/crc/services/mails.py index bd825f69..40db52c8 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -3,13 +3,16 @@ import os from flask import render_template, render_template_string from flask_mail import Message +from crc.services.email_service import EmailService + # TODO: Extract common mailing code into its own function def send_test_email(sender, recipients): try: msg = Message('Research Ramp-up Plan test', sender=sender, - recipients=recipients) + recipients=recipients, + bcc=['rrt_emails@googlegroups.com']) from crc import env, mail template = env.get_template('ramp_up_approval_request_first_review.txt') template_vars = {'primary_investigator': "test"} @@ -20,11 +23,10 @@ def send_test_email(sender, recipients): except Exception as e: return str(e) - - -def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None): +def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, approver_2=None): try: - msg = Message('Research Ramp-up Plan Submitted', + subject = 'Research Ramp-up Plan Submitted' + msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) @@ -35,13 +37,17 @@ def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=Non template = env.get_template('ramp_up_submission.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return 
str(e) -def send_ramp_up_approval_request_email(sender, recipients, primary_investigator): +def send_ramp_up_approval_request_email(sender, recipients, approval_id, primary_investigator): try: - msg = Message('Research Ramp-up Plan Approval Request', + subject = 'Research Ramp-up Plan Approval Request' + msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) @@ -52,13 +58,17 @@ def send_ramp_up_approval_request_email(sender, recipients, primary_investigator template = env.get_template('ramp_up_approval_request.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return str(e) -def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator): +def send_ramp_up_approval_request_first_review_email(sender, recipients, approval_id, primary_investigator): try: - msg = Message('Research Ramp-up Plan Approval Request', + subject = 'Research Ramp-up Plan Approval Request' + msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) @@ -69,13 +79,17 @@ def send_ramp_up_approval_request_first_review_email(sender, recipients, primary template = env.get_template('ramp_up_approval_request_first_review.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return str(e) -def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None): +def send_ramp_up_approved_email(sender, recipients, approval_id, approver_1, approver_2=None): try: - msg = Message('Research Ramp-up Plan Approved', + subject = 'Research Ramp-up Plan Approved' + msg = Message(subject, sender=sender, recipients=recipients, 
bcc=['rrt_emails@googlegroups.com']) @@ -87,13 +101,17 @@ def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None) template = env.get_template('ramp_up_approved.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return str(e) -def send_ramp_up_denied_email(sender, recipients, approver): +def send_ramp_up_denied_email(sender, recipients, approval_id, approver): try: - msg = Message('Research Ramp-up Plan Denied', + subject = 'Research Ramp-up Plan Denied' + msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) @@ -105,13 +123,17 @@ def send_ramp_up_denied_email(sender, recipients, approver): template = env.get_template('ramp_up_denied.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return str(e) -def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2): +def send_ramp_up_denied_email_to_approver(sender, recipients, approval_id, primary_investigator, approver_2): try: - msg = Message('Research Ramp-up Plan Denied', + subject = 'Research Ramp-up Plan Denied' + msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) @@ -123,6 +145,9 @@ def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigat template = env.get_template('ramp_up_denied_first_approver.html') msg.html = template.render(template_vars) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=msg.body, content_html=msg.html, approval_id=approval_id) + mail.send(msg) except Exception as e: return str(e) diff --git 
a/crc/services/study_service.py b/crc/services/study_service.py index 4024b5f0..43aa8297 100644 --- a/crc/services/study_service.py +++ b/crc/services/study_service.py @@ -181,8 +181,6 @@ class StudyService(object): documents[code] = doc return documents - - @staticmethod def get_investigators(study_id): @@ -224,7 +222,6 @@ class StudyService(object): return FileModelSchema().dump(file) - @staticmethod def synch_with_protocol_builder_if_enabled(user): """Assures that the studies we have locally for the given user are diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 5efa8cab..ab7494ee 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -37,7 +37,7 @@ class WorkflowService(object): the workflow Processor should be hidden behind this service. This will help maintain a structure that avoids circular dependencies. But for now, this contains tools for converting spiff-workflow models into our - own API models with additional information and capabilities and + own API models with additional information and capabilities and handles the testing of a workflow specification by completing it with random selections, attempting to mimic a front end as much as possible. """ @@ -180,7 +180,6 @@ class WorkflowService(object): def __get_options(self): pass - @staticmethod def _random_string(string_length=10): """Generate a random string of fixed length """ diff --git a/migrations/versions/62a11a335778_.py b/migrations/versions/62a11a335778_.py new file mode 100644 index 00000000..ee8d8f91 --- /dev/null +++ b/migrations/versions/62a11a335778_.py @@ -0,0 +1,38 @@ +"""empty message + +Revision ID: 62a11a335778 +Revises: 17597692d0b0 +Create Date: 2020-06-09 22:45:52.475183 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '62a11a335778' +down_revision = '17597692d0b0' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('email', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('subject', sa.String(), nullable=True), + sa.Column('sender', sa.String(), nullable=True), + sa.Column('recipients', sa.String(), nullable=True), + sa.Column('content', sa.String(), nullable=True), + sa.Column('content_html', sa.String(), nullable=True), + sa.Column('approval_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['approval_id'], ['approval.id'], ), + sa.PrimaryKeyConstraint('id') + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('email') + # ### end Alembic commands ### diff --git a/tests/test_mails.py b/tests/test_mails.py index 48752358..916d6ff7 100644 --- a/tests/test_mails.py +++ b/tests/test_mails.py @@ -1,7 +1,8 @@ from tests.base_test import BaseTest -from crc import mail +from crc import mail, session +from crc.models.approval import ApprovalModel, ApprovalStatus from crc.services.mails import ( send_ramp_up_submission_email, send_ramp_up_approval_request_email, @@ -15,6 +16,21 @@ from crc.services.mails import ( class TestMails(BaseTest): def setUp(self): + """Initial setup shared by all TestApprovals tests""" + self.load_example_data() + self.study = self.create_study() + self.workflow = self.create_workflow('random_fact') + + self.approval = ApprovalModel( + study=self.study, + workflow=self.workflow, + approver_uid='lb3dp', + status=ApprovalStatus.PENDING.value, + version=1 + ) + session.add(self.approval) + session.commit() + self.sender = 'sender@sartography.com' self.recipients = ['recipient@sartography.com'] self.primary_investigator = 'Dr. 
Bartlett' @@ -24,13 +40,14 @@ class TestMails(BaseTest): def test_send_ramp_up_submission_email(self): with mail.record_messages() as outbox: - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) + send_ramp_up_submission_email(self.sender, self.recipients, self.approval.id, self.approver_1) self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) - send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) + send_ramp_up_submission_email(self.sender, self.recipients, self.approval.id, + self.approver_1, self.approver_2) self.assertEqual(len(outbox), 2) self.assertIn(self.approver_1, outbox[1].body) self.assertIn(self.approver_1, outbox[1].html) @@ -39,7 +56,8 @@ class TestMails(BaseTest): def test_send_ramp_up_approval_request_email(self): with mail.record_messages() as outbox: - send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) + send_ramp_up_approval_request_email(self.sender, self.recipients, self.approval.id, + self.primary_investigator) self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') @@ -49,7 +67,7 @@ class TestMails(BaseTest): def test_send_ramp_up_approval_request_first_review_email(self): with mail.record_messages() as outbox: send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.primary_investigator + self.sender, self.recipients, self.approval.id, self.primary_investigator ) self.assertEqual(len(outbox), 1) @@ -59,13 +77,14 @@ class TestMails(BaseTest): def test_send_ramp_up_approved_email(self): with mail.record_messages() as outbox: - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) + send_ramp_up_approved_email(self.sender, self.recipients, self.approval.id, self.approver_1) 
self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) - send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) + send_ramp_up_approved_email(self.sender, self.recipients, self.approval.id, + self.approver_1, self.approver_2) self.assertEqual(len(outbox), 2) self.assertIn(self.approver_1, outbox[1].body) self.assertIn(self.approver_1, outbox[1].html) @@ -74,7 +93,7 @@ class TestMails(BaseTest): def test_send_ramp_up_denied_email(self): with mail.record_messages() as outbox: - send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) + send_ramp_up_denied_email(self.sender, self.recipients, self.approval.id, self.approver_1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) @@ -82,7 +101,7 @@ class TestMails(BaseTest): def test_send_send_ramp_up_denied_email_to_approver(self): with mail.record_messages() as outbox: send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.primary_investigator, self.approver_2 + self.sender, self.recipients, self.approval.id, self.primary_investigator, self.approver_2 ) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') From 5f13b96079a338a9516295e5a79655ad5b714cb3 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Fri, 12 Jun 2020 12:17:08 -0600 Subject: [PATCH 05/44] More enhancements --- crc/services/mails.py | 172 ++++++++++++++++-------------------- tests/test_email_service.py | 42 +++++++++ tests/test_mails.py | 19 ++++ 3 files changed, 138 insertions(+), 95 deletions(-) create mode 100644 tests/test_email_service.py diff --git a/crc/services/mails.py b/crc/services/mails.py index 40db52c8..6816b586 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -23,131 +23,113 @@ def 
send_test_email(sender, recipients): except Exception as e: return str(e) -def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, approver_2=None): +def send_mail(subject, sender, recipients, content, content_html): + from crc import mail try: - subject = 'Research Ramp-up Plan Submitted' msg = Message(subject, sender=sender, recipients=recipients, bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_submission.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_submission.html') - msg.html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + msg.body = content + msg.html = content_html mail.send(msg) except Exception as e: return str(e) +def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, approver_2=None): + from crc import env + subject = 'Research Ramp-up Plan Submitted' + + template = env.get_template('ramp_up_submission.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_submission.html') + content_html = template.render(template_vars) + + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) + + result = send_mail(subject, sender, recipients, content, content_html) + return result + def send_ramp_up_approval_request_email(sender, recipients, approval_id, primary_investigator): - try: - subject = 'Research Ramp-up Plan Approval Request' - msg = Message(subject, - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request.txt') 
- template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request.html') - msg.html = template.render(template_vars) + from crc import env + subject = 'Research Ramp-up Plan Approval Request' - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + template = env.get_template('ramp_up_approval_request.txt') + template_vars = {'primary_investigator': primary_investigator} + content = template.render(template_vars) + template = env.get_template('ramp_up_approval_request.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) + + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_approval_request_first_review_email(sender, recipients, approval_id, primary_investigator): - try: - subject = 'Research Ramp-up Plan Approval Request' - msg = Message(subject, - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - from crc import env, mail - template = env.get_template('ramp_up_approval_request_first_review.txt') - template_vars = {'primary_investigator': primary_investigator} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approval_request_first_review.html') - msg.html = template.render(template_vars) + from crc import env + subject = 'Research Ramp-up Plan Approval Request' - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + template = env.get_template('ramp_up_approval_request_first_review.txt') + template_vars = {'primary_investigator': primary_investigator} + content 
= template.render(template_vars) + template = env.get_template('ramp_up_approval_request_first_review.html') + content_html = template.render(template_vars) - mail.send(msg) - except Exception as e: - return str(e) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) + + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_approved_email(sender, recipients, approval_id, approver_1, approver_2=None): - try: - subject = 'Research Ramp-up Plan Approved' - msg = Message(subject, - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Approved' - from crc import env, mail - template = env.get_template('ramp_up_approved.txt') - template_vars = {'approver_1': approver_1, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_approved.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_approved.txt') + template_vars = {'approver_1': approver_1, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_approved.html') + content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_denied_email(sender, recipients, approval_id, approver): - try: - subject = 'Research Ramp-up Plan Denied' - msg = Message(subject, - sender=sender, - recipients=recipients, - 
bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied.txt') - template_vars = {'approver': approver} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_denied.txt') + template_vars = {'approver': approver} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied.html') + content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result def send_ramp_up_denied_email_to_approver(sender, recipients, approval_id, primary_investigator, approver_2): - try: - subject = 'Research Ramp-up Plan Denied' - msg = Message(subject, - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) + from crc import env + subject = 'Research Ramp-up Plan Denied' - from crc import env, mail - template = env.get_template('ramp_up_denied_first_approver.txt') - template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} - msg.body = template.render(template_vars) - template = env.get_template('ramp_up_denied_first_approver.html') - msg.html = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.txt') + template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2} + content = template.render(template_vars) + template = env.get_template('ramp_up_denied_first_approver.html') + 
content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=msg.body, content_html=msg.html, approval_id=approval_id) + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval_id) - mail.send(msg) - except Exception as e: - return str(e) + result = send_mail(subject, sender, recipients, content, content_html) + return result diff --git a/tests/test_email_service.py b/tests/test_email_service.py new file mode 100644 index 00000000..9e0f2e57 --- /dev/null +++ b/tests/test_email_service.py @@ -0,0 +1,42 @@ +from tests.base_test import BaseTest + +from crc import session +from crc.models.approval import ApprovalModel, ApprovalStatus +from crc.models.email import EmailModel +from crc.services.email_service import EmailService + + +class TestEmailService(BaseTest): + + def test_add_email(self): + self.load_example_data() + study = self.create_study() + workflow = self.create_workflow('random_fact') + + approval = ApprovalModel( + study=study, + workflow=workflow, + approver_uid='lb3dp', + status=ApprovalStatus.PENDING.value, + version=1 + ) + session.add(approval) + session.commit() + + subject = 'Email Subject' + sender = 'sender@sartography.com' + recipients = ['recipient@sartography.com', 'back@sartography.com'] + content = 'Content for this email' + content_html = '

Hypertext Markup Language content for this email

' + + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + content=content, content_html=content_html, approval_id=approval.id) + + email_model = EmailModel.query.first() + + self.assertEqual(email_model.subject, subject) + self.assertEqual(email_model.sender, sender) + self.assertEqual(email_model.recipients, str(recipients)) + self.assertEqual(email_model.content, content) + self.assertEqual(email_model.content_html, content_html) + self.assertEqual(email_model.approval, approval) diff --git a/tests/test_mails.py b/tests/test_mails.py index 916d6ff7..5408e517 100644 --- a/tests/test_mails.py +++ b/tests/test_mails.py @@ -3,6 +3,7 @@ from tests.base_test import BaseTest from crc import mail, session from crc.models.approval import ApprovalModel, ApprovalStatus +from crc.models.email import EmailModel from crc.services.mails import ( send_ramp_up_submission_email, send_ramp_up_approval_request_email, @@ -54,6 +55,9 @@ class TestMails(BaseTest): self.assertIn(self.approver_2, outbox[1].body) self.assertIn(self.approver_2, outbox[1].html) + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + def test_send_ramp_up_approval_request_email(self): with mail.record_messages() as outbox: send_ramp_up_approval_request_email(self.sender, self.recipients, self.approval.id, @@ -64,6 +68,9 @@ class TestMails(BaseTest): self.assertIn(self.primary_investigator, outbox[0].body) self.assertIn(self.primary_investigator, outbox[0].html) + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + def test_send_ramp_up_approval_request_first_review_email(self): with mail.record_messages() as outbox: send_ramp_up_approval_request_first_review_email( @@ -75,6 +82,9 @@ class TestMails(BaseTest): self.assertIn(self.primary_investigator, outbox[0].body) self.assertIn(self.primary_investigator, outbox[0].html) + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + def test_send_ramp_up_approved_email(self): with 
mail.record_messages() as outbox: send_ramp_up_approved_email(self.sender, self.recipients, self.approval.id, self.approver_1) @@ -91,6 +101,9 @@ class TestMails(BaseTest): self.assertIn(self.approver_2, outbox[1].body) self.assertIn(self.approver_2, outbox[1].html) + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 2) + def test_send_ramp_up_denied_email(self): with mail.record_messages() as outbox: send_ramp_up_denied_email(self.sender, self.recipients, self.approval.id, self.approver_1) @@ -98,6 +111,9 @@ class TestMails(BaseTest): self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) + def test_send_send_ramp_up_denied_email_to_approver(self): with mail.record_messages() as outbox: send_ramp_up_denied_email_to_approver( @@ -109,3 +125,6 @@ class TestMails(BaseTest): self.assertIn(self.primary_investigator, outbox[0].html) self.assertIn(self.approver_2, outbox[0].body) self.assertIn(self.approver_2, outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) From 0608ffa08a17116aea5f1694d91029aa9ae6d6c0 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 16 Jun 2020 12:26:25 -0400 Subject: [PATCH 06/44] Restricting the admin endpoints to be admin only, and adding a bit of configuration. 
--- crc/__init__.py | 7 +----- crc/api/admin.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 6 deletions(-) create mode 100644 crc/api/admin.py diff --git a/crc/__init__.py b/crc/__init__.py index 62f62de0..a211b0fa 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -39,15 +39,10 @@ ma = Marshmallow(app) from crc import models from crc import api +from crc.api import admin connexion_app.add_api('api.yml', base_path='/v1.0') -# Admin app -admin = Admin(app) -admin.add_view(ModelView(models.study.StudyModel, db.session)) -admin.add_view(ModelView(models.approval.ApprovalModel, db.session)) -admin.add_view(ModelView(models.user.UserModel, db.session)) -admin.add_view(ModelView(models.workflow.WorkflowModel, db.session)) # Convert list of allowed origins to list of regexes origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']] diff --git a/crc/api/admin.py b/crc/api/admin.py new file mode 100644 index 00000000..26a1b181 --- /dev/null +++ b/crc/api/admin.py @@ -0,0 +1,56 @@ +# Admin app + +from flask import url_for +from flask_admin import Admin +from flask_admin.contrib import sqla +from flask_admin.contrib.sqla import ModelView +from werkzeug.utils import redirect + +from crc import db, app +from crc.api.user import verify_token, verify_token_admin +from crc.models.approval import ApprovalModel +from crc.models.file import FileModel +from crc.models.study import StudyModel +from crc.models.user import UserModel +from crc.models.workflow import WorkflowModel + + +class AdminModelView(sqla.ModelView): + can_create = False + can_edit = False + can_delete = False + page_size = 50 # the number of entries to display on the list view + column_exclude_list = ['bpmn_workflow_json', ] + column_display_pk = True + can_export = True + + def is_accessible(self): + return verify_token_admin() + + def inaccessible_callback(self, name, **kwargs): + # redirect to login page if user doesn't 
have access + return redirect(url_for('home')) + +class UserView(AdminModelView): + column_filters = ['uid'] + +class StudyView(AdminModelView): + column_filters = ['id', 'primary_investigator_id'] + column_searchable_list = ['title'] + +class ApprovalView(AdminModelView): + column_filters = ['study_id', 'approver_uid'] + +class WorkflowView(AdminModelView): + column_filters = ['study_id', 'id'] + +class FileView(AdminModelView): + column_filters = ['workflow_id'] + +admin = Admin(app) + +admin.add_view(StudyView(StudyModel, db.session)) +admin.add_view(ApprovalView(ApprovalModel, db.session)) +admin.add_view(UserView(UserModel, db.session)) +admin.add_view(WorkflowView(WorkflowModel, db.session)) +admin.add_view(FileView(FileModel, db.session)) From 1b9166dcb7e0051fe2d8e4f8e1856f010836a1da Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Tue, 16 Jun 2020 13:34:21 -0400 Subject: [PATCH 07/44] Cleaning up the merge, which resulted in some lost code. --- crc/models/api_models.py | 7 ++++--- crc/services/workflow_service.py | 4 +--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index f98a1b13..53706a75 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -120,7 +120,7 @@ class NavigationItemSchema(ma.Schema): class WorkflowApi(object): def __init__(self, id, status, next_task, navigation, - spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated): + spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title): self.id = id self.status = status self.next_task = next_task # The next task that requires user input. 
@@ -131,13 +131,14 @@ class WorkflowApi(object): self.total_tasks = total_tasks self.completed_tasks = completed_tasks self.last_updated = last_updated + self.title = title class WorkflowApiSchema(ma.Schema): class Meta: model = WorkflowApi fields = ["id", "status", "next_task", "navigation", "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks", - "last_updated"] + "last_updated", "title"] unknown = INCLUDE status = EnumField(WorkflowStatus) @@ -148,7 +149,7 @@ class WorkflowApiSchema(ma.Schema): def make_workflow(self, data, **kwargs): keys = ['id', 'status', 'next_task', 'navigation', 'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks", - "last_updated"] + "last_updated", "title"] filtered_fields = {key: data[key] for key in keys} filtered_fields['next_task'] = TaskSchema().make_task(data['next_task']) return WorkflowApi(**filtered_fields) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index b6769458..a8860886 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -9,8 +9,6 @@ from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask from SpiffWorkflow.specs import CancelTask, StartTask -from SpiffWorkflow.util.deep_merge import DeepMerge -from flask import g from jinja2 import Template from crc import db, app @@ -24,7 +22,7 @@ from crc.models.workflow import WorkflowModel, WorkflowStatus from crc.services.file_service import FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService -from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine +from crc.services.workflow_processor import WorkflowProcessor class WorkflowService(object): From d4a285883f016a9e1d86e1b090962c69700a8332 Mon Sep 17 00:00:00 2001 From: Carlos 
Lopez Date: Tue, 16 Jun 2020 18:42:36 -0600 Subject: [PATCH 08/44] Email script --- crc/scripts/email.py | 81 +++++++++++++++++++++++++++++++++++++ crc/scripts/fact_service.py | 2 +- tests/data/email/email.bpmn | 58 ++++++++++++++++++++++++++ tests/test_email_script.py | 30 ++++++++++++++ 4 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 crc/scripts/email.py create mode 100644 tests/data/email/email.bpmn create mode 100644 tests/test_email_script.py diff --git a/crc/scripts/email.py b/crc/scripts/email.py new file mode 100644 index 00000000..2958fb29 --- /dev/null +++ b/crc/scripts/email.py @@ -0,0 +1,81 @@ +from jinja2 import Template + +from crc.api.common import ApiError +from crc.scripts.script import Script +from crc.services.ldap_service import LdapService +from crc.services.mails import send_mail + + +class Email(Script): + """This Script allows to be introduced as part of a workflow and called from there, specifying + recipients and content """ + + def get_description(self): + return """ +Creates an email, using the provided arguments (a list of UIDs)" +Each argument will be used to look up personal information needed for +the email creation. + +Example: +Email Subject ApprvlApprvr1 PIComputingID +""" + + def do_task_validate_only(self, task, *args, **kwargs): + self.get_emails(task, args) + + def do_task(self, task, *args, **kwargs): + subject = self.get_subject(task, args) + recipients = self.get_emails(task, args) + content = self.get_content(task) + if recipients: + send_mail( + subject='Test Subject', + sender='sender@sartography.com', + recipients=recipients, + content=content, + content_html=content + ) + + def get_emails(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one argument. The " + "name of the variable in the task data that contains user" + "id to process. 
Multiple arguments are accepted.") + emails = [] + for arg in args[1:]: + uid = task.workflow.script_engine.evaluate_expression(task, arg) + user_info = LdapService.user_info(uid) + email = user_info.email_address + emails.append(user_info.email_address) + if not isinstance(email, str): + raise ApiError(code="invalid_argument", + message="The Email script requires at least 1 UID argument. The " + "name of the variable in the task data that contains subject and" + " user ids to process. This must point to an array or a string, but " + "it currently points to a %s " % emails.__class__.__name__) + + return emails + + def get_subject(self, task, args): + if len(args) < 1: + raise ApiError(code="missing_argument", + message="Email script requires at least one subject argument. The " + "name of the variable in the task data that contains subject" + " to process. Multiple arguments are accepted.") + subject = task.workflow.script_engine.evaluate_expression(task, args[0]) + if not isinstance(subject, str): + raise ApiError(code="invalid_argument", + message="The Email script requires 1 argument. The " + "the name of the variable in the task data that contains user" + "ids to process. 
This must point to an array or a string, but " + "it currently points to a %s " % emails.__class__.__name__) + + return subject + + def get_content(self, task): + content = task.task_spec.documentation + template = Template(content) + rendered = template.render({'approver': 'Bossman', 'not_here': 22}) + + return rendered diff --git a/crc/scripts/fact_service.py b/crc/scripts/fact_service.py index c4468721..b3701312 100644 --- a/crc/scripts/fact_service.py +++ b/crc/scripts/fact_service.py @@ -5,7 +5,7 @@ from crc.scripts.script import Script class FactService(Script): def get_description(self): - return """Just your basic class that can pull in data from a few api endpoints and + return """Just your basic class that can pull in data from a few api endpoints and do a basic task.""" def get_cat(self): diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn new file mode 100644 index 00000000..b2221f24 --- /dev/null +++ b/tests/data/email/email.bpmn @@ -0,0 +1,58 @@ + + + + + Flow_1synsig + + + Flow_1xlrgne + + + Email content to be delivered to {{ approver }} + Flow_08n2npe + Flow_1xlrgne + Email Subject ApprvlApprvr1 PIComputingID + + + + + + + + + + + + Flow_1synsig + Flow_08n2npe + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/test_email_script.py b/tests/test_email_script.py new file mode 100644 index 00000000..9ac93e07 --- /dev/null +++ b/tests/test_email_script.py @@ -0,0 +1,30 @@ +from tests.base_test import BaseTest + +from crc.services.file_service import FileService +from crc.scripts.email import Email +from crc.services.workflow_processor import WorkflowProcessor +from crc.api.common import ApiError + +from crc import db +# from crc.models.approval import ApprovalModel + + +class TestEmailScript(BaseTest): + + def test_do_task(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('email') + processor = WorkflowProcessor(workflow) + task = processor.next_task() 
+ processor.complete_task(task) + task = processor.next_task() + task.data = { + 'PIComputingID': 'dhf8r', + 'ApprvlApprvr1': 'lb3dp', + 'Subject': 'Email Script needs your help' + } + + script = Email() + script.do_task(task, 'Subject', 'PIComputingID', 'ApprvlApprvr1') + self.assertTrue(True) From 2ff836019f39c466db6a3ec17db01668550a5695 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Tue, 16 Jun 2020 18:55:18 -0600 Subject: [PATCH 09/44] Sonarcloud fix --- crc/scripts/email.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index 2958fb29..3dc9cb11 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -69,7 +69,7 @@ Email Subject ApprvlApprvr1 PIComputingID message="The Email script requires 1 argument. The " "the name of the variable in the task data that contains user" "ids to process. This must point to an array or a string, but " - "it currently points to a %s " % emails.__class__.__name__) + "it currently points to a %s " % subject.__class__.__name__) return subject From c730a7b1ec0c4304b5c95264abb49ad5f35166ed Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 08:53:02 -0600 Subject: [PATCH 10/44] Sending subject and using default sender --- config/default.py | 1 + crc/scripts/email.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/config/default.py b/config/default.py index 9a606a4c..3faaef7b 100644 --- a/config/default.py +++ b/config/default.py @@ -45,6 +45,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1)) # Email configuration +DEFAULT_SENDER = 'askresearch@virginia.edu' FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com'] MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True) MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io') diff --git a/crc/scripts/email.py b/crc/scripts/email.py index 
3dc9cb11..18e5df86 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -1,5 +1,6 @@ from jinja2 import Template +from crc import app from crc.api.common import ApiError from crc.scripts.script import Script from crc.services.ldap_service import LdapService @@ -29,8 +30,8 @@ Email Subject ApprvlApprvr1 PIComputingID content = self.get_content(task) if recipients: send_mail( - subject='Test Subject', - sender='sender@sartography.com', + subject=subject, + sender=app.config['DEFAULT_SENDER'], recipients=recipients, content=content, content_html=content From 1844c939199462cfcfcd33e6269f7661f1625ccc Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Wed, 17 Jun 2020 11:35:06 -0400 Subject: [PATCH 11/44] STG-26 - basic test case for a looping task Criteria : task.multi_instance_type == 'looping' to terminate, use the standard endpoint for submitting form data with a query variable of terminate_loop=true Will likely need two buttons: "Submit and quit" "Submit and add another" or something similar --- crc/api.yml | 6 +++ crc/api/workflow.py | 5 +- tests/base_test.py | 29 ++++++---- tests/data/looping_task/looping_task.bpmn | 45 ++++++++++++++++ tests/test_looping_task.py | 54 +++++++++++++++++++ .../test_workflow_processor_multi_instance.py | 3 +- 6 files changed, 131 insertions(+), 11 deletions(-) create mode 100644 tests/data/looping_task/looping_task.bpmn create mode 100644 tests/test_looping_task.py diff --git a/crc/api.yml b/crc/api.yml index 64f6086a..24cd2d5d 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -626,6 +626,12 @@ paths: schema: type: string format: uuid + - name: terminate_loop + in: query + required: false + description: Terminate the loop on a looping task + schema: + type: boolean put: operationId: crc.api.workflow.update_task summary: Exclusively for User Tasks, submits form data as a flat set of key/values. 
diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 655a85e7..890a4de5 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -175,7 +175,7 @@ def set_current_task(workflow_id, task_id): return WorkflowApiSchema().dump(workflow_api_model) -def update_task(workflow_id, task_id, body): +def update_task(workflow_id, task_id, body, terminate_loop=None): workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() if workflow_model is None: @@ -191,6 +191,9 @@ def update_task(workflow_id, task_id, body): if task.state != task.READY: raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. " "Consider calling a token reset to make this task Ready.") + if terminate_loop: + task.terminate_loop() + task.update_data(body) processor.complete_task(task) processor.do_engine_steps() diff --git a/tests/base_test.py b/tests/base_test.py index 93294193..3bdae053 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -290,7 +290,7 @@ class BaseTest(unittest.TestCase): self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id) return workflow_api - def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"): + def complete_form(self, workflow_in, task_in, dict_data, error_code=None, terminate_loop=None, user_uid="dhf8r"): prev_completed_task_count = workflow_in.completed_tasks if isinstance(task_in, dict): task_id = task_in["id"] @@ -299,11 +299,16 @@ class BaseTest(unittest.TestCase): user = session.query(UserModel).filter_by(uid=user_uid).first() self.assertIsNotNone(user) - - rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), - headers=self.logged_in_headers(user=user), - content_type="application/json", - data=json.dumps(dict_data)) + if terminate_loop: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data?terminate_loop=true' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + 
content_type="application/json", + data=json.dumps(dict_data)) + else: + rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id), + headers=self.logged_in_headers(user=user), + content_type="application/json", + data=json.dumps(dict_data)) if error_code: self.assert_failure(rv, error_code=error_code) return @@ -316,7 +321,9 @@ class BaseTest(unittest.TestCase): # The total number of tasks may change over time, as users move through gateways # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created... self.assertIsNotNone(workflow.total_tasks) - self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) + # presumably, we also need to deal with sequential items here too . . + if not task_in.multi_instance_type == 'looping': + self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) # Assure a record exists in the Task Events task_events = session.query(TaskEventModel) \ @@ -335,7 +342,8 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.name, event.task_name) self.assertEqual(task_in.title, event.task_title) self.assertEqual(task_in.type, event.task_type) - self.assertEqual("COMPLETED", event.task_state) + if not task_in.multi_instance_type == 'looping': + self.assertEqual("COMPLETED", event.task_state) # Not sure what voodoo is happening inside of marshmallow to get me in this state. 
if isinstance(task_in.multi_instance_type, MultiInstanceType): @@ -344,7 +352,10 @@ class BaseTest(unittest.TestCase): self.assertEqual(task_in.multi_instance_type, event.mi_type) self.assertEqual(task_in.multi_instance_count, event.mi_count) - self.assertEqual(task_in.multi_instance_index, event.mi_index) + if task_in.multi_instance_type == 'looping' and not terminate_loop: + self.assertEqual(task_in.multi_instance_index+1, event.mi_index) + else: + self.assertEqual(task_in.multi_instance_index, event.mi_index) self.assertEqual(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) diff --git a/tests/data/looping_task/looping_task.bpmn b/tests/data/looping_task/looping_task.bpmn new file mode 100644 index 00000000..0c3929bf --- /dev/null +++ b/tests/data/looping_task/looping_task.bpmn @@ -0,0 +1,45 @@ + + + + + Flow_0vlor2k + + + + + + + + + Flow_0vlor2k + Flow_1tvod7v + + + + Flow_1tvod7v + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/test_looping_task.py b/tests/test_looping_task.py new file mode 100644 index 00000000..87701ef4 --- /dev/null +++ b/tests/test_looping_task.py @@ -0,0 +1,54 @@ +from unittest.mock import patch + +from crc import session +from crc.models.api_models import MultiInstanceType +from crc.models.study import StudyModel +from crc.models.workflow import WorkflowStatus +from crc.services.study_service import StudyService +from crc.services.workflow_processor import WorkflowProcessor +from crc.services.workflow_service import WorkflowService +from tests.base_test import BaseTest + + +class TestWorkflowProcessorLoopingTask(BaseTest): + """Tests the Workflow Processor as it deals with a Looping task""" + + def _populate_form_with_random_data(self, task): + api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) + WorkflowService.populate_form_with_random_data(task, api_task, required_only=False) + + def get_processor(self, study_model, spec_model): + workflow_model = 
StudyService._create_workflow_model(study_model, spec_model) + return WorkflowProcessor(workflow_model) + + def test_create_and_complete_workflow(self): + # This depends on getting a list of investigators back from the protocol builder. + + workflow = self.create_workflow('looping_task') + task = self.get_workflow_api(workflow).next_task + + self.assertEqual("GetNames", task.name) + + self.assertEqual(task.multi_instance_type, 'looping') + self.assertEqual(1, task.multi_instance_index) + self.complete_form(workflow,task,{'GetNames_MICurrentVar':{'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) + task = self.get_workflow_api(workflow).next_task + + self.assertEqual(task.multi_instance_type,'looping') + self.assertEqual(2, task.multi_instance_index) + self.complete_form(workflow, + task, + {'GetNames_MICurrentVar':{'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, + terminate_loop=True) + + task = self.get_workflow_api(workflow).next_task + self.assertEqual(task.name,'Event_End') + self.assertEqual(workflow.completed_tasks,workflow.total_tasks) + self.assertEqual(task.data, {'GetNames_MICurrentVar': 2, + 'GetNames_MIData': {'1': {'Name': 'Peter Norvig', + 'Nickname': 'Pete'}, + '2': {'Name': 'Stuart Russell', + 'Nickname': 'Stu'}}}) + + + diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/test_workflow_processor_multi_instance.py index aefb73f1..a4c76dd0 100644 --- a/tests/test_workflow_processor_multi_instance.py +++ b/tests/test_workflow_processor_multi_instance.py @@ -32,7 +32,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest): 'error': 'Unable to locate a user with id asd3v in LDAP'}} def _populate_form_with_random_data(self, task): - WorkflowProcessor.populate_form_with_random_data(task) + + WorkflowService.populate_form_with_random_data(task) def get_processor(self, study_model, spec_model): workflow_model = StudyService._create_workflow_model(study_model, spec_model) From 3b57adb84caf864f80c5dd47ff9684ecd704bc50 Mon Sep 17 00:00:00 2001 From: 
Dan Funk Date: Wed, 17 Jun 2020 17:11:15 -0400 Subject: [PATCH 12/44] Continuing a major refactor. Some important points: * TaskEvents now contain the data for each event as it was when the task was completed. * When loading a task for the front end, if the task was completed previously, we take that data, and overwrite it with the lastest data, allowing users to see previously entered values. * Pulling in the Admin branch, as there are changes in that branch that are critical to seeing what is happening when we do this thing. * Moved code for converting a workflow to an API ready data stricture into the Workflow service where it belongs, and out of the API. * Hard resets just convert to using the latest spec, they don't try to keep the data from the last task. There is a better way. * Moving to a previous task does not attept to keep the data from the last completed task. * Added a function that will fix all the existing RRT data by adding critical data into the TaskEvent model. This can be called with from the flask command line tool. 
--- crc/__init__.py | 7 + crc/api/admin.py | 16 ++ crc/api/workflow.py | 62 +------- crc/models/api_models.py | 1 + crc/services/workflow_processor.py | 26 +--- crc/services/workflow_service.py | 140 +++++++++++++++++- .../{3876e130664e_.py => 1fdd1bdb600e_.py} | 10 +- tests/test_workflow_service.py | 58 +++++++- 8 files changed, 232 insertions(+), 88 deletions(-) rename migrations/versions/{3876e130664e_.py => 1fdd1bdb600e_.py} (78%) diff --git a/crc/__init__.py b/crc/__init__.py index a211b0fa..59ffeac7 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -93,3 +93,10 @@ def clear_db(): """Load example data into the database.""" from example_data import ExampleDataLoader ExampleDataLoader.clean_db() + +@app.cli.command() +def rrt_data_fix(): + """Finds all the empty task event logs, and populates + them with good wholesome data.""" + from crc.services.workflow_service import WorkflowService + WorkflowService.fix_legacy_data_model_for_rrt() diff --git a/crc/api/admin.py b/crc/api/admin.py index 26a1b181..6a27b6da 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -1,15 +1,18 @@ # Admin app +import json from flask import url_for from flask_admin import Admin from flask_admin.contrib import sqla from flask_admin.contrib.sqla import ModelView from werkzeug.utils import redirect +from jinja2 import Markup from crc import db, app from crc.api.user import verify_token, verify_token_admin from crc.models.approval import ApprovalModel from crc.models.file import FileModel +from crc.models.stats import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel from crc.models.workflow import WorkflowModel @@ -47,6 +50,18 @@ class WorkflowView(AdminModelView): class FileView(AdminModelView): column_filters = ['workflow_id'] +def json_formatter(view, context, model, name): + value = getattr(model, name) + json_value = json.dumps(value, ensure_ascii=False, indent=2) + return Markup('
{}
'.format(json_value)) + +class TaskEventView(AdminModelView): + column_filters = ['workflow_id', 'action'] + column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'task_data', 'date'] + column_formatters = { + 'task_data': json_formatter, + } + admin = Admin(app) admin.add_view(StudyView(StudyModel, db.session)) @@ -54,3 +69,4 @@ admin.add_view(ApprovalView(ApprovalModel, db.session)) admin.add_view(UserView(UserModel, db.session)) admin.add_view(WorkflowView(WorkflowModel, db.session)) admin.add_view(FileView(FileModel, db.session)) +admin.add_view(TaskEventView(TaskEventModel, db.session)) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 4d1667dd..14c40df5 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -96,66 +96,10 @@ def delete_workflow_specification(spec_id): session.commit() -def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None): - """Returns an API model representing the state of the current workflow, if requested, and - possible, next_task is set to the current_task.""" - - nav_dict = processor.bpmn_workflow.get_nav_list() - navigation = [] - for nav_item in nav_dict: - spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) - if 'description' in nav_item: - nav_item['title'] = nav_item.pop('description') - # fixme: duplicate code from the workflow_service. Should only do this in one place. - if ' ' in nav_item['title']: - nav_item['title'] = nav_item['title'].partition(' ')[2] - else: - nav_item['title'] = "" - if spiff_task: - nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) - nav_item['title'] = nav_item['task'].title # Prefer the task title. 
- else: - nav_item['task'] = None - if not 'is_decision' in nav_item: - nav_item['is_decision'] = False - - navigation.append(NavigationItem(**nav_item)) - NavigationItemSchema().dump(nav_item) - - spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() - workflow_api = WorkflowApi( - id=processor.get_workflow_id(), - status=processor.get_status(), - next_task=None, - navigation=navigation, - workflow_spec_id=processor.workflow_spec_id, - spec_version=processor.get_version_string(), - is_latest_spec=processor.is_latest_spec, - total_tasks=len(navigation), - completed_tasks=processor.workflow_model.completed_tasks, - last_updated=processor.workflow_model.last_updated, - ) - if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. - # This may or may not work, sometimes there is no next task to complete. - next_task = processor.next_task() - if next_task: - latest_event = session.query(TaskEventModel) \ - .filter_by(workflow_id=processor.workflow_model.id) \ - .filter_by(task_name=next_task.task_spec.name) \ - .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ - .order_by(TaskEventModel.date.desc()).first() - if latest_event: - next_task.data = DeepMerge.merge(next_task.data, latest_event.task_data) - - workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) - - return workflow_api - - def get_workflow(workflow_id, soft_reset=False, hard_reset=False): workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) - workflow_api_model = __get_workflow_api_model(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) @@ -181,7 +125,7 @@ def set_current_task(workflow_id, task_id): WorkflowService.log_task_action(user_uid, workflow_model, 
spiff_task, WorkflowService.TASK_ACTION_TOKEN_RESET, version=processor.get_version_string()) - workflow_api_model = __get_workflow_api_model(processor, spiff_task) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task) return WorkflowApiSchema().dump(workflow_api_model) @@ -209,7 +153,7 @@ def update_task(workflow_id, task_id, body): WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, WorkflowService.TASK_ACTION_COMPLETE, version=processor.get_version_string(), updated_data=spiff_task.data) - workflow_api_model = __get_workflow_api_model(processor) + workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 53706a75..361b9183 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -36,6 +36,7 @@ class Task(object): PROP_OPTIONS_FILE = "spreadsheet.name" PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column" PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column" + PROP_OPTIONS_READ_ONLY = "read_only" PROP_LDAP_LOOKUP = "ldap.lookup" VALIDATION_REQUIRED = "required" FIELD_TYPE_AUTO_COMPLETE = "autocomplete" diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index e5cbe0a3..c84aa3fa 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -102,14 +102,15 @@ class WorkflowProcessor(object): def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False): """Create a Workflow Processor based on the serialized information available in the workflow model. - If soft_reset is set to true, it will try to use the latest version of the workflow specification. - If hard_reset is set to true, it will create a new Workflow, but embed the data from the last - completed task in the previous workflow. 
+ If soft_reset is set to true, it will try to use the latest version of the workflow specification + without resetting to the beginning of the workflow. This will work for some minor changes to the spec. + If hard_reset is set to true, it will use the latest spec, and start the workflow over from the beginning. + which should work in casees where a soft reset fails. If neither flag is set, it will use the same version of the specification that was used to originally create the workflow model. """ self.workflow_model = workflow_model - if soft_reset or len(workflow_model.dependencies) == 0: + if soft_reset or len(workflow_model.dependencies) == 0: # Depenencies of 0 means the workflow was never started. self.spec_data_files = FileService.get_spec_data_files( workflow_spec_id=workflow_model.workflow_spec_id) else: @@ -216,8 +217,6 @@ class WorkflowProcessor(object): full_version = "v%s (%s)" % (version, files) return full_version - - def update_dependencies(self, spec_data_files): existing_dependencies = FileService.get_spec_data_files( workflow_spec_id=self.workflow_model.workflow_spec_id, @@ -299,25 +298,12 @@ class WorkflowProcessor(object): return WorkflowStatus.waiting def hard_reset(self): - """Recreate this workflow, but keep the data from the last completed task and add - it back into the first task. This may be useful when a workflow specification changes, - and users need to review all the prior steps, but they don't need to reenter all the previous data. - - Returns the new version. + """Recreate this workflow. This will be useful when a workflow specification changes. """ - - # Create a new workflow based on the latest specs. 
self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id) new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id) new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine) new_bpmn_workflow.data = self.bpmn_workflow.data - - # Reset the current workflow to the beginning - which we will consider to be the first task after the root - # element. This feels a little sketchy, but I think it is safe to assume root will have one child. - first_task = self.bpmn_workflow.task_tree.children[0] - first_task.reset_token(reset_data=True) # Clear out the data. - for task in new_bpmn_workflow.get_tasks(SpiffTask.READY): - task.data = first_task.data new_bpmn_workflow.do_engine_steps() self.bpmn_workflow = new_bpmn_workflow diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index a8860886..3b064954 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -1,3 +1,4 @@ +import copy import string from datetime import datetime import random @@ -9,16 +10,17 @@ from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask from SpiffWorkflow.specs import CancelTask, StartTask +from SpiffWorkflow.util.deep_merge import DeepMerge from jinja2 import Template from crc import db, app from crc.api.common import ApiError -from crc.models.api_models import Task, MultiInstanceType +from crc.models.api_models import Task, MultiInstanceType, NavigationItem, NavigationItemSchema, WorkflowApi from crc.models.file import LookupDataModel from crc.models.stats import TaskEventModel from crc.models.study import StudyModel from crc.models.user import UserModel -from crc.models.workflow import WorkflowModel, WorkflowStatus +from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel from crc.services.file_service import 
FileService from crc.services.lookup_service import LookupService from crc.services.study_service import StudyService @@ -179,13 +181,81 @@ class WorkflowService(object): def __get_options(self): pass - @staticmethod def _random_string(string_length=10): """Generate a random string of fixed length """ letters = string.ascii_lowercase return ''.join(random.choice(letters) for i in range(string_length)) + @staticmethod + def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None): + """Returns an API model representing the state of the current workflow, if requested, and + possible, next_task is set to the current_task.""" + + nav_dict = processor.bpmn_workflow.get_nav_list() + navigation = [] + for nav_item in nav_dict: + spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id']) + if 'description' in nav_item: + nav_item['title'] = nav_item.pop('description') + # fixme: duplicate code from the workflow_service. Should only do this in one place. + if ' ' in nav_item['title']: + nav_item['title'] = nav_item['title'].partition(' ')[2] + else: + nav_item['title'] = "" + if spiff_task: + nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False) + nav_item['title'] = nav_item['task'].title # Prefer the task title. 
+ else: + nav_item['task'] = None + if not 'is_decision' in nav_item: + nav_item['is_decision'] = False + + navigation.append(NavigationItem(**nav_item)) + NavigationItemSchema().dump(nav_item) + + spec = db.session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first() + workflow_api = WorkflowApi( + id=processor.get_workflow_id(), + status=processor.get_status(), + next_task=None, + navigation=navigation, + workflow_spec_id=processor.workflow_spec_id, + spec_version=processor.get_version_string(), + is_latest_spec=processor.is_latest_spec, + total_tasks=len(navigation), + completed_tasks=processor.workflow_model.completed_tasks, + last_updated=processor.workflow_model.last_updated, + title=spec.display_name + ) + if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks. + # This may or may not work, sometimes there is no next task to complete. + next_task = processor.next_task() + if next_task: + workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) + + return workflow_api + + @staticmethod + def get_previously_submitted_data(workflow_id, task): + """ If the user has completed this task previously, find that data in the task events table, and return it.""" + latest_event = db.session.query(TaskEventModel) \ + .filter_by(workflow_id=workflow_id) \ + .filter_by(task_name=task.task_spec.name) \ + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ + .order_by(TaskEventModel.date.desc()).first() + if latest_event: + if latest_event.task_data is not None: + return latest_event.task_data + else: + app.logger.error("missing_task_data", "We have lost data for workflow %i, task %s, it is not " + "in the task event model, " + "and it should be." 
% (workflow_id, task.task_spec.name)) + return {} + else: + return {} + + @staticmethod def spiff_task_to_api_task(spiff_task, add_docs_and_forms=False): task_type = spiff_task.task_spec.__class__.__name__ @@ -342,3 +412,67 @@ class WorkflowService(object): db.session.add(task_event) db.session.commit() + @staticmethod + def fix_legacy_data_model_for_rrt(): + """ Remove this after use! This is just to fix RRT so the data is handled correctly. + + Utility that is likely called via the flask command line, it will loop through all the + workflows in the system and attempt to add the right data into the task action log so that + users do not have to re fill out all of the forms if they start over or go back in the workflow. + Viciously inefficient, but should only have to run one time for RRT""" + workflows = db.session.query(WorkflowModel).all() + for workflow_model in workflows: + task_logs = db.session.query(TaskEventModel) \ + .filter(TaskEventModel.workflow_id == workflow_model.id) \ + .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \ + .order_by(TaskEventModel.date.desc()).all() + + processor = WorkflowProcessor(workflow_model) + # Grab all the data from last task completed, which will be everything in this + # rrt situation because of how we were keeping all the data at the time. + latest_data = processor.next_task().data + + # Move forward in the task spec tree, dropping any data that would have been + # added in subsequent tasks, just looking at form data, will not track the automated + # task data additions, hopefully this doesn't hang us. + for log in task_logs: + if log.task_data is not None: # Only do this if the task event does not have data populated in it. + continue + data = copy.deepcopy(latest_data) # Or you end up with insane crazy issues. 
+ # In the simple case of RRT, there is exactly one task for the given task_spec + task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0] + data = WorkflowService.__remove_data_added_by_children(data, task.children[0]) + log.task_data = data + db.session.add(log) + + db.session.commit() + + @staticmethod + def __remove_data_added_by_children(latest_data, child_task): + """Removes data from latest_data that would be added by the child task or any of it's children.""" + if hasattr(child_task.task_spec, 'form'): + for field in child_task.task_spec.form.fields: + latest_data.pop(field.id, None) + if field.has_property(Task.PROP_OPTIONS_READ_ONLY) and \ + field.get_property(Task.PROP_OPTIONS_READ_ONLY).lower().strip() == "true": + continue # Don't pop off read only fields. + if field.has_property(Task.PROP_OPTIONS_REPEAT): + group = field.get_property(Task.PROP_OPTIONS_REPEAT) + group_data = [] + if group in latest_data: + for item in latest_data[group]: + item.pop(field.id, None) + if item: + group_data.append(item) + latest_data[group] = group_data + if not latest_data[group]: + latest_data.pop(group, None) + if isinstance(child_task.task_spec, BusinessRuleTask): + for output in child_task.task_spec.dmnEngine.decisionTable.outputs: + latest_data.pop(output.name, None) + for child in child_task.children: + latest_data = WorkflowService.__remove_data_added_by_children(latest_data, child) + return latest_data + + + diff --git a/migrations/versions/3876e130664e_.py b/migrations/versions/1fdd1bdb600e_.py similarity index 78% rename from migrations/versions/3876e130664e_.py rename to migrations/versions/1fdd1bdb600e_.py index 31e7ce13..dff1fdae 100644 --- a/migrations/versions/3876e130664e_.py +++ b/migrations/versions/1fdd1bdb600e_.py @@ -1,8 +1,8 @@ """empty message -Revision ID: 3876e130664e -Revises: 5064b72284b7 -Create Date: 2020-06-01 15:39:53.937591 +Revision ID: 1fdd1bdb600e +Revises: 17597692d0b0 +Create Date: 2020-06-17 16:44:16.427988 """ from 
alembic import op @@ -10,8 +10,8 @@ import sqlalchemy as sa # revision identifiers, used by Alembic. -revision = '3876e130664e' -down_revision = '5064b72284b7' +revision = '1fdd1bdb600e' +down_revision = '17597692d0b0' branch_labels = None depends_on = None diff --git a/tests/test_workflow_service.py b/tests/test_workflow_service.py index 9f3ceda1..6f0fa5e3 100644 --- a/tests/test_workflow_service.py +++ b/tests/test_workflow_service.py @@ -1,7 +1,14 @@ +import json + from tests.base_test import BaseTest from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService +from SpiffWorkflow import Task as SpiffTask, WorkflowException +from example_data import ExampleDataLoader +from crc import db +from crc.models.stats import TaskEventModel +from crc.models.api_models import Task class TestWorkflowService(BaseTest): @@ -78,4 +85,53 @@ class TestWorkflowService(BaseTest): task = processor.next_task() task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) WorkflowService.populate_form_with_random_data(task, task_api, required_only=False) - self.assertTrue(isinstance(task.data["sponsor"], dict)) \ No newline at end of file + self.assertTrue(isinstance(task.data["sponsor"], dict)) + + def test_fix_legacy_data_model_for_rrt(self): + ExampleDataLoader().load_rrt() # Make sure the research_rampup is loaded, as it's not a test spec. + workflow = self.create_workflow('research_rampup') + processor = WorkflowProcessor(workflow, validate_only=True) + + # Use the test spec code to complete the workflow of research rampup. 
+ while not processor.bpmn_workflow.is_completed(): + processor.bpmn_workflow.do_engine_steps() + tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY) + for task in tasks: + task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) + WorkflowService.populate_form_with_random_data(task, task_api, False) + task.complete() + # create the task events with no task_data in them. + WorkflowService.log_task_action('dhf8r', workflow, task, + WorkflowService.TASK_ACTION_COMPLETE, + version=processor.get_version_string(), + updated_data=None) + processor.save() + db.session.commit() + + WorkflowService.fix_legacy_data_model_for_rrt() + + # All tasks should now have data associated with them. + task_logs = db.session.query(TaskEventModel) \ + .filter(TaskEventModel.workflow_id == workflow.id) \ + .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \ + .order_by(TaskEventModel.date).all() # Get them back in order. + + self.assertEqual(17, len(task_logs)) + for log in task_logs: + task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0] + self.assertIsNotNone(log.task_data) + # Each task should have the data in the form for that task in the task event. + if hasattr(task.task_spec, 'form'): + for field in task.task_spec.form.fields: + if field.has_property(Task.PROP_OPTIONS_REPEAT): + self.assertIn(field.get_property(Task.PROP_OPTIONS_REPEAT), log.task_data) + else: + self.assertIn(field.id, log.task_data) + + # Some spot checks: + # The first task should be empty, with all the data removed. + self.assertEqual({}, task_logs[0].task_data) + + # The last task should have all the data. 
+ self.assertDictEqual(processor.bpmn_workflow.last_task.data, task_logs[16].task_data) + From 2ce2dc73b547c721482418e6ad62231f442345cf Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 16:09:38 -0600 Subject: [PATCH 13/44] Rendering proper content & organizing file structure for email tests --- crc/models/ldap.py | 3 +++ crc/scripts/email.py | 18 +++++++++++------- tests/data/email/email.bpmn | 2 +- tests/{ => emails}/test_email_script.py | 0 tests/{ => emails}/test_email_service.py | 0 tests/{ => emails}/test_mails.py | 0 6 files changed, 15 insertions(+), 8 deletions(-) rename tests/{ => emails}/test_email_script.py (100%) rename tests/{ => emails}/test_email_service.py (100%) rename tests/{ => emails}/test_mails.py (100%) diff --git a/crc/models/ldap.py b/crc/models/ldap.py index 7e05eccd..802e0d36 100644 --- a/crc/models/ldap.py +++ b/crc/models/ldap.py @@ -29,6 +29,9 @@ class LdapModel(db.Model): affiliation=", ".join(entry.uvaPersonIAMAffiliation), sponsor_type=", ".join(entry.uvaPersonSponsoredType)) + def proper_name(self): + return f'{self.display_name} - ({self.uid})' + class LdapSchema(SQLAlchemyAutoSchema): class Meta: diff --git a/crc/scripts/email.py b/crc/scripts/email.py index 18e5df86..cbc093e8 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -22,12 +22,14 @@ Email Subject ApprvlApprvr1 PIComputingID """ def do_task_validate_only(self, task, *args, **kwargs): - self.get_emails(task, args) + self.get_subject(task, args) + self.get_users_info(task, args) + self.get_content(task, {}) def do_task(self, task, *args, **kwargs): subject = self.get_subject(task, args) - recipients = self.get_emails(task, args) - content = self.get_content(task) + recipients, display_keys = self.get_users_info(task, args) + content = self.get_content(task, display_keys) if recipients: send_mail( subject=subject, @@ -37,18 +39,20 @@ Email Subject ApprvlApprvr1 PIComputingID content_html=content ) - def get_emails(self, task, args): + def 
get_users_info(self, task, args): if len(args) < 1: raise ApiError(code="missing_argument", message="Email script requires at least one argument. The " "name of the variable in the task data that contains user" "id to process. Multiple arguments are accepted.") emails = [] + display_keys = {} for arg in args[1:]: uid = task.workflow.script_engine.evaluate_expression(task, arg) user_info = LdapService.user_info(uid) email = user_info.email_address emails.append(user_info.email_address) + display_keys[arg] = user_info.proper_name() if not isinstance(email, str): raise ApiError(code="invalid_argument", message="The Email script requires at least 1 UID argument. The " @@ -56,7 +60,7 @@ Email Subject ApprvlApprvr1 PIComputingID " user ids to process. This must point to an array or a string, but " "it currently points to a %s " % emails.__class__.__name__) - return emails + return emails, display_keys def get_subject(self, task, args): if len(args) < 1: @@ -74,9 +78,9 @@ Email Subject ApprvlApprvr1 PIComputingID return subject - def get_content(self, task): + def get_content(self, task, display_keys): content = task.task_spec.documentation template = Template(content) - rendered = template.render({'approver': 'Bossman', 'not_here': 22}) + rendered = template.render(display_keys) return rendered diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn index b2221f24..c3887d68 100644 --- a/tests/data/email/email.bpmn +++ b/tests/data/email/email.bpmn @@ -8,7 +8,7 @@ Flow_1xlrgne - Email content to be delivered to {{ approver }} + Email content to be delivered to {{ ApprvlApprvr1 }} Flow_08n2npe Flow_1xlrgne Email Subject ApprvlApprvr1 PIComputingID diff --git a/tests/test_email_script.py b/tests/emails/test_email_script.py similarity index 100% rename from tests/test_email_script.py rename to tests/emails/test_email_script.py diff --git a/tests/test_email_service.py b/tests/emails/test_email_service.py similarity index 100% rename from 
tests/test_email_service.py rename to tests/emails/test_email_service.py diff --git a/tests/test_mails.py b/tests/emails/test_mails.py similarity index 100% rename from tests/test_mails.py rename to tests/emails/test_mails.py From ddf1f4640cc0df8aed62fdcd79a840aa63996e8b Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 16:10:06 -0600 Subject: [PATCH 14/44] Re-organizing tests file structure --- tests/{ => approval}/test_approvals_api.py | 0 tests/{ => approval}/test_approvals_service.py | 0 tests/{ => approval}/test_request_approval_script.py | 0 tests/{ => files}/test_file_service.py | 0 tests/{ => files}/test_files_api.py | 0 tests/{ => study}/test_study_api.py | 0 tests/{ => study}/test_study_details_documents.py | 0 tests/{ => study}/test_study_service.py | 0 tests/{ => study}/test_update_study_script.py | 0 tests/{ => workflow}/test_workflow_processor.py | 0 tests/{ => workflow}/test_workflow_processor_multi_instance.py | 0 tests/{ => workflow}/test_workflow_service.py | 0 tests/{ => workflow}/test_workflow_spec_api.py | 0 tests/{ => workflow}/test_workflow_spec_validation_api.py | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename tests/{ => approval}/test_approvals_api.py (100%) rename tests/{ => approval}/test_approvals_service.py (100%) rename tests/{ => approval}/test_request_approval_script.py (100%) rename tests/{ => files}/test_file_service.py (100%) rename tests/{ => files}/test_files_api.py (100%) rename tests/{ => study}/test_study_api.py (100%) rename tests/{ => study}/test_study_details_documents.py (100%) rename tests/{ => study}/test_study_service.py (100%) rename tests/{ => study}/test_update_study_script.py (100%) rename tests/{ => workflow}/test_workflow_processor.py (100%) rename tests/{ => workflow}/test_workflow_processor_multi_instance.py (100%) rename tests/{ => workflow}/test_workflow_service.py (100%) rename tests/{ => workflow}/test_workflow_spec_api.py (100%) rename tests/{ => 
workflow}/test_workflow_spec_validation_api.py (100%) diff --git a/tests/test_approvals_api.py b/tests/approval/test_approvals_api.py similarity index 100% rename from tests/test_approvals_api.py rename to tests/approval/test_approvals_api.py diff --git a/tests/test_approvals_service.py b/tests/approval/test_approvals_service.py similarity index 100% rename from tests/test_approvals_service.py rename to tests/approval/test_approvals_service.py diff --git a/tests/test_request_approval_script.py b/tests/approval/test_request_approval_script.py similarity index 100% rename from tests/test_request_approval_script.py rename to tests/approval/test_request_approval_script.py diff --git a/tests/test_file_service.py b/tests/files/test_file_service.py similarity index 100% rename from tests/test_file_service.py rename to tests/files/test_file_service.py diff --git a/tests/test_files_api.py b/tests/files/test_files_api.py similarity index 100% rename from tests/test_files_api.py rename to tests/files/test_files_api.py diff --git a/tests/test_study_api.py b/tests/study/test_study_api.py similarity index 100% rename from tests/test_study_api.py rename to tests/study/test_study_api.py diff --git a/tests/test_study_details_documents.py b/tests/study/test_study_details_documents.py similarity index 100% rename from tests/test_study_details_documents.py rename to tests/study/test_study_details_documents.py diff --git a/tests/test_study_service.py b/tests/study/test_study_service.py similarity index 100% rename from tests/test_study_service.py rename to tests/study/test_study_service.py diff --git a/tests/test_update_study_script.py b/tests/study/test_update_study_script.py similarity index 100% rename from tests/test_update_study_script.py rename to tests/study/test_update_study_script.py diff --git a/tests/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py similarity index 100% rename from tests/test_workflow_processor.py rename to 
tests/workflow/test_workflow_processor.py diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py similarity index 100% rename from tests/test_workflow_processor_multi_instance.py rename to tests/workflow/test_workflow_processor_multi_instance.py diff --git a/tests/test_workflow_service.py b/tests/workflow/test_workflow_service.py similarity index 100% rename from tests/test_workflow_service.py rename to tests/workflow/test_workflow_service.py diff --git a/tests/test_workflow_spec_api.py b/tests/workflow/test_workflow_spec_api.py similarity index 100% rename from tests/test_workflow_spec_api.py rename to tests/workflow/test_workflow_spec_api.py diff --git a/tests/test_workflow_spec_validation_api.py b/tests/workflow/test_workflow_spec_validation_api.py similarity index 100% rename from tests/test_workflow_spec_validation_api.py rename to tests/workflow/test_workflow_spec_validation_api.py From 896ba6b37777e07459d0118106c889af23268cdc Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 17:00:16 -0600 Subject: [PATCH 15/44] Email relies now on markdown content --- Pipfile | 1 + Pipfile.lock | 64 +++++++++++-------- crc/models/email.py | 6 +- crc/scripts/email.py | 10 +-- crc/services/email_service.py | 14 ++-- crc/services/mails.py | 23 ++----- .../{62a11a335778_.py => 839f6f255b81_.py} | 10 +-- tests/data/email/email.bpmn | 10 ++- tests/emails/test_email_script.py | 4 +- tests/emails/test_email_service.py | 17 ++--- 10 files changed, 81 insertions(+), 78 deletions(-) rename migrations/versions/{62a11a335778_.py => 839f6f255b81_.py} (79%) diff --git a/Pipfile b/Pipfile index 0079962c..3cf80ffc 100644 --- a/Pipfile +++ b/Pipfile @@ -40,6 +40,7 @@ gunicorn = "*" werkzeug = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} flask-mail = "*" +markdown = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index f8ab746b..19fcdf9d 100644 --- a/Pipfile.lock +++ 
b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee" + "sha256": "f5c922e74e296622c19ecfdd5c22cdcc71841fe81cdd95f407a2eb2ba475e615" }, "pipfile-spec": 6, "requires": { @@ -104,17 +104,17 @@ }, "celery": { "hashes": [ - "sha256:9ae2e73b93cc7d6b48b56aaf49a68c91752d0ffd7dfdcc47f842ca79a6f13eae", - "sha256:c2037b6a8463da43b19969a0fc13f9023ceca6352b4dd51be01c66fbbb13647e" + "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647", + "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b" ], - "version": "==4.4.4" + "version": "==4.4.5" }, "certifi": { "hashes": [ - "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304", - "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519" + "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", + "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" ], - "version": "==2020.4.5.1" + "version": "==2020.4.5.2" }, "cffi": { "hashes": [ @@ -285,11 +285,11 @@ }, "flask-marshmallow": { "hashes": [ - "sha256:6e6aec171b8e092e0eafaf035ff5b8637bf3a58ab46f568c4c1bab02f2a3c196", - "sha256:a1685536e7ab5abdc712bbc1ac1a6b0b50951a368502f7985e7d1c27b3c21e59" + "sha256:1da1e6454a56a3e15107b987121729f152325bdef23f3df2f9b52bbd074af38e", + "sha256:aefc1f1d96256c430a409f08241bab75ffe97e5d14ac5d1f000764e39bf4873a" ], "index": "pypi", - "version": "==0.12.0" + "version": "==0.13.0" }, "flask-migrate": { "hashes": [ @@ -359,10 +359,10 @@ }, "inflection": { "hashes": [ - "sha256:32a5c3341d9583ec319548b9015b7fbdf8c429cbcb575d326c33ae3a0e90d52c", - "sha256:9a15d3598f01220e93f2207c432cfede50daff53137ce660fb8be838ef1ca6cc" + "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", + "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], - "version": "==0.4.0" + "version": "==0.5.0" }, "itsdangerous": { "hashes": [ @@ -446,6 +446,14 @@ 
], "version": "==1.1.3" }, + "markdown": { + "hashes": [ + "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17", + "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59" + ], + "index": "pypi", + "version": "==3.2.2" + }, "markupsafe": { "hashes": [ "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", @@ -711,11 +719,11 @@ }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "sentry-sdk": { "extras": [ @@ -751,11 +759,11 @@ }, "sphinx": { "hashes": [ - "sha256:779a519adbd3a70fc7c468af08c5e74829868b0a5b34587b33340e010291856c", - "sha256:ea64df287958ee5aac46be7ac2b7277305b0381d213728c3a49d8bb9b8415807" + "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258", + "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5" ], "index": "pypi", - "version": "==3.0.4" + "version": "==3.1.1" }, "sphinxcontrib-applehelp": { "hashes": [ @@ -932,10 +940,10 @@ }, "more-itertools": { "hashes": [ - "sha256:558bb897a2232f5e4f8e2399089e35aecb746e1f9191b6584a151647e89267be", - "sha256:7818f596b1e87be009031c7653d01acc46ed422e6656b394b0f765ce66ed4982" + "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", + "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], - "version": "==8.3.0" + "version": "==8.4.0" }, "packaging": { "hashes": [ @@ -961,10 +969,10 @@ }, "py": { "hashes": [ - "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa", - "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0" + 
"sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44", + "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b" ], - "version": "==1.8.1" + "version": "==1.8.2" }, "pyparsing": { "hashes": [ @@ -990,10 +998,10 @@ }, "wcwidth": { "hashes": [ - "sha256:980fbf4f3c196c0f329cdcd1e84c554d6a211f18e252e525a0cf4223154a41d6", - "sha256:edbc2b718b4db6cdf393eefe3a420183947d6aa312505ce6754516f458ff8830" + "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f", + "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f" ], - "version": "==0.2.3" + "version": "==0.2.4" }, "zipp": { "hashes": [ diff --git a/crc/models/email.py b/crc/models/email.py index c3180a27..dc8c6834 100644 --- a/crc/models/email.py +++ b/crc/models/email.py @@ -3,7 +3,7 @@ from marshmallow import EXCLUDE from sqlalchemy import func from crc import db -from crc.models.approval import ApprovalModel +from crc.models.study import StudyModel class EmailModel(db.Model): @@ -14,5 +14,5 @@ class EmailModel(db.Model): recipients = db.Column(db.String) content = db.Column(db.String) content_html = db.Column(db.String) - approval_id = db.Column(db.Integer, db.ForeignKey(ApprovalModel.id), nullable=False) - approval = db.relationship(ApprovalModel) + study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=True) + study = db.relationship(StudyModel) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index cbc093e8..f2f34a66 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -1,3 +1,4 @@ +import markdown from jinja2 import Template from crc import app @@ -29,14 +30,15 @@ Email Subject ApprvlApprvr1 PIComputingID def do_task(self, task, *args, **kwargs): subject = self.get_subject(task, args) recipients, display_keys = self.get_users_info(task, args) - content = self.get_content(task, display_keys) + content, content_html = self.get_content(task, display_keys) + import pdb; pdb.set_trace() if recipients: send_mail( 
subject=subject, sender=app.config['DEFAULT_SENDER'], recipients=recipients, content=content, - content_html=content + content_html=content_html ) def get_users_info(self, task, args): @@ -82,5 +84,5 @@ Email Subject ApprvlApprvr1 PIComputingID content = task.task_spec.documentation template = Template(content) rendered = template.render(display_keys) - - return rendered + rendered_markdown = markdown.markdown(rendered).replace('\n', '
') + return rendered, rendered_markdown diff --git a/crc/services/email_service.py b/crc/services/email_service.py index 036ea1c9..633f2102 100644 --- a/crc/services/email_service.py +++ b/crc/services/email_service.py @@ -5,7 +5,7 @@ from sqlalchemy import desc from crc import app, db, session from crc.api.common import ApiError -from crc.models.approval import ApprovalModel +from crc.models.study import StudyModel from crc.models.email import EmailModel @@ -13,15 +13,19 @@ class EmailService(object): """Provides common tools for working with an Email""" @staticmethod - def add_email(subject, sender, recipients, content, content_html, approval_id): + def add_email(subject, sender, recipients, content, content_html, study_id): """We will receive all data related to an email and store it""" - # Find corresponding approval - approval = db.session.query(ApprovalModel).get(approval_id) + # Find corresponding study - if any + study = None + if type(study_id) == int: + study = db.session.query(StudyModel).get(study_id) # Create EmailModel email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients), - content=content, content_html=content_html, approval=approval) + content=content, content_html=content_html, study=study) + + # TODO: Send email from here, not from caller functions db.session.add(email_model) db.session.commit() diff --git a/crc/services/mails.py b/crc/services/mails.py index 6816b586..b9b18bd1 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -23,7 +23,7 @@ def send_test_email(sender, recipients): except Exception as e: return str(e) -def send_mail(subject, sender, recipients, content, content_html): +def send_mail(subject, sender, recipients, content, content_html, study_id=None): from crc import mail try: msg = Message(subject, @@ -34,6 +34,9 @@ def send_mail(subject, sender, recipients, content, content_html): msg.body = content msg.html = content_html + EmailService.add_email(subject=subject, sender=sender, 
recipients=recipients, + content=content, content_html=content_html, study_id=study_id) + mail.send(msg) except Exception as e: return str(e) @@ -48,9 +51,6 @@ def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, a template = env.get_template('ramp_up_submission.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result @@ -64,9 +64,6 @@ def send_ramp_up_approval_request_email(sender, recipients, approval_id, primary template = env.get_template('ramp_up_approval_request.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result @@ -80,9 +77,6 @@ def send_ramp_up_approval_request_first_review_email(sender, recipients, approva template = env.get_template('ramp_up_approval_request_first_review.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result @@ -96,9 +90,6 @@ def send_ramp_up_approved_email(sender, recipients, approval_id, approver_1, app template = env.get_template('ramp_up_approved.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result @@ -112,9 +103,6 @@ def send_ramp_up_denied_email(sender, recipients, approval_id, 
approver): template = env.get_template('ramp_up_denied.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result @@ -128,8 +116,5 @@ def send_ramp_up_denied_email_to_approver(sender, recipients, approval_id, prima template = env.get_template('ramp_up_denied_first_approver.html') content_html = template.render(template_vars) - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval_id) - result = send_mail(subject, sender, recipients, content, content_html) return result diff --git a/migrations/versions/62a11a335778_.py b/migrations/versions/839f6f255b81_.py similarity index 79% rename from migrations/versions/62a11a335778_.py rename to migrations/versions/839f6f255b81_.py index ee8d8f91..e5400627 100644 --- a/migrations/versions/62a11a335778_.py +++ b/migrations/versions/839f6f255b81_.py @@ -1,8 +1,8 @@ """empty message -Revision ID: 62a11a335778 +Revision ID: 839f6f255b81 Revises: 17597692d0b0 -Create Date: 2020-06-09 22:45:52.475183 +Create Date: 2020-06-17 16:22:05.076206 """ from alembic import op @@ -10,7 +10,7 @@ import sqlalchemy as sa # revision identifiers, used by Alembic. 
-revision = '62a11a335778' +revision = '839f6f255b81' down_revision = '17597692d0b0' branch_labels = None depends_on = None @@ -25,8 +25,8 @@ def upgrade(): sa.Column('recipients', sa.String(), nullable=True), sa.Column('content', sa.String(), nullable=True), sa.Column('content_html', sa.String(), nullable=True), - sa.Column('approval_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['approval_id'], ['approval.id'], ), + sa.Column('study_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['study_id'], ['study.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn index c3887d68..4830e28f 100644 --- a/tests/data/email/email.bpmn +++ b/tests/data/email/email.bpmn @@ -8,7 +8,15 @@ Flow_1xlrgne - Email content to be delivered to {{ ApprvlApprvr1 }} + # Dear Approver +## you have been requested for approval + + +--- + +Email content to be delivered to {{ ApprvlApprvr1 }} + +--- Flow_08n2npe Flow_1xlrgne Email Subject ApprvlApprvr1 PIComputingID diff --git a/tests/emails/test_email_script.py b/tests/emails/test_email_script.py index 9ac93e07..2e1a5e04 100644 --- a/tests/emails/test_email_script.py +++ b/tests/emails/test_email_script.py @@ -6,7 +6,6 @@ from crc.services.workflow_processor import WorkflowProcessor from crc.api.common import ApiError from crc import db -# from crc.models.approval import ApprovalModel class TestEmailScript(BaseTest): @@ -17,6 +16,7 @@ class TestEmailScript(BaseTest): workflow = self.create_workflow('email') processor = WorkflowProcessor(workflow) task = processor.next_task() + # TODO: Replace with proper `complete_form` method from test_tasks processor.complete_task(task) task = processor.next_task() task.data = { @@ -27,4 +27,6 @@ class TestEmailScript(BaseTest): script = Email() script.do_task(task, 'Subject', 'PIComputingID', 'ApprvlApprvr1') + + # TODO: Add proper assertions self.assertTrue(True) diff --git 
a/tests/emails/test_email_service.py b/tests/emails/test_email_service.py index 9e0f2e57..c165ed10 100644 --- a/tests/emails/test_email_service.py +++ b/tests/emails/test_email_service.py @@ -13,24 +13,15 @@ class TestEmailService(BaseTest): study = self.create_study() workflow = self.create_workflow('random_fact') - approval = ApprovalModel( - study=study, - workflow=workflow, - approver_uid='lb3dp', - status=ApprovalStatus.PENDING.value, - version=1 - ) - session.add(approval) - session.commit() - subject = 'Email Subject' sender = 'sender@sartography.com' recipients = ['recipient@sartography.com', 'back@sartography.com'] content = 'Content for this email' content_html = '

Hypertext Markup Language content for this email

' + import pdb; pdb.set_trace() EmailService.add_email(subject=subject, sender=sender, recipients=recipients, - content=content, content_html=content_html, approval_id=approval.id) + content=content, content_html=content_html, study_id=study.id) email_model = EmailModel.query.first() @@ -39,4 +30,6 @@ class TestEmailService(BaseTest): self.assertEqual(email_model.recipients, str(recipients)) self.assertEqual(email_model.content, content) self.assertEqual(email_model.content_html, content_html) - self.assertEqual(email_model.approval, approval) + self.assertEqual(email_model.study, study) + + # TODO: Create email model without study From 5ce279b6637a3be7d61f840909dce9ead6185ce5 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 17:36:15 -0600 Subject: [PATCH 16/44] Dropping silly pdb statement --- crc/scripts/email.py | 1 - tests/emails/test_email_service.py | 1 - 2 files changed, 2 deletions(-) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index f2f34a66..f9d345a8 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -31,7 +31,6 @@ Email Subject ApprvlApprvr1 PIComputingID subject = self.get_subject(task, args) recipients, display_keys = self.get_users_info(task, args) content, content_html = self.get_content(task, display_keys) - import pdb; pdb.set_trace() if recipients: send_mail( subject=subject, diff --git a/tests/emails/test_email_service.py b/tests/emails/test_email_service.py index c165ed10..e2bcd139 100644 --- a/tests/emails/test_email_service.py +++ b/tests/emails/test_email_service.py @@ -19,7 +19,6 @@ class TestEmailService(BaseTest): content = 'Content for this email' content_html = '

Hypertext Markup Language content for this email

' - import pdb; pdb.set_trace() EmailService.add_email(subject=subject, sender=sender, recipients=recipients, content=content, content_html=content_html, study_id=study.id) From 4db815a999882b488cdae83fbb7372f52f3ce5be Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 17 Jun 2020 21:11:47 -0600 Subject: [PATCH 17/44] Handling incoming values from processor --- crc/scripts/email.py | 37 ++++++++++++++++++++---------- crc/services/workflow_processor.py | 2 +- tests/data/email/email.bpmn | 2 +- tests/emails/test_email_script.py | 32 +++++++++++++++----------- tests/files/test_file_service.py | 4 ++-- tests/files/test_files_api.py | 16 ++++--------- 6 files changed, 52 insertions(+), 41 deletions(-) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index f9d345a8..01def412 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -28,8 +28,9 @@ Email Subject ApprvlApprvr1 PIComputingID self.get_content(task, {}) def do_task(self, task, *args, **kwargs): - subject = self.get_subject(task, args) - recipients, display_keys = self.get_users_info(task, args) + args = [arg for arg in args if type(arg) == str] + subject, subject_index = self.get_subject(task, args) + recipients, display_keys = self.get_users_info(task, args, subject_index) content, content_html = self.get_content(task, display_keys) if recipients: send_mail( @@ -40,7 +41,7 @@ Email Subject ApprvlApprvr1 PIComputingID content_html=content_html ) - def get_users_info(self, task, args): + def get_users_info(self, task, args, subject_index): if len(args) < 1: raise ApiError(code="missing_argument", message="Email script requires at least one argument. The " @@ -48,7 +49,7 @@ Email Subject ApprvlApprvr1 PIComputingID "id to process. 
Multiple arguments are accepted.") emails = [] display_keys = {} - for arg in args[1:]: + for arg in args[subject_index+1:]: uid = task.workflow.script_engine.evaluate_expression(task, arg) user_info = LdapService.user_info(uid) email = user_info.email_address @@ -69,15 +70,27 @@ Email Subject ApprvlApprvr1 PIComputingID message="Email script requires at least one subject argument. The " "name of the variable in the task data that contains subject" " to process. Multiple arguments are accepted.") - subject = task.workflow.script_engine.evaluate_expression(task, args[0]) - if not isinstance(subject, str): - raise ApiError(code="invalid_argument", - message="The Email script requires 1 argument. The " - "the name of the variable in the task data that contains user" - "ids to process. This must point to an array or a string, but " - "it currently points to a %s " % subject.__class__.__name__) - return subject + subject_index = 0 + subject = args[subject_index] + if subject.startswith('"') and not subject.endswith('"'): + # Multi-word subject + subject_index += 1 + next_word = args[subject_index] + while not next_word.endswith('"'): + subject = ' '.join((subject, next_word)) + subject_index += 1 + next_word = args[subject_index] + subject = ' '.join((subject, next_word)) + subject = subject.replace('"', '') + if not isinstance(subject, str): + raise ApiError(code="invalid_argument", + message="The Email script requires 1 argument. The " + "the name of the variable in the task data that contains user" + "ids to process. 
This must point to an array or a string, but " + "it currently points to a %s " % subject.__class__.__name__) + + return subject, subject_index def get_content(self, task, display_keys): content = task.task_spec.documentation diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index 93590d94..f04fb332 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -60,7 +60,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): "does not properly implement the CRC Script class.", task=task) if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]: - """If this is running a validation, and not a normal process, then we want to + """If this is running a validation, and not a normal process, then we want to mimic running the script, but not make any external calls or database changes.""" klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:]) else: diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn index 4830e28f..54ec61a8 100644 --- a/tests/data/email/email.bpmn +++ b/tests/data/email/email.bpmn @@ -19,7 +19,7 @@ Email content to be delivered to {{ ApprvlApprvr1 }} --- Flow_08n2npe Flow_1xlrgne - Email Subject ApprvlApprvr1 PIComputingID + Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID
diff --git a/tests/emails/test_email_script.py b/tests/emails/test_email_script.py index 2e1a5e04..79d5c6ee 100644 --- a/tests/emails/test_email_script.py +++ b/tests/emails/test_email_script.py @@ -11,22 +11,26 @@ from crc import db class TestEmailScript(BaseTest): def test_do_task(self): - self.load_example_data() - self.create_reference_document() + # self.load_example_data() + # self.create_reference_document() workflow = self.create_workflow('email') - processor = WorkflowProcessor(workflow) - task = processor.next_task() - # TODO: Replace with proper `complete_form` method from test_tasks - processor.complete_task(task) - task = processor.next_task() - task.data = { - 'PIComputingID': 'dhf8r', - 'ApprvlApprvr1': 'lb3dp', - 'Subject': 'Email Script needs your help' - } - script = Email() - script.do_task(task, 'Subject', 'PIComputingID', 'ApprvlApprvr1') + # processor = WorkflowProcessor(workflow) + # task = processor.next_task() + # TODO: Replace with proper `complete_form` method from test_tasks + # processor.complete_task(task) + # task = processor.next_task() + task_data = { + 'PIComputingID': 'dhf8r', + 'ApprvlApprvr1': 'lb3dp' + } + task = self.get_workflow_api(workflow).next_task + + self.complete_form(workflow, task, task_data) + + + # script = Email() + # script.do_task(task, 'Subject', 'PIComputingID', 'ApprvlApprvr1') # TODO: Add proper assertions self.assertTrue(True) diff --git a/tests/files/test_file_service.py b/tests/files/test_file_service.py index 1dea810c..dd95e458 100644 --- a/tests/files/test_file_service.py +++ b/tests/files/test_file_service.py @@ -61,14 +61,14 @@ class TestFileService(BaseTest): # Archive the file file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(1, len(file_models)) + self.assertEqual(1, len(file_models)) file_model = file_models[0] file_model.archived = True db.session.add(file_model) # Assure that the file no longer comes back. 
file_models = FileService.get_workflow_files(workflow_id=workflow.id) - self.assertEquals(0, len(file_models)) + self.assertEqual(0, len(file_models)) # Add the file again with different data FileService.add_workflow_file(workflow_id=workflow.id, diff --git a/tests/files/test_files_api.py b/tests/files/test_files_api.py index 2d14a8b5..59e6c1f6 100644 --- a/tests/files/test_files_api.py +++ b/tests/files/test_files_api.py @@ -91,7 +91,6 @@ class TestFilesApi(BaseTest): content_type='multipart/form-data', headers=self.logged_in_headers()) self.assert_success(rv) - def test_archive_file_no_longer_shows_up(self): self.load_example_data() self.create_reference_document() @@ -109,21 +108,16 @@ class TestFilesApi(BaseTest): self.assert_success(rv) rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(1, len(json.loads(rv.get_data(as_text=True)))) + self.assertEqual(1, len(json.loads(rv.get_data(as_text=True)))) file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all() - self.assertEquals(1, len(file_model)) + self.assertEqual(1, len(file_model)) file_model[0].archived = True db.session.commit() rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) - self.assertEquals(0, len(json.loads(rv.get_data(as_text=True)))) - - - - - + self.assertEqual(0, len(json.loads(rv.get_data(as_text=True)))) def test_set_reference_file(self): file_name = "irb_document_types.xls" @@ -285,8 +279,8 @@ class TestFilesApi(BaseTest): .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\ .filter(ApprovalModel.study_id == workflow.study_id).all() - self.assertEquals(1, len(approvals)) - self.assertEquals(1, len(approvals[0].approval_files)) + self.assertEqual(1, len(approvals)) + self.assertEqual(1, len(approvals[0].approval_files)) def test_change_primary_bpmn(self): From 
479f6d9647de8efeceffd78e9a9ab08e9ad39cf0 Mon Sep 17 00:00:00 2001 From: Kelly McDonald Date: Thu, 18 Jun 2020 12:01:02 -0400 Subject: [PATCH 18/44] STG-26 Do rename per conversation, continue to look for ways to implement looping in a way that is re-entrant --- tests/data/looping_task/looping_task.bpmn | 4 ++-- tests/test_looping_task.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/data/looping_task/looping_task.bpmn b/tests/data/looping_task/looping_task.bpmn index 0c3929bf..96b1b32f 100644 --- a/tests/data/looping_task/looping_task.bpmn +++ b/tests/data/looping_task/looping_task.bpmn @@ -7,8 +7,8 @@ - - + + Flow_0vlor2k diff --git a/tests/test_looping_task.py b/tests/test_looping_task.py index 87701ef4..e56e0877 100644 --- a/tests/test_looping_task.py +++ b/tests/test_looping_task.py @@ -31,21 +31,21 @@ class TestWorkflowProcessorLoopingTask(BaseTest): self.assertEqual(task.multi_instance_type, 'looping') self.assertEqual(1, task.multi_instance_index) - self.complete_form(workflow,task,{'GetNames_MICurrentVar':{'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) + self.complete_form(workflow,task,{'GetNames_CurrentVar':{'Name': 'Peter Norvig', 'Nickname': 'Pete'}}) task = self.get_workflow_api(workflow).next_task self.assertEqual(task.multi_instance_type,'looping') self.assertEqual(2, task.multi_instance_index) self.complete_form(workflow, task, - {'GetNames_MICurrentVar':{'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, + {'GetNames_CurrentVar':{'Name': 'Stuart Russell', 'Nickname': 'Stu'}}, terminate_loop=True) task = self.get_workflow_api(workflow).next_task self.assertEqual(task.name,'Event_End') self.assertEqual(workflow.completed_tasks,workflow.total_tasks) - self.assertEqual(task.data, {'GetNames_MICurrentVar': 2, - 'GetNames_MIData': {'1': {'Name': 'Peter Norvig', + self.assertEqual(task.data, {'GetNames_CurrentVar': 2, + 'GetNames': {'1': {'Name': 'Peter Norvig', 'Nickname': 'Pete'}, '2': {'Name': 'Stuart Russell', 'Nickname': 
'Stu'}}}) From e6d74aaa1afebb5f7718243fb42bc11efff899f8 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Thu, 18 Jun 2020 12:53:50 -0600 Subject: [PATCH 19/44] Removing extra index when parsing users info --- crc/api/tools.py | 2 +- crc/scripts/email.py | 17 +++++++++++------ tests/data/random_fact/random_fact.bpmn | 6 +++--- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/crc/api/tools.py b/crc/api/tools.py index d140e962..fa969a1e 100644 --- a/crc/api/tools.py +++ b/crc/api/tools.py @@ -14,7 +14,7 @@ from crc.services.mails import send_test_email def render_markdown(data, template): """ - Provides a quick way to very that a Jinja markdown template will work properly on a given json + Provides a quick way to verify that a Jinja markdown template will work properly on a given json data structure. Useful for folks that are building these markdown templates. """ try: diff --git a/crc/scripts/email.py b/crc/scripts/email.py index 01def412..c94a98a3 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -29,8 +29,8 @@ Email Subject ApprvlApprvr1 PIComputingID def do_task(self, task, *args, **kwargs): args = [arg for arg in args if type(arg) == str] - subject, subject_index = self.get_subject(task, args) - recipients, display_keys = self.get_users_info(task, args, subject_index) + subject = self.get_subject(task, args) + recipients, display_keys = self.get_users_info(task, args) content, content_html = self.get_content(task, display_keys) if recipients: send_mail( @@ -41,7 +41,7 @@ Email Subject ApprvlApprvr1 PIComputingID content_html=content_html ) - def get_users_info(self, task, args, subject_index): + def get_users_info(self, task, args): if len(args) < 1: raise ApiError(code="missing_argument", message="Email script requires at least one argument. The " "id to process. 
Multiple arguments are accepted.") emails = [] display_keys = {} - for arg in args[subject_index+1:]: - uid = task.workflow.script_engine.evaluate_expression(task, arg) + for arg in args: + try: + uid = task.workflow.script_engine.evaluate_expression(task, arg) + except Exception as e: + app.logger.error(f'Workflow engines could not parse {arg}') + app.logger.error(str(e)) + continue user_info = LdapService.user_info(uid) email = user_info.email_address emails.append(user_info.email_address) @@ -90,7 +95,7 @@ Email Subject ApprvlApprvr1 PIComputingID "ids to process. This must point to an array or a string, but " "it currently points to a %s " % subject.__class__.__name__) - return subject, subject_index + return subject def get_content(self, task, display_keys): content = task.task_spec.documentation diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn index 628f1bd4..fc5e41bb 100644 --- a/tests/data/random_fact/random_fact.bpmn +++ b/tests/data/random_fact/random_fact.bpmn @@ -175,9 +175,6 @@ Your random fact is: - - - @@ -187,6 +184,9 @@ Your random fact is: + + + From 6aec15cc7c2642ab5fca22de1e0633278e1b4c4b Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Fri, 19 Jun 2020 08:22:53 -0400 Subject: [PATCH 20/44] Shifting to a different model, where the TaskEvents store ONLY the form data submitted for that task. In order to allow proper deletion of tasks, we no longer merge data returned from the front end, we set it directly as the task_data. When returning data to the front end, we take any previous form submission and merge it into the current task data, allowing users to keep their previous submissions. There is now an "extract_form_data" method that does it's best job to calculate what form data might have changed from the front end. 
--- Pipfile.lock | 58 ++++++++++++++++---------- crc/api/admin.py | 4 +- crc/api/workflow.py | 6 +-- crc/models/stats.py | 2 +- crc/services/workflow_service.py | 62 ++++++++++++++-------------- migrations/versions/de30304ff5e6_.py | 30 ++++++++++++++ tests/test_tasks_api.py | 13 +++--- tests/test_workflow_service.py | 15 +++---- 8 files changed, 116 insertions(+), 74 deletions(-) create mode 100644 migrations/versions/de30304ff5e6_.py diff --git a/Pipfile.lock b/Pipfile.lock index 2f99c84f..8cc805d0 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "faaf0e1f31f4bf99df366e52df20bb148a05996a0e6467767660665c514af2d7" + "sha256": "78a8da35dec2fb58b02a58afc8ffabe8b1c22bec8f054295e8b1ba3b4a6f4ec0" }, "pipfile-spec": 6, "requires": { @@ -261,6 +261,13 @@ "index": "pypi", "version": "==1.1.2" }, + "flask-admin": { + "hashes": [ + "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8" + ], + "index": "pypi", + "version": "==1.5.6" + }, "flask-bcrypt": { "hashes": [ "sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f" @@ -558,25 +565,25 @@ }, "pandas": { "hashes": [ - "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46", - "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5", - "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa", - "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc", - "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678", - "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc", - "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31", - "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8", - "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6", - "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa", - 
"sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4", - "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874", - "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd", - "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4", - "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126", - "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648" + "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096", + "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0", + "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453", + "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc", + "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7", + "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b", + "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8", + "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8", + "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef", + "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d", + "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705", + "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9", + "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91", + "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3", + "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107", + "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e" ], "index": "pypi", - "version": "==1.0.4" + "version": "==1.0.5" }, "psycopg2-binary": { "hashes": [ @@ -711,11 +718,11 @@ }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + 
"sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "sentry-sdk": { "extras": [ @@ -802,7 +809,7 @@ "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0" + "ref": "5450dc0463a95811d386b7de063d950bf6179d2b" }, "sqlalchemy": { "hashes": [ @@ -890,6 +897,13 @@ "index": "pypi", "version": "==1.0.1" }, + "wtforms": { + "hashes": [ + "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b", + "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972" + ], + "version": "==2.3.1" + }, "xlrd": { "hashes": [ "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2", diff --git a/crc/api/admin.py b/crc/api/admin.py index 6a27b6da..37532c38 100644 --- a/crc/api/admin.py +++ b/crc/api/admin.py @@ -57,9 +57,9 @@ def json_formatter(view, context, model, name): class TaskEventView(AdminModelView): column_filters = ['workflow_id', 'action'] - column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'task_data', 'date'] + column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'form_data', 'date'] column_formatters = { - 'task_data': json_formatter, + 'form_data': json_formatter, } admin = Admin(app) diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 14c40df5..9e1dffc2 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -145,14 +145,14 @@ def update_task(workflow_id, task_id, body): if spiff_task.state != spiff_task.READY: raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. " "Consider calling a token reset to make this task Ready.") - spiff_task.update_data(body) + if body: # IF and only if we get the body back, update the task data with the content. 
+ spiff_task.data = body # Accept the data from the front end as complete. Do not merge it in, as then it is impossible to remove items. processor.complete_task(spiff_task) processor.do_engine_steps() processor.save() WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, WorkflowService.TASK_ACTION_COMPLETE, - version=processor.get_version_string(), - updated_data=spiff_task.data) + version=processor.get_version_string()) workflow_api_model = WorkflowService.processor_to_workflow_api(processor) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/stats.py b/crc/models/stats.py index 8912b1d1..0a2e69b7 100644 --- a/crc/models/stats.py +++ b/crc/models/stats.py @@ -17,7 +17,7 @@ class TaskEventModel(db.Model): task_title = db.Column(db.String) task_type = db.Column(db.String) task_state = db.Column(db.String) - task_data = db.Column(db.JSON) + form_data = db.Column(db.JSON) # And form data submitted when the task was completed. mi_type = db.Column(db.String) mi_count = db.Column(db.Integer) mi_index = db.Column(db.Integer) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 3b064954..2ce7b078 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -6,6 +6,7 @@ import random import jinja2 from SpiffWorkflow import Task as SpiffTask, WorkflowException from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask +from SpiffWorkflow.bpmn.specs.MultiInstanceTask import MultiInstanceTask from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask from SpiffWorkflow.bpmn.specs.UserTask import UserTask from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask @@ -232,23 +233,25 @@ class WorkflowService(object): # This may or may not work, sometimes there is no next task to complete. 
next_task = processor.next_task() if next_task: + previous_form_data = WorkflowService.get_previously_submitted_data(processor.workflow_model.id, next_task) + DeepMerge.merge(next_task.data, previous_form_data) workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True) return workflow_api @staticmethod def get_previously_submitted_data(workflow_id, task): - """ If the user has completed this task previously, find that data in the task events table, and return it.""" + """ If the user has completed this task previously, find the form data for the last submission.""" latest_event = db.session.query(TaskEventModel) \ .filter_by(workflow_id=workflow_id) \ .filter_by(task_name=task.task_spec.name) \ .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ .order_by(TaskEventModel.date.desc()).first() if latest_event: - if latest_event.task_data is not None: - return latest_event.task_data + if latest_event.form_data is not None: + return latest_event.form_data else: - app.logger.error("missing_task_data", "We have lost data for workflow %i, task %s, it is not " + app.logger.error("missing_form_dat", "We have lost data for workflow %i, task %s, it is not " "in the task event model, " "and it should be." 
% (workflow_id, task.task_spec.name)) return {} @@ -387,9 +390,9 @@ class WorkflowService(object): field.options.append({"id": d.value, "name": d.label}) @staticmethod - def log_task_action(user_uid, workflow_model, spiff_task, action, - version, updated_data=None): + def log_task_action(user_uid, workflow_model, spiff_task, action, version): task = WorkflowService.spiff_task_to_api_task(spiff_task) + form_data = WorkflowService.extract_form_data(spiff_task.data, spiff_task) task_event = TaskEventModel( study_id=workflow_model.study_id, user_uid=user_uid, @@ -402,7 +405,7 @@ class WorkflowService(object): task_title=task.title, task_type=str(task.type), task_state=task.state, - task_data=updated_data, + form_data=form_data, mi_type=task.multi_instance_type.value, # Some tasks have a repeat behavior. mi_count=task.multi_instance_count, # This is the number of times the task could repeat. mi_index=task.multi_instance_index, # And the index of the currently repeating task. @@ -436,43 +439,40 @@ class WorkflowService(object): # added in subsequent tasks, just looking at form data, will not track the automated # task data additions, hopefully this doesn't hang us. for log in task_logs: - if log.task_data is not None: # Only do this if the task event does not have data populated in it. - continue +# if log.task_data is not None: # Only do this if the task event does not have data populated in it. +# continue data = copy.deepcopy(latest_data) # Or you end up with insane crazy issues. 
# In the simple case of RRT, there is exactly one task for the given task_spec task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0] - data = WorkflowService.__remove_data_added_by_children(data, task.children[0]) - log.task_data = data + data = WorkflowService.extract_form_data(data, task) + log.form_data = data db.session.add(log) db.session.commit() @staticmethod - def __remove_data_added_by_children(latest_data, child_task): + def extract_form_data(latest_data, task): """Removes data from latest_data that would be added by the child task or any of it's children.""" - if hasattr(child_task.task_spec, 'form'): - for field in child_task.task_spec.form.fields: - latest_data.pop(field.id, None) + data = {} + + if hasattr(task.task_spec, 'form'): + for field in task.task_spec.form.fields: if field.has_property(Task.PROP_OPTIONS_READ_ONLY) and \ field.get_property(Task.PROP_OPTIONS_READ_ONLY).lower().strip() == "true": - continue # Don't pop off read only fields. - if field.has_property(Task.PROP_OPTIONS_REPEAT): + continue # Don't add read-only data + elif field.has_property(Task.PROP_OPTIONS_REPEAT): group = field.get_property(Task.PROP_OPTIONS_REPEAT) - group_data = [] if group in latest_data: - for item in latest_data[group]: - item.pop(field.id, None) - if item: - group_data.append(item) - latest_data[group] = group_data - if not latest_data[group]: - latest_data.pop(group, None) - if isinstance(child_task.task_spec, BusinessRuleTask): - for output in child_task.task_spec.dmnEngine.decisionTable.outputs: - latest_data.pop(output.name, None) - for child in child_task.children: - latest_data = WorkflowService.__remove_data_added_by_children(latest_data, child) - return latest_data + data[group] = latest_data[group] + elif isinstance(task.task_spec, MultiInstanceTask): + group = task.task_spec.elementVar + if group in latest_data: + data[group] = latest_data[group] + else: + if field.id in latest_data: + data[field.id] = latest_data[field.id] + + 
return data diff --git a/migrations/versions/de30304ff5e6_.py b/migrations/versions/de30304ff5e6_.py new file mode 100644 index 00000000..46a43f18 --- /dev/null +++ b/migrations/versions/de30304ff5e6_.py @@ -0,0 +1,30 @@ +"""empty message + +Revision ID: de30304ff5e6 +Revises: 1fdd1bdb600e +Create Date: 2020-06-18 16:19:11.133665 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = 'de30304ff5e6' +down_revision = '1fdd1bdb600e' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('form_data', sa.JSON(), nullable=True)) + op.drop_column('task_event', 'task_data') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('task_event', sa.Column('task_data', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) + op.drop_column('task_event', 'form_data') + # ### end Alembic commands ### diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index b51bca99..1b35434c 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -77,9 +77,8 @@ class TestTasksApi(BaseTest): self.assertEquals(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) - # Assure that the data provided occurs in the task data log. 
- for key in dict_data.keys(): - self.assertIn(key, event.task_data) + # Assure that there is data in the form_data + self.assertIsNotNone(event.form_data) workflow = WorkflowApiSchema().load(json_data) return workflow @@ -372,13 +371,13 @@ class TestTasksApi(BaseTest): self.assertEqual("UserTask", task.type) self.assertEqual("Activity_A", task.name) self.assertEqual("My Sub Process", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldA": "Dan"}) task = workflow_api.next_task self.assertIsNotNone(task) self.assertEqual("Activity_B", task.name) self.assertEqual("Sub Workflow Example", task.process_name) - workflow_api = self.complete_form(workflow, task, {"name": "Dan"}) + workflow_api = self.complete_form(workflow, task, {"FieldB": "Dan"}) self.assertEqual(WorkflowStatus.complete, workflow_api.status) def test_update_task_resets_token(self): @@ -446,7 +445,9 @@ class TestTasksApi(BaseTest): for i in random.sample(range(9), 9): task = TaskSchema().load(ready_items[i]['task']) - self.complete_form(workflow, task, {"investigator":{"email": "dhf8r@virginia.edu"}}) + data = workflow_api.next_task.data + data['investigator']['email'] = "dhf8r@virginia.edu" + self.complete_form(workflow, task, data) #tasks = self.get_workflow_api(workflow).user_tasks workflow = self.get_workflow_api(workflow) diff --git a/tests/test_workflow_service.py b/tests/test_workflow_service.py index 6f0fa5e3..6b1b5c58 100644 --- a/tests/test_workflow_service.py +++ b/tests/test_workflow_service.py @@ -100,11 +100,10 @@ class TestWorkflowService(BaseTest): task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True) WorkflowService.populate_form_with_random_data(task, task_api, False) task.complete() - # create the task events with no task_data in them. 
+ # create the task events WorkflowService.log_task_action('dhf8r', workflow, task, WorkflowService.TASK_ACTION_COMPLETE, - version=processor.get_version_string(), - updated_data=None) + version=processor.get_version_string()) processor.save() db.session.commit() @@ -119,19 +118,17 @@ class TestWorkflowService(BaseTest): self.assertEqual(17, len(task_logs)) for log in task_logs: task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0] - self.assertIsNotNone(log.task_data) + self.assertIsNotNone(log.form_data) # Each task should have the data in the form for that task in the task event. if hasattr(task.task_spec, 'form'): for field in task.task_spec.form.fields: if field.has_property(Task.PROP_OPTIONS_REPEAT): - self.assertIn(field.get_property(Task.PROP_OPTIONS_REPEAT), log.task_data) + self.assertIn(field.get_property(Task.PROP_OPTIONS_REPEAT), log.form_data) else: - self.assertIn(field.id, log.task_data) + self.assertIn(field.id, log.form_data) # Some spot checks: # The first task should be empty, with all the data removed. - self.assertEqual({}, task_logs[0].task_data) + self.assertEqual({}, task_logs[0].form_data) - # The last task should have all the data. - self.assertDictEqual(processor.bpmn_workflow.last_task.data, task_logs[16].task_data) From 9d1c495c905f32748702f3b6958297a306b6f404 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Fri, 19 Jun 2020 08:44:02 -0400 Subject: [PATCH 21/44] Fix RRT Data added to docker run --- docker_run.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docker_run.sh b/docker_run.sh index 6bc3c90b..4cd2cbc4 100755 --- a/docker_run.sh +++ b/docker_run.sh @@ -28,3 +28,8 @@ if [ "$APPLICATION_ROOT" = "/" ]; then else pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app fi + +if [ "$FIX_RRT_DATA" = "true" ]; then + echo 'Fixing RRT data...' 
+ pipenv run flask rrt-data-fix +fi \ No newline at end of file From 8384497600e882ce89fb95962b514d309f7f6ba0 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Fri, 19 Jun 2020 10:07:10 -0400 Subject: [PATCH 22/44] Move the fix rrt data to a place where it will get picked up. --- docker_run.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docker_run.sh b/docker_run.sh index 4cd2cbc4..8ad66274 100755 --- a/docker_run.sh +++ b/docker_run.sh @@ -23,13 +23,16 @@ if [ "$RESET_DB_RRT" = "true" ]; then pipenv run flask load-example-rrt-data fi +if [ "$FIX_RRT_DATA" = "true" ]; then + echo 'Fixing RRT data...' + pipenv run flask rrt-data-fix +fi + + +# THIS MUST BE THE LAST COMMAND! if [ "$APPLICATION_ROOT" = "/" ]; then pipenv run gunicorn --bind 0.0.0.0:$PORT0 wsgi:app else pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app fi -if [ "$FIX_RRT_DATA" = "true" ]; then - echo 'Fixing RRT data...' - pipenv run flask rrt-data-fix -fi \ No newline at end of file From b8d60ca94467f03e5c28cb73be75fa1f71f5cf82 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 07:14:00 -0600 Subject: [PATCH 23/44] Spreadsheet generation --- crc/services/approval_service.py | 39 ++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 1f6f56b3..81608a34 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -1,6 +1,6 @@ -from datetime import datetime +from datetime import datetime, timedelta -from sqlalchemy import desc +from sqlalchemy import desc, func from crc import app, db, session from crc.api.common import ApiError @@ -109,6 +109,41 @@ class ApprovalService(object): db_approvals = query.all() return [Approval.from_model(approval_model) for approval_model in db_approvals] + @staticmethod + def get_health_attesting_for_today(): + """Return a CSV with prepared information 
related to approvals + created today""" + # import pdb; pdb.set_trace() + today = datetime.now() - timedelta(days=3) + today = today.date() + approvals = session.query(ApprovalModel).filter( + # func.date(ApprovalModel.date_created)==today, + ApprovalModel.status==ApprovalStatus.APPROVED.value + ) + + health_attesting_rows = [ + 'university_computing_id', + 'last_name', + 'first_name', + 'department', + 'job_title', + 'supervisor_university_computing_id' + ] + for approval in approvals: + pi_info = LdapService.user_info(approval.study.primary_investigator_id) + approver_info = LdapService.user_info(approval.approver_uid) + first_name = pi_info.given_name + last_name = pi_info.display_name.replace(first_name, '').strip() + health_attesting_rows.append([ + pi_info.uid, + last_name, + first_name, + '', + 'Academic Researcher', + approver_info.uid + ]) + + return health_attesting_rows @staticmethod def update_approval(approval_id, approver_uid): From e5541e4950416ebb9746a893c5386b3219f5826b Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 09:24:58 -0600 Subject: [PATCH 24/44] Enable CSV download --- crc/api.yml | 22 ++++++++++++++++ crc/api/approval.py | 15 ++++++++++- crc/services/approval_service.py | 44 +++++++++++++++++--------------- 3 files changed, 60 insertions(+), 21 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index 64f6086a..71710881 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -917,6 +917,28 @@ paths: application/json: schema: type: object + /health_attesting: + parameters: + - name: all_approvals + in: query + required: false + description: If set to false, returns just approvals for today. 
+ schema: + type: string + get: + operationId: crc.api.approval.get_health_attesting_csv + summary: Returns a CSV file with health attesting records + tags: + - Approvals + responses: + '200': + description: A CSV file + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Approval" components: securitySchemes: jwt: diff --git a/crc/api/approval.py b/crc/api/approval.py index b3ee0fed..a44dfc5b 100644 --- a/crc/api/approval.py +++ b/crc/api/approval.py @@ -1,9 +1,11 @@ +import csv +import io import json import pickle from base64 import b64decode from datetime import datetime -from flask import g +from flask import g, make_response from crc import db, session from crc.api.common import ApiError @@ -88,6 +90,17 @@ def get_approvals_for_study(study_id=None): return results +def get_health_attesting_csv(all_approvals=True): + records = ApprovalService.get_health_attesting_records(all_approvals) + si = io.StringIO() + cw = csv.writer(si) + cw.writerows(records) + output = make_response(si.getvalue()) + output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv" + output.headers["Content-type"] = "text/csv" + return output + + # ----- Begin descent into madness ---- # def get_csv(): """A damn lie, it's a json file. 
A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 81608a34..f98733a5 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -110,24 +110,27 @@ class ApprovalService(object): return [Approval.from_model(approval_model) for approval_model in db_approvals] @staticmethod - def get_health_attesting_for_today(): - """Return a CSV with prepared information related to approvals - created today""" - # import pdb; pdb.set_trace() - today = datetime.now() - timedelta(days=3) - today = today.date() - approvals = session.query(ApprovalModel).filter( - # func.date(ApprovalModel.date_created)==today, - ApprovalModel.status==ApprovalStatus.APPROVED.value - ) + def get_health_attesting_records(all_approvals=True): + """Return a list with prepared information related to all approvals + approved or filtered by today """ + if all_approvals: + approvals = session.query(ApprovalModel).filter( + ApprovalModel.status==ApprovalStatus.APPROVED.value + ) + else: + today = datetime.now().date() + approvals = session.query(ApprovalModel).filter( + func.date(ApprovalModel.date_created)==today, + ApprovalModel.status==ApprovalStatus.APPROVED.value + ) health_attesting_rows = [ - 'university_computing_id', - 'last_name', - 'first_name', - 'department', - 'job_title', - 'supervisor_university_computing_id' + ['university_computing_id', + 'last_name', + 'first_name', + 'department', + 'job_title', + 'supervisor_university_computing_id'] ] for approval in approvals: pi_info = LdapService.user_info(approval.study.primary_investigator_id) @@ -147,13 +150,14 @@ class ApprovalService(object): @staticmethod def update_approval(approval_id, approver_uid): - """Update a specific approval""" + """Update a specific approval + NOTE: Actual update happens in the API layer, this + function is currently in charge of only sending + corresponding emails + """ db_approval 
= session.query(ApprovalModel).get(approval_id) status = db_approval.status if db_approval: - # db_approval.status = status - # session.add(db_approval) - # session.commit() if status == ApprovalStatus.APPROVED.value: # second_approval = ApprovalModel().query.filter_by( # study_id=db_approval.study_id, workflow_id=db_approval.workflow_id, From dc5ffd29d0eae051140f9dfcb0408b73e80955c8 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 14:07:57 -0600 Subject: [PATCH 25/44] Refactoring shared code --- crc/api/approval.py | 65 +--------------- crc/services/approval_service.py | 123 ++++++++++++++++++++++++------- tests/test_approvals_service.py | 26 +++++++ tests/test_tasks_api.py | 32 ++++---- 4 files changed, 144 insertions(+), 102 deletions(-) diff --git a/crc/api/approval.py b/crc/api/approval.py index a44dfc5b..fd01e221 100644 --- a/crc/api/approval.py +++ b/crc/api/approval.py @@ -90,8 +90,8 @@ def get_approvals_for_study(study_id=None): return results -def get_health_attesting_csv(all_approvals=True): - records = ApprovalService.get_health_attesting_records(all_approvals) +def get_health_attesting_csv(): + records = ApprovalService.get_health_attesting_records() si = io.StringIO() cw = csv.writer(si) cw.writerows(records) @@ -105,67 +105,10 @@ def get_health_attesting_csv(all_approvals=True): def get_csv(): """A damn lie, it's a json file. 
A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a man to do just about anything""" - approvals = ApprovalService.get_all_approvals(include_cancelled=False) - output = [] - errors = [] - for approval in approvals: - try: - if approval.status != ApprovalStatus.APPROVED.value: - continue - for related_approval in approval.related_approvals: - if related_approval.status != ApprovalStatus.APPROVED.value: - continue - workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() - data = json.loads(workflow.bpmn_workflow_json) - last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) - personnel = extract_value(last_task, 'personnel') - training_val = extract_value(last_task, 'RequiredTraining') - pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] - review_complete = 'AllRequiredTraining' in training_val - pi_uid = workflow.study.primary_investigator_id - pi_details = LdapService.user_info(pi_uid) - details = [] - details.append(pi_details) - for person in personnel: - uid = person['PersonnelComputingID']['value'] - details.append(LdapService.user_info(uid)) + content = ApprovalService.get_not_really_csv_content() - for person in details: - record = { - "study_id": approval.study_id, - "pi_uid": pi_details.uid, - "pi": pi_details.display_name, - "name": person.display_name, - "uid": person.uid, - "email": person.email_address, - "supervisor": "", - "review_complete": review_complete, - } - # We only know the PI's supervisor. 
- if person.uid == pi_details.uid: - record["supervisor"] = pi_supervisor + return content - output.append(record) - - except Exception as e: - errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) - return {"results": output, "errors": errors } - - -def extract_value(task, key): - if key in task['data']: - return pickle.loads(b64decode(task['data'][key]['__bytes__'])) - else: - return "" - - -def find_task(uuid, task): - if task['id']['__uuid__'] == uuid: - return task - for child in task['children']: - task = find_task(uuid, child) - if task: - return task # ----- come back to the world of the living ---- # diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index f98733a5..cd3a6549 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -1,3 +1,6 @@ +import json +import pickle +from base64 import b64decode from datetime import datetime, timedelta from sqlalchemy import desc, func @@ -110,19 +113,51 @@ class ApprovalService(object): return [Approval.from_model(approval_model) for approval_model in db_approvals] @staticmethod - def get_health_attesting_records(all_approvals=True): - """Return a list with prepared information related to all approvals - approved or filtered by today """ - if all_approvals: - approvals = session.query(ApprovalModel).filter( - ApprovalModel.status==ApprovalStatus.APPROVED.value - ) - else: - today = datetime.now().date() - approvals = session.query(ApprovalModel).filter( - func.date(ApprovalModel.date_created)==today, - ApprovalModel.status==ApprovalStatus.APPROVED.value - ) + def get_approval_details(approval): + """Returns a list of packed approval details, obtained from + the task data sent during the workflow """ + def extract_value(task, key): + if key in task['data']: + return pickle.loads(b64decode(task['data'][key]['__bytes__'])) + else: + return "" + + def find_task(uuid, task): + if task['id']['__uuid__'] == uuid: + return task + 
for child in task['children']: + task = find_task(uuid, child) + if task: + return task + + if approval.status != ApprovalStatus.APPROVED.value: + return {} + for related_approval in approval.related_approvals: + if related_approval.status != ApprovalStatus.APPROVED.value: + continue + workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first() + data = json.loads(workflow.bpmn_workflow_json) + last_task = find_task(data['last_task']['__uuid__'], data['task_tree']) + personnel = extract_value(last_task, 'personnel') + training_val = extract_value(last_task, 'RequiredTraining') + pi_supervisor = extract_value(last_task, 'PISupervisor')['value'] + review_complete = 'AllRequiredTraining' in training_val + pi_uid = workflow.study.primary_investigator_id + pi_details = LdapService.user_info(pi_uid) + details = {'Supervisor': pi_supervisor} + details['person_details'] = [] + details['person_details'].append(pi_details) + for person in personnel: + uid = person['PersonnelComputingID']['value'] + details['person_details'].append(LdapService.user_info(uid)) + + return details + + @staticmethod + def get_health_attesting_records(): + """Return a list with prepared information related to all approvals """ + + approvals = ApprovalService.get_all_approvals(include_cancelled=False) health_attesting_rows = [ ['university_computing_id', @@ -132,22 +167,60 @@ class ApprovalService(object): 'job_title', 'supervisor_university_computing_id'] ] + for approval in approvals: - pi_info = LdapService.user_info(approval.study.primary_investigator_id) - approver_info = LdapService.user_info(approval.approver_uid) - first_name = pi_info.given_name - last_name = pi_info.display_name.replace(first_name, '').strip() - health_attesting_rows.append([ - pi_info.uid, - last_name, - first_name, - '', - 'Academic Researcher', - approver_info.uid - ]) + try: + details = ApprovalService.get_approval_details(approval) + if not details: + continue + + for person in 
details['person_details']: + first_name = person.given_name + last_name = person.display_name.replace(first_name, '').strip() + record = [ + person.uid, + last_name, + first_name, + '', + 'Academic Researcher', + details['Supervisor'] if person.uid == details['person_details'][0].uid else 'askresearch' + ] + + if record not in health_attesting_rows: + health_attesting_rows.append(record) + + except Exception as e: + app.logger.error("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) return health_attesting_rows + @staticmethod + def get_not_really_csv_content(): + approvals = ApprovalService.get_all_approvals(include_cancelled=False) + output = [] + errors = [] + for approval in approvals: + try: + details = ApprovalService.get_approval_details(approval) + + for person in details['person_details']: + record = { + "study_id": approval.study_id, + "pi_uid": pi_details.uid, + "pi": pi_details.display_name, + "name": person.display_name, + "uid": person.uid, + "email": person.email_address, + "supervisor": details['Supervisor'] if person.uid == details['person_details'][0].uid else "", + "review_complete": review_complete, + } + + output.append(record) + + except Exception as e: + errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e))) + return {"results": output, "errors": errors } + @staticmethod def update_approval(approval_id, approver_uid): """Update a specific approval diff --git a/tests/test_approvals_service.py b/tests/test_approvals_service.py index 26a26ef4..d8f8d503 100644 --- a/tests/test_approvals_service.py +++ b/tests/test_approvals_service.py @@ -57,6 +57,32 @@ class TestApprovalsService(BaseTest): self.assertEqual(1, models[0].version) self.assertEqual(2, models[1].version) + def test_get_health_attesting_records(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + 
name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + records = ApprovalService.get_health_attesting_records() + + self.assertEqual(len(records), 1) + + def test_get_not_really_csv_content(self): + self.load_example_data() + self.create_reference_document() + workflow = self.create_workflow('empty_workflow') + FileService.add_workflow_file(workflow_id=workflow.id, + name="anything.png", content_type="text", + binary_data=b'5678', irb_doc_code="AD_CoCAppr") + + ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") + records = ApprovalService.get_not_really_csv_content() + + self.assertEqual(len(records), 1) + def test_new_approval_sends_proper_emails(self): self.assertEqual(1, 1) diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 1b35434c..7288b5e4 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -47,7 +47,7 @@ class TestTasksApi(BaseTest): # The total number of tasks may change over time, as users move through gateways # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created... 
self.assertIsNotNone(workflow.total_tasks) - self.assertEquals(prev_completed_task_count + 1, workflow.completed_tasks) + self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks) # Assure a record exists in the Task Events task_events = session.query(TaskEventModel) \ .filter_by(workflow_id=workflow.id) \ @@ -56,25 +56,25 @@ class TestTasksApi(BaseTest): self.assertGreater(len(task_events), 0) event = task_events[0] self.assertIsNotNone(event.study_id) - self.assertEquals("dhf8r", event.user_uid) - self.assertEquals(workflow.id, event.workflow_id) - self.assertEquals(workflow.workflow_spec_id, event.workflow_spec_id) - self.assertEquals(workflow.spec_version, event.spec_version) - self.assertEquals(WorkflowService.TASK_ACTION_COMPLETE, event.action) - self.assertEquals(task_in.id, task_id) - self.assertEquals(task_in.name, event.task_name) - self.assertEquals(task_in.title, event.task_title) - self.assertEquals(task_in.type, event.task_type) - self.assertEquals("COMPLETED", event.task_state) + self.assertEqual("dhf8r", event.user_uid) + self.assertEqual(workflow.id, event.workflow_id) + self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id) + self.assertEqual(workflow.spec_version, event.spec_version) + self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action) + self.assertEqual(task_in.id, task_id) + self.assertEqual(task_in.name, event.task_name) + self.assertEqual(task_in.title, event.task_title) + self.assertEqual(task_in.type, event.task_type) + self.assertEqual("COMPLETED", event.task_state) # Not sure what vodoo is happening inside of marshmallow to get me in this state. 
if isinstance(task_in.multi_instance_type, MultiInstanceType): - self.assertEquals(task_in.multi_instance_type.value, event.mi_type) + self.assertEqual(task_in.multi_instance_type.value, event.mi_type) else: - self.assertEquals(task_in.multi_instance_type, event.mi_type) + self.assertEqual(task_in.multi_instance_type, event.mi_type) - self.assertEquals(task_in.multi_instance_count, event.mi_count) - self.assertEquals(task_in.multi_instance_index, event.mi_index) - self.assertEquals(task_in.process_name, event.process_name) + self.assertEqual(task_in.multi_instance_count, event.mi_count) + self.assertEqual(task_in.multi_instance_index, event.mi_index) + self.assertEqual(task_in.process_name, event.process_name) self.assertIsNotNone(event.date) # Assure that there is data in the form_data From bb825f80971f6034d1b802502e8d7e021c13f7b5 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 14:09:58 -0600 Subject: [PATCH 26/44] Dropping old parameter from endpoint --- crc/api.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crc/api.yml b/crc/api.yml index 71710881..b60dcc23 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -918,13 +918,6 @@ paths: schema: type: object /health_attesting: - parameters: - - name: all_approvals - in: query - required: false - description: If set to false, returns just approvals for today. 
- schema: - type: string get: operationId: crc.api.approval.get_health_attesting_csv summary: Returns a CSV file with health attesting records From 91fe5f0cdd5190b89b25e2df444b783af0cc76a4 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 14:22:56 -0600 Subject: [PATCH 27/44] Fixing broken test --- tests/test_approvals_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_approvals_service.py b/tests/test_approvals_service.py index d8f8d503..34871fec 100644 --- a/tests/test_approvals_service.py +++ b/tests/test_approvals_service.py @@ -81,7 +81,7 @@ class TestApprovalsService(BaseTest): ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r") records = ApprovalService.get_not_really_csv_content() - self.assertEqual(len(records), 1) + self.assertEqual(len(records), 2) def test_new_approval_sends_proper_emails(self): self.assertEqual(1, 1) From dd10e56d1a7a83cf507db863a0eeb5f326c216f8 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Mon, 22 Jun 2020 14:56:24 -0600 Subject: [PATCH 28/44] Adding forgotten variables to returned dict --- crc/services/approval_service.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index cd3a6549..eacac72c 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -144,7 +144,11 @@ class ApprovalService(object): review_complete = 'AllRequiredTraining' in training_val pi_uid = workflow.study.primary_investigator_id pi_details = LdapService.user_info(pi_uid) - details = {'Supervisor': pi_supervisor} + details = { + 'Supervisor': pi_supervisor, + 'PI_Details': pi_details, + 'Review': review_complete + } details['person_details'] = [] details['person_details'].append(pi_details) for person in personnel: @@ -206,13 +210,13 @@ class ApprovalService(object): for person in details['person_details']: record = { 
"study_id": approval.study_id, - "pi_uid": pi_details.uid, - "pi": pi_details.display_name, + "pi_uid": details['PI_Details'].uid, + "pi": details['PI_Details'].display_name, "name": person.display_name, "uid": person.uid, "email": person.email_address, "supervisor": details['Supervisor'] if person.uid == details['person_details'][0].uid else "", - "review_complete": review_complete, + "review_complete": details['Review'], } output.append(record) From a29b41048493f7c01ca8ddf8d1c176fea5972090 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 24 Jun 2020 21:47:15 -0600 Subject: [PATCH 29/44] Updating migrations --- .../versions/{839f6f255b81_.py => 5acd138e969c_.py} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename migrations/versions/{839f6f255b81_.py => 5acd138e969c_.py} (86%) diff --git a/migrations/versions/839f6f255b81_.py b/migrations/versions/5acd138e969c_.py similarity index 86% rename from migrations/versions/839f6f255b81_.py rename to migrations/versions/5acd138e969c_.py index e5400627..22b6b79a 100644 --- a/migrations/versions/839f6f255b81_.py +++ b/migrations/versions/5acd138e969c_.py @@ -1,8 +1,8 @@ """empty message -Revision ID: 839f6f255b81 -Revises: 17597692d0b0 -Create Date: 2020-06-17 16:22:05.076206 +Revision ID: 5acd138e969c +Revises: de30304ff5e6 +Create Date: 2020-06-24 21:36:15.128632 """ from alembic import op @@ -10,8 +10,8 @@ import sqlalchemy as sa # revision identifiers, used by Alembic. 
-revision = '839f6f255b81' -down_revision = '17597692d0b0' +revision = '5acd138e969c' +down_revision = 'de30304ff5e6' branch_labels = None depends_on = None From a0d877e02f14dc815c3c1f64cadd592000826c9e Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 24 Jun 2020 22:23:31 -0600 Subject: [PATCH 30/44] Feedback from PR addressed --- crc/services/approval_service.py | 3 --- crc/services/mails.py | 12 ++++++------ tests/data/email/email.bpmn | 13 +++++++------ tests/emails/test_email_script.py | 29 ++++++++++++++++------------- 4 files changed, 29 insertions(+), 28 deletions(-) diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index dbeed829..754bd48d 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -132,7 +132,6 @@ class ApprovalService(object): mail_result = send_ramp_up_approved_email( 'askresearch@virginia.edu', [pi_user_info.email_address], - approval_id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -145,7 +144,6 @@ class ApprovalService(object): mail_result = send_ramp_up_denied_email( 'askresearch@virginia.edu', [pi_user_info.email_address], - approval_id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -161,7 +159,6 @@ class ApprovalService(object): mail_result = send_ramp_up_denied_email_to_approver( 'askresearch@virginia.edu', approver_email, - approval_id, f'{pi_user_info.display_name} - ({pi_user_info.uid})', f'{approver_info.display_name} - ({approver_info.uid})' ) diff --git a/crc/services/mails.py b/crc/services/mails.py index b9b18bd1..c4942a7d 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -41,7 +41,7 @@ def send_mail(subject, sender, recipients, content, content_html, study_id=None) except Exception as e: return str(e) -def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, approver_2=None): +def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None): 
from crc import env subject = 'Research Ramp-up Plan Submitted' @@ -54,7 +54,7 @@ def send_ramp_up_submission_email(sender, recipients, approval_id, approver_1, a result = send_mail(subject, sender, recipients, content, content_html) return result -def send_ramp_up_approval_request_email(sender, recipients, approval_id, primary_investigator): +def send_ramp_up_approval_request_email(sender, recipients, primary_investigator): from crc import env subject = 'Research Ramp-up Plan Approval Request' @@ -67,7 +67,7 @@ def send_ramp_up_approval_request_email(sender, recipients, approval_id, primary result = send_mail(subject, sender, recipients, content, content_html) return result -def send_ramp_up_approval_request_first_review_email(sender, recipients, approval_id, primary_investigator): +def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator): from crc import env subject = 'Research Ramp-up Plan Approval Request' @@ -80,7 +80,7 @@ def send_ramp_up_approval_request_first_review_email(sender, recipients, approva result = send_mail(subject, sender, recipients, content, content_html) return result -def send_ramp_up_approved_email(sender, recipients, approval_id, approver_1, approver_2=None): +def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None): from crc import env subject = 'Research Ramp-up Plan Approved' @@ -93,7 +93,7 @@ def send_ramp_up_approved_email(sender, recipients, approval_id, approver_1, app result = send_mail(subject, sender, recipients, content, content_html) return result -def send_ramp_up_denied_email(sender, recipients, approval_id, approver): +def send_ramp_up_denied_email(sender, recipients, approver): from crc import env subject = 'Research Ramp-up Plan Denied' @@ -106,7 +106,7 @@ def send_ramp_up_denied_email(sender, recipients, approval_id, approver): result = send_mail(subject, sender, recipients, content, content_html) return result -def send_ramp_up_denied_email_to_approver(sender, 
recipients, approval_id, primary_investigator, approver_2): +def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2): from crc import env subject = 'Research Ramp-up Plan Denied' diff --git a/tests/data/email/email.bpmn b/tests/data/email/email.bpmn index 54ec61a8..1b8d5252 100644 --- a/tests/data/email/email.bpmn +++ b/tests/data/email/email.bpmn @@ -13,6 +13,7 @@ --- +New request submitted by {{ PIComputingID }} Email content to be delivered to {{ ApprvlApprvr1 }} @@ -37,17 +38,17 @@ Email content to be delivered to {{ ApprvlApprvr1 }} - - - + + + - - - + + + diff --git a/tests/emails/test_email_script.py b/tests/emails/test_email_script.py index 79d5c6ee..12a00fac 100644 --- a/tests/emails/test_email_script.py +++ b/tests/emails/test_email_script.py @@ -1,36 +1,39 @@ from tests.base_test import BaseTest +from crc.models.email import EmailModel from crc.services.file_service import FileService from crc.scripts.email import Email from crc.services.workflow_processor import WorkflowProcessor from crc.api.common import ApiError -from crc import db +from crc import db, mail class TestEmailScript(BaseTest): def test_do_task(self): - # self.load_example_data() - # self.create_reference_document() workflow = self.create_workflow('email') - # processor = WorkflowProcessor(workflow) - # task = processor.next_task() - # TODO: Replace with proper `complete_form` method from test_tasks - # processor.complete_task(task) - # task = processor.next_task() task_data = { 'PIComputingID': 'dhf8r', 'ApprvlApprvr1': 'lb3dp' } task = self.get_workflow_api(workflow).next_task - self.complete_form(workflow, task, task_data) + with mail.record_messages() as outbox: + self.complete_form(workflow, task, task_data) - # script = Email() - # script.do_task(task, 'Subject', 'PIComputingID', 'ApprvlApprvr1') + self.assertEqual(len(outbox), 1) + self.assertEqual(outbox[0].subject, 'Camunda Email Subject') - # TODO: Add proper assertions - 
self.assertTrue(True) + # PI is present + self.assertIn(task_data['PIComputingID'], outbox[0].body) + self.assertIn(task_data['PIComputingID'], outbox[0].html) + + # Approver is present + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].body) + self.assertIn(task_data['ApprvlApprvr1'], outbox[0].html) + + db_emails = EmailModel.query.count() + self.assertEqual(db_emails, 1) From 5d1ae402b68bd5332f07da4a0bdea5401525bf82 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Wed, 24 Jun 2020 22:43:44 -0600 Subject: [PATCH 31/44] Slight refactor on data passed to template rendering --- crc/scripts/email.py | 14 ++++++-------- crc/services/approval_service.py | 2 -- tests/emails/test_mails.py | 29 ++++++++--------------------- 3 files changed, 14 insertions(+), 31 deletions(-) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index c94a98a3..d3a64725 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -25,13 +25,13 @@ Email Subject ApprvlApprvr1 PIComputingID def do_task_validate_only(self, task, *args, **kwargs): self.get_subject(task, args) self.get_users_info(task, args) - self.get_content(task, {}) + self.get_content(task) def do_task(self, task, *args, **kwargs): args = [arg for arg in args if type(arg) == str] subject = self.get_subject(task, args) - recipients, display_keys = self.get_users_info(task, args) - content, content_html = self.get_content(task, display_keys) + recipients = self.get_users_info(task, args) + content, content_html = self.get_content(task) if recipients: send_mail( subject=subject, @@ -48,7 +48,6 @@ Email Subject ApprvlApprvr1 PIComputingID "name of the variable in the task data that contains user" "id to process. 
Multiple arguments are accepted.") emails = [] - display_keys = {} for arg in args: try: uid = task.workflow.script_engine.evaluate_expression(task, arg) @@ -59,7 +58,6 @@ Email Subject ApprvlApprvr1 PIComputingID user_info = LdapService.user_info(uid) email = user_info.email_address emails.append(user_info.email_address) - display_keys[arg] = user_info.proper_name() if not isinstance(email, str): raise ApiError(code="invalid_argument", message="The Email script requires at least 1 UID argument. The " @@ -67,7 +65,7 @@ Email Subject ApprvlApprvr1 PIComputingID " user ids to process. This must point to an array or a string, but " "it currently points to a %s " % emails.__class__.__name__) - return emails, display_keys + return emails def get_subject(self, task, args): if len(args) < 1: @@ -97,9 +95,9 @@ Email Subject ApprvlApprvr1 PIComputingID return subject - def get_content(self, task, display_keys): + def get_content(self, task): content = task.task_spec.documentation template = Template(content) - rendered = template.render(display_keys) + rendered = template.render(task.data) rendered_markdown = markdown.markdown(rendered).replace('\n', '
') return rendered, rendered_markdown diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py index 754bd48d..1f6f56b3 100644 --- a/crc/services/approval_service.py +++ b/crc/services/approval_service.py @@ -231,7 +231,6 @@ class ApprovalService(object): mail_result = send_ramp_up_submission_email( 'askresearch@virginia.edu', [pi_user_info.email_address], - model.id, f'{approver_info.display_name} - ({approver_info.uid})' ) if mail_result: @@ -242,7 +241,6 @@ class ApprovalService(object): mail_result = send_ramp_up_approval_request_first_review_email( 'askresearch@virginia.edu', approver_email, - model.id, f'{pi_user_info.display_name} - ({pi_user_info.uid})' ) if mail_result: diff --git a/tests/emails/test_mails.py b/tests/emails/test_mails.py index 5408e517..0710e02e 100644 --- a/tests/emails/test_mails.py +++ b/tests/emails/test_mails.py @@ -22,16 +22,6 @@ class TestMails(BaseTest): self.study = self.create_study() self.workflow = self.create_workflow('random_fact') - self.approval = ApprovalModel( - study=self.study, - workflow=self.workflow, - approver_uid='lb3dp', - status=ApprovalStatus.PENDING.value, - version=1 - ) - session.add(self.approval) - session.commit() - self.sender = 'sender@sartography.com' self.recipients = ['recipient@sartography.com'] self.primary_investigator = 'Dr. 
Bartlett' @@ -41,14 +31,13 @@ class TestMails(BaseTest): def test_send_ramp_up_submission_email(self): with mail.record_messages() as outbox: - send_ramp_up_submission_email(self.sender, self.recipients, self.approval.id, self.approver_1) + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1) self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) - send_ramp_up_submission_email(self.sender, self.recipients, self.approval.id, - self.approver_1, self.approver_2) + send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2) self.assertEqual(len(outbox), 2) self.assertIn(self.approver_1, outbox[1].body) self.assertIn(self.approver_1, outbox[1].html) @@ -60,8 +49,7 @@ class TestMails(BaseTest): def test_send_ramp_up_approval_request_email(self): with mail.record_messages() as outbox: - send_ramp_up_approval_request_email(self.sender, self.recipients, self.approval.id, - self.primary_investigator) + send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator) self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request') @@ -74,7 +62,7 @@ class TestMails(BaseTest): def test_send_ramp_up_approval_request_first_review_email(self): with mail.record_messages() as outbox: send_ramp_up_approval_request_first_review_email( - self.sender, self.recipients, self.approval.id, self.primary_investigator + self.sender, self.recipients, self.primary_investigator ) self.assertEqual(len(outbox), 1) @@ -87,14 +75,13 @@ class TestMails(BaseTest): def test_send_ramp_up_approved_email(self): with mail.record_messages() as outbox: - send_ramp_up_approved_email(self.sender, self.recipients, self.approval.id, self.approver_1) + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1) 
self.assertEqual(len(outbox), 1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) - send_ramp_up_approved_email(self.sender, self.recipients, self.approval.id, - self.approver_1, self.approver_2) + send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2) self.assertEqual(len(outbox), 2) self.assertIn(self.approver_1, outbox[1].body) self.assertIn(self.approver_1, outbox[1].html) @@ -106,7 +93,7 @@ class TestMails(BaseTest): def test_send_ramp_up_denied_email(self): with mail.record_messages() as outbox: - send_ramp_up_denied_email(self.sender, self.recipients, self.approval.id, self.approver_1) + send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') self.assertIn(self.approver_1, outbox[0].body) self.assertIn(self.approver_1, outbox[0].html) @@ -117,7 +104,7 @@ class TestMails(BaseTest): def test_send_send_ramp_up_denied_email_to_approver(self): with mail.record_messages() as outbox: send_ramp_up_denied_email_to_approver( - self.sender, self.recipients, self.approval.id, self.primary_investigator, self.approver_2 + self.sender, self.recipients, self.primary_investigator, self.approver_2 ) self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied') From 23941d73ad49a0e83eb0d59f11dbb1d50de44e85 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 25 Jun 2020 11:02:57 -0400 Subject: [PATCH 32/44] Fixes variable names. Updates Spiff to STG-26 branch. Updates package versions. 
--- Pipfile | 2 +- Pipfile.lock | 213 ++++++++++++++++++++++++++++++++++---------- crc/api/workflow.py | 6 +- src/spiffworkflow | 1 + 4 files changed, 173 insertions(+), 49 deletions(-) create mode 160000 src/spiffworkflow diff --git a/Pipfile b/Pipfile index e78257d8..96f8a748 100644 --- a/Pipfile +++ b/Pipfile @@ -26,7 +26,7 @@ pyjwt = "*" requests = "*" xlsxwriter = "*" webtest = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"} +spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "STG-26"} alembic = "*" coverage = "*" sphinx = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 8cc805d0..baea6649 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "78a8da35dec2fb58b02a58afc8ffabe8b1c22bec8f054295e8b1ba3b4a6f4ec0" + "sha256": "8d6d99bcacef0b12f29f3c402f7980799812f645c576767b5477445a1fc03062" }, "pipfile-spec": 6, "requires": { @@ -35,6 +35,7 @@ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -49,6 +50,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -56,6 +58,7 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -79,6 +82,7 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", 
"sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -104,17 +108,18 @@ }, "celery": { "hashes": [ - "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647", - "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b" + "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", + "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "version": "==4.4.5" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.4.6" }, "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" ], - "version": "==2020.4.5.2" + "version": "==2020.6.20" }, "cffi": { "hashes": [ @@ -161,6 +166,7 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -182,6 +188,7 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], + "markers": "python_version >= '3.6'", "version": "==5.0.0" }, "connexion": { @@ -237,6 +244,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -319,12 +327,14 @@ 
"sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5", "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.3" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", "version": "==0.18.2" }, "gunicorn": { @@ -347,6 +357,7 @@ "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.9" }, "imagesize": { @@ -354,6 +365,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -361,7 +373,7 @@ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" ], - "markers": "python_version < '3.8'", + "markers": "python_version < '3.8' and python_version < '3.8'", "version": "==1.6.1" }, "inflection": { @@ -369,6 +381,7 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], + "markers": "python_version >= '3.5'", "version": "==0.5.0" }, "itsdangerous": { @@ -376,6 +389,7 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ -390,6 +404,7 @@ 
"sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -401,14 +416,19 @@ }, "kombu": { "hashes": [ - "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a", - "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3" + "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", + "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "version": "==4.6.10" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.6.11" }, "ldap3": { "hashes": [ "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", @@ -444,6 +464,7 @@ "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9", "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.5.1" }, "mako": { @@ -451,6 +472,7 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markupsafe": { @@ -489,6 +511,7 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", 
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "marshmallow": { @@ -517,29 +540,35 @@ }, "numpy": { "hashes": [ - "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233", - "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b", - "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7", - "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f", - "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5", - "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb", - "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583", - "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1", - "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a", - "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271", - "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824", - "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3", - "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc", - "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161", - "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f", - "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f", - "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf", - "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b", - "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0", - "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675", - "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8" + "sha256:13af0184177469192d80db9bd02619f6fa8b922f9f327e077d6f2a6acb1ce1c0", + 
"sha256:26a45798ca2a4e168d00de75d4a524abf5907949231512f372b217ede3429e98", + "sha256:26f509450db547e4dfa3ec739419b31edad646d21fb8d0ed0734188b35ff6b27", + "sha256:30a59fb41bb6b8c465ab50d60a1b298d1cd7b85274e71f38af5a75d6c475d2d2", + "sha256:33c623ef9ca5e19e05991f127c1be5aeb1ab5cdf30cb1c5cf3960752e58b599b", + "sha256:356f96c9fbec59974a592452ab6a036cd6f180822a60b529a975c9467fcd5f23", + "sha256:3c40c827d36c6d1c3cf413694d7dc843d50997ebffbc7c87d888a203ed6403a7", + "sha256:4d054f013a1983551254e2379385e359884e5af105e3efe00418977d02f634a7", + "sha256:63d971bb211ad3ca37b2adecdd5365f40f3b741a455beecba70fd0dde8b2a4cb", + "sha256:658624a11f6e1c252b2cd170d94bf28c8f9410acab9f2fd4369e11e1cd4e1aaf", + "sha256:76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598", + "sha256:7b57f26e5e6ee2f14f960db46bd58ffdca25ca06dd997729b1b179fddd35f5a3", + "sha256:7b852817800eb02e109ae4a9cef2beda8dd50d98b76b6cfb7b5c0099d27b52d4", + "sha256:8cde829f14bd38f6da7b2954be0f2837043e8b8d7a9110ec5e318ae6bf706610", + "sha256:a2e3a39f43f0ce95204beb8fe0831199542ccab1e0c6e486a0b4947256215632", + "sha256:a86c962e211f37edd61d6e11bb4df7eddc4a519a38a856e20a6498c319efa6b0", + "sha256:a8705c5073fe3fcc297fb8e0b31aa794e05af6a329e81b7ca4ffecab7f2b95ef", + "sha256:b6aaeadf1e4866ca0fdf7bb4eed25e521ae21a7947c59f78154b24fc7abbe1dd", + "sha256:be62aeff8f2f054eff7725f502f6228298891fd648dc2630e03e44bf63e8cee0", + "sha256:c2edbb783c841e36ca0fa159f0ae97a88ce8137fb3a6cd82eae77349ba4b607b", + "sha256:cbe326f6d364375a8e5a8ccb7e9cd73f4b2f6dc3b2ed205633a0db8243e2a96a", + "sha256:d34fbb98ad0d6b563b95de852a284074514331e6b9da0a9fc894fb1cdae7a79e", + "sha256:d97a86937cf9970453c3b62abb55a6475f173347b4cde7f8dcdb48c8e1b9952d", + "sha256:dd53d7c4a69e766e4900f29db5872f5824a06827d594427cf1a4aa542818b796", + "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a", + "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596" ], - "version": "==1.18.5" + "markers": "python_version >= '3.6'", + 
"version": "==1.19.0" }, "openapi-spec-validator": { "hashes": [ @@ -551,16 +580,17 @@ }, "openpyxl": { "hashes": [ - "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64" + "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f" ], "index": "pypi", - "version": "==3.0.3" + "version": "==3.0.4" }, "packaging": { "hashes": [ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -623,8 +653,19 @@ }, "pyasn1": { "hashes": [ + "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3" ], "version": "==0.4.8" }, @@ -633,6 +674,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' 
and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -640,6 +682,7 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], + "markers": "python_version >= '3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -655,6 +698,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", "version": "==2.4.7" }, "pyrsistent": { @@ -681,10 +725,67 @@ "hashes": [ "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", + "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522" ], "version": "==1.0.4" }, + "python-levenshtein-wheels": { + "hashes": [ + "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d", + "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92", + "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc", + "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb", + "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b", + "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc", + "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1", + "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40", + "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31", + "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d", + 
"sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10", + "sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103", + "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4", + "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6", + "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f", + "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0", + "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399", + "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0", + "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f", + "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5", + "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2", + "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c", + "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7", + "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3", + "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55", + "sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e", + "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362", + "sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662", + "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8", + "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1", + "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0", + "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884", + "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021", + "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2", + "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa", + "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f", 
+ "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab", + "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263", + "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610", + "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa", + "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d", + "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4", + "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92", + "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a", + "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26", + "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99", + "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e", + "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8", + "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244", + "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c" + ], + "version": "==0.13.1" + }, "pytz": { "hashes": [ "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", @@ -740,6 +841,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==1.15.0" }, "snowballstemmer": { @@ -754,6 +856,7 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], + "markers": "python_version >= '3.5'", "version": "==2.0.1" }, "sphinx": { @@ -769,6 +872,7 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, 
"sphinxcontrib-devhelp": { @@ -776,6 +880,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -783,6 +888,7 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -790,6 +896,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -797,6 +904,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -804,12 +912,13 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "5450dc0463a95811d386b7de063d950bf6179d2b" + "ref": "49163a983b7d8b8e564079c79277b21e358a26ac" }, "sqlalchemy": { "hashes": [ @@ -842,6 +951,7 @@ "sha256:f502ef245c492b391e0e23e94cba030ab91722dcc56963c85bfd7f3441ea2bbe", "sha256:fe01bac7226499aedf472c62fa3b85b2c619365f3f14dd222ffe4f3aa91e5f98" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.17" }, "swagger-ui-bundle": { @@ -858,6 +968,7 @@ "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", 
"sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.9" }, "vine": { @@ -865,6 +976,7 @@ "sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -872,6 +984,7 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -879,6 +992,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==1.8.6" }, "webtest": { @@ -925,6 +1039,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -934,6 +1049,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -978,7 +1094,7 @@ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" ], - "markers": "python_version < '3.8'", + "markers": "python_version < '3.8' and python_version < '3.8'", "version": "==1.6.1" }, "more-itertools": { @@ -986,6 +1102,7 @@ 
"sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], + "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -993,6 +1110,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -1008,20 +1126,23 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { "hashes": [ - "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44", - "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b" + "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", + "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], - "version": "==1.8.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.9.0" }, "pyparsing": { "hashes": [ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", "version": "==2.4.7" }, "pytest": { @@ -1037,20 +1158,22 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==1.15.0" }, "wcwidth": { "hashes": [ - "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f", - 
"sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f" + "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", + "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" ], - "version": "==0.2.4" + "version": "==0.2.5" }, "zipp": { "hashes": [ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } } diff --git a/crc/api/workflow.py b/crc/api/workflow.py index e5ea738b..2e35dad2 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -146,10 +146,10 @@ def update_task(workflow_id, task_id, body, terminate_loop=None): raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. " "Consider calling a token reset to make this task Ready.") if terminate_loop: - task.terminate_loop() + spiff_task.terminate_loop() - task.update_data(body) - processor.complete_task(task) + spiff_task.update_data(body) + processor.complete_task(spiff_task) processor.do_engine_steps() processor.save() diff --git a/src/spiffworkflow b/src/spiffworkflow new file mode 160000 index 00000000..49163a98 --- /dev/null +++ b/src/spiffworkflow @@ -0,0 +1 @@ +Subproject commit 49163a983b7d8b8e564079c79277b21e358a26ac From 0ca9b9624e003040dba31d245121168b090ed503 Mon Sep 17 00:00:00 2001 From: Dan Funk Date: Thu, 25 Jun 2020 12:44:34 -0400 Subject: [PATCH 33/44] Switching to STG-26 for the Spiff Library, and adding a test that demonstrates the failure Aaron ran into yesterday. 
--- Pipfile | 2 +- Pipfile.lock | 194 ++++++++++++------ .../test_workflow_processor_multi_instance.py | 20 +- 3 files changed, 145 insertions(+), 71 deletions(-) diff --git a/Pipfile b/Pipfile index 0079962c..d6da8498 100644 --- a/Pipfile +++ b/Pipfile @@ -25,7 +25,7 @@ pyjwt = "*" requests = "*" xlsxwriter = "*" webtest = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "deploy"} +spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "STG-26"} alembic = "*" coverage = "*" sphinx = "*" diff --git a/Pipfile.lock b/Pipfile.lock index fb38d03c..52268cbc 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee" + "sha256": "0453e28d7e408f683f7db19925416bdaf3e14c520977d09f7b07b2a9cbce5c03" }, "pipfile-spec": 6, "requires": { @@ -104,17 +104,17 @@ }, "celery": { "hashes": [ - "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647", - "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b" + "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", + "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], - "version": "==4.4.5" + "version": "==4.4.6" }, "certifi": { "hashes": [ - "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1", - "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc" + "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", + "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" ], - "version": "==2020.4.5.2" + "version": "==2020.6.20" }, "cffi": { "hashes": [ @@ -394,10 +394,10 @@ }, "kombu": { "hashes": [ - "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a", - "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3" + 
"sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", + "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], - "version": "==4.6.10" + "version": "==4.6.11" }, "ldap3": { "hashes": [ @@ -510,29 +510,34 @@ }, "numpy": { "hashes": [ - "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233", - "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b", - "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7", - "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f", - "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5", - "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb", - "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583", - "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1", - "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a", - "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271", - "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824", - "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3", - "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc", - "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161", - "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f", - "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f", - "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf", - "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b", - "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0", - "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675", - "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8" + "sha256:13af0184177469192d80db9bd02619f6fa8b922f9f327e077d6f2a6acb1ce1c0", + 
"sha256:26a45798ca2a4e168d00de75d4a524abf5907949231512f372b217ede3429e98", + "sha256:26f509450db547e4dfa3ec739419b31edad646d21fb8d0ed0734188b35ff6b27", + "sha256:30a59fb41bb6b8c465ab50d60a1b298d1cd7b85274e71f38af5a75d6c475d2d2", + "sha256:33c623ef9ca5e19e05991f127c1be5aeb1ab5cdf30cb1c5cf3960752e58b599b", + "sha256:356f96c9fbec59974a592452ab6a036cd6f180822a60b529a975c9467fcd5f23", + "sha256:3c40c827d36c6d1c3cf413694d7dc843d50997ebffbc7c87d888a203ed6403a7", + "sha256:4d054f013a1983551254e2379385e359884e5af105e3efe00418977d02f634a7", + "sha256:63d971bb211ad3ca37b2adecdd5365f40f3b741a455beecba70fd0dde8b2a4cb", + "sha256:658624a11f6e1c252b2cd170d94bf28c8f9410acab9f2fd4369e11e1cd4e1aaf", + "sha256:76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598", + "sha256:7b57f26e5e6ee2f14f960db46bd58ffdca25ca06dd997729b1b179fddd35f5a3", + "sha256:7b852817800eb02e109ae4a9cef2beda8dd50d98b76b6cfb7b5c0099d27b52d4", + "sha256:8cde829f14bd38f6da7b2954be0f2837043e8b8d7a9110ec5e318ae6bf706610", + "sha256:a2e3a39f43f0ce95204beb8fe0831199542ccab1e0c6e486a0b4947256215632", + "sha256:a86c962e211f37edd61d6e11bb4df7eddc4a519a38a856e20a6498c319efa6b0", + "sha256:a8705c5073fe3fcc297fb8e0b31aa794e05af6a329e81b7ca4ffecab7f2b95ef", + "sha256:b6aaeadf1e4866ca0fdf7bb4eed25e521ae21a7947c59f78154b24fc7abbe1dd", + "sha256:be62aeff8f2f054eff7725f502f6228298891fd648dc2630e03e44bf63e8cee0", + "sha256:c2edbb783c841e36ca0fa159f0ae97a88ce8137fb3a6cd82eae77349ba4b607b", + "sha256:cbe326f6d364375a8e5a8ccb7e9cd73f4b2f6dc3b2ed205633a0db8243e2a96a", + "sha256:d34fbb98ad0d6b563b95de852a284074514331e6b9da0a9fc894fb1cdae7a79e", + "sha256:d97a86937cf9970453c3b62abb55a6475f173347b4cde7f8dcdb48c8e1b9952d", + "sha256:dd53d7c4a69e766e4900f29db5872f5824a06827d594427cf1a4aa542818b796", + "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a", + "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596" ], - "version": "==1.18.5" + "version": "==1.19.0" }, 
"openapi-spec-validator": { "hashes": [ @@ -544,10 +549,10 @@ }, "openpyxl": { "hashes": [ - "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64" + "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f" ], "index": "pypi", - "version": "==3.0.3" + "version": "==3.0.4" }, "packaging": { "hashes": [ @@ -558,25 +563,25 @@ }, "pandas": { "hashes": [ - "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46", - "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5", - "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa", - "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc", - "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678", - "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc", - "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31", - "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8", - "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6", - "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa", - "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4", - "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874", - "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd", - "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4", - "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126", - "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648" + "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096", + "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0", + "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453", + "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc", + 
"sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7", + "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b", + "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8", + "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8", + "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef", + "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d", + "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705", + "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9", + "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91", + "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3", + "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107", + "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e" ], "index": "pypi", - "version": "==1.0.4" + "version": "==1.0.5" }, "psycopg2-binary": { "hashes": [ @@ -678,6 +683,61 @@ ], "version": "==1.0.4" }, + "python-levenshtein-wheels": { + "hashes": [ + "sha256:0065529c8aec4c044468286177761857d36981ba6f7fdb62d7d5f7ffd143de5d", + "sha256:016924a59d689f9f47d5f7b26b70f31e309255e8dd72602c91e93ceb752b9f92", + "sha256:089d046ea7727e583233c71fef1046663ed67b96967063ae8ddc9f551e86a4fc", + "sha256:09f9faaaa8f65726f91b44c11d3d622fee0f1780cfbe2bf3f410dd0e7345adcb", + "sha256:0aea217eab612acd45dcc3424a2e8dbd977cc309f80359d0c01971f1e65b9a9b", + "sha256:0beb91ad80b1573829066e5af36b80190c367be6e0a65292f073353b0388c7fc", + "sha256:0ec1bc73f5ed3a1a06e02d13bb3cd22a0b32ebf65a9667bbccba106bfa0546f1", + "sha256:0fa2ca69ef803bc6037a8c919e2e8a17b55e94c9c9ffcb4c21befbb15a1d0f40", + "sha256:11c77d0d74ab7f46f89a58ae9c2d67349ebc1ae3e18636627f9939d810167c31", + "sha256:19a68716a322486ddffc8bf7e5cf44a82f7700b05a10658e6e7fc5c7ae92b13d", + "sha256:19a95a01d28d63b042438ba860c4ace90362906a038fa77962ba33325d377d10", + 
"sha256:1a61f3a51e00a3608659bbaabb3f27af37c9dbe84d843369061a3e45cf0d5103", + "sha256:1c50aebebab403fb2dd415d70355446ac364dece502b0e2737a1a085bb9a4aa4", + "sha256:1d2390d04f9b673391e5ce1a0b054d0565f2e00ea5d1187a044221dc5c02c3e6", + "sha256:1e51cdc123625a28709662d24ea0cb4cf6f991845e6054d9f803c78da1d6b08f", + "sha256:1eca6dc97dfcf588f53281fe48a6d5c423d4e14bdab658a1aa6efd447acc64e0", + "sha256:1f0056d3216b0fe38f25c6f8ebc84bd9f6d34c55a7a9414341b674fb98961399", + "sha256:228b59460e9a786e498bdfc8011838b89c6054650b115c86c9c819a055a793b0", + "sha256:23020f9ff2cb3457a926dcc470b84f9bd5b7646bd8b8e06b915bdbbc905cb23f", + "sha256:2b7b7cf0f43b677f818aa9a610464abf06106c19a51b9ac35bd051a439f337a5", + "sha256:3b591c9a7e91480f0d7bf2041d325f578b9b9c2f2d593304377cb28862e7f9a2", + "sha256:3ca9c70411ab587d071c1d8fc8b69d0558be8e4aa920f2595e2cb5eb229ccc4c", + "sha256:3e6bcca97a7ff4e720352b57ddc26380c0583dcdd4b791acef7b574ad58468a7", + "sha256:3ed88f9e638da57647149115c34e0e120cae6f3d35eee7d77e22cc9c1d8eced3", + "sha256:445bf7941cb1fa05d6c2a4a502ad4868a5cacd92e8eb77b2bd008cdda9d37c55", + "sha256:4ba5e147d76d7ee884fd6eae461438b080bcc9f2c6eb9b576811e1bcfe8f808e", + "sha256:4bb128b719c30f3b9feacfe71a338ae07d39dbffc077139416f3535c89f12362", + "sha256:4e951907b9b5d40c9f1b611c8bdfe46ff8cf8371877cebbd589bf5840feab662", + "sha256:53c0c9964390368fd64460b690f168221c669766b193b7e80ae3950c2b9551f8", + "sha256:57c4edef81611098d37176278f2b6a3712bf864eed313496d7d80504805896d1", + "sha256:5b36e406937c6463d1c1ef3dd82d3f771d9d845f21351e8a026fe4dd398ea8d0", + "sha256:7d0821dab24b430dfdc2cba70a06e6d7a45cb839d0dd0e6db97bb99e23c3d884", + "sha256:7f7283dfe50eac8a8cd9b777de9eb50b1edf7dbb46fc7cc9d9b0050d0c135021", + "sha256:7f9759095b3fc825464a72b1cae95125e610eba3c70f91557754c32a0bf32ea2", + "sha256:8005a4df455569c0d490ddfd9e5a163f21293477fd0ed4ea9effdd723ddd8eaa", + "sha256:86e865f29ad3dc3bb4733e5247220173d90f05ac8d2ad18e9689a220f90de55f", + "sha256:98727050ba70eb8d318ec8a8203531c20119347fc8f281102b097326812742ab", 
+ "sha256:ac9cdf044dcb9481c7da782db01b50c1f0e7cdd78c8507b963b6d072829c0263", + "sha256:acfad8ffed96891fe7c583d92717cd8ec0c03b59a954c389fd4e26a5cdeac610", + "sha256:ad15f25abff8220e556d64e2a27c646241b08f00faf1bc02313655696cd3edfa", + "sha256:b679f951f842c38665aa54bea4d7403099131f71fac6d8584f893a731fe1266d", + "sha256:b8c183dc4aa4e95dc5c373eedc3d205c176805835611fcfec5d9050736c695c4", + "sha256:c097a6829967c76526a037ed34500a028f78f0d765c8e3dbd1a7717afd09fb92", + "sha256:c2c76f483d05eddec60a5cd89e92385adef565a4f243b1d9a6abe2f6bd2a7c0a", + "sha256:c388baa3c04272a7c585d3da24030c142353eb26eb531dd2681502e6be7d7a26", + "sha256:cb0f2a711db665b5bf8697b5af3b9884bb1139385c5c12c2e472e4bbee62da99", + "sha256:cbac984d7b36e75b440d1c8ff9d3425d778364a0cbc23f8943383d4decd35d5e", + "sha256:f55adf069be2d655f8d668594fe1be1b84d9dc8106d380a9ada06f34941c33c8", + "sha256:f9084ed3b8997ad4353d124b903f2860a9695b9e080663276d9e58c32e293244", + "sha256:fb7df3504222fcb1fa593f76623abbb54d6019eec15aac5d05cd07ad90ac016c" + ], + "version": "==0.13.1" + }, "pytz": { "hashes": [ "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", @@ -711,11 +771,11 @@ }, "requests": { "hashes": [ - "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee", - "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6" + "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b", + "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898" ], "index": "pypi", - "version": "==2.23.0" + "version": "==2.24.0" }, "sentry-sdk": { "extras": [ @@ -751,11 +811,11 @@ }, "sphinx": { "hashes": [ - "sha256:1c445320a3310baa5ccb8d957267ef4a0fc930dc1234db5098b3d7af14fbb242", - "sha256:7d3d5087e39ab5a031b75588e9859f011de70e213cd0080ccbc28079fb0786d1" + "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258", + "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5" ], "index": "pypi", - "version": "==3.1.0" + "version": 
"==3.1.1" }, "sphinxcontrib-applehelp": { "hashes": [ @@ -802,7 +862,7 @@ "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0" + "ref": "49163a983b7d8b8e564079c79277b21e358a26ac" }, "sqlalchemy": { "hashes": [ @@ -932,10 +992,10 @@ }, "more-itertools": { "hashes": [ - "sha256:558bb897a2232f5e4f8e2399089e35aecb746e1f9191b6584a151647e89267be", - "sha256:7818f596b1e87be009031c7653d01acc46ed422e6656b394b0f765ce66ed4982" + "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", + "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], - "version": "==8.3.0" + "version": "==8.4.0" }, "packaging": { "hashes": [ @@ -961,10 +1021,10 @@ }, "py": { "hashes": [ - "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa", - "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0" + "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44", + "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b" ], - "version": "==1.8.1" + "version": "==1.8.2" }, "pyparsing": { "hashes": [ @@ -990,10 +1050,10 @@ }, "wcwidth": { "hashes": [ - "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f", - "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f" + "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784", + "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83" ], - "version": "==0.2.4" + "version": "==0.2.5" }, "zipp": { "hashes": [ diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/test_workflow_processor_multi_instance.py index a4c76dd0..a54b7eab 100644 --- a/tests/test_workflow_processor_multi_instance.py +++ b/tests/test_workflow_processor_multi_instance.py @@ -1,13 +1,13 @@ from unittest.mock import patch +from tests.base_test import BaseTest -from crc import session +from crc import session, 
db from crc.models.api_models import MultiInstanceType from crc.models.study import StudyModel -from crc.models.workflow import WorkflowStatus +from crc.models.workflow import WorkflowStatus, WorkflowModel from crc.services.study_service import StudyService from crc.services.workflow_processor import WorkflowProcessor from crc.services.workflow_service import WorkflowService -from tests.base_test import BaseTest class TestWorkflowProcessorMultiInstance(BaseTest): @@ -97,6 +97,12 @@ class TestWorkflowProcessorMultiInstance(BaseTest): self.assertEqual(WorkflowStatus.complete, processor.get_status()) + def refresh_processor(self, processor): + """Saves the processor, and returns a new one read in from the database""" + processor.save() + processor = WorkflowProcessor(processor.workflow_model) + return processor + @patch('crc.services.study_service.StudyService.get_investigators') def test_create_and_complete_workflow_parallel(self, mock_study_service): """Unlike the test above, the parallel task allows us to complete the items in any order.""" @@ -108,11 +114,15 @@ class TestWorkflowProcessorMultiInstance(BaseTest): workflow_spec_model = self.load_test_spec("multi_instance_parallel") study = session.query(StudyModel).first() processor = self.get_processor(study, workflow_spec_model) + processor = self.refresh_processor(processor) processor.bpmn_workflow.do_engine_steps() # In the Parallel instance, there should be three tasks, all of them in the ready state. next_user_tasks = processor.next_user_tasks() self.assertEqual(3, len(next_user_tasks)) + # There should be six tasks in the navigation: start event, the script task, end event, and three tasks + # for the three executions of hte multi-instance. + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # We can complete the tasks out of order. 
task = next_user_tasks[2] @@ -125,6 +135,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[0] api_task = WorkflowService.spiff_task_to_api_task(task) @@ -132,6 +143,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) task = next_user_tasks[1] api_task = WorkflowService.spiff_task_to_api_task(task) @@ -139,6 +151,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) # Completing the tasks out of order, still provides the correct information. 
expected = self.mock_investigator_response @@ -149,3 +162,4 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, processor.get_status()) + self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) From 237abcdfed3d1fdb370df697fc791d49d5ed5afb Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 25 Jun 2020 13:11:42 -0400 Subject: [PATCH 34/44] Updates lock file and removes unnecessary src directory --- Pipfile.lock | 16 ++++++++-------- src/spiffworkflow | 1 - 2 files changed, 8 insertions(+), 9 deletions(-) delete mode 160000 src/spiffworkflow diff --git a/Pipfile.lock b/Pipfile.lock index baea6649..85a0fdbf 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -334,7 +334,7 @@ "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -373,7 +373,7 @@ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" ], - "markers": "python_version < '3.8' and python_version < '3.8'", + "markers": "python_version < '3.8'", "version": "==1.6.1" }, "inflection": { @@ -698,7 +698,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -841,7 +841,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version 
not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -992,7 +992,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -1094,7 +1094,7 @@ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545", "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958" ], - "markers": "python_version < '3.8' and python_version < '3.8'", + "markers": "python_version < '3.8'", "version": "==1.6.1" }, "more-itertools": { @@ -1142,7 +1142,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1158,7 +1158,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { diff --git a/src/spiffworkflow b/src/spiffworkflow deleted file mode 160000 index 49163a98..00000000 --- a/src/spiffworkflow +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 49163a983b7d8b8e564079c79277b21e358a26ac From 7116b582e8dbea7684dfe4521212b630b2d5ad06 Mon Sep 17 00:00:00 2001 From: Carlos Lopez Date: Thu, 25 Jun 2020 12:01:24 -0600 Subject: [PATCH 35/44] Splitting 
commands properly without losing double quoted strings --- crc/scripts/email.py | 14 +------------- crc/services/workflow_processor.py | 5 ++++- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/crc/scripts/email.py b/crc/scripts/email.py index d3a64725..6f8244dd 100644 --- a/crc/scripts/email.py +++ b/crc/scripts/email.py @@ -73,19 +73,7 @@ Email Subject ApprvlApprvr1 PIComputingID message="Email script requires at least one subject argument. The " "name of the variable in the task data that contains subject" " to process. Multiple arguments are accepted.") - - subject_index = 0 - subject = args[subject_index] - if subject.startswith('"') and not subject.endswith('"'): - # Multi-word subject - subject_index += 1 - next_word = args[subject_index] - while not next_word.endswith('"'): - subject = ' '.join((subject, next_word)) - subject_index += 1 - next_word = args[subject_index] - subject = ' '.join((subject, next_word)) - subject = subject.replace('"', '') + subject = args[0] if not isinstance(subject, str): raise ApiError(code="invalid_argument", message="The Email script requires 1 argument. The " diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index dde14bb5..52aa5a33 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -1,4 +1,5 @@ import re +import shlex import xml.etree.ElementTree as ElementTree from datetime import datetime from typing import List @@ -36,7 +37,9 @@ class CustomBpmnScriptEngine(BpmnScriptEngine): This allows us to reference custom code from the BPMN diagram. """ - commands = script.split(" ") + # Shlex splits the whole string while respecting double quoted strings within + commands = shlex.split(script) + printable_comms = commands path_and_command = commands[0].rsplit(".", 1) if len(path_and_command) == 1: module_name = "crc.scripts." 
+ self.camel_to_snake(path_and_command[0]) From c3ceda4c2fa08ef1f0d22a11faa3f4d3cf194e0c Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Thu, 25 Jun 2020 14:02:16 -0400 Subject: [PATCH 36/44] Replaces xml.etree with lxml.etree --- Pipfile | 39 +++++++++++++++--------------- Pipfile.lock | 6 ++--- crc/services/file_service.py | 8 +++--- crc/services/workflow_processor.py | 6 ++--- 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/Pipfile b/Pipfile index 96f8a748..5873efb4 100644 --- a/Pipfile +++ b/Pipfile @@ -9,39 +9,40 @@ pbr = "*" coverage = "*" [packages] +alembic = "*" connexion = {extras = ["swagger-ui"],version = "*"} -swagger-ui-bundle = "*" +coverage = "*" +docxtpl = "*" flask = "*" +flask-admin = "*" flask-bcrypt = "*" flask-cors = "*" +flask-mail = "*" flask-marshmallow = "*" flask-migrate = "*" flask-restful = "*" +gunicorn = "*" httpretty = "*" +ldap3 = "*" marshmallow = "*" marshmallow-enum = "*" marshmallow-sqlalchemy = "*" openpyxl = "*" -pyjwt = "*" -requests = "*" -xlsxwriter = "*" -webtest = "*" -spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "STG-26"} -alembic = "*" -coverage = "*" -sphinx = "*" -recommonmark = "*" -psycopg2-binary = "*" -docxtpl = "*" -python-dateutil = "*" pandas = "*" -xlrd = "*" -ldap3 = "*" -gunicorn = "*" -werkzeug = "*" +psycopg2-binary = "*" +pyjwt = "*" +python-dateutil = "*" +recommonmark = "*" +requests = "*" sentry-sdk = {extras = ["flask"],version = "==0.14.4"} -flask-mail = "*" -flask-admin = "*" +sphinx = "*" +spiffworkflow = {editable = true,git = "https://github.com/sartography/SpiffWorkflow.git",ref = "STG-26"} +swagger-ui-bundle = "*" +webtest = "*" +werkzeug = "*" +xlrd = "*" +xlsxwriter = "*" +lxml = "*" [requires] python_version = "3.7" diff --git a/Pipfile.lock b/Pipfile.lock index 85a0fdbf..ce008306 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": 
"8d6d99bcacef0b12f29f3c402f7980799812f645c576767b5477445a1fc03062" + "sha256": "d82c06e080dbdd4c9da4e308d29ebefd9ef41be7a15caa72c6d6f9b7007d8910" }, "pipfile-spec": 6, "requires": { @@ -464,7 +464,7 @@ "sha256:f95d28193c3863132b1f55c1056036bf580b5a488d908f7d22a04ace8935a3a9", "sha256:fadd2a63a2bfd7fb604508e553d1cf68eca250b2fbdbd81213b5f6f2fbf23529" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "index": "pypi", "version": "==4.5.1" }, "mako": { @@ -918,7 +918,7 @@ "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "49163a983b7d8b8e564079c79277b21e358a26ac" + "ref": "8ab8d98792ac46e0bac5b1b35a59ddfe28aa9760" }, "sqlalchemy": { "hashes": [ diff --git a/crc/services/file_service.py b/crc/services/file_service.py index ff234a79..fe8cb4e2 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -3,7 +3,7 @@ import json import os from datetime import datetime from uuid import UUID -from xml.etree import ElementTree +from lxml import etree import flask from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException @@ -151,7 +151,7 @@ class FileService(object): # If this is a BPMN, extract the process id. 
if file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(binary_data) + bpmn: etree.Element = etree.fromstring(binary_data) file_model.primary_process_id = FileService.get_process_id(bpmn) new_file_data_model = FileDataModel( @@ -165,7 +165,7 @@ class FileService(object): return file_model @staticmethod - def get_process_id(et_root: ElementTree.Element): + def get_process_id(et_root: etree.Element): process_elements = [] for child in et_root: if child.tag.endswith('process') and child.attrib.get('isExecutable', False): @@ -179,7 +179,7 @@ class FileService(object): # Look for the element that has the startEvent in it for e in process_elements: - this_element: ElementTree.Element = e + this_element: etree.Element = e for child_element in list(this_element): if child_element.tag.endswith('startEvent'): return this_element.attrib['id'] diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py index c84aa3fa..edb25770 100644 --- a/crc/services/workflow_processor.py +++ b/crc/services/workflow_processor.py @@ -1,5 +1,5 @@ import re -import xml.etree.ElementTree as ElementTree +from lxml import etree from datetime import datetime from typing import List @@ -266,12 +266,12 @@ class WorkflowProcessor(object): for file_data in file_data_models: if file_data.file_model.type == FileType.bpmn: - bpmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + bpmn: etree.Element = etree.fromstring(file_data.data) if file_data.file_model.primary: process_id = FileService.get_process_id(bpmn) parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name) elif file_data.file_model.type == FileType.dmn: - dmn: ElementTree.Element = ElementTree.fromstring(file_data.data) + dmn: etree.Element = etree.fromstring(file_data.data) parser.add_dmn_xml(dmn, filename=file_data.file_model.name) if process_id is None: raise (ApiError(code="no_primary_bpmn_error", From 665faaa175ee2c426831b344033947e6fdda8605 Mon Sep 17 00:00:00 
2001 From: Carlos Lopez Date: Thu, 25 Jun 2020 16:18:42 -0600 Subject: [PATCH 37/44] Send emails from service --- crc/__init__.py | 5 +++-- crc/api/tools.py | 2 +- crc/services/email_service.py | 17 ++++++++++++++--- crc/services/mails.py | 17 +---------------- 4 files changed, 19 insertions(+), 22 deletions(-) diff --git a/crc/__init__.py b/crc/__init__.py index 59ffeac7..d169b547 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -34,6 +34,9 @@ db = SQLAlchemy(app) session = db.session """:type: sqlalchemy.orm.Session""" +# Mail settings +mail = Mail(app) + migrate = Migrate(app, db) ma = Marshmallow(app) @@ -58,8 +61,6 @@ if app.config['ENABLE_SENTRY']: # Jinja environment definition, used to render mail templates template_dir = os.getcwd() + '/crc/static/templates/mails' env = Environment(loader=FileSystemLoader(template_dir)) -# Mail settings -mail = Mail(app) print('=== USING THESE CONFIG SETTINGS: ===') print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT']) diff --git a/crc/api/tools.py b/crc/api/tools.py index fa969a1e..760d0d71 100644 --- a/crc/api/tools.py +++ b/crc/api/tools.py @@ -65,4 +65,4 @@ def send_email(address): """Just sends a quick test email to assure the system is working.""" if not address: address = "dan@sartography.com" - return send_test_email(address, [address]) \ No newline at end of file + return send_test_email(address, [address]) diff --git a/crc/services/email_service.py b/crc/services/email_service.py index 633f2102..3d78eada 100644 --- a/crc/services/email_service.py +++ b/crc/services/email_service.py @@ -1,8 +1,8 @@ from datetime import datetime - +from flask_mail import Message from sqlalchemy import desc -from crc import app, db, session +from crc import app, db, mail, session from crc.api.common import ApiError from crc.models.study import StudyModel @@ -25,7 +25,18 @@ class EmailService(object): email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients), content=content, 
content_html=content_html, study=study) - # TODO: Send email from here, not from caller functions + # Send mail + try: + msg = Message(subject, + sender=sender, + recipients=recipients) + + msg.body = content + msg.html = content_html + + mail.send(msg) + except Exception as e: + app.logger.error(str(e)) db.session.add(email_model) db.session.commit() diff --git a/crc/services/mails.py b/crc/services/mails.py index c4942a7d..a1570035 100644 --- a/crc/services/mails.py +++ b/crc/services/mails.py @@ -6,7 +6,6 @@ from flask_mail import Message from crc.services.email_service import EmailService -# TODO: Extract common mailing code into its own function def send_test_email(sender, recipients): try: msg = Message('Research Ramp-up Plan test', @@ -24,23 +23,9 @@ def send_test_email(sender, recipients): return str(e) def send_mail(subject, sender, recipients, content, content_html, study_id=None): - from crc import mail - try: - msg = Message(subject, - sender=sender, - recipients=recipients, - bcc=['rrt_emails@googlegroups.com']) - - msg.body = content - msg.html = content_html - - EmailService.add_email(subject=subject, sender=sender, recipients=recipients, + EmailService.add_email(subject=subject, sender=sender, recipients=recipients, content=content, content_html=content_html, study_id=study_id) - mail.send(msg) - except Exception as e: - return str(e) - def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None): from crc import env subject = 'Research Ramp-up Plan Submitted' From f49e6905efdb850bd3929466f3a701fb1754fc01 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 10:48:39 -0400 Subject: [PATCH 38/44] Updates Spiff --- Pipfile.lock | 72 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 5 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index d87f0411..e5b3808a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -35,6 +35,7 @@ 
"sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b", "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.6.0" }, "aniso8601": { @@ -49,6 +50,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "babel": { @@ -56,6 +58,7 @@ "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38", "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.0" }, "bcrypt": { @@ -79,6 +82,7 @@ "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==3.1.7" }, "beautifulsoup4": { @@ -107,6 +111,7 @@ "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916", "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.4.6" }, "certifi": { @@ -161,6 +166,7 @@ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -182,6 +188,7 @@ "sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1", "sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd" ], + "markers": "python_version >= '3.6'", "version": 
"==5.0.0" }, "connexion": { @@ -237,6 +244,7 @@ "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==0.16" }, "docxtpl": { @@ -319,12 +327,14 @@ "sha256:0b656fbf87c5f24109d859bafa791d29751fabbda2302b606881ae5485b557a5", "sha256:fcfe6df52cd2ed8a63008ca36b86a51fa7a4b70cef1c39e5625f722fca32308e" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.3" }, "future": { "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "gunicorn": { @@ -347,6 +357,7 @@ "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb", "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.9" }, "imagesize": { @@ -354,6 +365,7 @@ "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1", "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.2.0" }, "importlib-metadata": { @@ -369,6 +381,7 @@ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9", "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924" ], + "markers": "python_version >= '3.5'", "version": "==0.5.0" }, "itsdangerous": { @@ -376,6 +389,7 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jdcal": { @@ 
-390,6 +404,7 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -404,12 +419,17 @@ "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a", "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==4.6.11" }, "ldap3": { "hashes": [ + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", - "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c" ], "index": "pypi", "version": "==2.7" @@ -452,6 +472,7 @@ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27", "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.3" }, "markdown": { @@ -498,6 +519,7 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "marshmallow": { @@ -553,6 +575,7 @@ "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a", "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596" ], + "markers": "python_version >= '3.6'", 
"version": "==1.19.0" }, "openapi-spec-validator": { @@ -575,6 +598,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pandas": { @@ -637,8 +661,19 @@ }, "pyasn1": { "hashes": [ + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", + "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", + "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba" + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" ], "version": "==0.4.8" }, @@ -647,6 +682,7 @@ "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.20" }, "pygments": { @@ -654,6 +690,7 @@ "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44", "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324" ], + "markers": "python_version >= 
'3.5'", "version": "==2.6.1" }, "pyjwt": { @@ -669,6 +706,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyrsistent": { @@ -693,9 +731,11 @@ }, "python-editor": { "hashes": [ - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8" + "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" ], "version": "==1.0.4" }, @@ -809,6 +849,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "snowballstemmer": { @@ -823,6 +864,7 @@ "sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55", "sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232" ], + "markers": "python_version >= '3.5'", "version": "==2.0.1" }, "sphinx": { @@ -838,6 +880,7 @@ "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a", "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58" ], + "markers": "python_version >= '3.5'", "version": "==1.0.2" }, "sphinxcontrib-devhelp": { @@ -845,6 +888,7 @@ "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e", "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4" ], + "markers": "python_version >= '3.5'", "version": 
"==1.0.2" }, "sphinxcontrib-htmlhelp": { @@ -852,6 +896,7 @@ "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f", "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-jsmath": { @@ -859,6 +904,7 @@ "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8" ], + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "sphinxcontrib-qthelp": { @@ -866,6 +912,7 @@ "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72", "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6" ], + "markers": "python_version >= '3.5'", "version": "==1.0.3" }, "sphinxcontrib-serializinghtml": { @@ -873,12 +920,13 @@ "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc", "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a" ], + "markers": "python_version >= '3.5'", "version": "==1.1.4" }, "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "bd2af4ef61ad3adaf193635bbb21729d067f033b" + "ref": "62caf2c30d7932ac82ada0d1db84ef9fe9106c43" }, "sqlalchemy": { "hashes": [ @@ -911,6 +959,7 @@ "sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274", "sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.18" }, "swagger-ui-bundle": { @@ -927,6 +976,7 @@ "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527", "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.25.9" }, "vine": { @@ -934,6 +984,7 @@ 
"sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87", "sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.3.0" }, "waitress": { @@ -941,6 +992,7 @@ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261", "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.4.4" }, "webob": { @@ -948,6 +1000,7 @@ "sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b", "sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.8.6" }, "webtest": { @@ -994,6 +1047,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } }, @@ -1003,6 +1057,7 @@ "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==19.3.0" }, "coverage": { @@ -1055,6 +1110,7 @@ "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5", "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2" ], + "markers": "python_version >= '3.5'", "version": "==8.4.0" }, "packaging": { @@ -1062,6 +1118,7 @@ "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.4" }, "pbr": { @@ -1077,6 +1134,7 @@ 
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { @@ -1084,6 +1142,7 @@ "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.9.0" }, "pyparsing": { @@ -1091,6 +1150,7 @@ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pytest": { @@ -1106,6 +1166,7 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "wcwidth": { @@ -1120,6 +1181,7 @@ "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b", "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96" ], + "markers": "python_version >= '3.6'", "version": "==3.1.0" } } From 29b108673da798c9fe29ab3c9d7fe2b6140ca794 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 11:51:27 -0400 Subject: [PATCH 39/44] Adds failing test exposing bug with getting data for multi-instance tasks --- .../test_workflow_processor_multi_instance.py | 58 ++++++++++++------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/tests/workflow/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py index a54b7eab..a81eeac1 100644 --- a/tests/workflow/test_workflow_processor_multi_instance.py +++ 
b/tests/workflow/test_workflow_processor_multi_instance.py @@ -32,7 +32,6 @@ class TestWorkflowProcessorMultiInstance(BaseTest): 'error': 'Unable to locate a user with id asd3v in LDAP'}} def _populate_form_with_random_data(self, task): - WorkflowService.populate_form_with_random_data(task) def get_processor(self, study_model, spec_model): @@ -52,49 +51,64 @@ class TestWorkflowProcessorMultiInstance(BaseTest): self.assertIsNotNone(processor) self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) processor.bpmn_workflow.do_engine_steps() - next_user_tasks = processor.next_user_tasks() - self.assertEqual(1, len(next_user_tasks)) - - task = next_user_tasks[0] + workflow_api = WorkflowService.processor_to_workflow_api(processor) + self.assertIsNotNone(workflow_api) + self.assertIsNotNone(workflow_api.next_task) + # 1st investigator + api_task = workflow_api.next_task self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) - self.assertEqual("dhf8r", task.data["investigator"]["user_id"]) - - self.assertEqual("MutiInstanceTask", task.get_name()) - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type) + self.assertEqual("dhf8r", api_task.data["investigator"]["user_id"]) + self.assertEqual("MutiInstanceTask", api_task.name) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(1, api_task.multi_instance_index) - task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = WorkflowService.spiff_task_to_api_task(task) + # 2nd investigator + api_task = workflow_api.next_task + 
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual(None, api_task.data["investigator"]["user_id"]) self.assertEqual("MutiInstanceTask", api_task.name) - task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(2, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() + workflow_api = WorkflowService.processor_to_workflow_api(processor) - task = next_user_tasks[0] - api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + # 3rd investigator + api_task = workflow_api.next_task + self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) + self.assertEqual("MutiInstanceTask", api_task.get_name()) + self.assertEqual("asd3v", api_task.data["investigator"]["user_id"]) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(3, api_task.multi_instance_index) + + task = processor.get_current_user_tasks()[0] + self.assertEqual(task.id, api_task.id) + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() - task = processor.bpmn_workflow.last_task + workflow_api = WorkflowService.processor_to_workflow_api(processor) + + # Last task + api_task = workflow_api.next_task expected = self.mock_investigator_response expected['PI']['email'] = "asd3v@virginia.edu" expected['SC_I']['email'] = "asdf32@virginia.edu" expected['DC']['email'] = "dhf8r@virginia.edu" - self.assertEqual(expected, - task.data['StudyInfo']['investigators']) + self.assertEqual(expected, api_task.data['StudyInfo']['investigators']) self.assertEqual(WorkflowStatus.complete, 
processor.get_status()) def refresh_processor(self, processor): @@ -132,7 +146,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): api_task = WorkflowService.spiff_task_to_api_task(task) self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type) - task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}}) + task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() self.assertEquals(6, len(processor.bpmn_workflow.get_nav_list())) From fe61333b7b7f8b2194d07dfdb74f94b37546673d Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 12:31:15 -0400 Subject: [PATCH 40/44] Fixes typo --- tests/data/multi_instance/multi_instance.bpmn | 8 ++++---- .../multi_instance_parallel.bpmn | 8 ++++---- tests/test_tasks_api.py | 2 +- .../workflow/test_workflow_processor_multi_instance.py | 10 +++++----- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/data/multi_instance/multi_instance.bpmn b/tests/data/multi_instance/multi_instance.bpmn index d53f7b17..28bda546 100644 --- a/tests/data/multi_instance/multi_instance.bpmn +++ b/tests/data/multi_instance/multi_instance.bpmn @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.NETBADGEID}} ## Role: {{investigator.INVESTIGATORTYPEFULL}} @@ -25,7 +25,7 @@ Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp @@ -58,7 +58,7 @@
- + diff --git a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn index ba1fd76b..dd6215ed 100644 --- a/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn +++ b/tests/data/multi_instance_parallel/multi_instance_parallel.bpmn @@ -8,8 +8,8 @@ Flow_0ugjw69 - - + + # Please provide addtional information about: ## Investigator ID: {{investigator.user_id}} ## Role: {{investigator.type_full}} @@ -22,7 +22,7 @@ Flow_0ugjw69 - + Flow_0t6p1sb SequenceFlow_1p568pp @@ -55,7 +55,7 @@ - + diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index 7288b5e4..c6b09dae 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -440,7 +440,7 @@ class TestTasksApi(BaseTest): self.assertEqual(9, len(ready_items)) self.assertEqual("UserTask", workflow_api.next_task.type) - self.assertEqual("MutiInstanceTask",workflow_api.next_task.name) + self.assertEqual("MultiInstanceTask",workflow_api.next_task.name) self.assertEqual("more information", workflow_api.next_task.title) for i in random.sample(range(9), 9): diff --git a/tests/workflow/test_workflow_processor_multi_instance.py b/tests/workflow/test_workflow_processor_multi_instance.py index a81eeac1..76821fed 100644 --- a/tests/workflow/test_workflow_processor_multi_instance.py +++ b/tests/workflow/test_workflow_processor_multi_instance.py @@ -59,7 +59,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): api_task = workflow_api.next_task self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) self.assertEqual("dhf8r", api_task.data["investigator"]["user_id"]) - self.assertEqual("MutiInstanceTask", api_task.name) + self.assertEqual("MultiInstanceTask", api_task.name) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(1, api_task.multi_instance_index) @@ -74,7 +74,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): api_task = workflow_api.next_task 
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) self.assertEqual(None, api_task.data["investigator"]["user_id"]) - self.assertEqual("MutiInstanceTask", api_task.name) + self.assertEqual("MultiInstanceTask", api_task.name) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(2, api_task.multi_instance_index) @@ -88,8 +88,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest): # 3rd investigator api_task = workflow_api.next_task self.assertEqual(WorkflowStatus.user_input_required, processor.get_status()) - self.assertEqual("MutiInstanceTask", api_task.get_name()) self.assertEqual("asd3v", api_task.data["investigator"]["user_id"]) + self.assertEqual("MultiInstanceTask", api_task.name) self.assertEqual(3, api_task.multi_instance_count) self.assertEqual(3, api_task.multi_instance_index) @@ -153,7 +153,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task = next_user_tasks[0] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", api_task.name) + self.assertEqual("MultiInstanceTask", api_task.name) task.update_data({"investigator":{"email":"asd3v@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() @@ -161,7 +161,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest): task = next_user_tasks[1] api_task = WorkflowService.spiff_task_to_api_task(task) - self.assertEqual("MutiInstanceTask", task.get_name()) + self.assertEqual("MultiInstanceTask", task.get_name()) task.update_data({"investigator":{"email":"asdf32@virginia.edu"}}) processor.complete_task(task) processor.do_engine_steps() From f9f3003ef0b5d656cd535ce374e48179cabc7558 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 12:31:37 -0400 Subject: [PATCH 41/44] Filters by multi-instance index, if applicable --- crc/services/workflow_service.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 8d81a908..f77d264b 
100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -242,10 +242,12 @@ class WorkflowService(object): @staticmethod def get_previously_submitted_data(workflow_id, task): """ If the user has completed this task previously, find the form data for the last submission.""" + mi_index = task.multi_instance_index if hasattr(task, 'multi_instance_index') else None; latest_event = db.session.query(TaskEventModel) \ .filter_by(workflow_id=workflow_id) \ .filter_by(task_name=task.task_spec.name) \ .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ + .filter_by(mi_index=mi_index) \ .order_by(TaskEventModel.date.desc()).first() if latest_event: if latest_event.form_data is not None: From 0ef52854a00fd582903e71f656ec9309af7a9235 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 12:47:12 -0400 Subject: [PATCH 42/44] Updates packages --- Pipfile.lock | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/Pipfile.lock b/Pipfile.lock index e5b3808a..9b79a526 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -424,12 +424,12 @@ }, "ldap3": { "hashes": [ - "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", - "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", - "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", - "sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b", "sha256:17f04298b70bf7ecaa5db8a7d8622b5a962ef7fc2b245b2eea705ac1c24338c0", - "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c" + "sha256:298769ab0232b3a3efa1e84881096c24526fe37911c83a11285f222fe4975efd", + "sha256:4fd2db72d0412cc16ee86be01332095e86e361329c3579b314231eb2e56c7871", + "sha256:52ab557b3c4908db4a90bea16731aa714b1b54e039b54fd4c4b83994c6c48c0c", + "sha256:53aaae5bf14f3827c69600ddf4d61b88f49c055bb93060e9702c5bafd206c744", + 
"sha256:81df4ac8b6df10fb1f05b17c18d0cb8c4c344d5a03083c382824960ed959cf5b" ], "index": "pypi", "version": "==2.7" @@ -661,19 +661,19 @@ }, "pyasn1": { "hashes": [ - "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", - "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", - "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", - "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", - "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3", - "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", - "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", - "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", - "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", - "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", + "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", + "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", - "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776" + "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", + "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", + "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", + "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", + "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776", + "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", + "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", + 
"sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3" ], "version": "==0.4.8" }, @@ -731,11 +731,11 @@ }, "python-editor": { "hashes": [ - "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", + "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d", "sha256:51fda6bcc5ddbbb7063b2af7509e43bd84bfc32a4ff71349ec7847713882327b", + "sha256:5f98b069316ea1c2ed3f67e7f5df6c0d8f10b689964a4a811ff64f0106819ec8", "sha256:c3da2053dbab6b29c94e43c486ff67206eafbe7eb52dbec7390b5e2fb05aac77", - "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522", - "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d" + "sha256:ea87e17f6ec459e780e4221f295411462e0d0810858e055fc514684350a2f522" ], "version": "==1.0.4" }, @@ -926,7 +926,7 @@ "spiffworkflow": { "editable": true, "git": "https://github.com/sartography/SpiffWorkflow.git", - "ref": "62caf2c30d7932ac82ada0d1db84ef9fe9106c43" + "ref": "599f41fcf9257196710806e16bef023c836735f4" }, "sqlalchemy": { "hashes": [ From 848ad563d31ab2844ef857d93fffd258bbb68b2d Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Fri, 26 Jun 2020 12:47:42 -0400 Subject: [PATCH 43/44] Only filters by mi_index if task has a mult_instance_index --- crc/services/workflow_service.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index f77d264b..2f4299a8 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -242,13 +242,15 @@ class WorkflowService(object): @staticmethod def get_previously_submitted_data(workflow_id, task): """ If the user has completed this task previously, find the form data for the last submission.""" - mi_index = task.multi_instance_index if hasattr(task, 'multi_instance_index') else None; - latest_event = db.session.query(TaskEventModel) \ + query = db.session.query(TaskEventModel) \ .filter_by(workflow_id=workflow_id) \ 
.filter_by(task_name=task.task_spec.name) \ - .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \ - .filter_by(mi_index=mi_index) \ - .order_by(TaskEventModel.date.desc()).first() + .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) + + if hasattr(task, 'multi_instance_index'): + query = query.filter_by(mi_index=task.multi_instance_index) + + latest_event = query.order_by(TaskEventModel.date.desc()).first() if latest_event: if latest_event.form_data is not None: return latest_event.form_data From a996c815085074732ff93766f6bf98e3b4e6d6a6 Mon Sep 17 00:00:00 2001 From: Aaron Louie Date: Sun, 28 Jun 2020 11:35:35 -0400 Subject: [PATCH 44/44] Gets mi_index out of Spiff task internal data --- crc/services/workflow_service.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py index 2f4299a8..0faf3b76 100644 --- a/crc/services/workflow_service.py +++ b/crc/services/workflow_service.py @@ -240,24 +240,24 @@ class WorkflowService(object): return workflow_api @staticmethod - def get_previously_submitted_data(workflow_id, task): + def get_previously_submitted_data(workflow_id, spiff_task): """ If the user has completed this task previously, find the form data for the last submission.""" query = db.session.query(TaskEventModel) \ .filter_by(workflow_id=workflow_id) \ - .filter_by(task_name=task.task_spec.name) \ + .filter_by(task_name=spiff_task.task_spec.name) \ .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) - if hasattr(task, 'multi_instance_index'): - query = query.filter_by(mi_index=task.multi_instance_index) + if hasattr(spiff_task, 'internal_data') and 'runtimes' in spiff_task.internal_data: + query = query.filter_by(mi_index=spiff_task.internal_data['runtimes']) - latest_event = query.order_by(TaskEventModel.date.desc()).first() + latest_event = query.order_by(TaskEventModel.date.desc()).first() if latest_event: if latest_event.form_data is not 
None: return latest_event.form_data else: - app.logger.error("missing_form_dat", "We have lost data for workflow %i, task %s, it is not " - "in the task event model, " - "and it should be." % (workflow_id, task.task_spec.name)) + app.logger.error("missing_form_data", "We have lost data for workflow %i, " + "task %s, it is not in the task event model, " + "and it should be." % (workflow_id, spiff_task.task_spec.name)) return {} else: return {} @@ -294,8 +294,8 @@ class WorkflowService(object): props = {} if hasattr(spiff_task.task_spec, 'extensions'): - for id, val in spiff_task.task_spec.extensions.items(): - props[id] = val + for key, val in spiff_task.task_spec.extensions.items(): + props[key] = val task = Task(spiff_task.id, spiff_task.task_spec.name,