diff --git a/Pipfile.lock b/Pipfile.lock
index 6d0f9167..909cf764 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -35,7 +35,6 @@
"sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b",
"sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==2.6.0"
},
"aniso8601": {
@@ -50,7 +49,6 @@
"sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c",
"sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==19.3.0"
},
"babel": {
@@ -58,7 +56,6 @@
"sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38",
"sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.8.0"
},
"bcrypt": {
@@ -82,7 +79,6 @@
"sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7",
"sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==3.1.7"
},
"beautifulsoup4": {
@@ -111,7 +107,6 @@
"sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916",
"sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==4.4.6"
},
"certifi": {
@@ -166,7 +161,6 @@
"sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a",
"sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==7.1.2"
},
"clickclick": {
@@ -188,7 +182,6 @@
"sha256:2ca44140ee259b5e3d8aaf47c79c36a7ab0d5e94d70bd4105c03ede7a20ea5a1",
"sha256:cffc044844040c7ce04e9acd1838b5f2e5fa3170182f6fda4d2ea8b0099dbadd"
],
- "markers": "python_version >= '3.6'",
"version": "==5.0.0"
},
"connexion": {
@@ -247,7 +240,6 @@
"sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af",
"sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==0.16"
},
"docxtpl": {
@@ -337,7 +329,6 @@
"hashes": [
"sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"
],
- "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.18.2"
},
"gunicorn": {
@@ -360,7 +351,6 @@
"sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6",
"sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.10"
},
"imagesize": {
@@ -368,7 +358,6 @@
"sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1",
"sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.2.0"
},
"importlib-metadata": {
@@ -384,7 +373,6 @@
"sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9",
"sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924"
],
- "markers": "python_version >= '3.5'",
"version": "==0.5.0"
},
"itsdangerous": {
@@ -392,7 +380,6 @@
"sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19",
"sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.1.0"
},
"jdcal": {
@@ -407,7 +394,6 @@
"sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0",
"sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==2.11.2"
},
"jsonschema": {
@@ -422,7 +408,6 @@
"sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a",
"sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==4.6.11"
},
"ldap3": {
@@ -475,7 +460,6 @@
"sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27",
"sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.1.3"
},
"markdown": {
@@ -522,7 +506,6 @@
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7",
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.1.1"
},
"marshmallow": {
@@ -578,7 +561,6 @@
"sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a",
"sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596"
],
- "markers": "python_version >= '3.6'",
"version": "==1.19.0"
},
"openapi-spec-validator": {
@@ -602,7 +584,6 @@
"sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8",
"sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.4"
},
"pandas": {
@@ -686,7 +667,6 @@
"sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
"sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.20"
},
"pygments": {
@@ -694,7 +674,6 @@
"sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44",
"sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324"
],
- "markers": "python_version >= '3.5'",
"version": "==2.6.1"
},
"pyjwt": {
@@ -710,7 +689,6 @@
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
- "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
},
"pyrsistent": {
@@ -853,7 +831,6 @@
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.15.0"
},
"snowballstemmer": {
@@ -868,7 +845,6 @@
"sha256:1634eea42ab371d3d346309b93df7870a88610f0725d47528be902a0d95ecc55",
"sha256:a59dc181727e95d25f781f0eb4fd1825ff45590ec8ff49eadfd7f1a537cc0232"
],
- "markers": "python_version >= '3.5'",
"version": "==2.0.1"
},
"sphinx": {
@@ -884,7 +860,6 @@
"sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a",
"sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.2"
},
"sphinxcontrib-devhelp": {
@@ -892,7 +867,6 @@
"sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e",
"sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.2"
},
"sphinxcontrib-htmlhelp": {
@@ -900,7 +874,6 @@
"sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f",
"sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.3"
},
"sphinxcontrib-jsmath": {
@@ -908,7 +881,6 @@
"sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178",
"sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.1"
},
"sphinxcontrib-qthelp": {
@@ -916,7 +888,6 @@
"sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72",
"sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.3"
},
"sphinxcontrib-serializinghtml": {
@@ -924,7 +895,6 @@
"sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc",
"sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a"
],
- "markers": "python_version >= '3.5'",
"version": "==1.1.4"
},
"spiffworkflow": {
@@ -963,7 +933,6 @@
"sha256:f57be5673e12763dd400fea568608700a63ce1c6bd5bdbc3cc3a2c5fdb045274",
"sha256:fc728ece3d5c772c196fd338a99798e7efac7a04f9cb6416299a3638ee9a94cd"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.3.18"
},
"swagger-ui-bundle": {
@@ -980,7 +949,6 @@
"sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527",
"sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'",
"version": "==1.25.9"
},
"vine": {
@@ -988,7 +956,6 @@
"sha256:133ee6d7a9016f177ddeaf191c1f58421a1dcc6ee9a42c58b34bed40e1d2cd87",
"sha256:ea4947cc56d1fd6f2095c8d543ee25dad966f78692528e68b4fada11ba3f98af"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.3.0"
},
"waitress": {
@@ -996,7 +963,6 @@
"sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261",
"sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==1.4.4"
},
"webob": {
@@ -1004,7 +970,6 @@
"sha256:a3c89a8e9ba0aeb17382836cdb73c516d0ecf6630ec40ec28288f3ed459ce87b",
"sha256:aa3a917ed752ba3e0b242234b2a373f9c4e2a75d35291dcbe977649bd21fd108"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.8.6"
},
"webtest": {
@@ -1051,7 +1016,6 @@
"sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b",
"sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"
],
- "markers": "python_version >= '3.6'",
"version": "==3.1.0"
}
},
@@ -1061,7 +1025,6 @@
"sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c",
"sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==19.3.0"
},
"coverage": {
@@ -1117,7 +1080,6 @@
"sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5",
"sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2"
],
- "markers": "python_version >= '3.5'",
"version": "==8.4.0"
},
"packaging": {
@@ -1125,7 +1087,6 @@
"sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8",
"sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==20.4"
},
"pbr": {
@@ -1141,7 +1102,6 @@
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==0.13.1"
},
"py": {
@@ -1149,7 +1109,6 @@
"sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2",
"sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.9.0"
},
"pyparsing": {
@@ -1157,7 +1116,6 @@
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
- "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==2.4.7"
},
"pytest": {
@@ -1173,7 +1131,6 @@
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
"sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.15.0"
},
"wcwidth": {
@@ -1188,7 +1145,6 @@
"sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b",
"sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"
],
- "markers": "python_version >= '3.6'",
"version": "==3.1.0"
}
}
diff --git a/crc/api.yml b/crc/api.yml
index 213e8d15..3d504ad4 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -502,7 +502,6 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/File"
- # /v1.0/workflow/0
/reference_file:
get:
operationId: crc.api.file.get_reference_files
@@ -565,6 +564,26 @@ paths:
type: string
format: binary
example: ''
+ /task_events:
+ parameters:
+ - name: action
+ in: query
+ required: false
+        description: The type of action the event documents, options include "ASSIGNMENT" for tasks that are waiting on you, "COMPLETE" for things that have completed.
+ schema:
+ type: string
+ get:
+ operationId: crc.api.workflow.get_task_events
+ summary: Returns a list of task events related to the current user. Can be filtered by type.
+ tags:
+ - Workflows and Tasks
+ responses:
+ '200':
+ description: Returns details about tasks that are waiting on the current user.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/TaskEvent"
# /v1.0/workflow/0
/workflow/{workflow_id}:
parameters:
@@ -1192,6 +1211,36 @@ components:
value: "model.my_boolean_field_id && model.my_enum_field_value !== 'something'"
- id: "hide_expression"
value: "model.my_enum_field_value === 'something'"
+ TaskEvent:
+ properties:
+ workflow:
+ $ref: "#/components/schemas/Workflow"
+ study:
+ $ref: "#/components/schemas/Study"
+        workflow_spec:
+ $ref: "#/components/schemas/WorkflowSpec"
+ spec_version:
+ type: string
+ action:
+ type: string
+ task_id:
+ type: string
+ task_type:
+ type: string
+ task_lane:
+ type: string
+ form_data:
+ type: object
+ mi_type:
+ type: string
+ mi_count:
+ type: integer
+ mi_index:
+ type: integer
+ process_name:
+ type: string
+ date:
+ type: string
Form:
properties:
key:
diff --git a/crc/api/admin.py b/crc/api/admin.py
index 37532c38..4e96fcd8 100644
--- a/crc/api/admin.py
+++ b/crc/api/admin.py
@@ -12,7 +12,7 @@ from crc import db, app
from crc.api.user import verify_token, verify_token_admin
from crc.models.approval import ApprovalModel
from crc.models.file import FileModel
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel
diff --git a/crc/api/workflow.py b/crc/api/workflow.py
index dc86ac9e..a290d340 100644
--- a/crc/api/workflow.py
+++ b/crc/api/workflow.py
@@ -6,7 +6,8 @@ from crc import session, app
from crc.api.common import ApiError, ApiErrorSchema
from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
from crc.models.file import FileModel, LookupDataSchema
-from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel, WorkflowMetadata
+from crc.models.task_event import TaskEventModel, TaskEventModelSchema, TaskEvent, TaskEventSchema
from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \
WorkflowSpecCategoryModelSchema
from crc.services.file_service import FileService
@@ -87,7 +88,7 @@ def delete_workflow_specification(spec_id):
session.query(TaskEventModel).filter(TaskEventModel.workflow_spec_id == spec_id).delete()
- # Delete all stats and workflow models related to this specification
+ # Delete all events and workflow models related to this specification
for workflow in session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id):
StudyService.delete_workflow(workflow)
session.query(WorkflowSpecModel).filter_by(id=spec_id).delete()
@@ -98,19 +99,38 @@ def get_workflow(workflow_id, soft_reset=False, hard_reset=False):
workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first()
processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset)
workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
+ WorkflowService.update_task_assignments(processor)
return WorkflowApiSchema().dump(workflow_api_model)
+def get_task_events(action):
+ """Provides a way to see a history of what has happened, or get a list of tasks that need your attention."""
+ query = session.query(TaskEventModel).filter(TaskEventModel.user_uid == g.user.uid)
+ if action:
+ query = query.filter(TaskEventModel.action == action)
+ events = query.all()
+
+ # Turn the database records into something a little richer for the UI to use.
+ task_events = []
+ for event in events:
+ study = session.query(StudyModel).filter(StudyModel.id == event.study_id).first()
+ workflow = session.query(WorkflowModel).filter(WorkflowModel.id == event.workflow_id).first()
+ workflow_meta = WorkflowMetadata.from_workflow(workflow)
+ task_events.append(TaskEvent(event, study, workflow_meta))
+ return TaskEventSchema(many=True).dump(task_events)
+
+
def delete_workflow(workflow_id):
StudyService.delete_workflow(workflow_id)
def set_current_task(workflow_id, task_id):
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
- user_uid = __get_user_uid(workflow_model.study.user_uid)
processor = WorkflowProcessor(workflow_model)
task_id = uuid.UUID(task_id)
spiff_task = processor.bpmn_workflow.get_task(task_id)
+ _verify_user_and_role(processor, spiff_task)
+ user_uid = g.user.uid
if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY:
raise ApiError("invalid_state", "You may not move the token to a task who's state is not "
"currently set to COMPLETE or READY.")
@@ -120,41 +140,42 @@ def set_current_task(workflow_id, task_id):
spiff_task.reset_token(reset_data=True) # Don't try to copy the existing data back into this task.
processor.save()
- WorkflowService.log_task_action(user_uid, workflow_model, spiff_task,
- WorkflowService.TASK_ACTION_TOKEN_RESET,
- version=processor.get_version_string())
+ WorkflowService.log_task_action(user_uid, processor, spiff_task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+ WorkflowService.update_task_assignments(processor)
+
workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task)
return WorkflowApiSchema().dump(workflow_api_model)
def update_task(workflow_id, task_id, body, terminate_loop=None):
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
-
if workflow_model is None:
raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
elif workflow_model.study is None:
raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
- user_uid = __get_user_uid(workflow_model.study.user_uid)
processor = WorkflowProcessor(workflow_model)
task_id = uuid.UUID(task_id)
spiff_task = processor.bpmn_workflow.get_task(task_id)
+ _verify_user_and_role(processor, spiff_task)
if not spiff_task:
raise ApiError("empty_task", "Processor failed to obtain task.", status_code=404)
if spiff_task.state != spiff_task.READY:
raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. "
"Consider calling a token reset to make this task Ready.")
+
if terminate_loop:
spiff_task.terminate_loop()
-
spiff_task.update_data(body)
processor.complete_task(spiff_task)
processor.do_engine_steps()
processor.save()
- WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, WorkflowService.TASK_ACTION_COMPLETE,
- version=processor.get_version_string())
+ # Log the action, and any pending task assignments in the event of lanes in the workflow.
+ WorkflowService.log_task_action(g.user.uid, processor, spiff_task, WorkflowService.TASK_ACTION_COMPLETE)
+ WorkflowService.update_task_assignments(processor)
+
workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
return WorkflowApiSchema().dump(workflow_api_model)
@@ -210,13 +231,21 @@ def lookup(workflow_id, field_id, query=None, value=None, limit=10):
return LookupDataSchema(many=True).dump(lookup_data)
-def __get_user_uid(user_uid):
- if 'user' in g:
- if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
- raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.",
- status_code=403)
- else:
- return g.user.uid
+def _verify_user_and_role(processor, spiff_task):
+ """Assures the currently logged in user can access the given workflow and task, or
+ raises an error.
+ Allow administrators to modify tasks, otherwise assure that the current user
+ is allowed to edit or update the task. Will raise the appropriate error if user
+ is not authorized. """
- else:
+ if 'user' not in g:
raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
+
+ if g.user.uid in app.config['ADMIN_UIDS']:
+ return g.user.uid
+
+ allowed_users = WorkflowService.get_users_assigned_to_task(processor, spiff_task)
+ if g.user.uid not in allowed_users:
+ raise ApiError.from_task("permission_denied",
+ f"This task must be completed by '{allowed_users}', "
+ f"but you are {g.user.uid}", spiff_task)
diff --git a/crc/models/api_models.py b/crc/models/api_models.py
index 4dec1a74..843609e0 100644
--- a/crc/models/api_models.py
+++ b/crc/models/api_models.py
@@ -29,6 +29,7 @@ class NavigationItem(object):
self.state = state
self.is_decision = is_decision
self.task = task
+ self.lane = lane
class Task(object):
@@ -63,8 +64,9 @@ class Task(object):
##########################################################################
- def __init__(self, id, name, title, type, state, form, documentation, data,
- multi_instance_type, multi_instance_count, multi_instance_index, process_name, properties):
+ def __init__(self, id, name, title, type, state, lane, form, documentation, data,
+ multi_instance_type, multi_instance_count, multi_instance_index,
+ process_name, properties):
self.id = id
self.name = name
self.title = title
@@ -73,6 +75,7 @@ class Task(object):
self.form = form
self.documentation = documentation
self.data = data
+ self.lane = lane
self.multi_instance_type = multi_instance_type # Some tasks have a repeat behavior.
self.multi_instance_count = multi_instance_count # This is the number of times the task could repeat.
self.multi_instance_index = multi_instance_index # And the index of the currently repeating task.
@@ -111,7 +114,7 @@ class FormSchema(ma.Schema):
class TaskSchema(ma.Schema):
class Meta:
- fields = ["id", "name", "title", "type", "state", "form", "documentation", "data", "multi_instance_type",
+ fields = ["id", "name", "title", "type", "state", "lane", "form", "documentation", "data", "multi_instance_type",
"multi_instance_count", "multi_instance_index", "process_name", "properties"]
multi_instance_type = EnumField(MultiInstanceType)
@@ -119,6 +122,7 @@ class TaskSchema(ma.Schema):
form = marshmallow.fields.Nested(FormSchema, required=False, allow_none=True)
title = marshmallow.fields.String(required=False, allow_none=True)
process_name = marshmallow.fields.String(required=False, allow_none=True)
+ lane = marshmallow.fields.String(required=False, allow_none=True)
@marshmallow.post_load
def make_task(self, data, **kwargs):
@@ -128,10 +132,11 @@ class TaskSchema(ma.Schema):
class NavigationItemSchema(ma.Schema):
class Meta:
fields = ["id", "task_id", "name", "title", "backtracks", "level", "indent", "child_count", "state",
- "is_decision", "task"]
+ "is_decision", "task", "lane"]
unknown = INCLUDE
task = marshmallow.fields.Nested(TaskSchema, dump_only=True, required=False, allow_none=True)
backtracks = marshmallow.fields.String(required=False, allow_none=True)
+ lane = marshmallow.fields.String(required=False, allow_none=True)
title = marshmallow.fields.String(required=False, allow_none=True)
task_id = marshmallow.fields.String(required=False, allow_none=True)
diff --git a/crc/models/stats.py b/crc/models/stats.py
deleted file mode 100644
index 0a2e69b7..00000000
--- a/crc/models/stats.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
-
-from crc import db
-
-
-class TaskEventModel(db.Model):
- __tablename__ = 'task_event'
- id = db.Column(db.Integer, primary_key=True)
- study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False)
- user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False)
- workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False)
- workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
- spec_version = db.Column(db.String)
- action = db.Column(db.String)
- task_id = db.Column(db.String)
- task_name = db.Column(db.String)
- task_title = db.Column(db.String)
- task_type = db.Column(db.String)
- task_state = db.Column(db.String)
- form_data = db.Column(db.JSON) # And form data submitted when the task was completed.
- mi_type = db.Column(db.String)
- mi_count = db.Column(db.Integer)
- mi_index = db.Column(db.Integer)
- process_name = db.Column(db.String)
- date = db.Column(db.DateTime)
-
-
-class TaskEventModelSchema(SQLAlchemyAutoSchema):
- class Meta:
- model = TaskEventModel
- load_instance = True
- include_relationships = True
- include_fk = True # Includes foreign keys
diff --git a/crc/models/task_event.py b/crc/models/task_event.py
new file mode 100644
index 00000000..a6cb1a2d
--- /dev/null
+++ b/crc/models/task_event.py
@@ -0,0 +1,64 @@
+from marshmallow import INCLUDE, fields
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
+
+from crc import db, ma
+from crc.models.study import StudyModel, StudySchema, WorkflowMetadataSchema, WorkflowMetadata
+from crc.models.workflow import WorkflowModel
+
+
+class TaskEventModel(db.Model):
+ __tablename__ = 'task_event'
+ id = db.Column(db.Integer, primary_key=True)
+ study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False)
+ user_uid = db.Column(db.String, nullable=False) # In some cases the unique user id may not exist in the db yet.
+ workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False)
+ workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
+ spec_version = db.Column(db.String)
+ action = db.Column(db.String)
+ task_id = db.Column(db.String)
+ task_name = db.Column(db.String)
+ task_title = db.Column(db.String)
+ task_type = db.Column(db.String)
+ task_state = db.Column(db.String)
+ task_lane = db.Column(db.String)
+ form_data = db.Column(db.JSON) # And form data submitted when the task was completed.
+ mi_type = db.Column(db.String)
+ mi_count = db.Column(db.Integer)
+ mi_index = db.Column(db.Integer)
+ process_name = db.Column(db.String)
+ date = db.Column(db.DateTime)
+
+
+class TaskEventModelSchema(SQLAlchemyAutoSchema):
+ class Meta:
+ model = TaskEventModel
+ load_instance = True
+ include_relationships = True
+ include_fk = True # Includes foreign keys
+
+
+class TaskEvent(object):
+ def __init__(self, model: TaskEventModel, study: StudyModel, workflow: WorkflowMetadata):
+ self.id = model.id
+ self.study = study
+ self.workflow = workflow
+ self.user_uid = model.user_uid
+ self.action = model.action
+ self.task_id = model.task_id
+ self.task_title = model.task_title
+ self.task_name = model.task_name
+ self.task_type = model.task_type
+ self.task_state = model.task_state
+ self.task_lane = model.task_lane
+
+
+class TaskEventSchema(ma.Schema):
+
+ study = fields.Nested(StudySchema, dump_only=True)
+ workflow = fields.Nested(WorkflowMetadataSchema, dump_only=True)
+
+ class Meta:
+ model = TaskEvent
+ additional = ["id", "user_uid", "action", "task_id", "task_title",
+ "task_name", "task_type", "task_state", "task_lane"]
+ unknown = INCLUDE
diff --git a/crc/services/study_service.py b/crc/services/study_service.py
index ce283cfe..fbc62d01 100644
--- a/crc/services/study_service.py
+++ b/crc/services/study_service.py
@@ -13,7 +13,7 @@ from crc.api.common import ApiError
from crc.models.file import FileModel, FileModelSchema, File
from crc.models.ldap import LdapSchema
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus
diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py
index de6cf1c7..74d70408 100644
--- a/crc/services/workflow_service.py
+++ b/crc/services/workflow_service.py
@@ -1,6 +1,7 @@
import copy
import json
import string
+import uuid
from datetime import datetime
import random
@@ -15,13 +16,14 @@ from SpiffWorkflow.bpmn.specs.UserTask import UserTask
from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from SpiffWorkflow.util.deep_merge import DeepMerge
+from flask import g
from jinja2 import Template
from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType, NavigationItem, NavigationItemSchema, WorkflowApi
from crc.models.file import LookupDataModel
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
@@ -32,10 +34,13 @@ from crc.services.workflow_processor import WorkflowProcessor
class WorkflowService(object):
- TASK_ACTION_COMPLETE = "Complete"
- TASK_ACTION_TOKEN_RESET = "Backwards Move"
- TASK_ACTION_HARD_RESET = "Restart (Hard)"
- TASK_ACTION_SOFT_RESET = "Restart (Soft)"
+ TASK_ACTION_COMPLETE = "COMPLETE"
+ TASK_ACTION_TOKEN_RESET = "TOKEN_RESET"
+ TASK_ACTION_HARD_RESET = "HARD_RESET"
+ TASK_ACTION_SOFT_RESET = "SOFT_RESET"
+    TASK_ACTION_ASSIGNMENT = "ASSIGNMENT"  # Whenever the lane changes between tasks we assign the task to a specific user.
+
+ TASK_STATE_LOCKED = "LOCKED" # When the task belongs to a different user.
"""Provides tools for processing workflows and tasks. This
should at some point, be the only way to work with Workflows, and
@@ -94,11 +99,16 @@ class WorkflowService(object):
processor.bpmn_workflow.do_engine_steps()
tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
for task in tasks:
+ if task.task_spec.lane is not None and task.task_spec.lane not in task.data:
+ raise ApiError.from_task("invalid_role",
+ f"This task is in a lane called '{task.task_spec.lane}', The "
+ f" current task data must have information mapping this role to "
+ f" a unique user id.", task)
task_api = WorkflowService.spiff_task_to_api_task(
task,
add_docs_and_forms=True) # Assure we try to process the documentation, and raise those errors.
WorkflowService.populate_form_with_random_data(task, task_api, required_only)
- task.complete()
+ processor.complete_task(task)
except WorkflowException as we:
WorkflowService.delete_test_data()
raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
@@ -197,13 +207,15 @@ class WorkflowService(object):
possible, next_task is set to the current_task."""
nav_dict = processor.bpmn_workflow.get_nav_list()
+
+        # Some basic cleanup of the title for the navigation.
navigation = []
for nav_item in nav_dict:
spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id'])
if 'description' in nav_item:
nav_item['title'] = nav_item.pop('description')
# fixme: duplicate code from the workflow_service. Should only do this in one place.
- if ' ' in nav_item['title']:
+ if nav_item['title'] is not None and ' ' in nav_item['title']:
nav_item['title'] = nav_item['title'].partition(' ')[2]
else:
nav_item['title'] = ""
@@ -211,11 +223,13 @@ class WorkflowService(object):
nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False)
nav_item['title'] = nav_item['task'].title # Prefer the task title.
+ user_uids = WorkflowService.get_users_assigned_to_task(processor, spiff_task)
+ if 'user' not in g or not g.user or g.user.uid not in user_uids:
+ nav_item['state'] = WorkflowService.TASK_STATE_LOCKED
+
else:
nav_item['task'] = None
- if not 'is_decision' in nav_item:
- nav_item['is_decision'] = False
navigation.append(NavigationItem(**nav_item))
NavigationItemSchema().dump(nav_item)
@@ -241,7 +255,10 @@ class WorkflowService(object):
previous_form_data = WorkflowService.get_previously_submitted_data(processor.workflow_model.id, next_task)
DeepMerge.merge(next_task.data, previous_form_data)
workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True)
-
+ # Update the state of the task to locked if the current user does not own the task.
+ user_uids = WorkflowService.get_users_assigned_to_task(processor, next_task)
+ if 'user' not in g or not g.user or g.user.uid not in user_uids:
+ workflow_api.next_task.state = WorkflowService.TASK_STATE_LOCKED
return workflow_api
@staticmethod
@@ -299,11 +316,17 @@ class WorkflowService(object):
for key, val in spiff_task.task_spec.extensions.items():
props[key] = val
+ if hasattr(spiff_task.task_spec, 'lane'):
+ lane = spiff_task.task_spec.lane
+ else:
+ lane = None
+
task = Task(spiff_task.id,
spiff_task.task_spec.name,
spiff_task.task_spec.description,
task_type,
spiff_task.get_state_name(),
+ lane,
None,
"",
{},
@@ -424,21 +447,50 @@ class WorkflowService(object):
return options
@staticmethod
- def log_task_action(user_uid, workflow_model, spiff_task, action, version):
+ def update_task_assignments(processor):
+ """For every upcoming user task, log a task action
+ that connects the assigned user(s) to that task. All
+ existing assignment actions for this workflow are removed from the database,
+ so that only the current valid actions are available. update_task_assignments
+ should be called whenever progress is made on a workflow."""
+ db.session.query(TaskEventModel). \
+ filter(TaskEventModel.workflow_id == processor.workflow_model.id). \
+ filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).delete()
+
+ for task in processor.get_current_user_tasks():
+ user_ids = WorkflowService.get_users_assigned_to_task(processor, task)
+ for user_id in user_ids:
+ WorkflowService.log_task_action(user_id, processor, task, WorkflowService.TASK_ACTION_ASSIGNMENT)
+
+ @staticmethod
+ def get_users_assigned_to_task(processor, spiff_task):
+ if not hasattr(spiff_task.task_spec, 'lane') or spiff_task.task_spec.lane is None:
+ return [processor.workflow_model.study.user_uid]
+ # todo: return a list of all users that can edit the study by default
+ if spiff_task.task_spec.lane not in spiff_task.data:
+ return [] # No users are assignable to the task at this moment
+ lane_users = spiff_task.data[spiff_task.task_spec.lane]
+ if not isinstance(lane_users, list):
+ lane_users = [lane_users]
+ return lane_users
+
+ @staticmethod
+ def log_task_action(user_uid, processor, spiff_task, action):
task = WorkflowService.spiff_task_to_api_task(spiff_task)
form_data = WorkflowService.extract_form_data(spiff_task.data, spiff_task)
task_event = TaskEventModel(
- study_id=workflow_model.study_id,
+ study_id=processor.workflow_model.study_id,
user_uid=user_uid,
- workflow_id=workflow_model.id,
- workflow_spec_id=workflow_model.workflow_spec_id,
- spec_version=version,
+ workflow_id=processor.workflow_model.id,
+ workflow_spec_id=processor.workflow_model.workflow_spec_id,
+ spec_version=processor.get_version_string(),
action=action,
task_id=task.id,
task_name=task.name,
task_title=task.title,
task_type=str(task.type),
task_state=task.state,
+ task_lane=task.lane,
form_data=form_data,
mi_type=task.multi_instance_type.value, # Some tasks have a repeat behavior.
mi_count=task.multi_instance_count, # This is the number of times the task could repeat.
diff --git a/migrations/versions/ffef4661a37d_.py b/migrations/versions/ffef4661a37d_.py
new file mode 100644
index 00000000..2a263951
--- /dev/null
+++ b/migrations/versions/ffef4661a37d_.py
@@ -0,0 +1,38 @@
+"""empty message
+
+Revision ID: ffef4661a37d
+Revises: 5acd138e969c
+Create Date: 2020-07-14 19:52:05.270939
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'ffef4661a37d'
+down_revision = '5acd138e969c'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('task_event', sa.Column('task_lane', sa.String(), nullable=True))
+ op.drop_constraint('task_event_user_uid_fkey', 'task_event', type_='foreignkey')
+ op.execute("update task_event set action = 'COMPLETE' where action='Complete'")
+ op.execute("update task_event set action = 'TOKEN_RESET' where action='Backwards Move'")
+ op.execute("update task_event set action = 'HARD_RESET' where action='Restart (Hard)'")
+ op.execute("update task_event set action = 'SOFT_RESET' where action='Restart (Soft)'")
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_foreign_key('task_event_user_uid_fkey', 'task_event', 'user', ['user_uid'], ['uid'])
+ op.drop_column('task_event', 'task_lane')
+ op.execute("update task_event set action = 'Complete' where action='COMPLETE'")
+ op.execute("update task_event set action = 'Backwards Move' where action='TOKEN_RESET'")
+ op.execute("update task_event set action = 'Restart (Hard)' where action='HARD_RESET'")
+ op.execute("update task_event set action = 'Restart (Soft)' where action='SOFT_RESET'")
+ # ### end Alembic commands ###
diff --git a/tests/base_test.py b/tests/base_test.py
index 116df5a2..6ea1966d 100644
--- a/tests/base_test.py
+++ b/tests/base_test.py
@@ -16,7 +16,7 @@ from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
from crc.models.approval import ApprovalModel, ApprovalStatus
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
@@ -230,7 +230,7 @@ class BaseTest(unittest.TestCase):
db.session.commit()
return user
- def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
+ def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer", primary_investigator_id="lb3dp"):
study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
if study is None:
user = self.create_user(uid=uid)
@@ -263,13 +263,13 @@ class BaseTest(unittest.TestCase):
return full_study
- def create_workflow(self, workflow_name, study=None, category_id=None):
+ def create_workflow(self, workflow_name, study=None, category_id=None, as_user="dhf8r"):
db.session.flush()
spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.name == workflow_name).first()
if spec is None:
spec = self.load_test_spec(workflow_name, category_id=category_id)
if study is None:
- study = self.create_study()
+ study = self.create_study(uid=as_user)
workflow_model = StudyService._create_workflow_model(study, spec)
return workflow_model
@@ -313,6 +313,7 @@ class BaseTest(unittest.TestCase):
self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
return workflow_api
+
def complete_form(self, workflow_in, task_in, dict_data, error_code=None, terminate_loop=None, user_uid="dhf8r"):
prev_completed_task_count = workflow_in.completed_tasks
if isinstance(task_in, dict):
@@ -339,7 +340,7 @@ class BaseTest(unittest.TestCase):
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
- # Assure stats are updated on the model
+ # Assure task events are updated on the model
workflow = WorkflowApiSchema().load(json_data)
# The total number of tasks may change over time, as users move through gateways
# branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
@@ -352,6 +353,7 @@ class BaseTest(unittest.TestCase):
task_events = session.query(TaskEventModel) \
.filter_by(workflow_id=workflow.id) \
.filter_by(task_id=task_id) \
+ .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \
.order_by(TaskEventModel.date.desc()).all()
self.assertGreater(len(task_events), 0)
event = task_events[0]
diff --git a/tests/data/invalid_roles/invalid_roles.bpmn b/tests/data/invalid_roles/invalid_roles.bpmn
new file mode 100644
index 00000000..de10f712
--- /dev/null
+++ b/tests/data/invalid_roles/invalid_roles.bpmn
@@ -0,0 +1,177 @@
+
+
+
+
+
+
+
+
+ StartEvent_1
+ Activity_1hljoeq
+ Event_0lscajc
+ Activity_19ccxoj
+
+
+ Gateway_1fkgc4u
+ Activity_14eor1x
+
+
+
+ Flow_0a7090c
+
+
+ # Answer me these questions 3, ere the other side you see!
+
+
+
+
+
+
+
+ Flow_0a7090c
+ Flow_070gq5r
+ Flow_1hcpt7c
+
+
+ Flow_1gp4zfd
+ Flow_0vnghsi
+ Flow_1g38q6b
+
+
+ # Your responses were approved!
+
+
+Gosh! you must really know a lot about colors and swallows and stuff!
+Your supervisor provided the following feedback:
+
+
+{{feedback}}
+
+
+You are all done! WARNING: If you go back and reanswer the questions it will create a new approval request.
+
+
+
+
+
+
+ Flow_1g38q6b
+
+
+ # Your Request was rejected
+
+
+Perhaps you don't know the right answer to one of the questions.
+Your Supervisor provided the following feedback:
+
+
+{{feedback}}
+
+
+Please press save to re-try the questions, and submit your responses again.
+
+
+
+
+
+
+ Flow_0vnghsi
+ Flow_070gq5r
+
+
+
+
+
+
+
+
+ Flow_1hcpt7c
+ Flow_1gp4zfd
+
+
+
+
+ approval==True
+
+
+ approval==True
+
+
+
+
+ Removed a field that would set the supervisor, making this not validate.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/data/ldap_response.json b/tests/data/ldap_response.json
index f42fee94..cab99457 100644
--- a/tests/data/ldap_response.json
+++ b/tests/data/ldap_response.json
@@ -1,155 +1,124 @@
{
- "entries": [
- {
- "attributes": {
- "cn": [
- "Laura Barnes (lb3dp)"
- ],
- "displayName": "Laura Barnes",
- "givenName": [
- "Laura"
- ],
- "mail": [
- "lb3dp@virginia.edu"
- ],
- "objectClass": [
- "top",
- "person",
- "organizationalPerson",
- "inetOrgPerson",
- "uvaPerson",
- "uidObject"
- ],
- "telephoneNumber": [
- "+1 (434) 924-1723"
- ],
- "title": [
- "E0:Associate Professor of Systems and Information Engineering"
- ],
- "uvaDisplayDepartment": [
- "E0:EN-Eng Sys and Environment"
- ],
- "uvaPersonIAMAffiliation": [
- "faculty"
- ],
- "uvaPersonSponsoredType": [
- "Staff"
- ]
- },
- "dn": "uid=lb3dp,ou=People,o=University of Virginia,c=US",
- "raw": {
- "cn": [
- "Laura Barnes (lb3dp)"
- ],
- "displayName": [
- "Laura Barnes"
- ],
- "givenName": [
- "Laura"
- ],
- "mail": [
- "lb3dp@virginia.edu"
- ],
- "objectClass": [
- "top",
- "person",
- "organizationalPerson",
- "inetOrgPerson",
- "uvaPerson",
- "uidObject"
- ],
- "telephoneNumber": [
- "+1 (434) 924-1723"
- ],
- "title": [
- "E0:Associate Professor of Systems and Information Engineering"
- ],
- "uvaDisplayDepartment": [
- "E0:EN-Eng Sys and Environment"
- ],
- "uvaPersonIAMAffiliation": [
- "faculty"
- ],
- "uvaPersonSponsoredType": [
- "Staff"
- ]
- }
- },
- {
- "attributes": {
- "cn": [
- "Dan Funk (dhf8r)"
- ],
- "displayName": "Dan Funk",
- "givenName": [
- "Dan"
- ],
- "mail": [
- "dhf8r@virginia.edu"
- ],
- "objectClass": [
- "top",
- "person",
- "organizationalPerson",
- "inetOrgPerson",
- "uvaPerson",
- "uidObject"
- ],
- "telephoneNumber": [
- "+1 (434) 924-1723"
- ],
- "title": [
- "E42:He's a hoopy frood"
- ],
- "uvaDisplayDepartment": [
- "E0:EN-Eng Study of Parallel Universes"
- ],
- "uvaPersonIAMAffiliation": [
- "faculty"
- ],
- "uvaPersonSponsoredType": [
- "Staff"
- ]
- },
- "dn": "uid=dhf8r,ou=People,o=University of Virginia,c=US",
- "raw": {
- "cn": [
- "Dan Funk (dhf84)"
- ],
- "displayName": [
- "Dan Funk"
- ],
- "givenName": [
- "Dan"
- ],
- "mail": [
- "dhf8r@virginia.edu"
- ],
- "objectClass": [
- "top",
- "person",
- "organizationalPerson",
- "inetOrgPerson",
- "uvaPerson",
- "uidObject"
- ],
- "telephoneNumber": [
- "+1 (434) 924-1723"
- ],
- "title": [
- "E42:He's a hoopy frood"
- ],
- "uvaDisplayDepartment": [
- "E0:EN-Eng Study of Parallel Universes"
- ],
- "uvaPersonIAMAffiliation": [
- "faculty"
- ],
- "uvaPersonSponsoredType": [
- "Staff"
- ]
- }
- }
-
- ]
+ "entries": [
+ {
+ "dn": "uid=lb3dp,ou=People,o=University of Virginia,c=US",
+ "raw": {
+ "cn": [
+ "Laura Barnes (lb3dp)"
+ ],
+ "displayName": [
+ "Laura Barnes"
+ ],
+ "givenName": [
+ "Laura"
+ ],
+ "mail": [
+ "lb3dp@virginia.edu"
+ ],
+ "objectClass": [
+ "top",
+ "person",
+ "organizationalPerson",
+ "inetOrgPerson",
+ "uvaPerson",
+ "uidObject"
+ ],
+ "telephoneNumber": [
+ "+1 (434) 924-1723"
+ ],
+ "title": [
+ "E0:Associate Professor of Systems and Information Engineering"
+ ],
+ "uvaDisplayDepartment": [
+ "E0:EN-Eng Sys and Environment"
+ ],
+ "uvaPersonIAMAffiliation": [
+ "faculty"
+ ],
+ "uvaPersonSponsoredType": [
+ "Staff"
+ ]
+ }
+ },
+ {
+ "dn": "uid=dhf8r,ou=People,o=University of Virginia,c=US",
+ "raw": {
+ "cn": [
+ "Dan Funk (dhf84)"
+ ],
+ "displayName": [
+ "Dan Funk"
+ ],
+ "givenName": [
+ "Dan"
+ ],
+ "mail": [
+ "dhf8r@virginia.edu"
+ ],
+ "objectClass": [
+ "top",
+ "person",
+ "organizationalPerson",
+ "inetOrgPerson",
+ "uvaPerson",
+ "uidObject"
+ ],
+ "telephoneNumber": [
+ "+1 (434) 924-1723"
+ ],
+ "title": [
+ "E42:He's a hoopy frood"
+ ],
+ "uvaDisplayDepartment": [
+ "E0:EN-Eng Study of Parallel Universes"
+ ],
+ "uvaPersonIAMAffiliation": [
+ "faculty"
+ ],
+ "uvaPersonSponsoredType": [
+ "Staff"
+ ]
+ }
+ },
+ {
+ "dn": "uid=lje5u,ou=People,o=University of Virginia,c=US",
+ "raw": {
+ "cn": [
+ "Elder, Lori J (lje5u)"
+ ],
+ "displayName": [
+ "Lori Elder"
+ ],
+ "givenName": [
+ "Lori"
+ ],
+ "mail": [
+ "lje5u@virginia.edu"
+ ],
+ "objectClass": [
+ "top",
+ "person",
+ "organizationalPerson",
+ "inetOrgPerson",
+ "uvaPerson",
+ "uidObject"
+ ],
+ "telephoneNumber": [
+ "+1 (434) 924-1723"
+ ],
+ "title": [
+ "E42:The vision"
+ ],
+ "uvaDisplayDepartment": [
+ "E0:EN-Phy Anything could go here."
+ ],
+ "uvaPersonIAMAffiliation": [
+ "faculty"
+ ],
+ "uvaPersonSponsoredType": [
+ "Staff"
+ ]
+ }
+ }
+ ]
}
\ No newline at end of file
diff --git a/tests/data/roles/roles.bpmn b/tests/data/roles/roles.bpmn
new file mode 100644
index 00000000..be7992d7
--- /dev/null
+++ b/tests/data/roles/roles.bpmn
@@ -0,0 +1,155 @@
+
+
+
+
+
+
+
+
+ StartEvent_1
+ Activity_1hljoeq
+ Event_0lscajc
+ Activity_19ccxoj
+
+
+ Gateway_1fkgc4u
+ Activity_14eor1x
+
+
+
+ Flow_0a7090c
+
+
+ # Answer me these questions 3, ere the other side you see!
+
+
+
+
+
+
+
+
+ Flow_0a7090c
+ Flow_070gq5r
+ Flow_1hcpt7c
+
+
+ Flow_1gp4zfd
+ Flow_0vnghsi
+ Flow_1g38q6b
+
+
+ # Your responses were approved!
+
+
+Gosh! you must really know a lot about colors and swallows and stuff!
+Your supervisor provided the following feedback:
+
+
+{{feedback}}
+
+
+You are all done! WARNING: If you go back and reanswer the questions it will create a new approval request.
+ Flow_1g38q6b
+
+
+ # Your Request was rejected
+
+
+Perhaps you don't know the right answer to one of the questions.
+Your Supervisor provided the following feedback:
+
+
+{{feedback}}
+
+
+Please press save to re-try the questions, and submit your responses again.
+ Flow_0vnghsi
+ Flow_070gq5r
+
+
+
+
+ approval==False
+
+
+ approval==True
+
+
+
+
+
+
+
+
+
+
+ Flow_1hcpt7c
+ Flow_1gp4zfd
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/study/test_study_api.py b/tests/study/test_study_api.py
index ea5a86e6..3b781f50 100644
--- a/tests/study/test_study_api.py
+++ b/tests/study/test_study_api.py
@@ -8,7 +8,7 @@ from crc import session, app
from crc.models.protocol_builder import ProtocolBuilderStatus, \
ProtocolBuilderStudySchema
from crc.models.approval import ApprovalStatus
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowSpecModel, WorkflowModel
from crc.services.file_service import FileService
diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py
index 09690058..8284313d 100644
--- a/tests/test_tasks_api.py
+++ b/tests/test_tasks_api.py
@@ -9,80 +9,10 @@ from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
from crc.models.workflow import WorkflowStatus
-from crc.services.workflow_service import WorkflowService
-from crc.models.stats import TaskEventModel
+
class TestTasksApi(BaseTest):
- def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
- rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
- (workflow.id, str(soft_reset), str(hard_reset)),
- headers=self.logged_in_headers(),
- content_type="application/json")
- self.assert_success(rv)
- json_data = json.loads(rv.get_data(as_text=True))
- workflow_api = WorkflowApiSchema().load(json_data)
- self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
- return workflow_api
-
- def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
- prev_completed_task_count = workflow_in.completed_tasks
- if isinstance(task_in, dict):
- task_id = task_in["id"]
- else:
- task_id = task_in.id
- rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
- headers=self.logged_in_headers(),
- content_type="application/json",
- data=json.dumps(dict_data))
- if error_code:
- self.assert_failure(rv, error_code=error_code)
- return
-
- self.assert_success(rv)
- json_data = json.loads(rv.get_data(as_text=True))
-
- # Assure stats are updated on the model
- workflow = WorkflowApiSchema().load(json_data)
- # The total number of tasks may change over time, as users move through gateways
- # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
- self.assertIsNotNone(workflow.total_tasks)
- self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
- # Assure a record exists in the Task Events
- task_events = session.query(TaskEventModel) \
- .filter_by(workflow_id=workflow.id) \
- .filter_by(task_id=task_id) \
- .order_by(TaskEventModel.date.desc()).all()
- self.assertGreater(len(task_events), 0)
- event = task_events[0]
- self.assertIsNotNone(event.study_id)
- self.assertEqual("dhf8r", event.user_uid)
- self.assertEqual(workflow.id, event.workflow_id)
- self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
- self.assertEqual(workflow.spec_version, event.spec_version)
- self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
- self.assertEqual(task_in.id, task_id)
- self.assertEqual(task_in.name, event.task_name)
- self.assertEqual(task_in.title, event.task_title)
- self.assertEqual(task_in.type, event.task_type)
- self.assertEqual("COMPLETED", event.task_state)
- # Not sure what vodoo is happening inside of marshmallow to get me in this state.
- if isinstance(task_in.multi_instance_type, MultiInstanceType):
- self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
- else:
- self.assertEqual(task_in.multi_instance_type, event.mi_type)
-
- self.assertEqual(task_in.multi_instance_count, event.mi_count)
- self.assertEqual(task_in.multi_instance_index, event.mi_index)
- self.assertEqual(task_in.process_name, event.process_name)
- self.assertIsNotNone(event.date)
-
- # Assure that there is data in the form_data
- self.assertIsNotNone(event.form_data)
-
- workflow = WorkflowApiSchema().load(json_data)
- return workflow
-
def assert_options_populated(self, results, lookup_data_keys):
option_keys = ['value', 'label', 'data']
self.assertIsInstance(results, list)
diff --git a/tests/test_user_roles.py b/tests/test_user_roles.py
new file mode 100644
index 00000000..6104641c
--- /dev/null
+++ b/tests/test_user_roles.py
@@ -0,0 +1,202 @@
+import json
+
+from tests.base_test import BaseTest
+from crc.models.workflow import WorkflowStatus
+from crc import db
+from crc.api.common import ApiError
+from crc.models.task_event import TaskEventModel, TaskEventSchema
+from crc.services.workflow_service import WorkflowService
+
+
+class TestTasksApi(BaseTest):
+
+ def test_raise_error_if_role_does_not_exist_in_data(self):
+ workflow = self.create_workflow('roles', as_user="lje5u")
+ workflow_api = self.get_workflow_api(workflow, user_uid="lje5u")
+ data = workflow_api.next_task.data
+ # User lje5u can complete the first task
+ self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u")
+
+ # The next task is a supervisor task, and should raise an error if the role
+ # information is not in the task data.
+ workflow_api = self.get_workflow_api(workflow, user_uid="lje5u")
+ data = workflow_api.next_task.data
+ data["approved"] = True
+ result = self.complete_form(workflow, workflow_api.next_task, data, user_uid="lje5u",
+ error_code="permission_denied")
+
+ def test_validation_of_workflow_fails_if_workflow_does_not_define_user_for_lane(self):
+ error = None
+ try:
+ workflow = self.create_workflow('invalid_roles', as_user="lje5u")
+ WorkflowService.test_spec(workflow.workflow_spec_id)
+ except ApiError as ae:
+ error = ae
+ self.assertIsNotNone(error, "An error should be raised.")
+ self.assertEquals("invalid_role", error.code)
+
+ def test_raise_error_if_user_does_not_have_the_correct_role(self):
+ submitter = self.create_user(uid='lje5u')
+ supervisor = self.create_user(uid='lb3dp')
+ workflow = self.create_workflow('roles', as_user=submitter.uid)
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+
+ # User lje5u can complete the first task, and set her supervisor
+ data = workflow_api.next_task.data
+ data['supervisor'] = supervisor.uid
+ self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)
+
+ # But she can not complete the supervisor role.
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+ data = workflow_api.next_task.data
+ data["approval"] = True
+ result = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid,
+ error_code="permission_denied")
+
+ # Only her supervisor can do that.
+ self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid)
+
+ def test_nav_includes_lanes(self):
+ submitter = self.create_user(uid='lje5u')
+ workflow = self.create_workflow('roles', as_user=submitter.uid)
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals("supervisor", nav[1]['lane'])
+
+ def test_get_outstanding_tasks_awaiting_current_user(self):
+ submitter = self.create_user(uid='lje5u')
+ supervisor = self.create_user(uid='lb3dp')
+ workflow = self.create_workflow('roles', as_user=submitter.uid)
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+
+ # User lje5u can complete the first task, and set her supervisor
+ data = workflow_api.next_task.data
+ data['supervisor'] = supervisor.uid
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)
+
+ # At this point there should be a task_log with an action of Lane Change on it for
+ # the supervisor.
+ task_logs = db.session.query(TaskEventModel). \
+ filter(TaskEventModel.user_uid == supervisor.uid). \
+ filter(TaskEventModel.action == WorkflowService.TASK_ACTION_ASSIGNMENT).all()
+ self.assertEquals(1, len(task_logs))
+
+ # A call to the /task endpoint as the supervisor user should return a list of
+ # tasks that need their attention.
+ rv = self.app.get('/v1.0/task_events?action=ASSIGNMENT',
+ headers=self.logged_in_headers(supervisor),
+ content_type="application/json")
+ self.assert_success(rv)
+ json_data = json.loads(rv.get_data(as_text=True))
+ tasks = TaskEventSchema(many=True).load(json_data)
+ self.assertEquals(1, len(tasks))
+ self.assertEquals(workflow.id, tasks[0]['workflow']['id'])
+ self.assertEquals(workflow.study.id, tasks[0]['study']['id'])
+
+ # Assure we can say something sensible like:
+ # You have a task called "Approval" to be completed in the "Supervisor Approval" workflow
+ # for the study 'Why dogs are stinky' managed by user "Jane Smith (js42x)",
+ # please check here to complete the task.
+ # Display name isn't set in the tests, so just checking name, but the full workflow details are included.
+ # I didn't delve into the full user details to keep things decoupled from ldap, so you just get the
+ # uid back, but could query to get the full entry.
+ self.assertEquals("roles", tasks[0]['workflow']['name'])
+ self.assertEquals("Beer consumption in the bipedal software engineer", tasks[0]['study']['title'])
+ self.assertEquals("lje5u", tasks[0]['study']['user_uid'])
+
+ # Completing the next step of the workflow will close the task.
+ data['approval'] = True
+ self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid)
+
+ def test_navigation_and_current_task_updates_through_workflow(self):
+
+ submitter = self.create_user(uid='lje5u')
+ supervisor = self.create_user(uid='lb3dp')
+ workflow = self.create_workflow('roles', as_user=submitter.uid)
+
+ # Navigation as Submitter with ready task.
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals('READY', nav[0]['state']) # First item is ready, no progress yet.
+ self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked.
+ self.assertEquals('NOOP', nav[3]['state']) # Approved Path, has no operation
+ self.assertEquals('NOOP', nav[4]['state']) # Rejected Path, has no operation.
+ self.assertEquals('READY', workflow_api.next_task.state)
+
+ # Navigation as Submitter after handoff to supervisor
+ data = workflow_api.next_task.data
+ data['supervisor'] = supervisor.uid
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)
+ nav = workflow_api.navigation
+ self.assertEquals('COMPLETED', nav[0]['state']) # First item is now completed by the submitter.
+ self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked.
+ self.assertEquals('LOCKED', workflow_api.next_task.state)
+ # In the event the next task is locked, we should say something sensible here.
+ # It is possible to look at the role of the task, and say The next task "TASK TITLE" will
+ # be handled by 'dhf8r', who is fulfilling the role of supervisor. The Task Data
+ # is guaranteed to have a supervisor attribute in it that will contain the users uid, which
+ # could be looked up through an ldap service.
+ self.assertEquals('supervisor', workflow_api.next_task.lane)
+
+
+ # Navigation as Supervisor
+ workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid)
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked.
+ self.assertEquals('READY', nav[1]['state']) # Second item is ready, it is the review and belongs to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway, and belongs to no one, and is locked.
+ self.assertEquals('READY', workflow_api.next_task.state)
+
+ data = workflow_api.next_task.data
+ data["approval"] = False
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid)
+
+ # Navigation as Supervisor, after completing task.
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals('LOCKED', nav[0]['state']) # First item belongs to the submitter, and is locked.
+ self.assertEquals('COMPLETED', nav[1]['state']) # Second item is the review, which this user just completed.
+ self.assertEquals('COMPLETED', nav[2]['state']) # third item is a gateway, and is now complete.
+ self.assertEquals('LOCKED', workflow_api.next_task.state)
+
+ # Navigation as Submitter, coming back in to a rejected workflow to view the rejection message.
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals('COMPLETED', nav[0]['state']) # First item belongs to the submitter, and is completed.
+ self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked.
+ self.assertEquals('READY', workflow_api.next_task.state)
+
+ # Navigation as Submitter, re-completing the original request a second time, and sending it for review.
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)
+ nav = workflow_api.navigation
+ self.assertEquals(5, len(nav))
+ self.assertEquals('COMPLETED', nav[0]['state']) # We still have some issues here, the navigation will be off when looping back.
+ self.assertEquals('LOCKED', nav[1]['state']) # Second item is locked, it is the review and doesn't belong to this user.
+ self.assertEquals('LOCKED', nav[2]['state']) # third item is a gateway belonging to the supervisor, and is locked.
+ self.assertEquals('READY', workflow_api.next_task.state)
+
+ data["favorite_color"] = "blue"
+ data["quest"] = "to seek the holy grail"
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)
+ self.assertEquals('LOCKED', workflow_api.next_task.state)
+
+ workflow_api = self.get_workflow_api(workflow, user_uid=supervisor.uid)
+ self.assertEquals('READY', workflow_api.next_task.state)
+
+ data = workflow_api.next_task.data
+ data["approval"] = True
+ workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=supervisor.uid)
+ self.assertEquals('LOCKED', workflow_api.next_task.state)
+
+ workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
+ self.assertEquals('COMPLETED', workflow_api.next_task.state)
+ self.assertEquals('EndEvent', workflow_api.next_task.type) # We are at the end.
+ self.assertEquals(WorkflowStatus.complete, workflow_api.status)
\ No newline at end of file
diff --git a/tests/workflow/test_workflow_processor.py b/tests/workflow/test_workflow_processor.py
index d30f9cd1..a51f029d 100644
--- a/tests/workflow/test_workflow_processor.py
+++ b/tests/workflow/test_workflow_processor.py
@@ -371,4 +371,16 @@ class TestWorkflowProcessor(BaseTest):
self._populate_form_with_random_data(task)
+ def test_get_role_by_name(self):
+ self.load_example_data()
+ workflow_spec_model = self.load_test_spec("roles")
+ study = session.query(StudyModel).first()
+ processor = self.get_processor(study, workflow_spec_model)
+ processor.do_engine_steps()
+ tasks = processor.next_user_tasks()
+ task = tasks[0]
+ self._populate_form_with_random_data(task)
+ processor.complete_task(task)
+ supervisor_task = processor.next_user_tasks()[0]
+ self.assertEquals("supervisor", supervisor_task.task_spec.lane)
diff --git a/tests/workflow/test_workflow_service.py b/tests/workflow/test_workflow_service.py
index 3fbd3a23..748dcedc 100644
--- a/tests/workflow/test_workflow_service.py
+++ b/tests/workflow/test_workflow_service.py
@@ -7,7 +7,7 @@ from crc.services.workflow_service import WorkflowService
from SpiffWorkflow import Task as SpiffTask, WorkflowException
from example_data import ExampleDataLoader
from crc import db
-from crc.models.stats import TaskEventModel
+from crc.models.task_event import TaskEventModel
from crc.models.api_models import Task
from crc.api.common import ApiError
@@ -102,9 +102,8 @@ class TestWorkflowService(BaseTest):
WorkflowService.populate_form_with_random_data(task, task_api, False)
task.complete()
# create the task events
- WorkflowService.log_task_action('dhf8r', workflow, task,
- WorkflowService.TASK_ACTION_COMPLETE,
- version=processor.get_version_string())
+ WorkflowService.log_task_action('dhf8r', processor, task,
+ WorkflowService.TASK_ACTION_COMPLETE)
processor.save()
db.session.commit()