diff --git a/Pipfile b/Pipfile
index 17497132..0079962c 100644
--- a/Pipfile
+++ b/Pipfile
@@ -38,6 +38,8 @@ xlrd = "*"
ldap3 = "*"
gunicorn = "*"
werkzeug = "*"
+sentry-sdk = {extras = ["flask"], version = "==0.14.4"}
+flask-mail = "*"
[requires]
python_version = "3.7"
diff --git a/Pipfile.lock b/Pipfile.lock
index d9c2bfab..fb38d03c 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "979f996148ee181e3e0af2a3777aa1d00d0fd5d943d49df65963e694b8a88871"
+ "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee"
},
"pipfile-spec": 6,
"requires": {
@@ -32,10 +32,10 @@
},
"amqp": {
"hashes": [
- "sha256:6e649ca13a7df3faacdc8bbb280aa9a6602d22fd9d545336077e573a1f4ff3b8",
- "sha256:77f1aef9410698d20eaeac5b73a87817365f457a507d82edf292e12cbb83b08d"
+ "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b",
+ "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139"
],
- "version": "==2.5.2"
+ "version": "==2.6.0"
},
"aniso8601": {
"hashes": [
@@ -96,19 +96,25 @@
],
"version": "==3.6.3.0"
},
+ "blinker": {
+ "hashes": [
+ "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
+ ],
+ "version": "==1.4"
+ },
"celery": {
"hashes": [
- "sha256:108a0bf9018a871620936c33a3ee9f6336a89f8ef0a0f567a9001f4aa361415f",
- "sha256:5b4b37e276033fe47575107a2775469f0b721646a08c96ec2c61531e4fe45f2a"
+ "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647",
+ "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b"
],
- "version": "==4.4.2"
+ "version": "==4.4.5"
},
"certifi": {
"hashes": [
- "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304",
- "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"
+ "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1",
+ "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"
],
- "version": "==2020.4.5.1"
+ "version": "==2020.4.5.2"
},
"cffi": {
"hashes": [
@@ -270,13 +276,20 @@
"index": "pypi",
"version": "==3.0.8"
},
- "flask-marshmallow": {
+ "flask-mail": {
"hashes": [
- "sha256:6e6aec171b8e092e0eafaf035ff5b8637bf3a58ab46f568c4c1bab02f2a3c196",
- "sha256:a1685536e7ab5abdc712bbc1ac1a6b0b50951a368502f7985e7d1c27b3c21e59"
+ "sha256:22e5eb9a940bf407bcf30410ecc3708f3c56cc44b29c34e1726fe85006935f41"
],
"index": "pypi",
- "version": "==0.12.0"
+ "version": "==0.9.1"
+ },
+ "flask-marshmallow": {
+ "hashes": [
+ "sha256:1da1e6454a56a3e15107b987121729f152325bdef23f3df2f9b52bbd074af38e",
+ "sha256:aefc1f1d96256c430a409f08241bab75ffe97e5d14ac5d1f000764e39bf4873a"
+ ],
+ "index": "pypi",
+ "version": "==0.13.0"
},
"flask-migrate": {
"hashes": [
@@ -338,18 +351,18 @@
},
"importlib-metadata": {
"hashes": [
- "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f",
- "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"
+ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
+ "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
],
"markers": "python_version < '3.8'",
- "version": "==1.6.0"
+ "version": "==1.6.1"
},
"inflection": {
"hashes": [
- "sha256:32a5c3341d9583ec319548b9015b7fbdf8c429cbcb575d326c33ae3a0e90d52c",
- "sha256:9a15d3598f01220e93f2207c432cfede50daff53137ce660fb8be838ef1ca6cc"
+ "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9",
+ "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924"
],
- "version": "==0.4.0"
+ "version": "==0.5.0"
},
"itsdangerous": {
"hashes": [
@@ -381,10 +394,10 @@
},
"kombu": {
"hashes": [
- "sha256:2d1cda774126a044d91a7ff5fa6d09edf99f46924ab332a810760fe6740e9b76",
- "sha256:598e7e749d6ab54f646b74b2d2df67755dee13894f73ab02a2a9feb8870c7cb2"
+ "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a",
+ "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3"
],
- "version": "==4.6.8"
+ "version": "==4.6.10"
},
"ldap3": {
"hashes": [
@@ -428,10 +441,10 @@
},
"mako": {
"hashes": [
- "sha256:3139c5d64aa5d175dbafb95027057128b5fbd05a40c53999f3905ceb53366d9d",
- "sha256:8e8b53c71c7e59f3de716b6832c4e401d903af574f6962edbbbf6ecc2a5fe6c9"
+ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27",
+ "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9"
],
- "version": "==1.1.2"
+ "version": "==1.1.3"
},
"markupsafe": {
"hashes": [
@@ -473,11 +486,11 @@
},
"marshmallow": {
"hashes": [
- "sha256:c2673233aa21dde264b84349dc2fd1dce5f30ed724a0a00e75426734de5b84ab",
- "sha256:f88fe96434b1f0f476d54224d59333eba8ca1a203a2695683c1855675c4049a7"
+ "sha256:35ee2fb188f0bd9fc1cf9ac35e45fd394bd1c153cee430745a465ea435514bd5",
+ "sha256:9aa20f9b71c992b4782dad07c51d92884fd0f7c5cb9d3c737bea17ec1bad765f"
],
"index": "pypi",
- "version": "==3.6.0"
+ "version": "==3.6.1"
},
"marshmallow-enum": {
"hashes": [
@@ -489,37 +502,37 @@
},
"marshmallow-sqlalchemy": {
"hashes": [
- "sha256:3247e41e424146340b03a369f2b7c6f0364477ccedc4e2481e84d5f3a8d3c67f",
- "sha256:dbbe51d28bb28e7ee2782e51310477f7a2c5a111a301f6dd8e264e11ab820427"
+ "sha256:03a555b610bb307689b821b64e2416593ec21a85925c8c436c2cd08ebc6bb85e",
+ "sha256:0ef59c8da8da2e18e808e3880158049e9d72f3031c84cc804b6c533a0eb668a9"
],
"index": "pypi",
- "version": "==0.23.0"
+ "version": "==0.23.1"
},
"numpy": {
"hashes": [
- "sha256:00d7b54c025601e28f468953d065b9b121ddca7fff30bed7be082d3656dd798d",
- "sha256:02ec9582808c4e48be4e93cd629c855e644882faf704bc2bd6bbf58c08a2a897",
- "sha256:0e6f72f7bb08f2f350ed4408bb7acdc0daba637e73bce9f5ea2b207039f3af88",
- "sha256:1be2e96314a66f5f1ce7764274327fd4fb9da58584eaff00b5a5221edefee7d6",
- "sha256:2466fbcf23711ebc5daa61d28ced319a6159b260a18839993d871096d66b93f7",
- "sha256:2b573fcf6f9863ce746e4ad00ac18a948978bb3781cffa4305134d31801f3e26",
- "sha256:3f0dae97e1126f529ebb66f3c63514a0f72a177b90d56e4bce8a0b5def34627a",
- "sha256:50fb72bcbc2cf11e066579cb53c4ca8ac0227abb512b6cbc1faa02d1595a2a5d",
- "sha256:57aea170fb23b1fd54fa537359d90d383d9bf5937ee54ae8045a723caa5e0961",
- "sha256:709c2999b6bd36cdaf85cf888d8512da7433529f14a3689d6e37ab5242e7add5",
- "sha256:7d59f21e43bbfd9a10953a7e26b35b6849d888fc5a331fa84a2d9c37bd9fe2a2",
- "sha256:904b513ab8fbcbdb062bed1ce2f794ab20208a1b01ce9bd90776c6c7e7257032",
- "sha256:96dd36f5cdde152fd6977d1bbc0f0561bccffecfde63cd397c8e6033eb66baba",
- "sha256:9933b81fecbe935e6a7dc89cbd2b99fea1bf362f2790daf9422a7bb1dc3c3085",
- "sha256:bbcc85aaf4cd84ba057decaead058f43191cc0e30d6bc5d44fe336dc3d3f4509",
- "sha256:dccd380d8e025c867ddcb2f84b439722cf1f23f3a319381eac45fd077dee7170",
- "sha256:e22cd0f72fc931d6abc69dc7764484ee20c6a60b0d0fee9ce0426029b1c1bdae",
- "sha256:ed722aefb0ebffd10b32e67f48e8ac4c5c4cf5d3a785024fdf0e9eb17529cd9d",
- "sha256:efb7ac5572c9a57159cf92c508aad9f856f1cb8e8302d7fdb99061dbe52d712c",
- "sha256:efdba339fffb0e80fcc19524e4fdbda2e2b5772ea46720c44eaac28096d60720",
- "sha256:f22273dd6a403ed870207b853a856ff6327d5cbce7a835dfa0645b3fc00273ec"
+ "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233",
+ "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b",
+ "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7",
+ "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f",
+ "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5",
+ "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb",
+ "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583",
+ "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1",
+ "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a",
+ "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271",
+ "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824",
+ "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3",
+ "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc",
+ "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161",
+ "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f",
+ "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f",
+ "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf",
+ "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b",
+ "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0",
+ "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675",
+ "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"
],
- "version": "==1.18.4"
+ "version": "==1.18.5"
},
"openapi-spec-validator": {
"hashes": [
@@ -704,6 +717,17 @@
"index": "pypi",
"version": "==2.23.0"
},
+ "sentry-sdk": {
+ "extras": [
+ "flask"
+ ],
+ "hashes": [
+ "sha256:0e5e947d0f7a969314aa23669a94a9712be5a688ff069ff7b9fc36c66adc160c",
+ "sha256:799a8bf76b012e3030a881be00e97bc0b922ce35dde699c6537122b751d80e2c"
+ ],
+ "index": "pypi",
+ "version": "==0.14.4"
+ },
"six": {
"hashes": [
"sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
@@ -727,11 +751,11 @@
},
"sphinx": {
"hashes": [
- "sha256:779a519adbd3a70fc7c468af08c5e74829868b0a5b34587b33340e010291856c",
- "sha256:ea64df287958ee5aac46be7ac2b7277305b0381d213728c3a49d8bb9b8415807"
+ "sha256:1c445320a3310baa5ccb8d957267ef4a0fc930dc1234db5098b3d7af14fbb242",
+ "sha256:7d3d5087e39ab5a031b75588e9859f011de70e213cd0080ccbc28079fb0786d1"
],
"index": "pypi",
- "version": "==3.0.4"
+ "version": "==3.1.0"
},
"sphinxcontrib-applehelp": {
"hashes": [
@@ -778,7 +802,7 @@
"spiffworkflow": {
"editable": true,
"git": "https://github.com/sartography/SpiffWorkflow.git",
- "ref": "c8d87826d496af825a184bdc3f0a751e603cfe44"
+ "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0"
},
"sqlalchemy": {
"hashes": [
@@ -838,10 +862,10 @@
},
"waitress": {
"hashes": [
- "sha256:045b3efc3d97c93362173ab1dfc159b52cfa22b46c3334ffc805dbdbf0e4309e",
- "sha256:77ff3f3226931a1d7d8624c5371de07c8e90c7e5d80c5cc660d72659aaf23f38"
+ "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261",
+ "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db"
],
- "version": "==1.4.3"
+ "version": "==1.4.4"
},
"webob": {
"hashes": [
@@ -876,11 +900,11 @@
},
"xlsxwriter": {
"hashes": [
- "sha256:488e1988ab16ff3a9cd58c7656d0a58f8abe46ee58b98eecea78c022db28656b",
- "sha256:97ab487b81534415c5313154203f3e8a637d792b1e6a8201e8f7f71da0203c2a"
+ "sha256:828b3285fc95105f5b1946a6a015b31cf388bd5378fdc6604e4d1b7839df2e77",
+ "sha256:82a3b0e73e3913483da23791d1a25e4d2dbb3837d1be4129473526b9a270a5cc"
],
"index": "pypi",
- "version": "==1.2.8"
+ "version": "==1.2.9"
},
"zipp": {
"hashes": [
@@ -900,11 +924,11 @@
},
"importlib-metadata": {
"hashes": [
- "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f",
- "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"
+ "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
+ "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
],
"markers": "python_version < '3.8'",
- "version": "==1.6.0"
+ "version": "==1.6.1"
},
"more-itertools": {
"hashes": [
@@ -951,11 +975,11 @@
},
"pytest": {
"hashes": [
- "sha256:95c710d0a72d91c13fae35dce195633c929c3792f54125919847fdcdf7caa0d3",
- "sha256:eb2b5e935f6a019317e455b6da83dd8650ac9ffd2ee73a7b657a30873d67a698"
+ "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1",
+ "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"
],
"index": "pypi",
- "version": "==5.4.2"
+ "version": "==5.4.3"
},
"six": {
"hashes": [
@@ -966,10 +990,10 @@
},
"wcwidth": {
"hashes": [
- "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1",
- "sha256:ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1"
+ "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f",
+ "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"
],
- "version": "==0.1.9"
+ "version": "==0.2.4"
},
"zipp": {
"hashes": [
diff --git a/config/default.py b/config/default.py
index e368b32d..93e4a933 100644
--- a/config/default.py
+++ b/config/default.py
@@ -9,9 +9,13 @@ JSON_SORT_KEYS = False # CRITICAL. Do not sort the data when returning values
NAME = "CR Connect Workflow"
FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default="5000")
CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default="localhost:4200, localhost:5002"))
-DEVELOPMENT = environ.get('DEVELOPMENT', default="true") == "true"
TESTING = environ.get('TESTING', default="false") == "true"
-PRODUCTION = (environ.get('PRODUCTION', default="false") == "true") or (not DEVELOPMENT and not TESTING)
+PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
+TEST_UID = environ.get('TEST_UID', default="dhf8r")
+ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah13us,cl3wf"))
+
+# Sentry flag
+ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"
# Add trailing slash to base path
APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
@@ -25,7 +29,7 @@ SQLALCHEMY_DATABASE_URI = environ.get(
'SQLALCHEMY_DATABASE_URI',
default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
-TOKEN_AUTH_TTL_HOURS = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
+TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
@@ -39,6 +43,14 @@ PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL +
PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + "study?studyid=%i")
LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No trailing slash or http://
-LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=3))
-
+LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))
+# Email configuration
+FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com']
+MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
+MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
+MAIL_PORT = environ.get('MAIL_PORT', default=2525)
+MAIL_USE_SSL = environ.get('MAIL_USE_SSL', default=False)
+MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=True)
+MAIL_USERNAME = environ.get('MAIL_USERNAME', default='')
+MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='')
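The MAIL_* keys above are standard Flask-Mail configuration values, consumed when Mail(app) is constructed in crc/__init__.py below. A minimal sketch of sending through that instance, assuming the settings are loaded into app.config; the sender address here is purely illustrative:

    from flask_mail import Message

    from crc import app, mail

    def send_plain_message():
        # Sketch only: "noreply@example.com" is a hypothetical sender;
        # recipients reuse the FALLBACK_EMAILS configured above.
        with app.app_context():
            msg = Message(subject="CR Connect mail check",
                          sender="noreply@example.com",
                          recipients=app.config['FALLBACK_EMAILS'],
                          body="If this arrives, the MAIL_* settings work.")
            mail.send(msg)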
diff --git a/config/testing.py b/config/testing.py
index a7c6a893..546ea829 100644
--- a/config/testing.py
+++ b/config/testing.py
@@ -4,7 +4,6 @@ from os import environ
basedir = os.path.abspath(os.path.dirname(__file__))
NAME = "CR Connect Workflow"
-DEVELOPMENT = True
TESTING = True
TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
PB_ENABLED = False
@@ -23,8 +22,8 @@ SQLALCHEMY_DATABASE_URI = environ.get(
'SQLALCHEMY_DATABASE_URI',
default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
)
+ADMIN_UIDS = ['dhf8r']
print('### USING TESTING CONFIG: ###')
print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
-print('DEVELOPMENT = ', DEVELOPMENT)
print('TESTING = ', TESTING)
diff --git a/config/travis-testing.py b/config/travis-testing.py
index 17a4b914..8949061a 100644
--- a/config/travis-testing.py
+++ b/config/travis-testing.py
@@ -2,7 +2,6 @@ import os
basedir = os.path.abspath(os.path.dirname(__file__))
NAME = "CR Connect Workflow"
-DEVELOPMENT = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "postgresql://postgres:@localhost:5432/crc_test"
TOKEN_AUTH_TTL_HOURS = 2
@@ -12,6 +11,5 @@ PB_ENABLED = False
print('+++ USING TRAVIS TESTING CONFIG: +++')
print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
-print('DEVELOPMENT = ', DEVELOPMENT)
print('TESTING = ', TESTING)
print('FRONTEND_AUTH_CALLBACK = ', FRONTEND_AUTH_CALLBACK)
diff --git a/crc/__init__.py b/crc/__init__.py
index fe510daf..1ac2678f 100644
--- a/crc/__init__.py
+++ b/crc/__init__.py
@@ -1,11 +1,15 @@
import logging
import os
+import sentry_sdk
import connexion
+from jinja2 import Environment, FileSystemLoader
from flask_cors import CORS
from flask_marshmallow import Marshmallow
+from flask_mail import Mail
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
+from sentry_sdk.integrations.flask import FlaskIntegration
logging.basicConfig(level=logging.INFO)
@@ -40,16 +44,29 @@ connexion_app.add_api('api.yml', base_path='/v1.0')
origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
cors = CORS(connexion_app.app, origins=origins_re)
+if app.config['ENABLE_SENTRY']:
+ sentry_sdk.init(
+ dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915",
+ integrations=[FlaskIntegration()]
+ )
+
+# Jinja environment definition, used to render mail templates
+template_dir = os.getcwd() + '/crc/static/templates/mails'
+env = Environment(loader=FileSystemLoader(template_dir))
+# Mail settings
+mail = Mail(app)
+
print('=== USING THESE CONFIG SETTINGS: ===')
-print('DB_HOST = ', )
-print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
-print('DEVELOPMENT = ', app.config['DEVELOPMENT'])
-print('TESTING = ', app.config['TESTING'])
-print('PRODUCTION = ', app.config['PRODUCTION'])
-print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
-print('LDAP_URL = ', app.config['LDAP_URL'])
print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
+print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
+print('DB_HOST = ', app.config['DB_HOST'])
+print('LDAP_URL = ', app.config['LDAP_URL'])
+print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
print('PB_ENABLED = ', app.config['PB_ENABLED'])
+print('PRODUCTION = ', app.config['PRODUCTION'])
+print('TESTING = ', app.config['TESTING'])
+print('TEST_UID = ', app.config['TEST_UID'])
+print('ADMIN_UIDS = ', app.config['ADMIN_UIDS'])
@app.cli.command()
def load_example_data():
@@ -65,3 +82,9 @@ def load_example_rrt_data():
from example_data import ExampleDataLoader
ExampleDataLoader.clean_db()
ExampleDataLoader().load_rrt()
+
+@app.cli.command()
+def clear_db():
+ """Load example data into the database."""
+ from example_data import ExampleDataLoader
+ ExampleDataLoader.clean_db()
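The `env` Jinja environment defined above exists solely to render the mail templates under crc/static/templates/mails. A hedged sketch of how a service might use it; the template name is illustrative, not necessarily one that exists in that directory:

    from jinja2 import TemplateNotFound

    from crc import env

    def render_mail_body(template_name, **context):
        # Sketch only: "test_email.html" is a hypothetical template name.
        try:
            template = env.get_template(template_name)
        except TemplateNotFound:
            return None
        return template.render(**context)

    body = render_mail_body("test_email.html", subject="Hello")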
diff --git a/crc/api.yml b/crc/api.yml
index edc3861b..64f6086a 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -9,54 +9,18 @@ servers:
security:
- jwt: ['secret']
paths:
- /sso_backdoor:
+ /login:
get:
- operationId: crc.api.user.backdoor
- summary: A backdoor that allows someone to log in as a specific user, if they
- are in a staging environment.
+ operationId: crc.api.user.login
+ summary: In production, logs the user in via SSO. If not in production, logs in as a specific user for testing.
security: [] # Disable security for this endpoint only.
parameters:
- name: uid
- in: query
- required: true
- schema:
- type: string
- - name: email_address
in: query
required: false
schema:
type: string
- - name: display_name
- in: query
- required: false
- schema:
- type: string
- - name: affiliation
- in: query
- required: false
- schema:
- type: string
- - name: eppn
- in: query
- required: false
- schema:
- type: string
- - name: first_name
- in: query
- required: false
- schema:
- type: string
- - name: last_name
- in: query
- required: false
- schema:
- type: string
- - name: title
- in: query
- required: false
- schema:
- type: string
- - name: redirect
+ - name: redirect_url
in: query
required: false
schema:
@@ -150,6 +114,8 @@ paths:
$ref: "#/components/schemas/Study"
delete:
operationId: crc.api.study.delete_study
+ security:
+ - auth_admin: ['secret']
summary: Removes the given study completely.
tags:
- Studies
@@ -173,6 +139,30 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Study"
+ /study/{study_id}/approvals:
+ parameters:
+ - name: study_id
+ in: path
+ required: true
+ description: The id of the study for which workflows should be returned.
+ schema:
+ type: integer
+ format: int32
+ get:
+ operationId: crc.api.approval.get_approvals_for_study
+ summary: Returns approvals for a single study
+ tags:
+ - Studies
+ - Approvals
+ responses:
+ '200':
+ description: An array of approvals
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/Approval"
/workflow-specification:
get:
operationId: crc.api.workflow.all_specifications
@@ -227,6 +217,8 @@ paths:
$ref: "#/components/schemas/WorkflowSpec"
put:
operationId: crc.api.workflow.update_workflow_specification
+ security:
+ - auth_admin: ['secret']
summary: Modifies an existing workflow specification with the given parameters.
tags:
- Workflow Specifications
@@ -244,6 +236,8 @@ paths:
$ref: "#/components/schemas/WorkflowSpec"
delete:
operationId: crc.api.workflow.delete_workflow_specification
+ security:
+ - auth_admin: ['secret']
summary: Removes an existing workflow specification
tags:
- Workflow Specifications
@@ -289,6 +283,8 @@ paths:
$ref: "#/components/schemas/WorkflowSpecCategory"
post:
operationId: crc.api.workflow.add_workflow_spec_category
+ security:
+ - auth_admin: ['secret']
summary: Creates a new workflow spec category with the given parameters.
tags:
- Workflow Specification Category
@@ -326,6 +322,8 @@ paths:
$ref: "#/components/schemas/WorkflowSpecCategory"
put:
operationId: crc.api.workflow.update_workflow_spec_category
+ security:
+ - auth_admin: ['secret']
summary: Modifies an existing workflow spec category with the given parameters.
tags:
- Workflow Specification Category
@@ -343,6 +341,8 @@ paths:
$ref: "#/components/schemas/WorkflowSpecCategory"
delete:
operationId: crc.api.workflow.delete_workflow_spec_category
+ security:
+ - auth_admin: ['secret']
summary: Removes an existing workflow spec category
tags:
- Workflow Specification Category
@@ -444,7 +444,7 @@ paths:
$ref: "#/components/schemas/File"
delete:
operationId: crc.api.file.delete_file
- summary: Removes an existing file
+      summary: Removes an existing file. In the event the file cannot be deleted, it is marked as "archived" in the database and is no longer returned unless specifically requested by id.
tags:
- Files
responses:
@@ -542,6 +542,8 @@ paths:
example: ''
put:
operationId: crc.api.file.set_reference_file
+ security:
+ - auth_admin: ['secret']
summary: Update the contents of a named reference file.
tags:
- Files
@@ -600,6 +602,8 @@ paths:
$ref: "#/components/schemas/Workflow"
delete:
operationId: crc.api.workflow.delete_workflow
+ security:
+ - auth_admin: ['secret']
summary: Removes an existing workflow
tags:
- Workflows and Tasks
@@ -738,6 +742,26 @@ paths:
text/plain:
schema:
type: string
+ /send_email:
+ parameters:
+ - name: address
+ in: query
+ required: true
+ description: The address to send a test email to.
+ schema:
+ type: string
+ get:
+ operationId: crc.api.tools.send_email
+ summary: Sends an email so we can see if things work or not.
+ tags:
+ - Configurator Tools
+ responses:
+ '201':
+ description: Returns any error messages that might come back from sending the email.
+ content:
+ text/plain:
+ schema:
+ type: string
/render_docx:
put:
operationId: crc.api.tools.render_docx
@@ -782,12 +806,62 @@ paths:
type: array
items:
$ref: "#/components/schemas/Script"
- /approval:
+ /approval-counts:
parameters:
- - name: approver_uid
+ - name: as_user
in: query
required: false
- description: Restrict results to a given approver uid, maybe we restrict the use of this at somepoint.
+ description: If provided, returns the approval counts for that user.
+ schema:
+ type: string
+ get:
+ operationId: crc.api.approval.get_approval_counts
+      summary: Provides counts of approvals in each status for the given user, or for the current user if none is provided
+ tags:
+ - Approvals
+ responses:
+ '200':
+          description: A dictionary of approval statuses and the counts for each
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/ApprovalCounts"
+ /all_approvals:
+ parameters:
+ - name: status
+ in: query
+ required: false
+ description: If set to true, returns all the approvals with any status. Defaults to false, leaving out canceled approvals.
+ schema:
+ type: boolean
+ get:
+ operationId: crc.api.approval.get_all_approvals
+      summary: Provides a list of all workflow approvals
+ tags:
+ - Approvals
+ responses:
+ '200':
+ description: An array of approvals
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/Approval"
+ /approval:
+ parameters:
+ - name: status
+ in: query
+ required: false
+ description: If provided, returns just approvals for the given status.
+ schema:
+ type: string
+ - name: as_user
+ in: query
+ required: false
+ description: If provided, returns the approval results as they would appear for that user.
schema:
type: string
get:
@@ -830,6 +904,19 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Approval"
+ /approval/csv:
+ get:
+ operationId: crc.api.approval.get_csv
+ summary: Provides a list of all users for all approved studies
+ tags:
+ - Approvals
+ responses:
+ '200':
+ description: An array of approvals
+ content:
+ application/json:
+ schema:
+ type: object
components:
securitySchemes:
jwt:
@@ -837,6 +924,11 @@ components:
scheme: bearer
bearerFormat: JWT
x-bearerInfoFunc: crc.api.user.verify_token
+ auth_admin:
+ type: http
+ scheme: bearer
+ bearerFormat: JWT
+ x-bearerInfoFunc: crc.api.user.verify_token_admin
schemas:
User:
properties:
@@ -1243,4 +1335,26 @@ components:
type: number
format: integer
example: 5
+ ApprovalCounts:
+ properties:
+ PENDING:
+ type: number
+ format: integer
+ example: 5
+ APPROVED:
+ type: number
+ format: integer
+ example: 5
+ DECLINED:
+ type: number
+ format: integer
+ example: 5
+ CANCELED:
+ type: number
+ format: integer
+ example: 5
+ AWAITING:
+ type: number
+ format: integer
+ example: 5
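For reference, the ApprovalCounts schema above carries one integer per ApprovalStatus name; a response might look like the following (values invented for illustration):

    # Illustrative /approval-counts payload; keys mirror ApprovalStatus members.
    {
        "PENDING": 2,
        "APPROVED": 5,
        "DECLINED": 1,
        "CANCELED": 0,
        "AWAITING": 3
    }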
diff --git a/crc/api/approval.py b/crc/api/approval.py
index 739773c1..b3ee0fed 100644
--- a/crc/api/approval.py
+++ b/crc/api/approval.py
@@ -1,19 +1,161 @@
-from crc import app, db, session
+import json
+import pickle
+from base64 import b64decode
+from datetime import datetime
-from crc.api.common import ApiError, ApiErrorSchema
-from crc.models.approval import Approval, ApprovalModel, ApprovalSchema
+from flask import g
+
+from crc import db, session
+from crc.api.common import ApiError
+from crc.models.approval import Approval, ApprovalModel, ApprovalSchema, ApprovalStatus
+from crc.models.workflow import WorkflowModel
from crc.services.approval_service import ApprovalService
+from crc.services.ldap_service import LdapService
-def get_approvals(approver_uid = None):
- if not approver_uid:
- db_approvals = ApprovalService.get_all_approvals()
- else:
- db_approvals = ApprovalService.get_approvals_per_user(approver_uid)
+# Returns counts of approvals in each status group assigned to the given user.
+# The goal is to return results as quickly as possible.
+def get_approval_counts(as_user=None):
+ uid = as_user or g.user.uid
+
+ db_user_approvals = db.session.query(ApprovalModel)\
+ .filter_by(approver_uid=uid)\
+ .filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
+ .all()
+
+ study_ids = [a.study_id for a in db_user_approvals]
+
+ db_other_approvals = db.session.query(ApprovalModel)\
+ .filter(ApprovalModel.study_id.in_(study_ids))\
+ .filter(ApprovalModel.approver_uid != uid)\
+ .filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
+ .all()
+
+ # Make a dict of the other approvals where the key is the study id and the value is the approval
+ # TODO: This won't work if there are more than 2 approvals with the same study_id
+ other_approvals = {}
+ for approval in db_other_approvals:
+ other_approvals[approval.study_id] = approval
+
+ counts = {}
+ for name, value in ApprovalStatus.__members__.items():
+ counts[name] = 0
+
+ for approval in db_user_approvals:
+ # Check if another approval has the same study id
+ if approval.study_id in other_approvals:
+ other_approval = other_approvals[approval.study_id]
+
+ # Other approval takes precedence over this one
+ if other_approval.id < approval.id:
+ if other_approval.status == ApprovalStatus.PENDING.name:
+ counts[ApprovalStatus.AWAITING.name] += 1
+ elif other_approval.status == ApprovalStatus.DECLINED.name:
+ counts[ApprovalStatus.DECLINED.name] += 1
+ elif other_approval.status == ApprovalStatus.CANCELED.name:
+ counts[ApprovalStatus.CANCELED.name] += 1
+ elif other_approval.status == ApprovalStatus.APPROVED.name:
+ counts[approval.status] += 1
+ else:
+ counts[approval.status] += 1
+ else:
+ counts[approval.status] += 1
+
+ return counts
+
+
+def get_all_approvals(status=None):
+ approvals = ApprovalService.get_all_approvals(include_cancelled=status is True)
+ results = ApprovalSchema(many=True).dump(approvals)
+ return results
+
+
+def get_approvals(status=None, as_user=None):
+ #status = ApprovalStatus.PENDING.value
+ user = g.user.uid
+ if as_user:
+ user = as_user
+ approvals = ApprovalService.get_approvals_per_user(user, status,
+ include_cancelled=False)
+ results = ApprovalSchema(many=True).dump(approvals)
+ return results
+
+
+def get_approvals_for_study(study_id=None):
+ db_approvals = ApprovalService.get_approvals_for_study(study_id)
approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
results = ApprovalSchema(many=True).dump(approvals)
return results
+
+# ----- Begin descent into madness ---- #
+def get_csv():
+ """A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a
+ man to do just about anything"""
+ approvals = ApprovalService.get_all_approvals(include_cancelled=False)
+ output = []
+ errors = []
+ for approval in approvals:
+ try:
+ if approval.status != ApprovalStatus.APPROVED.value:
+ continue
+ for related_approval in approval.related_approvals:
+ if related_approval.status != ApprovalStatus.APPROVED.value:
+ continue
+ workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first()
+ data = json.loads(workflow.bpmn_workflow_json)
+ last_task = find_task(data['last_task']['__uuid__'], data['task_tree'])
+ personnel = extract_value(last_task, 'personnel')
+ training_val = extract_value(last_task, 'RequiredTraining')
+ pi_supervisor = extract_value(last_task, 'PISupervisor')['value']
+ review_complete = 'AllRequiredTraining' in training_val
+ pi_uid = workflow.study.primary_investigator_id
+ pi_details = LdapService.user_info(pi_uid)
+ details = []
+ details.append(pi_details)
+ for person in personnel:
+ uid = person['PersonnelComputingID']['value']
+ details.append(LdapService.user_info(uid))
+
+ for person in details:
+ record = {
+ "study_id": approval.study_id,
+ "pi_uid": pi_details.uid,
+ "pi": pi_details.display_name,
+ "name": person.display_name,
+ "uid": person.uid,
+ "email": person.email_address,
+ "supervisor": "",
+ "review_complete": review_complete,
+ }
+ # We only know the PI's supervisor.
+ if person.uid == pi_details.uid:
+ record["supervisor"] = pi_supervisor
+
+ output.append(record)
+
+ except Exception as e:
+ errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))
+ return {"results": output, "errors": errors }
+
+
+def extract_value(task, key):
+ if key in task['data']:
+ return pickle.loads(b64decode(task['data'][key]['__bytes__']))
+ else:
+ return ""
+
+
+def find_task(uuid, task):
+ if task['id']['__uuid__'] == uuid:
+ return task
+    for child in task['children']:
+        found = find_task(uuid, child)
+        if found:
+            return found
+# ----- come back to the world of the living ---- #
+
+
def update_approval(approval_id, body):
if approval_id is None:
raise ApiError('unknown_approval', 'Please provide a valid Approval ID.')
@@ -22,9 +164,18 @@ def update_approval(approval_id, body):
if approval_model is None:
raise ApiError('unknown_approval', 'The approval "' + str(approval_id) + '" is not recognized.')
- approval: Approval = ApprovalSchema().load(body)
- approval.update_model(approval_model)
+ if approval_model.approver_uid != g.user.uid:
+ raise ApiError("not_your_approval", "You may not modify this approval. It belongs to another user.")
+
+ approval_model.status = body['status']
+ approval_model.message = body['message']
+ approval_model.date_approved = datetime.now()
+ session.add(approval_model)
session.commit()
- result = ApprovalSchema().dump(approval)
+ # Called only to send emails
+ approver = body['approver']['uid']
+ ApprovalService.update_approval(approval_id, approver)
+
+ result = ApprovalSchema().dump(approval_model)
return result
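update_approval now reads the status, message, and approver uid straight out of the request body; an illustrative body (values invented) would be:

    # Illustrative PUT /approval/{approval_id} body for update_approval:
    body = {
        "status": "APPROVED",          # one of the ApprovalStatus names
        "message": "Looks good.",
        "approver": {"uid": "bgb22"}   # used only to trigger the notification email
    }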
diff --git a/crc/api/common.py b/crc/api/common.py
index 2cd09522..f8673a5b 100644
--- a/crc/api/common.py
+++ b/crc/api/common.py
@@ -1,9 +1,12 @@
+from SpiffWorkflow import WorkflowException
+from SpiffWorkflow.exceptions import WorkflowTaskExecException
+
from crc import ma, app
class ApiError(Exception):
def __init__(self, code, message, status_code=400,
- file_name="", task_id="", task_name="", tag=""):
+ file_name="", task_id="", task_name="", tag="", task_data = {}):
self.status_code = status_code
self.code = code # a short consistent string describing the error.
self.message = message # A detailed message that provides more information.
@@ -11,6 +14,7 @@ class ApiError(Exception):
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
+        self.task_data = task_data or ""  # OPTIONAL: A snapshot of data connected to the task when the error occurred.
Exception.__init__(self, self.message)
@classmethod
@@ -20,6 +24,7 @@ class ApiError(Exception):
instance.task_id = task.task_spec.name or ""
instance.task_name = task.task_spec.description or ""
instance.file_name = task.workflow.spec.file or ""
+ instance.task_data = task.data
return instance
@classmethod
@@ -32,10 +37,21 @@ class ApiError(Exception):
instance.file_name = task_spec._wf_spec.file
return instance
+ @classmethod
+ def from_workflow_exception(cls, code, message, exp: WorkflowException):
+ """We catch a lot of workflow exception errors,
+ so consolidating the code, and doing the best things
+ we can with the data we have."""
+ if isinstance(exp, WorkflowTaskExecException):
+ return ApiError.from_task(code, message, exp.task)
+ else:
+ return ApiError.from_task_spec(code, message, exp.sender)
+
class ApiErrorSchema(ma.Schema):
class Meta:
- fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id")
+ fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
+ "task_data")
@app.errorhandler(ApiError)
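The new from_workflow_exception classmethod is meant to wrap SpiffWorkflow errors wherever engine steps run; a hedged sketch of a call site, where `processor` stands in for a WorkflowProcessor instance:

    from SpiffWorkflow import WorkflowException

    from crc.api.common import ApiError

    def run_engine_steps(processor):
        # Sketch only: picks from_task or from_task_spec
        # depending on the concrete exception type.
        try:
            processor.do_engine_steps()
        except WorkflowException as we:
            raise ApiError.from_workflow_exception("task_error", str(we), we)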
diff --git a/crc/api/file.py b/crc/api/file.py
index 07ced388..5cf54221 100644
--- a/crc/api/file.py
+++ b/crc/api/file.py
@@ -12,8 +12,9 @@ from crc.services.file_service import FileService
def to_file_api(file_model):
- """Converts a FileModel object to something we can return via the aip"""
- return File.from_models(file_model, FileService.get_file_data(file_model.id))
+ """Converts a FileModel object to something we can return via the api"""
+ return File.from_models(file_model, FileService.get_file_data(file_model.id),
+ FileService.get_doc_dictionary())
def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None):
@@ -121,7 +122,7 @@ def get_file_info(file_id):
def update_file_info(file_id, body):
if file_id is None:
- raise ApiError('unknown_file', 'Please provide a valid File ID.')
+ raise ApiError('no_such_file', 'Please provide a valid File ID.')
file_model = session.query(FileModel).filter_by(id=file_id).first()
diff --git a/crc/api/study.py b/crc/api/study.py
index 423f6fe2..8fdd1b4a 100644
--- a/crc/api/study.py
+++ b/crc/api/study.py
@@ -48,12 +48,10 @@ def update_study(study_id, body):
def get_study(study_id):
- study_service = StudyService()
- study = study_service.get_study(study_id)
+ study = StudyService.get_study(study_id)
if (study is None):
- raise ApiError("Study not found", status_code=404)
- schema = StudySchema()
- return schema.dump(study)
+ raise ApiError("unknown_study", 'The study "' + study_id + '" is not recognized.', status_code=404)
+ return StudySchema().dump(study)
def delete_study(study_id):
diff --git a/crc/api/tools.py b/crc/api/tools.py
index 6fb31b71..d140e962 100644
--- a/crc/api/tools.py
+++ b/crc/api/tools.py
@@ -9,6 +9,8 @@ from crc.api.common import ApiError
from crc.scripts.complete_template import CompleteTemplate
from crc.scripts.script import Script
import crc.scripts
+from crc.services.mails import send_test_email
+
def render_markdown(data, template):
"""
@@ -20,9 +22,9 @@ def render_markdown(data, template):
data = json.loads(data)
return template.render(**data)
except UndefinedError as ue:
- raise ApiError(code="undefined field", message=ue.message)
+ raise ApiError(code="undefined_field", message=ue.message)
except Exception as e:
- raise ApiError(code="invalid", message=str(e))
+ raise ApiError(code="invalid_render", message=str(e))
def render_docx():
@@ -42,9 +44,9 @@ def render_docx():
cache_timeout=-1 # Don't cache these files on the browser.
)
except ValueError as e:
- raise ApiError(code="invalid", message=str(e))
+ raise ApiError(code="undefined_field", message=str(e))
except Exception as e:
- raise ApiError(code="invalid", message=str(e))
+ raise ApiError(code="invalid_render", message=str(e))
def list_scripts():
@@ -59,3 +61,8 @@ def list_scripts():
})
return script_meta
+def send_email(address):
+ """Just sends a quick test email to assure the system is working."""
+ if not address:
+ address = "dan@sartography.com"
+ return send_test_email(address, [address])
\ No newline at end of file
diff --git a/crc/api/user.py b/crc/api/user.py
index afa2e894..a298808d 100644
--- a/crc/api/user.py
+++ b/crc/api/user.py
@@ -1,41 +1,122 @@
-import json
-
-import connexion
import flask
-from flask import redirect, g, request
+from flask import g, request
from crc import app, db
from crc.api.common import ApiError
from crc.models.user import UserModel, UserModelSchema
-from crc.services.ldap_service import LdapService, LdapUserInfo
+from crc.services.ldap_service import LdapService, LdapModel
"""
.. module:: crc.api.user
:synopsis: Single Sign On (SSO) user login and session handlers
"""
-def verify_token(token):
- failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate", status_code=403)
- if (not 'PRODUCTION' in app.config or not app.config['PRODUCTION']) and token == app.config["SWAGGER_AUTH_KEY"]:
+
+
+def verify_token(token=None):
+ """
+ Verifies the token for the user (if provided). If in production environment and token is not provided,
+ gets user from the SSO headers and returns their token.
+
+ Args:
+ token: Optional[str]
+
+ Returns:
+ token: str
+
+ Raises:
+ ApiError. If not on production and token is not valid, returns an 'invalid_token' 403 error.
+ If on production and user is not authenticated, returns a 'no_user' 403 error.
+ """
+
+ failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
+ status_code=403)
+
+ if not _is_production() and (token is None or 'user' not in g):
g.user = UserModel.query.first()
token = g.user.encode_auth_token()
- try:
- token_info = UserModel.decode_auth_token(token)
- g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
- except:
- raise failure_error
- if g.user is not None:
- return token_info
+ if token:
+ try:
+ token_info = UserModel.decode_auth_token(token)
+ g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
+ except:
+ raise failure_error
+ if g.user is not None:
+ return token_info
+ else:
+ raise failure_error
+
+ # If there's no token and we're in production, get the user from the SSO headers and return their token
+ if not token and _is_production():
+ uid = _get_request_uid(request)
+
+ if uid is not None:
+ db_user = UserModel.query.filter_by(uid=uid).first()
+
+ if db_user is not None:
+ g.user = db_user
+ token = g.user.encode_auth_token().decode()
+ token_info = UserModel.decode_auth_token(token)
+ return token_info
+
+ else:
+ raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.",
+ status_code=403)
+
+
+def verify_token_admin(token=None):
+ """
+ Verifies the token for the user (if provided) in non-production environment. If in production environment,
+ checks that the user is in the list of authorized admins
+
+ Args:
+ token: Optional[str]
+
+ Returns:
+ token: str
+ """
+
+ # If this is production, check that the user is in the list of admins
+ if _is_production():
+ uid = _get_request_uid(request)
+
+ if uid is not None and uid in app.config['ADMIN_UIDS']:
+ return verify_token()
+
+ # If we're not in production, just use the normal verify_token method
else:
- raise failure_error
+ return verify_token(token)
def get_current_user():
return UserModelSchema().dump(g.user)
-@app.route('/v1.0/login')
-def sso_login():
- # This what I see coming back:
+
+def login(
+ uid=None,
+ redirect_url=None,
+):
+ """
+ In non-production environment, provides an endpoint for end-to-end system testing that allows the system
+ to simulate logging in as a specific user. In production environment, simply logs user in via single-sign-on
+ (SSO) Shibboleth authentication headers.
+
+ Args:
+ uid: Optional[str]
+ redirect_url: Optional[str]
+
+ Returns:
+ str. If not on production, returns the frontend auth callback URL, with auth token appended.
+ If on production and user is authenticated via SSO, returns the frontend auth callback URL,
+ with auth token appended.
+
+ Raises:
+ ApiError. If on production and user is not authenticated, returns a 404 error.
+ """
+
+ # ----------------------------------------
+ # Shibboleth Authentication Headers
+ # ----------------------------------------
# X-Remote-Cn: Daniel Harold Funk (dhf8r)
# X-Remote-Sn: Funk
# X-Remote-Givenname: Daniel
@@ -50,62 +131,52 @@ def sso_login():
# X-Forwarded-Host: dev.crconnect.uvadcos.io
# X-Forwarded-Server: dev.crconnect.uvadcos.io
# Connection: Keep-Alive
- uid = request.headers.get("Uid")
- if not uid:
- uid = request.headers.get("X-Remote-Uid")
- if not uid:
- raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
- % str(request.headers))
- redirect = request.args.get('redirect')
- app.logger.info("SSO_LOGIN: Full URL: " + request.url)
- app.logger.info("SSO_LOGIN: User Id: " + uid)
- app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect))
+ # If we're in production, override any uid with the uid from the SSO request headers
+ if _is_production():
+ uid = _get_request_uid(request)
- ldap_service = LdapService()
- info = ldap_service.user_info(uid)
+ if uid:
+ app.logger.info("SSO_LOGIN: Full URL: " + request.url)
+ app.logger.info("SSO_LOGIN: User Id: " + uid)
+ app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect_url))
+
+ ldap_info = LdapService().user_info(uid)
+
+ if ldap_info:
+ return _handle_login(ldap_info, redirect_url)
+
+ raise ApiError('404', 'unknown')
- return _handle_login(info, redirect)
@app.route('/sso')
def sso():
response = ""
response += "
Headers
"
response += "
"
- for k,v in request.headers:
+ for k, v in request.headers:
response += "
%s %s
\n" % (k, v)
response += "
Environment
"
- for k,v in request.environ:
+ for k, v in request.environ:
response += "
%s %s
\n" % (k, v)
return response
-def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
- """On successful login, adds user to database if the user is not already in the system,
- then returns the frontend auth callback URL, with auth token appended.
+def _handle_login(user_info: LdapModel, redirect_url=None):
+ """
+ On successful login, adds user to database if the user is not already in the system,
+ then returns the frontend auth callback URL, with auth token appended.
- Args:
- user_info - an ldap user_info object.
- redirect_url: Optional[str]
+ Args:
+ user_info - an ldap user_info object.
+ redirect_url: Optional[str]
- Returns:
- Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
+ Returns:
+ Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
"""
- user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
-
- if user is None:
- # Add new user
- user = UserModel()
-
- user.uid = user_info.uid
- user.display_name = user_info.display_name
- user.email_address = user_info.email_address
- user.affiliation = user_info.affiliation
- user.title = user_info.title
-
- db.session.add(user)
- db.session.commit()
+ user = _upsert_user(user_info)
+ g.user = user
# Return the frontend auth callback URL, with auth token appended.
auth_token = user.encode_auth_token().decode()
@@ -120,41 +191,44 @@ def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUT
return auth_token
+def _upsert_user(user_info):
+ user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
-def backdoor(
- uid=None,
- affiliation=None,
- display_name=None,
- email_address=None,
- eppn=None,
- first_name=None,
- last_name=None,
- title=None,
- redirect=None,
-):
- """A backdoor for end-to-end system testing that allows the system to simulate logging in as a specific user.
- Only works if the application is running in a non-production environment.
-
- Args:
- uid: str
- affiliation: Optional[str]
- display_name: Optional[str]
- email_address: Optional[str]
- eppn: Optional[str]
- first_name: Optional[str]
- last_name: Optional[str]
- title: Optional[str]
- redirect_url: Optional[str]
-
- Returns:
- str. If not on production, returns the frontend auth callback URL, with auth token appended.
-
- Raises:
- ApiError. If on production, returns a 404 error.
- """
- if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:
-
- ldap_info = LdapService().user_info(uid)
- return _handle_login(ldap_info, redirect)
+ if user is None:
+ # Add new user
+ user = UserModel()
else:
- raise ApiError('404', 'unknown')
+ user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first()
+
+ user.uid = user_info.uid
+ user.display_name = user_info.display_name
+ user.email_address = user_info.email_address
+ user.affiliation = user_info.affiliation
+ user.title = user_info.title
+
+ db.session.add(user)
+ db.session.commit()
+ return user
+
+
+def _get_request_uid(req):
+ uid = None
+
+ if _is_production():
+
+ if 'user' in g and g.user is not None:
+ return g.user.uid
+
+ uid = req.headers.get("Uid")
+ if not uid:
+ uid = req.headers.get("X-Remote-Uid")
+
+ if not uid:
+ raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
+ % str(req.headers))
+
+ return uid
+
+
+def _is_production():
+ return 'PRODUCTION' in app.config and app.config['PRODUCTION']
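In non-production, the reworked /login endpoint accepts an optional uid and redirect_url, which makes end-to-end tests straightforward. A hedged sketch using Flask's test client; the uid matches the TEST_UID default from config/default.py:

    from crc import app

    # Sketch only: exercises the non-production login path.
    with app.test_client() as client:
        rv = client.get('/v1.0/login?uid=dhf8r&redirect_url=http://localhost:4200/session')
        # When a redirect_url is supplied, _handle_login responds with a 302
        # to the frontend callback URL with the auth token appended.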
diff --git a/crc/api/workflow.py b/crc/api/workflow.py
index efcccc26..655a85e7 100644
--- a/crc/api/workflow.py
+++ b/crc/api/workflow.py
@@ -1,6 +1,8 @@
import uuid
-from crc import session
+from flask import g
+
+from crc import session, app
from crc.api.common import ApiError, ApiErrorSchema
from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
from crc.models.file import FileModel, LookupDataSchema
@@ -44,6 +46,13 @@ def validate_workflow_specification(spec_id):
try:
WorkflowService.test_spec(spec_id)
except ApiError as ae:
+ ae.message = "When populating all fields ... " + ae.message
+ errors.append(ae)
+ try:
+ # Run the validation twice, the second time, just populate the required fields.
+ WorkflowService.test_spec(spec_id, required_only=True)
+ except ApiError as ae:
+ ae.message = "When populating only required fields ... " + ae.message
errors.append(ae)
return ApiErrorSchema(many=True).dump(errors)
@@ -112,6 +121,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
navigation.append(NavigationItem(**nav_item))
NavigationItemSchema().dump(nav_item)
+
+ spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
workflow_api = WorkflowApi(
id=processor.get_workflow_id(),
status=processor.get_status(),
@@ -120,9 +131,10 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
workflow_spec_id=processor.workflow_spec_id,
spec_version=processor.get_version_string(),
is_latest_spec=processor.is_latest_spec,
- total_tasks=processor.workflow_model.total_tasks,
+ total_tasks=len(navigation),
completed_tasks=processor.workflow_model.completed_tasks,
- last_updated=processor.workflow_model.last_updated
+ last_updated=processor.workflow_model.last_updated,
+ title=spec.display_name
)
if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks.
# This may or may not work, sometimes there is no next task to complete.
@@ -146,6 +158,7 @@ def delete_workflow(workflow_id):
def set_current_task(workflow_id, task_id):
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+ user_uid = __get_user_uid(workflow_model.study.user_uid)
processor = WorkflowProcessor(workflow_model)
task_id = uuid.UUID(task_id)
task = processor.bpmn_workflow.get_task(task_id)
@@ -157,13 +170,21 @@ def set_current_task(workflow_id, task_id):
if task.state == task.COMPLETED:
task.reset_token(reset_data=False) # we could optionally clear the previous data.
processor.save()
- WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+ WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
workflow_api_model = __get_workflow_api_model(processor, task)
return WorkflowApiSchema().dump(workflow_api_model)
def update_task(workflow_id, task_id, body):
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+
+ if workflow_model is None:
+ raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
+
+ elif workflow_model.study is None:
+ raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
+
+ user_uid = __get_user_uid(workflow_model.study.user_uid)
processor = WorkflowProcessor(workflow_model)
task_id = uuid.UUID(task_id)
task = processor.bpmn_workflow.get_task(task_id)
@@ -174,7 +195,7 @@ def update_task(workflow_id, task_id, body):
processor.complete_task(task)
processor.do_engine_steps()
processor.save()
- WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
+ WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)
workflow_api_model = __get_workflow_api_model(processor)
return WorkflowApiSchema().dump(workflow_api_model)
@@ -228,4 +249,15 @@ def lookup(workflow_id, field_id, query, limit):
"""
workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
lookup_data = LookupService.lookup(workflow, field_id, query, limit)
- return LookupDataSchema(many=True).dump(lookup_data)
\ No newline at end of file
+ return LookupDataSchema(many=True).dump(lookup_data)
+
+
+def __get_user_uid(user_uid):
+ if 'user' in g:
+ if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
+ raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+ else:
+ return g.user.uid
+
+ else:
+ raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
diff --git a/crc/models/api_models.py b/crc/models/api_models.py
index 4b279965..b8b535a7 100644
--- a/crc/models/api_models.py
+++ b/crc/models/api_models.py
@@ -31,10 +31,12 @@ class NavigationItem(object):
class Task(object):
+ PROP_OPTIONS_REPEAT = "repeat"
PROP_OPTIONS_FILE = "spreadsheet.name"
PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column"
PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column"
PROP_LDAP_LOOKUP = "ldap.lookup"
+ VALIDATION_REQUIRED = "required"
FIELD_TYPE_AUTO_COMPLETE = "autocomplete"
@@ -117,7 +119,7 @@ class NavigationItemSchema(ma.Schema):
class WorkflowApi(object):
def __init__(self, id, status, next_task, navigation,
- spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated):
+ spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title):
self.id = id
self.status = status
self.next_task = next_task # The next task that requires user input.
@@ -128,13 +130,14 @@ class WorkflowApi(object):
self.total_tasks = total_tasks
self.completed_tasks = completed_tasks
self.last_updated = last_updated
+ self.title = title
class WorkflowApiSchema(ma.Schema):
class Meta:
model = WorkflowApi
fields = ["id", "status", "next_task", "navigation",
"workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks",
- "last_updated"]
+ "last_updated", "title"]
unknown = INCLUDE
status = EnumField(WorkflowStatus)
@@ -145,7 +148,7 @@ class WorkflowApiSchema(ma.Schema):
def make_workflow(self, data, **kwargs):
keys = ['id', 'status', 'next_task', 'navigation',
'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks",
- "last_updated"]
+ "last_updated", "title"]
filtered_fields = {key: data[key] for key in keys}
filtered_fields['next_task'] = TaskSchema().make_task(data['next_task'])
return WorkflowApi(**filtered_fields)
diff --git a/crc/models/approval.py b/crc/models/approval.py
index f7aa2e06..0592fbd1 100644
--- a/crc/models/approval.py
+++ b/crc/models/approval.py
@@ -1,24 +1,28 @@
import enum
import marshmallow
-from ldap3.core.exceptions import LDAPSocketOpenError
-from marshmallow import INCLUDE
+from marshmallow import INCLUDE, fields
from sqlalchemy import func
-from crc import db, ma
+from crc import db, ma, app
from crc.api.common import ApiError
from crc.models.file import FileDataModel
+from crc.models.ldap import LdapSchema
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowModel
+from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
class ApprovalStatus(enum.Enum):
- WAITING = "WAITING" # no one has done jack.
+ PENDING = "PENDING" # no one has done jack.
APPROVED = "APPROVED" # approved by the reviewer
DECLINED = "DECLINED" # rejected by the reviewer
CANCELED = "CANCELED" # The document was replaced with a new version and this review is no longer needed.
+ # Used for overall status only, never set on a task.
+ AWAITING = "AWAITING" # awaiting another approval
+
class ApprovalFile(db.Model):
file_data_id = db.Column(db.Integer, db.ForeignKey(FileDataModel.id), primary_key=True)
@@ -32,13 +36,14 @@ class ApprovalModel(db.Model):
__tablename__ = 'approval'
id = db.Column(db.Integer, primary_key=True)
study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=False)
- study = db.relationship(StudyModel, backref='approval', cascade='all,delete')
+ study = db.relationship(StudyModel)
workflow_id = db.Column(db.Integer, db.ForeignKey(WorkflowModel.id), nullable=False)
workflow = db.relationship(WorkflowModel)
approver_uid = db.Column(db.String) # Not linked to user model, as they may not have logged in yet.
status = db.Column(db.String)
message = db.Column(db.String, default='')
date_created = db.Column(db.DateTime(timezone=True), default=func.now())
+ date_approved = db.Column(db.DateTime(timezone=True), default=None)
version = db.Column(db.Integer) # Incremented integer, so 1,2,3 as requests are made.
approval_files = db.relationship(ApprovalFile, back_populates="approval",
cascade="all, delete, delete-orphan",
@@ -62,33 +67,37 @@ class Approval(object):
instance.status = model.status
instance.message = model.message
instance.date_created = model.date_created
+ instance.date_approved = model.date_approved
instance.version = model.version
instance.title = ''
+ instance.related_approvals = []
+
if model.study:
instance.title = model.study.title
-
- instance.approver = {}
try:
- ldap_service = LdapService()
- principal_investigator_id = model.study.primary_investigator_id
- user_info = ldap_service.user_info(principal_investigator_id)
- except (ApiError, LDAPSocketOpenError) as exception:
- user_info = None
- instance.approver['display_name'] = 'Primary Investigator details'
- instance.approver['department'] = 'currently not available'
-
- if user_info:
- # TODO: Rename approver to primary investigator
- instance.approver['uid'] = model.approver_uid
- instance.approver['display_name'] = user_info.display_name
- instance.approver['title'] = user_info.title
- instance.approver['department'] = user_info.department
+ instance.approver = LdapService.user_info(model.approver_uid)
+ instance.primary_investigator = LdapService.user_info(model.study.primary_investigator_id)
+ except ApiError as ae:
+ app.logger.error("Ldap lookup failed for approval record %i" % model.id)
+ doc_dictionary = FileService.get_doc_dictionary()
instance.associated_files = []
for approval_file in model.approval_files:
+ try:
+ # fixme: This is slow because we are doing a ton of queries to find the irb code.
+ extra_info = doc_dictionary[approval_file.file_data.file_model.irb_doc_code]
+ except KeyError:
+ extra_info = None
associated_file = {}
associated_file['id'] = approval_file.file_data.file_model.id
- associated_file['name'] = approval_file.file_data.file_model.name
+ if extra_info:
+ associated_file['name'] = '_'.join((extra_info['category1'],
+ approval_file.file_data.file_model.name))
+ associated_file['description'] = extra_info['description']
+ else:
+ associated_file['name'] = approval_file.file_data.file_model.name
+ associated_file['description'] = 'No description available'
+ associated_file['name'] = '(' + model.study.primary_investigator_id + ')' + associated_file['name']
associated_file['content_type'] = approval_file.file_data.file_model.content_type
instance.associated_files.append(associated_file)
@@ -100,10 +109,17 @@ class Approval(object):
class ApprovalSchema(ma.Schema):
+
+ approver = fields.Nested(LdapSchema, dump_only=True)
+ primary_investigator = fields.Nested(LdapSchema, dump_only=True)
+ related_approvals = fields.List(fields.Nested('ApprovalSchema', allow_none=True, dump_only=True))
+
class Meta:
model = Approval
fields = ["id", "study_id", "workflow_id", "version", "title",
- "version", "status", "message", "approver", "associated_files"]
+ "status", "message", "approver", "primary_investigator",
+ "associated_files", "date_created", "date_approved",
+ "related_approvals"]
unknown = INCLUDE
@marshmallow.post_load
@@ -111,30 +127,4 @@ class ApprovalSchema(ma.Schema):
"""Loads the basic approval data for updates to the database"""
return Approval(**data)
-# Carlos: Here is the data structure I was trying to imagine.
-# If I were to continue down my current traing of thought, I'd create
-# another class called just "Approval" that can take an ApprovalModel from the
-# database and construct a data structure like this one, that can
-# be provided to the API at an /approvals endpoint with GET and PUT
-# dat = { "approvals": [
-# {"id": 1,
-# "study_id": 20,
-# "workflow_id": 454,
-# "study_title": "Dan Funk (dhf8r)", # Really it's just the name of the Principal Investigator
-# "workflow_version": "21",
-# "approver": { # Pulled from ldap
-# "uid": "bgb22",
-# "display_name": "Billy Bob (bgb22)",
-# "title": "E42:He's a hoopy frood",
-# "department": "E0:EN-Eng Study of Parallel Universes",
-# },
-# "files": [
-# {
-# "id": 124,
-# "name": "ResearchRestart.docx",
-# "content_type": "docx-something-whatever"
-# }
-# ]
-# }
-# ...
-# ]
+
diff --git a/crc/models/file.py b/crc/models/file.py
index 184979e6..15a48709 100644
--- a/crc/models/file.py
+++ b/crc/models/file.py
@@ -82,11 +82,14 @@ class FileModel(db.Model):
workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
-
+ # A request was made to delete the file, but we can't because there are
+ # active approvals or running workflows that depend on it. So we archive
+ # it instead and hide it in the interface.
+ archived = db.Column(db.Boolean, default=False, nullable=False)
class File(object):
@classmethod
- def from_models(cls, model: FileModel, data_model: FileDataModel):
+ def from_models(cls, model: FileModel, data_model: FileDataModel, doc_dictionary):
instance = cls()
instance.id = model.id
instance.name = model.name
@@ -99,6 +102,15 @@ class File(object):
instance.workflow_id = model.workflow_id
instance.irb_doc_code = model.irb_doc_code
instance.type = model.type
+ if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
+ instance.category = "/".join(filter(None, [doc_dictionary[model.irb_doc_code]['category1'],
+ doc_dictionary[model.irb_doc_code]['category2'],
+ doc_dictionary[model.irb_doc_code]['category3']]))
+ instance.description = doc_dictionary[model.irb_doc_code]['description']
+ instance.download_name = ".".join([instance.category, model.type.value])
+ else:
+ instance.category = ""
+ instance.description = ""
if data_model:
instance.last_modified = data_model.date_created
instance.latest_version = data_model.version
@@ -122,7 +134,8 @@ class FileSchema(ma.Schema):
model = File
fields = ["id", "name", "is_status", "is_reference", "content_type",
"primary", "primary_process_id", "workflow_spec_id", "workflow_id",
- "irb_doc_code", "last_modified", "latest_version", "type"]
+ "irb_doc_code", "last_modified", "latest_version", "type", "categories",
+ "description", "category", "description", "download_name"]
unknown = INCLUDE
type = EnumField(FileType)
diff --git a/crc/models/ldap.py b/crc/models/ldap.py
new file mode 100644
index 00000000..7e05eccd
--- /dev/null
+++ b/crc/models/ldap.py
@@ -0,0 +1,39 @@
+from flask_marshmallow.sqla import SQLAlchemyAutoSchema
+from marshmallow import EXCLUDE
+from sqlalchemy import func
+
+from crc import db
+
+
+class LdapModel(db.Model):
+ uid = db.Column(db.String, primary_key=True)
+ display_name = db.Column(db.String)
+ given_name = db.Column(db.String)
+ email_address = db.Column(db.String)
+ telephone_number = db.Column(db.String)
+ title = db.Column(db.String)
+ department = db.Column(db.String)
+ affiliation = db.Column(db.String)
+ sponsor_type = db.Column(db.String)
+ date_cached = db.Column(db.DateTime(timezone=True), default=func.now())
+
+ @classmethod
+ def from_entry(cls, entry):
+ return LdapModel(uid=entry.uid.value,
+ display_name=entry.displayName.value,
+ given_name=", ".join(entry.givenName),
+ email_address=entry.mail.value,
+ telephone_number=entry.telephoneNumber.value,
+ title=", ".join(entry.title),
+ department=", ".join(entry.uvaDisplayDepartment),
+ affiliation=", ".join(entry.uvaPersonIAMAffiliation),
+ sponsor_type=", ".join(entry.uvaPersonSponsoredType))
+
+
+class LdapSchema(SQLAlchemyAutoSchema):
+ class Meta:
+ model = LdapModel
+ load_instance = True
+ include_relationships = True
+ include_fk = True # Includes foreign keys
+ unknown = EXCLUDE
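# A minimal usage sketch (an assumption, not part of the patch): LdapSchema turns a
# cached LdapModel row into a plain dict, which is what the API now returns.
from crc.models.ldap import LdapModel, LdapSchema

record = LdapModel(uid="dhf8r", display_name="Dan Funk (dhf8r)",
                   email_address="dhf8r@virginia.edu")
print(LdapSchema().dump(record))  # e.g. {'uid': 'dhf8r', 'display_name': ...}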
diff --git a/crc/models/study.py b/crc/models/study.py
index 38bd2f3b..540ee018 100644
--- a/crc/models/study.py
+++ b/crc/models/study.py
@@ -5,7 +5,7 @@ from sqlalchemy import func
from crc import db, ma
from crc.api.common import ApiErrorSchema
-from crc.models.file import FileModel, SimpleFileSchema
+from crc.models.file import FileModel, SimpleFileSchema, FileSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \
WorkflowModel
@@ -106,7 +106,8 @@ class Study(object):
def __init__(self, title, last_updated, primary_investigator_id, user_uid,
id=None,
protocol_builder_status=None,
- sponsor="", hsr_number="", ind_number="", categories=[], **argsv):
+ sponsor="", hsr_number="", ind_number="", categories=[],
+ files=[], approvals=[], **argsv):
self.id = id
self.user_uid = user_uid
self.title = title
@@ -117,8 +118,9 @@ class Study(object):
self.hsr_number = hsr_number
self.ind_number = ind_number
self.categories = categories
+ self.approvals = approvals
self.warnings = []
- self.files = []
+ self.files = files
@classmethod
def from_model(cls, study_model: StudyModel):
@@ -149,12 +151,13 @@ class StudySchema(ma.Schema):
hsr_number = fields.String(allow_none=True)
sponsor = fields.String(allow_none=True)
ind_number = fields.String(allow_none=True)
- files = fields.List(fields.Nested(SimpleFileSchema), dump_only=True)
+ files = fields.List(fields.Nested(FileSchema), dump_only=True)
+ approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True)
class Meta:
model = Study
additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid",
- "sponsor", "ind_number"]
+ "sponsor", "ind_number", "approvals", "files"]
unknown = INCLUDE
@marshmallow.post_load
diff --git a/crc/models/user.py b/crc/models/user.py
index d9ee8f72..55bba35f 100644
--- a/crc/models/user.py
+++ b/crc/models/user.py
@@ -19,7 +19,7 @@ class UserModel(db.Model):
last_name = db.Column(db.String, nullable=True)
title = db.Column(db.String, nullable=True)
- # Add Department and School
+ # TODO: Add Department and School
def encode_auth_token(self):
@@ -27,7 +27,7 @@ class UserModel(db.Model):
Generates the Auth Token
:return: string
"""
- hours = int(app.config['TOKEN_AUTH_TTL_HOURS'])
+ hours = float(app.config['TOKEN_AUTH_TTL_HOURS'])
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=hours, minutes=0, seconds=0),
'iat': datetime.datetime.utcnow(),
@@ -36,7 +36,7 @@ class UserModel(db.Model):
return jwt.encode(
payload,
app.config.get('TOKEN_AUTH_SECRET_KEY'),
- algorithm='HS256'
+ algorithm='HS256',
)
@staticmethod
@@ -50,9 +50,9 @@ class UserModel(db.Model):
payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256')
return payload
except jwt.ExpiredSignatureError:
- raise ApiError('token_expired', 'The Authentication token you provided expired, and must be renewed.')
+ raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.')
except jwt.InvalidTokenError:
- raise ApiError('token_invalid', 'The Authentication token you provided. You need a new token. ')
+ raise ApiError('token_invalid', 'The Authentication token you provided is invalid. You need a new token. ')
class UserModelSchema(SQLAlchemyAutoSchema):
diff --git a/crc/scripts/complete_template.py b/crc/scripts/complete_template.py
index 59f63158..32bee509 100644
--- a/crc/scripts/complete_template.py
+++ b/crc/scripts/complete_template.py
@@ -29,7 +29,8 @@ Takes two arguments:
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
"""For validation only, process the template, but do not store it in the database."""
- self.process_template(task, study_id, None, *args, **kwargs)
+ workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
+ self.process_template(task, study_id, workflow, *args, **kwargs)
def do_task(self, task, study_id, workflow_id, *args, **kwargs):
workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
@@ -62,13 +63,13 @@ Takes two arguments:
# Get the workflow specification file with the given name.
file_data_models = FileService.get_spec_data_files(
workflow_spec_id=workflow.workflow_spec_id,
- workflow_id=workflow.id)
- for file_data in file_data_models:
- if file_data.file_model.name == file_name:
- file_data_model = file_data
-
- if workflow is None or file_data_model is None:
- file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)
+ workflow_id=workflow.id,
+ name=file_name)
+ if len(file_data_models) > 0:
+ file_data_model = file_data_models[0]
+ else:
+ raise ApiError(code="invalid_argument",
+ message="Uable to locate a file with the given name.")
# Get images from file/files fields
if len(args) == 3:
diff --git a/crc/scripts/request_approval.py b/crc/scripts/request_approval.py
index 1df1a670..0a4c76ff 100644
--- a/crc/scripts/request_approval.py
+++ b/crc/scripts/request_approval.py
@@ -11,7 +11,8 @@ class RequestApproval(Script):
return """
Creates an approval request on this workflow, by the given approver_uid(s),"
Takes multiple arguments, which should point to data located in current task
-or be quoted strings.
+or be quoted strings. The order is important. Approvals will be processed
+in this order.
Example:
RequestApproval approver1 "dhf8r"
@@ -26,7 +27,8 @@ RequestApproval approver1 "dhf8r"
ApprovalService.add_approval(study_id, workflow_id, args)
elif isinstance(uids, list):
for id in uids:
- ApprovalService.add_approval(study_id, workflow_id, id)
+ if id: # Ensure it is not empty or null
+ ApprovalService.add_approval(study_id, workflow_id, id)
def get_uids(self, task, args):
if len(args) < 1:
diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py
index 8a13e6c2..1f6f56b3 100644
--- a/crc/services/approval_service.py
+++ b/crc/services/approval_service.py
@@ -2,37 +2,168 @@ from datetime import datetime
from sqlalchemy import desc
-from crc import db, session
+from crc import app, db, session
from crc.api.common import ApiError
-from crc.models.approval import ApprovalModel, ApprovalStatus, ApprovalFile
+from crc.models.approval import ApprovalModel, ApprovalStatus, ApprovalFile, Approval
+from crc.models.study import StudyModel
from crc.models.workflow import WorkflowModel
from crc.services.file_service import FileService
-
+from crc.services.ldap_service import LdapService
+from crc.services.mails import (
+ send_ramp_up_submission_email,
+ send_ramp_up_approval_request_email,
+ send_ramp_up_approval_request_first_review_email,
+ send_ramp_up_approved_email,
+ send_ramp_up_denied_email,
+ send_ramp_up_denied_email_to_approver
+)
class ApprovalService(object):
"""Provides common tools for working with an Approval"""
@staticmethod
- def get_approvals_per_user(approver_uid):
- """Returns a list of all approvals for the given user (approver)"""
- db_approvals = session.query(ApprovalModel).filter_by(approver_uid=approver_uid).all()
- return db_approvals
+ def __one_approval_from_study(study, approver_uid=None, status=None,
+ include_cancelled=True):
+ """Returns one approval, with all additional approvals as 'related_approvals'.
+ The main approval can be pinned to an approver with the optional argument.
+ Returns None if no approvals exist on the study."""
+ main_approval = None
+ related_approvals = []
+ query = db.session.query(ApprovalModel).filter(ApprovalModel.study_id == study.id)
+ if not include_cancelled:
+ query = query.filter(ApprovalModel.status != ApprovalStatus.CANCELED.value)
+ approvals = query.all() # All approvals, or only the non-cancelled ones.
+
+ for approval_model in approvals:
+ if approval_model.approver_uid == approver_uid:
+ main_approval = approval_model
+ else:
+ related_approvals.append(approval_model)
+
+ # If we are just returning all of the approvals per study, treat the first one as the main approval.
+ if not main_approval and len(related_approvals) > 0:
+ main_approval = related_approvals[0]
+ related_approvals = related_approvals[1:]
+
+ if main_approval is not None: # May be null if the study has no approvals.
+ final_status = ApprovalService.__calculate_overall_approval_status(main_approval, related_approvals)
+ if status and final_status != status: return # Now that we are certain of the status, filter on it.
+
+ main_approval = Approval.from_model(main_approval)
+ main_approval.status = final_status
+ for ra in related_approvals:
+ main_approval.related_approvals.append(Approval.from_model(ra))
+
+ return main_approval
@staticmethod
- def get_all_approvals():
- """Returns a list of all approvlas"""
- db_approvals = session.query(ApprovalModel).all()
- return db_approvals
+ def __calculate_overall_approval_status(approval, related):
+ # In the case of a pending approval, check to see if there is a related approval
+ # that precedes this approval - and if it is declined/cancelled, or still pending,
+ # then change the reported status to Declined/Cancelled, or Awaiting respectively.
+ if approval.status == ApprovalStatus.PENDING.value:
+ for ra in related:
+ if ra.id < approval.id:
+ if ra.status == ApprovalStatus.DECLINED.value or ra.status == ApprovalStatus.CANCELED.value:
+ return ra.status # If any prior approval is declined or cancelled, so is this approval.
+ elif ra.status == ApprovalStatus.PENDING.value:
+ return ApprovalStatus.AWAITING.value # If any prior approval is pending, this one is awaiting it.
+ return approval.status
+ else:
+ return approval.status
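# Worked example of the calculation above (hypothetical ids; the lower id was
# requested first): with approval #1 and approval #2 on the same study,
#   #1 PENDING,  #2 PENDING  -> #2 reports AWAITING (waiting on #1)
#   #1 DECLINED, #2 PENDING  -> #2 reports DECLINED
#   #1 APPROVED, #2 PENDING  -> #2 stays PENDING (its reviewer is up next)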
@staticmethod
- def update_approval(approval_id, approver_uid, status):
+ def get_approvals_per_user(approver_uid, status=None, include_cancelled=False):
+ """Returns a list of approval objects (not db models) for the given
+ approver. """
+ studies = db.session.query(StudyModel).join(ApprovalModel).\
+ filter(ApprovalModel.approver_uid == approver_uid).all()
+ approvals = []
+ for study in studies:
+ approval = ApprovalService.__one_approval_from_study(study, approver_uid,
+ status, include_cancelled)
+ if approval:
+ approvals.append(approval)
+ return approvals
+
+ @staticmethod
+ def get_all_approvals(include_cancelled=True):
+ """Returns a list of all approval objects (not db models), one record
+ per study, with any associated approvals grouped under the first approval."""
+ studies = db.session.query(StudyModel).all()
+ approvals = []
+ for study in studies:
+ approval = ApprovalService.__one_approval_from_study(study, include_cancelled=include_cancelled)
+ if approval:
+ approvals.append(approval)
+ return approvals
+
+ @staticmethod
+ def get_approvals_for_study(study_id, include_cancelled=True):
+ """Returns an array of Approval objects for the study, it does not
+ compute the related approvals."""
+ query = session.query(ApprovalModel).filter_by(study_id=study_id)
+ if not include_cancelled:
+ query = query.filter(ApprovalModel.status != ApprovalStatus.CANCELED.value)
+ db_approvals = query.all()
+ return [Approval.from_model(approval_model) for approval_model in db_approvals]
+
+
+ @staticmethod
+ def update_approval(approval_id, approver_uid):
"""Update a specific approval"""
db_approval = session.query(ApprovalModel).get(approval_id)
if db_approval:
+ status = db_approval.status
- db_approval.status = status
- session.add(db_approval)
- session.commit()
+ # db_approval.status = status
+ # session.add(db_approval)
+ # session.commit()
+ if status == ApprovalStatus.APPROVED.value:
+ # second_approval = ApprovalModel().query.filter_by(
+ # study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
+ # status=ApprovalStatus.PENDING.value, version=db_approval.version).first()
+ # if second_approval:
+ # send rrp approval request for second approver
+ ldap_service = LdapService()
+ pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
+ approver_info = ldap_service.user_info(approver_uid)
+ # send rrp submission
+ mail_result = send_ramp_up_approved_email(
+ 'askresearch@virginia.edu',
+ [pi_user_info.email_address],
+ f'{approver_info.display_name} - ({approver_info.uid})'
+ )
+ if mail_result:
+ app.logger.error(mail_result)
+ elif status == ApprovalStatus.DECLINED.value:
+ ldap_service = LdapService()
+ pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
+ approver_info = ldap_service.user_info(approver_uid)
+ # send rrp submission
+ mail_result = send_ramp_up_denied_email(
+ 'askresearch@virginia.edu',
+ [pi_user_info.email_address],
+ f'{approver_info.display_name} - ({approver_info.uid})'
+ )
+ if mail_result:
+ app.logger.error(mail_result)
+ first_approval = ApprovalModel().query.filter_by(
+ study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
+ status=ApprovalStatus.APPROVED.value, version=db_approval.version).first()
+ if first_approval:
+ # Second approver denies
+ first_approver_info = ldap_service.user_info(first_approval.approver_uid)
+ approver_email = [first_approver_info.email_address] if first_approver_info.email_address else app.config['FALLBACK_EMAILS']
+ # send rrp denied by second approver email to first approver
+ mail_result = send_ramp_up_denied_email_to_approver(
+ 'askresearch@virginia.edu',
+ approver_email,
+ f'{pi_user_info.display_name} - ({pi_user_info.uid})',
+ f'{approver_info.display_name} - ({approver_info.uid})'
+ )
+ if mail_result:
+ app.logger.error(mail_result)
# TODO: Log update action by approver_uid - maybe ?
return db_approval
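# Email dispatch summary for the branches above:
#   APPROVED -> send_ramp_up_approved_email to the PI
#   DECLINED -> send_ramp_up_denied_email to the PI; if a prior APPROVED
#               approval exists on the same workflow and version,
#               send_ramp_up_denied_email_to_approver to that first approver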
@@ -78,14 +209,43 @@ class ApprovalService(object):
version = 1
model = ApprovalModel(study_id=study_id, workflow_id=workflow_id,
- approver_uid=approver_uid, status=ApprovalStatus.WAITING.value,
+ approver_uid=approver_uid, status=ApprovalStatus.PENDING.value,
message="", date_created=datetime.now(),
version=version)
approval_files = ApprovalService._create_approval_files(workflow_data_files, model)
+
+ # Check approvals count
+ approvals_count = ApprovalModel().query.filter_by(study_id=study_id, workflow_id=workflow_id,
+ version=version).count()
+
db.session.add(model)
db.session.add_all(approval_files)
db.session.commit()
+ # Send first email
+ if approvals_count == 0:
+ ldap_service = LdapService()
+ pi_user_info = ldap_service.user_info(model.study.primary_investigator_id)
+ approver_info = ldap_service.user_info(approver_uid)
+ # send rrp submission
+ mail_result = send_ramp_up_submission_email(
+ 'askresearch@virginia.edu',
+ [pi_user_info.email_address],
+ f'{approver_info.display_name} - ({approver_info.uid})'
+ )
+ if mail_result:
+ app.logger.error(mail_result)
+ # send rrp approval request for first approver
+ # Fall back to the configured FALLBACK_EMAILS if the approver has no email address.
+ approver_email = [approver_info.email_address] if approver_info.email_address else app.config['FALLBACK_EMAILS']
+ mail_result = send_ramp_up_approval_request_first_review_email(
+ 'askresearch@virginia.edu',
+ approver_email,
+ f'{pi_user_info.display_name} - ({pi_user_info.uid})'
+ )
+ if mail_result:
+ app.logger.error(mail_result)
+
@staticmethod
def _create_approval_files(workflow_data_files, approval):
"""Currently based exclusively on the status of files associated with a workflow."""
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index beb22831..ff234a79 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -5,11 +5,13 @@ from datetime import datetime
from uuid import UUID
from xml.etree import ElementTree
+import flask
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from pandas import ExcelFile
from sqlalchemy import desc
+from sqlalchemy.exc import IntegrityError
-from crc import session
+from crc import session, app
from crc.api.common import ApiError
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile
@@ -20,6 +22,14 @@ class FileService(object):
DOCUMENT_LIST = "irb_documents.xlsx"
INVESTIGATOR_LIST = "investigators.xlsx"
+ __doc_dictionary = None
+
+ @staticmethod
+ def get_doc_dictionary():
+ if not FileService.__doc_dictionary:
+ FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
+ return FileService.__doc_dictionary
+
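# Usage sketch of the cache above: the first call parses the irb_documents.xlsx
# reference file (keyed by its 'code' column); later calls return the same dict.
# "SomeDocCode" is a hypothetical irb_doc_code.
from crc.services.file_service import FileService

doc_dict = FileService.get_doc_dictionary()
if "SomeDocCode" in doc_dict:
    print(doc_dict["SomeDocCode"]['description'])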
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
name, content_type, binary_data, primary=False, is_status=False):
@@ -35,10 +45,8 @@ class FileService(object):
@staticmethod
def is_allowed_document(code):
- data_model = FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
- xls = ExcelFile(data_model.data)
- df = xls.parse(xls.sheet_names[0])
- return code in df['code'].values
+ doc_dict = FileService.get_doc_dictionary()
+ return code in doc_dict
@staticmethod
def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data):
@@ -86,6 +94,7 @@ class FileService(object):
def get_workflow_files(workflow_id):
"""Returns all the file models associated with a running workflow."""
return session.query(FileModel).filter(FileModel.workflow_id == workflow_id).\
+ filter(FileModel.archived == False).\
order_by(FileModel.id).all()
@staticmethod
@@ -117,7 +126,11 @@ class FileService(object):
md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
- # This file does not need to be updated, it's the same file.
+ # This file does not need to be updated, it's the same file. If it is archived,
+ # then de-archive it.
+ file_model.archived = False
+ session.add(file_model)
+ session.commit()
return file_model
# Verify the extension
@@ -129,6 +142,7 @@ class FileService(object):
else:
file_model.type = FileType[file_extension]
file_model.content_type = content_type
+ file_model.archived = False # Unarchive the file if it is archived.
if latest_data_model is None:
version = 1
@@ -178,7 +192,8 @@ class FileService(object):
def get_files_for_study(study_id, irb_doc_code=None):
query = session.query(FileModel).\
join(WorkflowModel).\
- filter(WorkflowModel.study_id == study_id)
+ filter(WorkflowModel.study_id == study_id).\
+ filter(FileModel.archived == False)
if irb_doc_code:
query = query.filter(FileModel.irb_doc_code == irb_doc_code)
return query.all()
@@ -198,6 +213,9 @@ class FileService(object):
if name:
query = query.filter_by(name=name)
+
+ query = query.filter(FileModel.archived == False)
+
query = query.order_by(FileModel.id)
results = query.all()
@@ -260,11 +278,12 @@ class FileService(object):
@staticmethod
def get_workflow_file_data(workflow, file_name):
- """Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
+ """This method should be deleted, find where it is used, and remove this method.
+ Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
workflow_spec_model = FileService.find_spec_model_in_db(workflow)
if workflow_spec_model is None:
- raise ApiError(code="workflow_model_error",
+ raise ApiError(code="unknown_workflow",
message="Something is wrong. I can't find the workflow you are using.")
file_data_model = session.query(FileDataModel) \
@@ -295,12 +314,21 @@ class FileService(object):
@staticmethod
def delete_file(file_id):
- data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
- for dm in data_models:
- lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
- for lf in lookup_files:
- session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
- session.query(LookupFileModel).filter_by(id=lf.id).delete()
- session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
- session.query(FileModel).filter_by(id=file_id).delete()
- session.commit()
+ try:
+ data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
+ for dm in data_models:
+ lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
+ for lf in lookup_files:
+ session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
+ session.query(LookupFileModel).filter_by(id=lf.id).delete()
+ session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
+ session.query(FileModel).filter_by(id=file_id).delete()
+ session.commit()
+ except IntegrityError as ie:
+ # We can't delete the file or file data, because it is referenced elsewhere,
+ # but we can at least mark it as deleted on the table.
+ session.rollback()
+ file_model = session.query(FileModel).filter_by(id=file_id).first()
+ file_model.archived = True
+ session.commit()
+ app.logger.info("Failed to delete file, so archiving it instead. %i, due to %s" % (file_id, str(ie)))
diff --git a/crc/services/ldap_service.py b/crc/services/ldap_service.py
index 4602ed59..ef3d25f4 100644
--- a/crc/services/ldap_service.py
+++ b/crc/services/ldap_service.py
@@ -1,84 +1,90 @@
import os
-from crc import app
-from ldap3 import Connection, Server, MOCK_SYNC
+from attr import asdict
+from ldap3.core.exceptions import LDAPExceptionError
+
+from crc import app, db
+from ldap3 import Connection, Server, MOCK_SYNC, RESTARTABLE
from crc.api.common import ApiError
+from crc.models.ldap import LdapModel, LdapSchema
-class LdapUserInfo(object):
-
- def __init__(self):
- self.display_name = ''
- self.given_name = ''
- self.email_address = ''
- self.telephone_number = ''
- self.title = ''
- self.department = ''
- self.affiliation = ''
- self.sponsor_type = ''
- self.uid = ''
-
- @classmethod
- def from_entry(cls, entry):
- instance = cls()
- instance.display_name = entry.displayName.value
- instance.given_name = ", ".join(entry.givenName)
- instance.email_address = entry.mail.value
- instance.telephone_number = ", ".join(entry.telephoneNumber)
- instance.title = ", ".join(entry.title)
- instance.department = ", ".join(entry.uvaDisplayDepartment)
- instance.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
- instance.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
- instance.uid = entry.uid.value
- return instance
-
class LdapService(object):
search_base = "ou=People,o=University of Virginia,c=US"
attributes = ['uid', 'cn', 'sn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
'telephoneNumber', 'title', 'uvaPersonIAMAffiliation', 'uvaPersonSponsoredType']
uid_search_string = "(&(objectclass=person)(uid=%s))"
- user_or_last_name_search_string = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"
+ user_or_last_name_search = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"
+ cn_single_search = '(&(objectclass=person)(cn=%s*))'
+ cn_double_search = '(&(objectclass=person)(&(cn=%s*)(cn=*%s*)))'
+ temp_cache = {}
+ conn = None
- def __init__(self):
- if app.config['TESTING']:
- server = Server('my_fake_server')
- self.conn = Connection(server, client_strategy=MOCK_SYNC)
- file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
- self.conn.strategy.entries_from_json(file_path)
- self.conn.bind()
+ @staticmethod
+ def __get_conn():
+ if not LdapService.conn:
+ if app.config['TESTING']:
+ server = Server('my_fake_server')
+ conn = Connection(server, client_strategy=MOCK_SYNC)
+ file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
+ conn.strategy.entries_from_json(file_path)
+ conn.bind()
+ else:
+ server = Server(app.config['LDAP_URL'], connect_timeout=app.config['LDAP_TIMEOUT_SEC'])
+ conn = Connection(server, auto_bind=True,
+ receive_timeout=app.config['LDAP_TIMEOUT_SEC'],
+ client_strategy=RESTARTABLE)
+ LdapService.conn = conn
+ return LdapService.conn
+
+
+ @staticmethod
+ def user_info(uva_uid):
+ user_info = db.session.query(LdapModel).filter(LdapModel.uid == uva_uid).first()
+ if not user_info:
+ app.logger.info("No cache for " + uva_uid)
+ search_string = LdapService.uid_search_string % uva_uid
+ conn = LdapService.__get_conn()
+ conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
+ if len(conn.entries) < 1:
+ raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
+ entry = conn.entries[0]
+ user_info = LdapModel.from_entry(entry)
+ db.session.add(user_info)
+ db.session.commit()
+ return user_info
+
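# Usage sketch: user_info() is now a read-through cache. The first call for a
# uid searches LDAP and stores an LdapModel row; repeat calls hit the database.
from crc.services.ldap_service import LdapService

info = LdapService.user_info("dhf8r")   # LDAP search, then cached in the ldap table
info = LdapService.user_info("dhf8r")   # served straight from the cache
print(info.display_name, info.email_address)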
+ @staticmethod
+ def search_users(query, limit):
+ if len(query.strip()) < 3:
+ return []
+ elif query.endswith(' '):
+ search_string = LdapService.cn_single_search % (query.strip())
+ elif query.strip().count(',') == 1:
+ f, l = query.split(",")
+ search_string = LdapService.cn_double_search % (l.strip(), f.strip())
+ elif query.strip().count(' ') == 1:
+ f, l = query.split(" ")
+ search_string = LdapService.cn_double_search % (f, l)
else:
- server = Server(app.config['LDAP_URL'], connect_timeout=app.config['LDAP_TIMEOUT_SEC'])
- self.conn = Connection(server,
- auto_bind=True,
- receive_timeout=app.config['LDAP_TIMEOUT_SEC'],
- )
-
- def __del__(self):
- if self.conn:
- self.conn.unbind()
-
- def user_info(self, uva_uid):
- search_string = LdapService.uid_search_string % uva_uid
- self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
- if len(self.conn.entries) < 1:
- raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
- entry = self.conn.entries[0]
- return LdapUserInfo.from_entry(entry)
-
- def search_users(self, query, limit):
- if len(query) < 3: return []
- search_string = LdapService.user_or_last_name_search_string % (query, query)
- self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
-
- # Entries are returned as a generator, accessing entries
- # can make subsequent calls to the ldap service, so limit
- # those here.
- count = 0
+ # Search by user_id or last name
+ search_string = LdapService.user_or_last_name_search % (query, query)
results = []
- for entry in self.conn.entries:
- if count > limit:
- break
- results.append(LdapUserInfo.from_entry(entry))
- count += 1
+ app.logger.info(search_string)
+ try:
+ conn = LdapService.__get_conn()
+ conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
+ # Entries are returned as a generator, accessing entries
+ # can make subsequent calls to the ldap service, so limit
+ # those here.
+ count = 0
+ for entry in conn.entries:
+ if count > limit:
+ break
+ results.append(LdapSchema().dump(LdapModel.from_entry(entry)))
+ count += 1
+ except LDAPExceptionError as le:
+ app.logger.info("Failed to execute ldap search. %s", str(le))
+
return results
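# How the branches above translate a typed query into an LDAP filter
# (hypothetical inputs):
#   "dan "      -> cn_single_search          (trailing space: one cn prefix)
#   "funk, dan" -> cn_double_search          (last, first)
#   "dan funk"  -> cn_double_search          (first last)
#   "dhf"       -> user_or_last_name_search  (uid or surname prefix)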
diff --git a/crc/services/lookup_service.py b/crc/services/lookup_service.py
index 95902fe0..b3e0bddc 100644
--- a/crc/services/lookup_service.py
+++ b/crc/services/lookup_service.py
@@ -103,7 +103,7 @@ class LookupService(object):
workflow_id=workflow_model.id,
name=file_name)
if len(latest_files) < 1:
- raise ApiError("missing_file", "Unable to locate the lookup data file '%s'" % file_name)
+ raise ApiError("invalid_enum", "Unable to locate the lookup data file '%s'" % file_name)
else:
data_model = latest_files[0]
@@ -189,15 +189,15 @@ class LookupService(object):
@staticmethod
def _run_ldap_query(query, limit):
- users = LdapService().search_users(query, limit)
+ users = LdapService.search_users(query, limit)
"""Converts the user models into something akin to the
LookupModel in models/file.py, so this can be returned in the same way
we return a lookup data model."""
user_list = []
for user in users:
- user_list.append( {"value": user.uid,
- "label": user.display_name + " (" + user.uid + ")",
- "data": user.__dict__
+ user_list.append( {"value": user['uid'],
+ "label": user['display_name'] + " (" + user['uid'] + ")",
+ "data": user
})
return user_list
\ No newline at end of file
diff --git a/crc/services/mails.py b/crc/services/mails.py
new file mode 100644
index 00000000..bd825f69
--- /dev/null
+++ b/crc/services/mails.py
@@ -0,0 +1,128 @@
+import os
+
+from flask import render_template, render_template_string
+from flask_mail import Message
+
+
+# TODO: Extract common mailing code into its own function
+def send_test_email(sender, recipients):
+ try:
+ msg = Message('Research Ramp-up Plan test',
+ sender=sender,
+ recipients=recipients)
+ from crc import env, mail
+ template = env.get_template('ramp_up_approval_request_first_review.txt')
+ template_vars = {'primary_investigator': "test"}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_approval_request_first_review.html')
+ msg.html = template.render(template_vars)
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+
+
+def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None):
+ try:
+ msg = Message('Research Ramp-up Plan Submitted',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+ from crc import env, mail
+ template = env.get_template('ramp_up_submission.txt')
+ template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_submission.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+def send_ramp_up_approval_request_email(sender, recipients, primary_investigator):
+ try:
+ msg = Message('Research Ramp-up Plan Approval Request',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+ from crc import env, mail
+ template = env.get_template('ramp_up_approval_request.txt')
+ template_vars = {'primary_investigator': primary_investigator}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_approval_request.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator):
+ try:
+ msg = Message('Research Ramp-up Plan Approval Request',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+ from crc import env, mail
+ template = env.get_template('ramp_up_approval_request_first_review.txt')
+ template_vars = {'primary_investigator': primary_investigator}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_approval_request_first_review.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None):
+ try:
+ msg = Message('Research Ramp-up Plan Approved',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+
+ from crc import env, mail
+ template = env.get_template('ramp_up_approved.txt')
+ template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_approved.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+def send_ramp_up_denied_email(sender, recipients, approver):
+ try:
+ msg = Message('Research Ramp-up Plan Denied',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+
+ from crc import env, mail
+ template = env.get_template('ramp_up_denied.txt')
+ template_vars = {'approver': approver}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_denied.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
+
+def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2):
+ try:
+ msg = Message('Research Ramp-up Plan Denied',
+ sender=sender,
+ recipients=recipients,
+ bcc=['rrt_emails@googlegroups.com'])
+
+ from crc import env, mail
+ template = env.get_template('ramp_up_denied_first_approver.txt')
+ template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2}
+ msg.body = template.render(template_vars)
+ template = env.get_template('ramp_up_denied_first_approver.html')
+ msg.html = template.render(template_vars)
+
+ mail.send(msg)
+ except Exception as e:
+ return str(e)
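# A minimal sketch of the shared helper the TODO at the top of this file asks
# for; the name send_mail and its signature are assumptions, not part of the patch.
def send_mail(subject, sender, recipients, template_base, template_vars, bcc=None):
    try:
        msg = Message(subject, sender=sender, recipients=recipients, bcc=bcc)
        from crc import env, mail
        msg.body = env.get_template(template_base + '.txt').render(template_vars)
        msg.html = env.get_template(template_base + '.html').render(template_vars)
        mail.send(msg)
    except Exception as e:
        return str(e)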
diff --git a/crc/services/protocol_builder.py b/crc/services/protocol_builder.py
index 5fc5535f..8d1d7886 100644
--- a/crc/services/protocol_builder.py
+++ b/crc/services/protocol_builder.py
@@ -25,7 +25,7 @@ class ProtocolBuilderService(object):
def get_studies(user_id) -> {}:
ProtocolBuilderService.__enabled_or_raise()
if not isinstance(user_id, str):
- raise ApiError("invalid_user_id", "This user id is invalid: " + str(user_id))
+ raise ApiError("protocol_builder_error", "This user id is invalid: " + str(user_id))
response = requests.get(ProtocolBuilderService.STUDY_URL % user_id)
if response.ok and response.text:
pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
diff --git a/crc/services/study_service.py b/crc/services/study_service.py
index 98a8d15a..92ec265d 100644
--- a/crc/services/study_service.py
+++ b/crc/services/study_service.py
@@ -4,11 +4,13 @@ from typing import List
import requests
from SpiffWorkflow import WorkflowException
+from SpiffWorkflow.exceptions import WorkflowTaskExecException
from ldap3.core.exceptions import LDAPSocketOpenError
from crc import db, session, app
from crc.api.common import ApiError
-from crc.models.file import FileModel, FileModelSchema
+from crc.models.file import FileModel, FileModelSchema, File
+from crc.models.ldap import LdapSchema
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
@@ -18,6 +20,8 @@ from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
+from crc.services.approval_service import ApprovalService
+from crc.models.approval import Approval
class StudyService(object):
@@ -53,7 +57,11 @@ class StudyService(object):
study = Study.from_model(study_model)
study.categories = StudyService.get_categories()
workflow_metas = StudyService.__get_workflow_metas(study_id)
- study.files = FileService.get_files_for_study(study.id)
+ study.approvals = ApprovalService.get_approvals_for_study(study.id)
+ files = FileService.get_files_for_study(study.id)
+ files = (File.from_models(model, FileService.get_file_data(model.id),
+ FileService.get_doc_dictionary()) for model in files)
+ study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
# master spec and runs it.
@@ -78,8 +86,8 @@ class StudyService(object):
def delete_workflow(workflow):
for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
FileService.delete_file(file.id)
- for deb in workflow.dependencies:
- session.delete(deb)
+ for dep in workflow.dependencies:
+ session.delete(dep)
session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
session.query(WorkflowModel).filter_by(id=workflow.id).delete()
@@ -174,6 +182,7 @@ class StudyService(object):
return documents
+
@staticmethod
def get_investigators(study_id):
@@ -197,8 +206,7 @@ class StudyService(object):
@staticmethod
def get_ldap_dict_if_available(user_id):
try:
- ldap_service = LdapService()
- return ldap_service.user_info(user_id).__dict__
+ return LdapSchema().dump(LdapService().user_info(user_id))
except ApiError as ae:
app.logger.info(str(ae))
return {"error": str(ae)}
@@ -309,8 +317,10 @@ class StudyService(object):
for workflow_spec in new_specs:
try:
StudyService._create_workflow_model(study_model, workflow_spec)
+ except WorkflowTaskExecException as wtee:
+ errors.append(ApiError.from_task("workflow_startup_exception", str(wtee), wtee.task))
except WorkflowException as we:
- errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
+ errors.append(ApiError.from_task_spec("workflow_startup_exception", str(we), we.sender))
return errors
@staticmethod
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index d032b94a..93590d94 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -299,21 +299,27 @@ class WorkflowProcessor(object):
return WorkflowStatus.waiting
def hard_reset(self):
- """Recreate this workflow, but keep the data from the last completed task and add it back into the first task.
- This may be useful when a workflow specification changes, and users need to review all the
- prior steps, but don't need to reenter all the previous data.
+ """Recreate this workflow, but keep the data from the last completed task and add
+ it back into the first task. This may be useful when a workflow specification changes,
+ and users need to review all the prior steps, but they don't need to reenter all the previous data.
Returns the new version.
"""
+
+ # Create a new workflow based on the latest specs.
self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id)
- spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
- # spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
- bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
- bpmn_workflow.data = self.bpmn_workflow.data
- for task in bpmn_workflow.get_tasks(SpiffTask.READY):
- task.data = self.bpmn_workflow.last_task.data
- bpmn_workflow.do_engine_steps()
- self.bpmn_workflow = bpmn_workflow
+ new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
+ new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine)
+ new_bpmn_workflow.data = self.bpmn_workflow.data
+
+ # Reset the current workflow to the beginning - which we will consider to be the first task after the root
+ # element. This feels a little sketchy, but I think it is safe to assume root will have one child.
+ first_task = self.bpmn_workflow.task_tree.children[0]
+ first_task.reset_token(reset_data=False)
+ for task in new_bpmn_workflow.get_tasks(SpiffTask.READY):
+ task.data = first_task.data
+ new_bpmn_workflow.do_engine_steps()
+ self.bpmn_workflow = new_bpmn_workflow
def get_status(self):
return self.status_of(self.bpmn_workflow)
diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py
index c6cb8638..310bd7fd 100644
--- a/crc/services/workflow_service.py
+++ b/crc/services/workflow_service.py
@@ -7,7 +7,6 @@ from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
from SpiffWorkflow.bpmn.specs.UserTask import UserTask
-from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from flask import g
@@ -17,7 +16,6 @@ from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType
from crc.models.file import LookupDataModel
-from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
@@ -39,7 +37,9 @@ class WorkflowService(object):
the workflow Processor should be hidden behind this service.
This will help maintain a structure that avoids circular dependencies.
But for now, this contains tools for converting spiff-workflow models into our
- own API models with additional information and capabilities."""
+ own API models with additional information and capabilities. It also
+ handles the testing of a workflow specification, completing it with
+ random selections and attempting to mimic a front end as much as possible."""
@staticmethod
def make_test_workflow(spec_id):
@@ -58,15 +58,23 @@ class WorkflowService(object):
@staticmethod
def delete_test_data():
- for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
+ for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
StudyService.delete_study(study.id)
db.session.commit()
- db.session.query(UserModel).filter_by(uid="test").delete()
+
+ user = db.session.query(UserModel).filter_by(uid="test").first()
+ if user:
+ db.session.delete(user)
@staticmethod
- def test_spec(spec_id):
- """Runs a spec through it's paces to see if it results in any errors. Not fool-proof, but a good
- sanity check."""
+ def test_spec(spec_id, required_only=False):
+ """Runs a spec through it's paces to see if it results in any errors.
+ Not fool-proof, but a good sanity check. Returns the final data
+ output form the last task if successful.
+
+ required_only can be set to true, in which case this will run the
+ spec, only completing the required fields, rather than everything.
+ """
workflow_model = WorkflowService.make_test_workflow(spec_id)
@@ -74,8 +82,7 @@ class WorkflowService(object):
processor = WorkflowProcessor(workflow_model, validate_only=True)
except WorkflowException as we:
WorkflowService.delete_test_data()
- raise ApiError.from_task_spec("workflow_execution_exception", str(we),
- we.sender)
+ raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
while not processor.bpmn_workflow.is_completed():
try:
@@ -85,38 +92,57 @@ class WorkflowService(object):
task_api = WorkflowService.spiff_task_to_api_task(
task,
add_docs_and_forms=True) # Ensure we try to process the documentation, and raise those errors.
- WorkflowService.populate_form_with_random_data(task, task_api)
+ WorkflowService.populate_form_with_random_data(task, task_api, required_only)
task.complete()
except WorkflowException as we:
WorkflowService.delete_test_data()
- raise ApiError.from_task_spec("workflow_execution_exception", str(we),
- we.sender)
+ raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
+
WorkflowService.delete_test_data()
+ return processor.bpmn_workflow.last_task.data
@staticmethod
- def populate_form_with_random_data(task, task_api):
+ def populate_form_with_random_data(task, task_api, required_only):
"""populates a task with random data - useful for testing a spec."""
if not hasattr(task.task_spec, 'form'): return
- form_data = {}
+ form_data = task.data # Just like with the front end, we start with what was already there, and modify it.
for field in task_api.form.fields:
- if field.type == "enum":
- if len(field.options) > 0:
- random_choice = random.choice(field.options)
- if isinstance(random_choice, dict):
- form_data[field.id] = random.choice(field.options)['id']
- else:
- # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
- form_data[field.id] = random_choice.id ## Assume it is an EnumFormFieldOption
+ if required_only and (not field.has_validation(Task.VALIDATION_REQUIRED) or
+ field.get_validation(Task.VALIDATION_REQUIRED).lower().strip() != "true"):
+ continue # Don't include any fields that aren't specifically marked as required.
+ if field.has_property("read_only") and field.get_property("read_only").lower().strip() == "true":
+ continue # Don't mess about with read only fields.
+ if field.has_property(Task.PROP_OPTIONS_REPEAT):
+ group = field.get_property(Task.PROP_OPTIONS_REPEAT)
+ if group not in form_data:
+ form_data[group] = [{},{},{}]
+ for i in range(3):
+ form_data[group][i][field.id] = WorkflowService.get_random_data_for_field(field, task)
+ else:
+ form_data[field.id] = WorkflowService.get_random_data_for_field(field, task)
+ if task.data is None:
+ task.data = {}
+ task.data.update(form_data)
+
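# Shape sketch for the repeating-options branch above, assuming a field
# ExclusiveSpaceRoomID in a repeat group "exclusive": three group entries are
# generated, e.g. {'exclusive': [{'ExclusiveSpaceRoomID': ...}, ... x3]}.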
+ @staticmethod
+ def get_random_data_for_field(field, task):
+ if field.type == "enum":
+ if len(field.options) > 0:
+ random_choice = random.choice(field.options)
+ if isinstance(random_choice, dict):
+ return random.choice(field.options)['id']
else:
- raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
- " with no options" % field.id,
- task)
- elif field.type == "autocomplete":
- lookup_model = LookupService.get_lookup_model(task, field)
- if field.has_property(Task.PROP_LDAP_LOOKUP):
- form_data[field.id] = {
+ # fixme: why is it sometimes an EnumFormFieldOption, and other times not?
+ return random_choice.id # Assume it is an EnumFormFieldOption
+ else:
+ raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
+ " with no options" % field.id, task)
+ elif field.type == "autocomplete":
+ lookup_model = LookupService.get_lookup_model(task, field)
+ if field.has_property(Task.PROP_LDAP_LOOKUP): # All ldap records get the same person.
+ return {
"label": "dhf8r",
"value": "Dan Funk",
"data": {
@@ -126,32 +152,30 @@ class WorkflowService(object):
"email_address": "dhf8r@virginia.edu",
"department": "Depertment of Psychocosmographictology",
"affiliation": "Rousabout",
- "sponsor_type": "Staff"
+ "sponsor_type": "Staff"}
}
- }
- elif lookup_model:
- data = db.session.query(LookupDataModel).filter(
- LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
- options = []
- for d in data:
- options.append({"id": d.value, "name": d.label})
- form_data[field.id] = random.choice(options)
- else:
- raise ApiError.from_task("invalid_autocomplete", "The settings for this auto complete field "
- "are incorrect: %s " % field.id, task)
- elif field.type == "long":
- form_data[field.id] = random.randint(1, 1000)
- elif field.type == 'boolean':
- form_data[field.id] = random.choice([True, False])
- elif field.type == 'file':
- form_data[field.id] = random.randint(1, 100)
- elif field.type == 'files':
- form_data[field.id] = random.randrange(1, 100)
+ elif lookup_model:
+ data = db.session.query(LookupDataModel).filter(
+ LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
+ options = []
+ for d in data:
+ options.append({"id": d.value, "label": d.label})
+ return random.choice(options)
else:
- form_data[field.id] = WorkflowService._random_string()
- if task.data is None:
- task.data = {}
- task.data.update(form_data)
+ raise ApiError.from_task("unknown_lookup_option", "The settings for this auto complete field "
+ "are incorrect: %s " % field.id, task)
+ elif field.type == "long":
+ return random.randint(1, 1000)
+ elif field.type == 'boolean':
+ return random.choice([True, False])
+ elif field.type == 'file':
+ # fixme: produce something sensible for files.
+ return random.randint(1, 100)
+ elif field.type == 'files':
+ # fixme: produce something sensible for multiple files.
+ return random.randrange(1, 100)
+ else:
+ return WorkflowService._random_string()
def __get_options(self):
pass
@@ -272,10 +296,11 @@ class WorkflowService(object):
template = Template(raw_doc)
return template.render(**spiff_task.data)
except jinja2.exceptions.TemplateError as ue:
-
- # return "Error processing template. %s" % ue.message
- raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
- (spiff_task.task_spec.name, str(ue)), status_code=500)
+ raise ApiError.from_task(code="template_error", message="Error processing template for task %s: %s" %
+ (spiff_task.task_spec.name, str(ue)), task=spiff_task)
+ except TypeError as te:
+ raise ApiError.from_task(code="template_error", message="Error processing template for task %s: %s" %
+ (spiff_task.task_spec.name, str(te)), task=spiff_task)
# TODO: Catch additional errors and report back.
@staticmethod
@@ -293,12 +318,12 @@ class WorkflowService(object):
field.options.append({"id": d.value, "name": d.label})
@staticmethod
- def log_task_action(processor, spiff_task, action):
+ def log_task_action(user_uid, processor, spiff_task, action):
task = WorkflowService.spiff_task_to_api_task(spiff_task)
workflow_model = processor.workflow_model
task_event = TaskEventModel(
study_id=workflow_model.study_id,
- user_uid=g.user.uid,
+ user_uid=user_uid,
workflow_id=workflow_model.id,
workflow_spec_id=workflow_model.workflow_spec_id,
spec_version=processor.get_version_string(),
diff --git a/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx b/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx
index 0c555fdb..2df53330 100644
Binary files a/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx and b/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx differ
diff --git a/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn b/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn
new file mode 100644
index 00000000..236796b3
--- /dev/null
+++ b/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+ len(exclusive)
+
+
+
+
+ sum([1 for x in exclusive if x.get('ExclusiveSpaceAMComputingID',None) == None])
+
+
+
+
+ No exclusive spaces without Area Monitor
+
+ >0
+
+
+ 0
+
+
+ true
+
+
+
+ One or more exclusive spaces without an Area Monitor
+
+ >0
+
+
+ > 0
+
+
+ false
+
+
+
+ No exclusive spaces entered
+
+ 0
+
+
+
+
+
+ true
+
+
+
+
+
diff --git a/crc/static/bpmn/research_rampup/research_rampup.bpmn b/crc/static/bpmn/research_rampup/research_rampup.bpmn
index d3438d69..19588731 100644
--- a/crc/static/bpmn/research_rampup/research_rampup.bpmn
+++ b/crc/static/bpmn/research_rampup/research_rampup.bpmn
@@ -5,10 +5,7 @@
SequenceFlow_05ja25w
- ## **Beta Stage: All data entered will be destroyed before public launch**
-
-
-### UNIVERSITY OF VIRGINIA RESEARCH
+ ### UNIVERSITY OF VIRGINIA RESEARCH
[From Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance)
@@ -34,7 +31,7 @@ Schools are developing a process for the approval of ramp up requests and enforc
1. The Research Ramp-up Plan allows for one request to be entered for a single Principal Investigator. In the form that follows enter the Primary Investigator this request is for and other identifying information. The PI's School and Supervisor will be used as needed for approval routing.
2. Provide all available information in the forms that follow to provide an overview of where the research will resume, who will be involved, what supporting resources will be needed and what steps have been taken to assure compliance with [Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance).
3. After all forms have been completed, you will be presented with the option to create your Research Recovery Plan in Word format. Download the document and review it. If you see any corrections that need to be made, return to the corresponding form and make the correction.
-4. Once the generated Research Recovery Plan is finalize, proceed to the Plan Submission step to submit your plan for approval.
+4. Once the generated Research Recovery Plan is finalized, proceed to the Plan Submission step to submit your plan for approval.
+SequenceFlow_05ja25w
+SequenceFlow_0h50bp3
@@ -47,6 +44,7 @@ Enter the following information for the PI submitting this request
+
@@ -60,6 +58,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -68,6 +69,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -77,6 +81,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -85,6 +92,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -93,6 +103,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -101,6 +114,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -109,21 +125,28 @@ Enter the following information for the PI submitting this request
+
+
+
+
+
+
+
-
+
@@ -133,34 +156,37 @@ Enter the following information for the PI submitting this request
-
- #### People for whom you are requesting access
-Provide information on all researchers you are requesting approval for reentry into the previously entered lab/research and/or office space(s) for conducting research on-Grounds. (If there are personnel already working in the space, include them).
-
-**Note: no undergraduates will be allowed to work on-Grounds during Phase I.**
+
+ #### Personnel for whom you are requesting access
+Provide information on all personnel you are requesting approval for reentry into the previously entered lab, workspace and/or office space(s) for conducting research on-Grounds. (If there are personnel already working in the space, include them).
#### Exclusive Space previously entered
-{% for es in exclusive %}
-{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label }}
-{% else %}
-No exclusive space entered
-{% endfor %}
+{%+ for es in exclusive %}{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label }}{% if loop.last %}{% else %}, {% endif %}{% else %}No exclusive space entered{% endfor %}
#### Shared Space previously entered
-{% for ss in shared %}
-{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}
-{% else %}
-No shared space entered
-{% endfor %}
+{%+ for ss in shared %}{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}{% if loop.last %}{% else %}, {% endif %}{% else %}No shared space entered.{% endfor %}
+
+**Note: no undergraduates will be allowed to work on-Grounds during Phase I.**
@@ -168,8 +194,11 @@ No shared space entered
-
+
+
+
+
@@ -177,12 +206,10 @@ No shared space entered
-
+
-
-
-
-
+
+
@@ -193,24 +220,19 @@ No shared space entered
-
+
-
-
-
-
-
- Flow_1eiud85
- Flow_1nbjr72
+ Flow_0hc1r8a
+ Flow_1yxaewj
- #### If applicable, provide a list of any [Core Resources](https://research.virginia.edu/research-core-resources) you will utilize space or instruments in and name/email of contact person in the core you have coordinated your plan with. (Core facility managers are responsible for developing a plan for their space)
+ If applicable, provide a list of any [Core Resources](https://research.virginia.edu/research-core-resources) in which you will utilize space and/or instruments, along with the name(s) and email(s) of the contact person(s) in the core with whom you have coordinated your plan. (Core facility managers are responsible for developing a plan for their space)
@@ -224,16 +246,17 @@ No shared space entered
+
- Flow_15zy1q7
- Flow_12ie6w0
+ Flow_1n69wsr
+ Flow_13pusfu
- #### End of Workflow
-Place instruction here,
+ #### End of Research Ramp-up Plan Workflow
+Thank you for participating.
+Flow_05w8yd6
@@ -250,11 +273,13 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
-
+
-
+
+
+
@@ -262,55 +287,65 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
@@ -318,40 +353,25 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
- Flow_19xeq76
- Flow_16342pm
+ Flow_0o4tg9g
+ Flow_1n69wsr
-
- Flow_1v7r1tg
- Flow_19xeq76
- Flow_0qf2y84
- Flow_15zy1q7
- Flow_0ya8hw8
-
-
- Flow_0tk64b6
- Flow_12ie6w0
- Flow_0zz2hbq
- Flow_16342pm
- Flow_1eiud85
-
-
-
-
- #### Space managed exclusively by {{ PIComputingID.label }}
-Submit one entry for each space the PI is the exclusive investigator. If all space is shared with one or more other investigators, Click Save to skip this section and proceed to the Shared Space section.
+
+Submit one entry for each space the PI is the exclusive investigator. If all space is shared with one or more other investigators, click Save to skip this section and proceed to the Shared Space section.
-
+
-
+
-
+
+
+
@@ -360,71 +380,76 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
- Flow_0qf2y84
- Flow_0tk64b6
+ Flow_0uc4o6c
+ Flow_0o4tg9g
-
-
-
@@ -447,12 +472,9 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
- Flow_0ya8hw8
- Flow_0zz2hbq
+ Flow_13pusfu
+ Flow_0hc1r8a
-
-
- #### Distancing requirements:
Maintain social distancing by designing at least 9 feet of space between people during prolonged work; this will be accomplished by restricting the number of people in the lab to a density of ~250 sq. ft./person in lab areas. When moving around, a minimum of 6 feet of social distancing is required. Ideally, only one person should work per lab bench, and no more than one person should work at the same time in the same bay.
@@ -470,32 +492,23 @@ Maintain social distancing by designing space between people to be at least 9 fe
- Flow_0p2r1bo
- Flow_0tz5c2v
+ Flow_1itd8db
+ Flow_1lo964l
-
-
- Flow_1nbjr72
- Flow_0p2r1bo
- Flow_0mkh1wn
- Flow_1yqkpgu
- Flow_1c6m5wv
-
-
- Describe physical work arrangements for each lab. Show schematic of the lab and space organization to meet the distancing guidelines (see key safety expectations for ramp-up).
+ Describe physical work arrangements for each lab, workspace and/or office space previously entered. Show schematic of the space organization to meet the distancing guidelines (see key safety expectations for ramp-up).
- Show gross dimensions, location of desks, and equipment in blocks (not details) that show available space for work and foot traffic.
- Indicate total square footage for every lab/space that you are requesting to add personnel to in this application. If you would like help obtaining a floor plan for your lab, your department or dean's office can help. You can also create a hand drawing/block diagram of your space and the location of objects on graph paper.
- Upload your physical layout and workspace organization in the form of a jpg image or a pdf file. This can be hand-drawn or actual floor plans.
- Show and/or describe designated work location for each member (during their shift) in the lab when multiple members are present at a time to meet the distancing guidelines.
-- Provide a foot traffic plan (on the schematic) to indicate how people can move around while maintaining distancing requirements. This can be a freeform sketch on your floor plan showing where foot traffic can occur in your lab, and conditions, if any, to ensure distancing at all times. (e.g., direction to walk around a lab bench, rules for using shared equipment located in the lab, certain areas of lab prohibited from access, etc.).
-- Provide your initial weekly laboratory schedule (see excel template) for all members that you are requesting access for, indicating all shifts as necessary. If schedule changes, please submit your revised schedule through the web portal.
+- Provide a foot traffic plan (on the schematic) to indicate how people can move around while maintaining distancing requirements. This can be a freeform sketch on your floor plan showing where foot traffic can occur in your lab, and conditions, if any, to ensure distancing at all times. (e.g., direction to walk around a lab bench, rules for using shared equipment located in the lab, certain areas of lab prohibited from access, etc.).
+
@@ -504,11 +517,10 @@ Maintain social distancing by designing space between people to be at least 9 fe
- Flow_0mkh1wn
- Flow_0zrsh65
+ Flow_1lo964l
+ Flow_0wgdxa6
-
-
+ #### Health Safety Requirements:
Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/url?q=http://ehs.virginia.edu/files/Lab-Safety-Plan-During-COVID-19.docx&source=gmail&ust=1590687968958000&usg=AFQjCNE83uGDFtxGkKaxjuXGhTocu-FDmw) to create and upload a copy of your laboratory policy statement to all members which includes at a minimum the following details:
- Laboratory face covering rules, use of other PPE use as required
@@ -519,13 +531,12 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur
- Where and how to obtain PPE including face covering
-
+
- Flow_1yqkpgu
- Flow_1ox5nv6
+ Flow_0wgdxa6
+ Flow_0judgmp
-
@@ -569,41 +580,38 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur
- Flow_1c6m5wv
- Flow_0qbi47d
+ Flow_0judgmp
+ Flow_11uqavk
+ #### By submitting this request, you understand that every member listed in this form for on Grounds laboratory access will:
-- Complete online COVID awareness & precaution training module (link forthcoming-May 25)
+- Complete [online COVID awareness & precaution training module](https://researchcompliance.web.virginia.edu/training_html5/module_content/154/index.cfm)
- Complete daily health acknowledgement form signed (electronically) –email generated daily to those listed on your plan for access to on Grounds lab/research space
- Fill out daily work attendance log for all lab members following your school process to check in and out of work each day.
Flow_08njvvi
Flow_0j4rs82
-
-
-
-
-
-
- Flow_0zrsh65
- Flow_0tz5c2v
- Flow_1ox5nv6
- Flow_0qbi47d
- Flow_06873ag
-
-
- Flow_06873ag
+
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interaction.
+ Flow_11uqavk
+ Flow_0aqgwvu
+ CompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP
-
+
-
-
+ #### Approval Process
-The Research Ramp-up Plan and associated documents will be reviewed by{{ " " + ApprvlApprvrName1 }}{{ '.' if ApprvlApprvrName2 == 'n/a' else ' and ' + ApprvlApprvrName2 + '.' }} While waiting for approval, be sure that all required training has been completed and supplies secured. When the approval email notification is received, confirming the three questions below will allow you to proceed.
+The Research Ramp-up Plan and associated documents will be reviewed by{{ " " + ApprvlApprvrName1 }}{{ '.' if ApprvlApprvrName2 == 'n/a' else ' and ' + ApprvlApprvrName2 + '.' }}
+
+
+While waiting for approval, be sure that all required training has been completed and supplies secured. Additionally, if any Area Monitors were not known prior to submission, they will need to be discovered before proceeding.
+
+
+When the approval email notification is received, confirming the three questions below and adding any missing Area Monitors will enable the Save button.
If a rejection notification is received, go back to the first step that needs to be addressed and step through each subsequent form from that point.
@@ -613,28 +621,104 @@ If a rejection notification is received, go back to the first step that needs to
- Flow_07ge8uf
- Flow_1ufh44h
+ SequenceFlow_0qc39tw
+ #### Business Rule Task
+
+
+This step is internal to the system and does not require any user interaction.
+Flow_1e2qi9s
+Flow_08njvvi
@@ -653,254 +737,261 @@ If notification is received that the Research Ramp-up Plan approval process is n
Notify the Area Monitor for
-#### Exclusive Space Area Monitors
-{% for es in exclusive %}
-{{ es.ExclusiveSpaceAMComputingID.data.display_name }}
-{% else %}
-No exclusive space entered
-{% endfor %}
+#### Exclusive Space
+{%+ for es in exclusive %}{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label + " - " }}{% if es.ExclusiveSpaceAMComputingID is not defined %}No Area Monitor entered{% else %}{{ es.ExclusiveSpaceAMComputingID.label }}{% endif %}{% if loop.last %}{% else %}, {% endif %}{% else %}No exclusive space entered{% endfor %}
-#### Shared Space Area Monitors
-{% for ss in shared %}
-{{ ss.SharedSpaceAMComputingID.data.display_name }}
-{% else %}
-No shared space entered
-{% endfor %}
- Flow_1ufh44h
+
+
+#### Shared Space
+{%+ for ss in shared %}{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label + " - " }}{% if ss.SharedSpaceAMComputingID is not defined %}No Area Monitor entered{% else %}{{ ss.SharedSpaceAMComputingID.label }}{% endif %}{% if loop.last %}{% else %}, {% endif %}{% else %}No shared space entered.{% endfor %}
+ SequenceFlow_0qc39tw
+ Flow_0cpmvcw
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interaction.
+Flow_0j4rs82
+Flow_07ge8uf
+RequestApproval ApprvlApprvr1 ApprvlApprvr2
-
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interaction.
+Flow_16y8glw
- Flow_1v7r1tg
+ Flow_0uc4o6c
+ UpdateStudy title:PIComputingID.label pi:PIComputingID.value
+
+ #### Weekly Personnel Schedule(s)
+Provide initial weekly schedule(s) for the PI and all personnel for whom access has been requested, indicating each space they will be working in and all shifts, if applicable.
+
+##### Personnel and spaces they will work in previously entered
+{%+ for p in personnel %}{{ p.PersonnelComputingID.label + " - " + p.PersonnelSpace }}{% if loop.last %}{% else %}; {% endif %}{% endfor %}
+
+**Note:** If any schedule changes after approval, please re-submit revised schedule(s) here for re-approval.
+ Flow_1yxaewj
+ Flow_1itd8db
+ #### Business Rule Task
+
+
+This step is internal to the system and does not require any user interaction.
+ Flow_07ge8uf
+ Flow_0peeyne
+
+
+
+ #### Business Rule Task
+
+
+This step is internal to the system and does not require any user interaction.
+ Flow_0peeyne
+ Flow_0tqna2m
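
All of the rewritten display templates in this file use the same inline-loop idiom: {%+ ... %} keeps the rendered loop on a single line, loop.last suppresses the trailing comma, and the for-loop's {% else %} branch handles the empty list. A standalone sketch of the pattern, with made-up sample data:

from jinja2 import Template

template = Template(
    "{%+ for es in exclusive %}"
    "{{ es.ExclusiveSpaceRoomID + ' ' + es.ExclusiveSpaceBuilding.label }}"
    "{% if loop.last %}{% else %}, {% endif %}"
    "{% else %}No exclusive space entered{% endfor %}"
)

spaces = [
    {"ExclusiveSpaceRoomID": "120", "ExclusiveSpaceBuilding": {"label": "Gilmer Hall"}},
    {"ExclusiveSpaceRoomID": "304", "ExclusiveSpaceBuilding": {"label": "Thornton Hall"}},
]
print(template.render(exclusive=spaces))  # 120 Gilmer Hall, 304 Thornton Hall
print(template.render(exclusive=[]))      # No exclusive space entered
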
diff --git a/crc/static/bpmn/research_rampup/shared_area_monitors.dmn b/crc/static/bpmn/research_rampup/shared_area_monitors.dmn
new file mode 100644
index 00000000..c8e696d4
--- /dev/null
+++ b/crc/static/bpmn/research_rampup/shared_area_monitors.dmn
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+ len(shared)
+
+
+
+
+ sum([1 for x in shared if x.get('SharedSpaceAMComputingID',None) == None])
+
+
+
+
+ No shared spaces without an Area Monitor
+
+ >0
+
+
+ 0
+
+
+ true
+
+
+
+ One or more shared spaces without an Area Monitor
+
+ >0
+
+
+ > 0
+
+
+ false
+
+
+
+ No shared spaces entered
+
+ 0
+
+
+
+
+
+ true
+
+
+
+
+
diff --git a/crc/static/reference/rrt_documents.xlsx b/crc/static/reference/rrt_documents.xlsx
index 4e1663b2..cb09fd0f 100644
Binary files a/crc/static/reference/rrt_documents.xlsx and b/crc/static/reference/rrt_documents.xlsx differ
diff --git a/crc/static/templates/mails/ramp_up_approval_request.html b/crc/static/templates/mails/ramp_up_approval_request.html
new file mode 100644
index 00000000..506fdf16
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approval_request.html
@@ -0,0 +1,2 @@
+
A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+ Research Ramp-up Toolkit.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_approval_request.txt b/crc/static/templates/mails/ramp_up_approval_request.txt
new file mode 100644
index 00000000..53d8e1ef
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approval_request.txt
@@ -0,0 +1,2 @@
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit: https://rrt.uvadcos.io/app/approvals.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_approval_request_first_review.html b/crc/static/templates/mails/ramp_up_approval_request_first_review.html
new file mode 100644
index 00000000..1c7cb3c7
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approval_request_first_review.html
@@ -0,0 +1,2 @@
+
A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+ Research Ramp-up Toolkit.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_approval_request_first_review.txt b/crc/static/templates/mails/ramp_up_approval_request_first_review.txt
new file mode 100644
index 00000000..db6cc50e
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approval_request_first_review.txt
@@ -0,0 +1,2 @@
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit at https://rrt.uvadcos.io/app/approvals.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_approved.html b/crc/static/templates/mails/ramp_up_approved.html
new file mode 100644
index 00000000..57fc1dc4
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approved.html
@@ -0,0 +1 @@
+
Your Research Ramp-up Plan has been approved by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_approved.txt b/crc/static/templates/mails/ramp_up_approved.txt
new file mode 100644
index 00000000..2eec582b
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_approved.txt
@@ -0,0 +1 @@
+Your Research Ramp-up Plan has been approved by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_denied.html b/crc/static/templates/mails/ramp_up_denied.html
new file mode 100644
index 00000000..7a40c1ea
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_denied.html
@@ -0,0 +1 @@
+
Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_denied.txt b/crc/static/templates/mails/ramp_up_denied.txt
new file mode 100644
index 00000000..5fbaefda
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_denied.txt
@@ -0,0 +1 @@
+ Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_denied_first_approver.html b/crc/static/templates/mails/ramp_up_denied_first_approver.html
new file mode 100644
index 00000000..e58cae99
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_denied_first_approver.html
@@ -0,0 +1 @@
+
The Research Ramp-up Plan submitted by {{ primary_investigator }} was denied by {{ approver_2 }} and returned for requested updates. You may see comments related to this denial on your Research Ramp-up Toolkit Approval dashboard.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_denied_first_approver.txt b/crc/static/templates/mails/ramp_up_denied_first_approver.txt
new file mode 100644
index 00000000..7172c856
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_denied_first_approver.txt
@@ -0,0 +1 @@
+The Research Ramp-up Plan submitted by {{ primary_investigator }} was denied by {{ approver_2 }} and returned for requested updates. You may see comments related to this denial on your Research Ramp-up Toolkit Approval dashboard.
\ No newline at end of file
diff --git a/crc/static/templates/mails/ramp_up_submission.html b/crc/static/templates/mails/ramp_up_submission.html
new file mode 100644
index 00000000..2c57c916
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_submission.html
@@ -0,0 +1,5 @@
+
Your Research Ramp-up Plan (RRP) has been submitted for review by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}. After the review step is complete, you will receive an email notification either of its approval or of any additional information and/or modifications required, along with instructions on how to proceed. Return to the Research Ramp-up Plan application to proceed as instructed.
+
+
In the meantime, please make sure all required training has been completed and needed supplies secured. You will be asked to confirm that both of these requirements have been met before reopening the research space approved in your RRP.
+
+
Additionally, if there are any unknown Area Monitors for the spaces listed in your RRP, please contact your approvers to determine either who they are or how you can find out. Missing Area Monitors will need to be entered before proceeding as well.
diff --git a/crc/static/templates/mails/ramp_up_submission.txt b/crc/static/templates/mails/ramp_up_submission.txt
new file mode 100644
index 00000000..14c34500
--- /dev/null
+++ b/crc/static/templates/mails/ramp_up_submission.txt
@@ -0,0 +1,5 @@
+Your Research Ramp-up Plan (RRP) has been submitted for review by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}. After the review step is complete, you will receive an email notification either of its approval or of any additional information and/or modifications required, along with instructions on how to proceed. Return to the Research Ramp-up Plan application to proceed as instructed.
+
+In the meantime, please make sure all required training has been completed and needed supplies secured. You will be asked to confirm that both of these requirements have been met before reopening the research space approved in your RRP.
+
+Additionally, if there are any unknown Area Monitors for the spaces listed in your RRP, please contact your approvers to determine either who they are or how you can find out. Missing Area Monitors will need to be entered before proceeding as well.
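
Each notification ships as a paired .html/.txt body, which matches Flask-Mail's two-part message convention. A sketch of how one of these might be sent; the mail instance, helper name, and template lookup path are assumptions, since the wiring is not shown in this diff:

from flask import render_template
from flask_mail import Message

from crc import app, mail  # assumes 'mail' is the Flask-Mail instance

def send_ramp_up_submission(recipients, approver_1, approver_2=None):
    msg = Message('Research Ramp-up Plan Submitted',
                  sender=app.config['MAIL_DEFAULT_SENDER'],
                  recipients=recipients)
    # Assumes crc/static/templates is on the Jinja search path.
    msg.body = render_template('mails/ramp_up_submission.txt',
                               approver_1=approver_1, approver_2=approver_2)
    msg.html = render_template('mails/ramp_up_submission.html',
                               approver_1=approver_1, approver_2=approver_2)
    mail.send(msg)
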
diff --git a/migrations/versions/13424d5a6de8_.py b/migrations/versions/13424d5a6de8_.py
new file mode 100644
index 00000000..632a1761
--- /dev/null
+++ b/migrations/versions/13424d5a6de8_.py
@@ -0,0 +1,42 @@
+"""empty message
+
+Revision ID: 13424d5a6de8
+Revises: 5064b72284b7
+Create Date: 2020-06-02 18:17:29.990159
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '13424d5a6de8'
+down_revision = '5064b72284b7'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('ldap_model',
+ sa.Column('uid', sa.String(), nullable=False),
+ sa.Column('display_name', sa.String(), nullable=True),
+ sa.Column('given_name', sa.String(), nullable=True),
+ sa.Column('email_address', sa.String(), nullable=True),
+ sa.Column('telephone_number', sa.String(), nullable=True),
+ sa.Column('title', sa.String(), nullable=True),
+ sa.Column('department', sa.String(), nullable=True),
+ sa.Column('affiliation', sa.String(), nullable=True),
+ sa.Column('sponsor_type', sa.String(), nullable=True),
+ sa.Column('date_cached', sa.DateTime(timezone=True), nullable=True),
+ sa.PrimaryKeyConstraint('uid')
+ )
+ op.add_column('approval', sa.Column('date_approved', sa.DateTime(timezone=True), nullable=True))
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('approval', 'date_approved')
+ op.drop_table('ldap_model')
+ # ### end Alembic commands ###
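
The new ldap_model table looks like a cache for LDAP lookups. A sketch of the SQLAlchemy model this migration implies; the column list is taken from the migration itself, while the class name and db import are assumptions:

from crc import db

class LdapModel(db.Model):
    __tablename__ = 'ldap_model'
    uid = db.Column(db.String, primary_key=True)
    display_name = db.Column(db.String)
    given_name = db.Column(db.String)
    email_address = db.Column(db.String)
    telephone_number = db.Column(db.String)
    title = db.Column(db.String)
    department = db.Column(db.String)
    affiliation = db.Column(db.String)
    sponsor_type = db.Column(db.String)
    date_cached = db.Column(db.DateTime(timezone=True))
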
diff --git a/migrations/versions/17597692d0b0_.py b/migrations/versions/17597692d0b0_.py
new file mode 100644
index 00000000..0b15c956
--- /dev/null
+++ b/migrations/versions/17597692d0b0_.py
@@ -0,0 +1,28 @@
+"""empty message
+
+Revision ID: 17597692d0b0
+Revises: 13424d5a6de8
+Create Date: 2020-06-03 17:33:56.454339
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '17597692d0b0'
+down_revision = '13424d5a6de8'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('file', sa.Column('archived', sa.Boolean(), nullable=True, default=False))
+ op.execute("UPDATE file SET archived = false")
+ op.alter_column('file', 'archived', nullable=False)
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('file', 'archived')
+ # ### end Alembic commands ###
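
The three-step shape of this upgrade (add the column nullable, backfill, then tighten to NOT NULL) keeps the migration valid against a table that already has rows. A one-shot alternative sketch using a server-side default, if that trade-off is acceptable:

def upgrade():
    # Existing rows pick up the default as the column is added.
    op.add_column('file', sa.Column('archived', sa.Boolean(), nullable=False,
                                    server_default=sa.false()))
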
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 00000000..48e341a0
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,3 @@
+{
+ "lockfileVersion": 1
+}
diff --git a/tests/base_test.py b/tests/base_test.py
index f8ffd1ca..93294193 100644
--- a/tests/base_test.py
+++ b/tests/base_test.py
@@ -2,24 +2,27 @@
# IMPORTANT - Environment must be loaded before app, models, etc....
import os
-from sqlalchemy import Sequence
-
os.environ["TESTING"] = "true"
import json
import unittest
import urllib.parse
import datetime
-
-from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import StudyModel
-from crc.services.file_service import FileService
-from crc.services.study_service import StudyService
-from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
-from crc.models.user import UserModel
+from flask import g
+from sqlalchemy import Sequence
from crc import app, db, session
+from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_service import WorkflowService
from example_data import ExampleDataLoader
#UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@@ -95,7 +98,7 @@ class BaseTest(unittest.TestCase):
def tearDown(self):
ExampleDataLoader.clean_db()
- session.flush()
+ g.user = None
self.auths = {}
def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
@@ -107,23 +110,28 @@ class BaseTest(unittest.TestCase):
user_info = {'uid': user.uid}
query_string = self.user_info_to_query_string(user_info, redirect_url)
- rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)
+ rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
self.assertTrue(rv.status_code == 302)
self.assertTrue(str.startswith(rv.location, redirect_url))
user_model = session.query(UserModel).filter_by(uid=uid).first()
self.assertIsNotNone(user_model.display_name)
+ self.assertEqual(user_model.uid, uid)
+ self.assertTrue('user' in g, 'User should be in Flask globals')
+ self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid')
+
return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
- def load_example_data(self, use_crc_data=False):
+ def load_example_data(self, use_crc_data=False, use_rrt_data=False):
"""use_crc_data will cause this to load the mammoth collection of documents
- we built up developing crc, otherwise it depends on a small setup for
- running tests."""
-
+ we built up developing crc, use_rrt_data will do the same for the rrt project,
+ otherwise it depends on a small setup for running tests."""
from example_data import ExampleDataLoader
ExampleDataLoader.clean_db()
- if(use_crc_data):
+ if use_crc_data:
ExampleDataLoader().load_all()
+ elif use_rrt_data:
+ ExampleDataLoader().load_rrt()
else:
ExampleDataLoader().load_test_data()
@@ -158,6 +166,7 @@ class BaseTest(unittest.TestCase):
@staticmethod
def load_test_spec(dir_name, master_spec=False, category_id=None):
"""Loads a spec into the database based on a directory in /tests/data"""
+
if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
@@ -197,7 +206,7 @@ class BaseTest(unittest.TestCase):
for key, value in items:
query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))
- query_string_list.append('redirect=%s' % redirect_url)
+ query_string_list.append('redirect_url=%s' % redirect_url)
return '?%s' % '&'.join(query_string_list)
@@ -221,12 +230,12 @@ class BaseTest(unittest.TestCase):
db.session.commit()
return user
- def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
- study = session.query(StudyModel).first()
+ def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
+ study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
if study is None:
user = self.create_user(uid=uid)
study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
- user_uid=user.uid)
+ user_uid=user.uid, primary_investigator_id=primary_investigator_id)
db.session.add(study)
db.session.commit()
return study
@@ -248,3 +257,97 @@ class BaseTest(unittest.TestCase):
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
+
+ def create_approval(
+ self,
+ study=None,
+ workflow=None,
+ approver_uid=None,
+ status=None,
+ version=None,
+ ):
+ study = study or self.create_study()
+ workflow = workflow or self.create_workflow()
+ approver_uid = approver_uid or self.test_uid
+ status = status or ApprovalStatus.PENDING.value
+ version = version or 1
+ approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version)
+ db.session.add(approval)
+ db.session.commit()
+ return approval
+
+ def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
+ user = session.query(UserModel).filter_by(uid=user_uid).first()
+ self.assertIsNotNone(user)
+
+ rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
+ (workflow.id, str(soft_reset), str(hard_reset)),
+ headers=self.logged_in_headers(user),
+ content_type="application/json")
+ self.assert_success(rv)
+ json_data = json.loads(rv.get_data(as_text=True))
+ workflow_api = WorkflowApiSchema().load(json_data)
+ self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
+ return workflow_api
+
+ def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
+ prev_completed_task_count = workflow_in.completed_tasks
+ if isinstance(task_in, dict):
+ task_id = task_in["id"]
+ else:
+ task_id = task_in.id
+
+ user = session.query(UserModel).filter_by(uid=user_uid).first()
+ self.assertIsNotNone(user)
+
+ rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
+ headers=self.logged_in_headers(user=user),
+ content_type="application/json",
+ data=json.dumps(dict_data))
+ if error_code:
+ self.assert_failure(rv, error_code=error_code)
+ return
+
+ self.assert_success(rv)
+ json_data = json.loads(rv.get_data(as_text=True))
+
+ # Assure stats are updated on the model
+ workflow = WorkflowApiSchema().load(json_data)
+ # The total number of tasks may change over time, as users move through gateways
+ # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
+ self.assertIsNotNone(workflow.total_tasks)
+ self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+
+ # Assure a record exists in the Task Events
+ task_events = session.query(TaskEventModel) \
+ .filter_by(workflow_id=workflow.id) \
+ .filter_by(task_id=task_id) \
+ .order_by(TaskEventModel.date.desc()).all()
+ self.assertGreater(len(task_events), 0)
+ event = task_events[0]
+ self.assertIsNotNone(event.study_id)
+ self.assertEqual(user_uid, event.user_uid)
+ self.assertEqual(workflow.id, event.workflow_id)
+ self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+ self.assertEqual(workflow.spec_version, event.spec_version)
+ self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+ self.assertEqual(task_in.id, task_id)
+ self.assertEqual(task_in.name, event.task_name)
+ self.assertEqual(task_in.title, event.task_title)
+ self.assertEqual(task_in.type, event.task_type)
+ self.assertEqual("COMPLETED", event.task_state)
+
+ # Not sure what voodoo is happening inside of marshmallow to get me in this state.
+ if isinstance(task_in.multi_instance_type, MultiInstanceType):
+ self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+ else:
+ self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+ self.assertEqual(task_in.multi_instance_count, event.mi_count)
+ self.assertEqual(task_in.multi_instance_index, event.mi_index)
+ self.assertEqual(task_in.process_name, event.process_name)
+ self.assertIsNotNone(event.date)
+
+
+ workflow = WorkflowApiSchema().load(json_data)
+ return workflow
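
The new get_workflow_api and complete_form helpers give tests a compact request/response loop. A sketch of a test driving the two_forms spec with them (the form data and task name mirror the usage in test_approvals_api.py below):

class TwoFormsFlowTest(BaseTest):
    def test_walk_through_two_forms(self):
        self.load_example_data()
        self.load_test_spec('two_forms')
        workflow = self.create_workflow('two_forms')

        # Fetch the current state, then post data for the first task.
        workflow_api = self.get_workflow_api(workflow)
        workflow_api = self.complete_form(workflow_api, workflow_api.next_task,
                                          {"color": "blue"})
        self.assertEqual("StepTwo", workflow_api.next_task.name)
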
diff --git a/tests/data/decision_table/decision_table.bpmn b/tests/data/decision_table/decision_table.bpmn
index 796233e5..82bcb385 100644
--- a/tests/data/decision_table/decision_table.bpmn
+++ b/tests/data/decision_table/decision_table.bpmn
@@ -1,5 +1,5 @@
-
+SequenceFlow_1ma1wxb
@@ -8,7 +8,11 @@
-
+
+
+
+
+ SequenceFlow_1ma1wxb
@@ -26,38 +30,37 @@ Based on the information you provided (Ginger left {{num_presents}}, we recommen
## {{message}}
-We hope you both have an excellent day!
-
+We hope you both have an excellent day!
SequenceFlow_0grui6f
diff --git a/tests/data/exclusive_gateway/exclusive_gateway.bpmn b/tests/data/exclusive_gateway/exclusive_gateway.bpmn
index 1c7e55fe..8467c954 100644
--- a/tests/data/exclusive_gateway/exclusive_gateway.bpmn
+++ b/tests/data/exclusive_gateway/exclusive_gateway.bpmn
@@ -8,7 +8,11 @@
-
+
+
+
+
+ SequenceFlow_1pnq3kg
diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn
index 81f355c3..628f1bd4 100644
--- a/tests/data/random_fact/random_fact.bpmn
+++ b/tests/data/random_fact/random_fact.bpmn
@@ -1,5 +1,5 @@
-
+SequenceFlow_0c7wlth
@@ -108,6 +108,9 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
+
+
+
@@ -121,8 +124,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
SequenceFlow_0641sh6
-
-
+
@@ -155,6 +157,18 @@ Your random fact is:
@@ -164,35 +178,23 @@ Your random fact is:
diff --git a/tests/data/repeat_form/repeat_form.bpmn b/tests/data/repeat_form/repeat_form.bpmn
new file mode 100644
index 00000000..f0e3f922
--- /dev/null
+++ b/tests/data/repeat_form/repeat_form.bpmn
@@ -0,0 +1,47 @@
+
+
+
+
+ SequenceFlow_0lvudp8
+
+
+
+ SequenceFlow_02vev7n
+ SequenceFlow_0lvudp8
+ SequenceFlow_02vev7n
diff --git a/tests/data/required_fields/required_fields.bpmn b/tests/data/required_fields/required_fields.bpmn
new file mode 100644
index 00000000..7612f69b
--- /dev/null
+++ b/tests/data/required_fields/required_fields.bpmn
@@ -0,0 +1,48 @@
+
+
+
+
+ SequenceFlow_0lvudp8
+
+
+
+ SequenceFlow_02vev7n
+ SequenceFlow_0lvudp8
+ SequenceFlow_02vev7n
diff --git a/tests/test_approvals_api.py b/tests/test_approvals_api.py
index 393831e7..ed0f7c5d 100644
--- a/tests/test_approvals_api.py
+++ b/tests/test_approvals_api.py
@@ -1,113 +1,260 @@
import json
+import random
+import string
+
+from flask import g
+
from tests.base_test import BaseTest
-
-from crc import app, db, session
-from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
-
-
-APPROVAL_PAYLOAD = {
- 'id': None,
- 'approver': {
- 'uid': 'bgb22',
- 'display_name': 'Billy Bob (bgb22)',
- 'title': 'E42:He\'s a hoopy frood',
- 'department': 'E0:EN-Eng Study of Parallel Universes'
- },
- 'title': 'El Study',
- 'status': 'DECLINED',
- 'version': 1,
- 'message': 'Incorrect documents',
- 'associated_files': [
- {
- 'id': 42,
- 'name': 'File 1',
- 'content_type': 'document'
- },
- {
- 'id': 43,
- 'name': 'File 2',
- 'content_type': 'document'
- }
- ],
- 'workflow_id': 1,
- 'study_id': 1
-}
+from crc import session, db
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.study import StudyModel
+from crc.models.workflow import WorkflowModel
class TestApprovals(BaseTest):
def setUp(self):
"""Initial setup shared by all TestApprovals tests"""
self.load_example_data()
- self.study = self.create_study()
- self.workflow = self.create_workflow('random_fact')
- # TODO: Move to base_test as a helper
- self.approval = ApprovalModel(
- study=self.study,
- workflow=self.workflow,
- approver_uid='arc93',
- status=ApprovalStatus.WAITING.value,
- version=1
- )
- session.add(self.approval)
- self.approval_2 = ApprovalModel(
- study=self.study,
- workflow=self.workflow,
- approver_uid='dhf8r',
- status=ApprovalStatus.WAITING.value,
- version=1
+ # Add a study with 2 approvers
+ study_workflow_approvals_1 = self._create_study_workflow_approvals(
+ user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp",
+ approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value]
)
- session.add(self.approval_2)
+ self.study = study_workflow_approvals_1['study']
+ self.workflow = study_workflow_approvals_1['workflow']
+ self.approval = study_workflow_approvals_1['approvals'][0]
+ self.approval_2 = study_workflow_approvals_1['approvals'][1]
- session.commit()
+ # Add a study with 1 approver
+ study_workflow_approvals_2 = self._create_study_workflow_approvals(
+ user_uid="dhf8r", title="second study", primary_investigator_id="dhf8r",
+ approver_uids=["lb3dp"], statuses=[ApprovalStatus.PENDING.value]
+ )
+ self.unrelated_study = study_workflow_approvals_2['study']
+ self.unrelated_workflow = study_workflow_approvals_2['workflow']
+ self.approval_3 = study_workflow_approvals_2['approvals'][0]
def test_list_approvals_per_approver(self):
"""Only approvals associated with approver should be returned"""
approver_uid = self.approval_2.approver_uid
- rv = self.app.get(f'/v1.0/approval?approver_uid={approver_uid}', headers=self.logged_in_headers())
+ rv = self.app.get(f'/v1.0/approval', headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
- # Stored approvals are 2
+ # Stored approvals are 3
approvals_count = ApprovalModel.query.count()
- self.assertEqual(approvals_count, 2)
+ self.assertEqual(approvals_count, 3)
# but Dan's approvals should be only 1
self.assertEqual(len(response), 1)
# Confirm approver UID matches returned payload
- approval = ApprovalSchema().load(response[0])
- self.assertEqual(approval.approver['uid'], approver_uid)
+ approval = response[0]
+ self.assertEqual(approval['approver']['uid'], approver_uid)
- def test_list_approvals_per_admin(self):
- """All approvals will be returned"""
- rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
+ def test_list_approvals_as_user(self):
+ """All approvals as different user"""
+ rv = self.app.get('/v1.0/approval?as_user=lb3dp', headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
- # Returned approvals should match what's in the db
- approvals_count = ApprovalModel.query.count()
+ # Returned approvals should match what's in the db for user lb3dp; we should get one
+ # approval back per study (2 studies), and that approval should have one related approval.
response_count = len(response)
- self.assertEqual(approvals_count, response_count)
+ self.assertEqual(2, response_count)
- def test_update_approval(self):
- """Approval status will be updated"""
- approval_id = self.approval.id
- data = dict(APPROVAL_PAYLOAD)
- data['id'] = approval_id
+ rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
+ self.assert_success(rv)
+ response = json.loads(rv.get_data(as_text=True))
+ response_count = len(response)
+ self.assertEqual(1, response_count)
+ self.assertEqual(1, len(response[0]['related_approvals'])) # this approval has a related approval.
- self.assertEqual(self.approval.status, ApprovalStatus.WAITING.value)
+ def test_update_approval_fails_if_not_the_approver(self):
+ approval = session.query(ApprovalModel).filter_by(approver_uid='lb3dp').first()
+ data = {'id': approval.id,
+ "approver_uid": "dhf8r",
+ 'message': "Approved. I like the cut of your jib.",
+ 'status': ApprovalStatus.APPROVED.value}
- rv = self.app.put(f'/v1.0/approval/{approval_id}',
+ self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+ rv = self.app.put(f'/v1.0/approval/{approval.id}',
content_type="application/json",
- headers=self.logged_in_headers(),
+ headers=self.logged_in_headers(), # As dhf8r
+ data=json.dumps(data))
+ self.assert_failure(rv)
+
+ def test_accept_approval(self):
+ approval = session.query(ApprovalModel).filter_by(approver_uid='dhf8r').first()
+ data = {'id': approval.id,
+ "approver": {"uid": "dhf8r"},
+ 'message': "Approved. I like the cut of your jib.",
+ 'status': ApprovalStatus.APPROVED.value}
+
+ self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+ rv = self.app.put(f'/v1.0/approval/{approval.id}',
+ content_type="application/json",
+ headers=self.logged_in_headers(), # As dhf8r
data=json.dumps(data))
self.assert_success(rv)
- session.refresh(self.approval)
+ session.refresh(approval)
# Updated record should now have the data sent to the endpoint
- self.assertEqual(self.approval.message, data['message'])
- self.assertEqual(self.approval.status, ApprovalStatus.DECLINED.value)
+ self.assertEqual(approval.message, data['message'])
+ self.assertEqual(approval.status, ApprovalStatus.APPROVED.value)
+
+ def test_decline_approval(self):
+ approval = session.query(ApprovalModel).filter_by(approver_uid='dhf8r').first()
+ data = {'id': approval.id,
+ "approver": {"uid": "dhf8r"},
+ 'message': "Declined. I find the cut of your jib lacking.",
+ 'status': ApprovalStatus.DECLINED.value}
+
+ self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+ rv = self.app.put(f'/v1.0/approval/{approval.id}',
+ content_type="application/json",
+ headers=self.logged_in_headers(), # As dhf8r
+ data=json.dumps(data))
+ self.assert_success(rv)
+
+ session.refresh(approval)
+
+ # Updated record should now have the data sent to the endpoint
+ self.assertEqual(approval.message, data['message'])
+ self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)
+
+ def test_csv_export(self):
+ self.load_test_spec('two_forms')
+ self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')
+
+ # Get all workflows
+ workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()
+
+ # For each workflow, complete all tasks
+ for workflow in workflows:
+ workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+ self.assertEqual('two_forms', workflow_api.workflow_spec_id)
+
+ # Log current user out.
+ g.user = None
+ self.assertIsNone(g.user)
+
+ # Complete the form for Step one and post it.
+ self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)
+
+ # Get the next Task
+ workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+ self.assertEqual("StepTwo", workflow_api.next_task.name)
+
+ # Get all user Tasks and check that the data have been saved
+ task = workflow_api.next_task
+ self.assertIsNotNone(task.data)
+ for val in task.data.values():
+ self.assertIsNotNone(val)
+
+ rv = self.app.get(f'/v1.0/approval/csv', headers=self.logged_in_headers())
+ self.assert_success(rv)
+
+ def test_all_approvals(self):
+ self._add_lots_of_random_approvals()
+
+ not_canceled = session.query(ApprovalModel).filter(ApprovalModel.status != 'CANCELED').all()
+ not_canceled_study_ids = []
+ for a in not_canceled:
+ if a.study_id not in not_canceled_study_ids:
+ not_canceled_study_ids.append(a.study_id)
+
+ rv_all = self.app.get(f'/v1.0/all_approvals?status=false', headers=self.logged_in_headers())
+ self.assert_success(rv_all)
+ all_data = json.loads(rv_all.get_data(as_text=True))
+ self.assertEqual(len(all_data), len(not_canceled_study_ids), 'Should return all non-canceled approvals, grouped by study')
+
+ all_approvals = session.query(ApprovalModel).all()
+ all_approvals_study_ids = []
+ for a in all_approvals:
+ if a.study_id not in all_approvals_study_ids:
+ all_approvals_study_ids.append(a.study_id)
+
+ rv_all = self.app.get(f'/v1.0/all_approvals?status=true', headers=self.logged_in_headers())
+ self.assert_success(rv_all)
+ all_data = json.loads(rv_all.get_data(as_text=True))
+ self.assertEqual(len(all_data), len(all_approvals_study_ids), 'Should return all approvals, grouped by study')
+
+ def test_approvals_counts(self):
+ statuses = [name for name, value in ApprovalStatus.__members__.items()]
+ self._add_lots_of_random_approvals()
+
+ # Get the counts
+ rv_counts = self.app.get(f'/v1.0/approval-counts', headers=self.logged_in_headers())
+ self.assert_success(rv_counts)
+ counts = json.loads(rv_counts.get_data(as_text=True))
+
+ # Get the actual approvals
+ rv_approvals = self.app.get(f'/v1.0/approval', headers=self.logged_in_headers())
+ self.assert_success(rv_approvals)
+ approvals = json.loads(rv_approvals.get_data(as_text=True))
+
+ # Tally up the number of approvals in each status category
+ manual_counts = {}
+ for status in statuses:
+ manual_counts[status] = 0
+
+ for approval in approvals:
+ manual_counts[approval['status']] += 1
+
+ # Numbers in each category should match
+ for status in statuses:
+ self.assertEqual(counts[status], manual_counts[status], 'Approval counts for status %s should match' % status)
+
+ # Total number of approvals should match
+ total_counts = sum(counts[status] for status in statuses)
+ self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')
+
+ def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
+ workflow_spec_name="random_fact"):
+ study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
+ workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
+ approvals = []
+
+ for i in range(len(approver_uids)):
+ approvals.append(self.create_approval(
+ study=study,
+ workflow=workflow,
+ approver_uid=approver_uids[i],
+ status=statuses[i],
+ version=1
+ ))
+
+ return {
+ 'study': study,
+ 'workflow': workflow,
+ 'approvals': approvals,
+ }
+
+ def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
+ num_studies_before = db.session.query(StudyModel).count()
+ statuses = [name for name, value in ApprovalStatus.__members__.items()]
+
+ # Add a whole bunch of approvals with random statuses
+ for i in range(n):
+ approver_uids = random.choices(["lb3dp", "dhf8r"])
+ self._create_study_workflow_approvals(
+ user_uid=random.choice(["lb3dp", "dhf8r"]),
+ title="".join(random.choices(string.ascii_lowercase, k=64)),
+ primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
+ approver_uids=approver_uids,
+ statuses=random.choices(statuses, k=len(approver_uids)),
+ workflow_spec_name=workflow_spec_name
+ )
+
+ session.flush()
+ num_studies_after = db.session.query(StudyModel).count()
+ self.assertEqual(num_studies_after, num_studies_before + n)
+
diff --git a/tests/test_approvals_service.py b/tests/test_approvals_service.py
index 1ec6db75..26a26ef4 100644
--- a/tests/test_approvals_service.py
+++ b/tests/test_approvals_service.py
@@ -15,13 +15,14 @@ class TestApprovalsService(BaseTest):
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr" )
+
ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
- self.assertEquals(1, db.session.query(ApprovalModel).count())
+ self.assertEqual(1, db.session.query(ApprovalModel).count())
model = db.session.query(ApprovalModel).first()
- self.assertEquals(workflow.study_id, model.study_id)
- self.assertEquals(workflow.id, model.workflow_id)
- self.assertEquals("dhf8r", model.approver_uid)
- self.assertEquals(1, model.version)
+ self.assertEqual(workflow.study_id, model.study_id)
+ self.assertEqual(workflow.id, model.workflow_id)
+ self.assertEqual("dhf8r", model.approver_uid)
+ self.assertEqual(1, model.version)
def test_new_requests_dont_add_if_approval_exists_for_current_workflow(self):
self.create_reference_document()
@@ -32,9 +33,9 @@ class TestApprovalsService(BaseTest):
ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
- self.assertEquals(1, db.session.query(ApprovalModel).count())
+ self.assertEqual(1, db.session.query(ApprovalModel).count())
model = db.session.query(ApprovalModel).first()
- self.assertEquals(1, model.version)
+ self.assertEqual(1, model.version)
def test_new_approval_requests_after_file_modification_create_new_requests(self):
self.load_example_data()
@@ -51,9 +52,20 @@ class TestApprovalsService(BaseTest):
binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")
ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
- self.assertEquals(2, db.session.query(ApprovalModel).count())
+ self.assertEqual(2, db.session.query(ApprovalModel).count())
models = db.session.query(ApprovalModel).order_by(ApprovalModel.version).all()
- self.assertEquals(1, models[0].version)
- self.assertEquals(2, models[1].version)
+ self.assertEqual(1, models[0].version)
+ self.assertEqual(2, models[1].version)
+ def test_new_approval_sends_proper_emails(self):
+ self.assertEqual(1, 1)
+ def test_new_approval_failed_ldap_lookup(self):
+ # failed lookup should send email to sartographysupport@googlegroups.com + Cheryl
+ self.assertEqual(1, 1)
+
+ def test_approve_approval_sends_proper_emails(self):
+ self.assertEqual(1, 1)
+
+ def test_deny_approval_sends_proper_emails(self):
+ self.assertEqual(1, 1)
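
The four stubs above presumably want to assert on outgoing mail. Flask-Mail's record_messages context manager captures messages without a real SMTP server; a sketch of how the first stub might be fleshed out, assuming the app exposes its Mail instance as crc.mail and that the subject mentions "Research Ramp-up" (both assumptions):

from crc import mail
from crc.services.approval_service import ApprovalService

def test_new_approval_sends_proper_emails(self):
    self.create_reference_document()
    workflow = self.create_workflow('random_fact')
    with mail.record_messages() as outbox:
        ApprovalService.add_approval(study_id=workflow.study_id,
                                     workflow_id=workflow.id,
                                     approver_uid="dhf8r")
    self.assertEqual(1, len(outbox))
    self.assertIn('Research Ramp-up', outbox[0].subject)  # assumed subject text
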
diff --git a/tests/test_authentication.py b/tests/test_authentication.py
index 11b77d07..7d706949 100644
--- a/tests/test_authentication.py
+++ b/tests/test_authentication.py
@@ -1,29 +1,73 @@
-from tests.base_test import BaseTest
+import json
+from calendar import timegm
+from datetime import timezone, datetime, timedelta
-from crc import db
+import jwt
+
+from tests.base_test import BaseTest
+from crc import db, app
+from crc.api.common import ApiError
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.study import StudySchema, StudyModel
from crc.models.user import UserModel
class TestAuthentication(BaseTest):
- def test_auth_token(self):
- self.load_example_data()
- user = UserModel(uid="dhf8r")
- auth_token = user.encode_auth_token()
- self.assertTrue(isinstance(auth_token, bytes))
- self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
+ def tearDown(self):
+ # Assure we set the production flag back to false.
+ app.config['PRODUCTION'] = False
+ super().tearDown()
- def test_backdoor_auth_creates_user(self):
- new_uid = 'lb3dp' ## Assure this user id is in the fake responses from ldap.
+ def test_auth_token(self):
+ # Save the original timeout setting
+ orig_ttl = float(app.config['TOKEN_AUTH_TTL_HOURS'])
+
+ self.load_example_data()
+
+ # Set the timeout to something else
+ new_ttl = 4.0
+ app.config['TOKEN_AUTH_TTL_HOURS'] = new_ttl
+ user_1 = UserModel(uid="dhf8r")
+ expected_exp_1 = timegm((datetime.utcnow() + timedelta(hours=new_ttl)).utctimetuple())
+ auth_token_1 = user_1.encode_auth_token()
+ self.assertTrue(isinstance(auth_token_1, bytes))
+ self.assertEqual("dhf8r", user_1.decode_auth_token(auth_token_1).get("sub"))
+ actual_exp_1 = user_1.decode_auth_token(auth_token_1).get("exp")
+ self.assertTrue(expected_exp_1 - 1000 <= actual_exp_1 <= expected_exp_1 + 1000)
+
+ # Set the timeout to something else
+ neg_ttl = -0.01
+ app.config['TOKEN_AUTH_TTL_HOURS'] = neg_ttl
+ user_2 = UserModel(uid="dhf8r")
+ expected_exp_2 = timegm((datetime.utcnow() + timedelta(hours=neg_ttl)).utctimetuple())
+ auth_token_2 = user_2.encode_auth_token()
+ self.assertTrue(isinstance(auth_token_2, bytes))
+ with self.assertRaises(ApiError) as api_error:
+ with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
+ user_2.decode_auth_token(auth_token_2)
+ self.assertEqual(api_error.exception.status_code, 400, 'Should raise an API Error if token is expired')
+
+ # Set the timeout back to where it was
+ app.config['TOKEN_AUTH_TTL_HOURS'] = orig_ttl
+ user_3 = UserModel(uid="dhf8r")
+ expected_exp_3 = timegm((datetime.utcnow() + timedelta(hours=orig_ttl)).utctimetuple())
+ auth_token_3 = user_3.encode_auth_token()
+ self.assertTrue(isinstance(auth_token_3, bytes))
+ actual_exp_3 = user_3.decode_auth_token(auth_token_3).get("exp")
+ self.assertTrue(expected_exp_3 - 1000 <= actual_exp_3 <= expected_exp_3 + 1000)
+
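
The TTL assertions above imply that encode_auth_token stamps an exp claim derived from TOKEN_AUTH_TTL_HOURS. A minimal sketch of that presumed implementation with PyJWT (the real UserModel methods may differ):

from datetime import datetime, timedelta
import jwt

def encode_auth_token(uid, secret, ttl_hours):
    payload = {
        'sub': uid,
        'exp': datetime.utcnow() + timedelta(hours=float(ttl_hours)),
    }
    return jwt.encode(payload, secret, algorithm='HS256')

def decode_auth_token(token, secret):
    # Raises jwt.exceptions.ExpiredSignatureError once 'exp' has passed;
    # the service layer wraps that in an ApiError with status 400.
    return jwt.decode(token, secret, algorithms=['HS256'])
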
+ def test_non_production_auth_creates_user(self):
+ new_uid = 'lb3dp' ## Assure this user id is in the fake responses from ldap.
self.load_example_data()
user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
self.assertIsNone(user)
user_info = {'uid': new_uid, 'first_name': 'Cordi', 'last_name': 'Nator',
- 'email_address': 'czn1z@virginia.edu'}
+ 'email_address': 'czn1z@virginia.edu'}
redirect_url = 'http://worlds.best.website/admin'
query_string = self.user_info_to_query_string(user_info, redirect_url)
- url = '/v1.0/sso_backdoor%s' % query_string
+ url = '/v1.0/login%s' % query_string
rv_1 = self.app.get(url, follow_redirects=False)
self.assertTrue(rv_1.status_code == 302)
self.assertTrue(str.startswith(rv_1.location, redirect_url))
@@ -38,22 +82,30 @@ class TestAuthentication(BaseTest):
self.assertTrue(rv_2.status_code == 302)
self.assertTrue(str.startswith(rv_2.location, redirect_url))
- def test_normal_auth_creates_user(self):
- new_uid = 'lb3dp' # This user is in the test ldap system.
+ def test_production_auth_creates_user(self):
+ # Switch production mode on
+ app.config['PRODUCTION'] = True
+
self.load_example_data()
- user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
+
+ new_uid = 'lb3dp' # This user is in the test ldap system.
+ user = db.session.query(UserModel).filter_by(uid=new_uid).first()
self.assertIsNone(user)
redirect_url = 'http://worlds.best.website/admin'
headers = dict(Uid=new_uid)
+ db.session.flush()
rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)
- self.assert_success(rv)
- user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
- self.assertIsNotNone(user)
- self.assertEquals(new_uid, user.uid)
- self.assertEquals("Laura Barnes", user.display_name)
- self.assertEquals("lb3dp@virginia.edu", user.email_address)
- self.assertEquals("E0:Associate Professor of Systems and Information Engineering", user.title)
+ self.assert_success(rv)
+ user = db.session.query(UserModel).filter_by(uid=new_uid).first()
+ self.assertIsNotNone(user)
+ self.assertEqual(new_uid, user.uid)
+ self.assertEqual("Laura Barnes", user.display_name)
+ self.assertEqual("lb3dp@virginia.edu", user.email_address)
+ self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title)
+
+ # Switch production mode back off
+ app.config['PRODUCTION'] = False
def test_current_user_status(self):
self.load_example_data()
@@ -67,3 +119,108 @@ class TestAuthentication(BaseTest):
user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
self.assert_success(rv)
+
+ def test_admin_can_access_admin_only_endpoints(self):
+ # Switch production mode on
+ app.config['PRODUCTION'] = True
+
+ self.load_example_data()
+
+ admin_uids = app.config['ADMIN_UIDS']
+ self.assertGreater(len(admin_uids), 0)
+ admin_uid = admin_uids[0]
+ self.assertEqual(admin_uid, 'dhf8r') # This user is in the test ldap system.
+ admin_headers = dict(Uid=admin_uid)
+
+ rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers)
+ self.assert_success(rv)
+
+ admin_user = db.session.query(UserModel).filter(UserModel.uid == admin_uid).first()
+ self.assertIsNotNone(admin_user)
+ self.assertEqual(admin_uid, admin_user.uid)
+
+ admin_study = self._make_fake_study(admin_uid)
+
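+        # Build a Bearer authorization header from the admin user's JWT.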
+ admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode())
+
+ rv_add_study = self.app.post(
+ '/v1.0/study',
+ content_type="application/json",
+ headers=admin_token_headers,
+ data=json.dumps(StudySchema().dump(admin_study)),
+ follow_redirects=False
+ )
+ self.assert_success(rv_add_study, 'Admin user should be able to add a study')
+
+ new_admin_study = json.loads(rv_add_study.get_data(as_text=True))
+ db_admin_study = db.session.query(StudyModel).filter_by(id=new_admin_study['id']).first()
+ self.assertIsNotNone(db_admin_study)
+
+ rv_del_study = self.app.delete(
+ '/v1.0/study/%i' % db_admin_study.id,
+ follow_redirects=False,
+ headers=admin_token_headers
+ )
+ self.assert_success(rv_del_study, 'Admin user should be able to delete a study')
+
+ # Switch production mode back off
+ app.config['PRODUCTION'] = False
+
+ def test_nonadmin_cannot_access_admin_only_endpoints(self):
+ # Switch production mode on
+ app.config['PRODUCTION'] = True
+
+ self.load_example_data()
+
+ # Non-admin user should not be able to delete a study
+ non_admin_uid = 'lb3dp'
+ admin_uids = app.config['ADMIN_UIDS']
+ self.assertGreater(len(admin_uids), 0)
+ self.assertNotIn(non_admin_uid, admin_uids)
+
+ non_admin_headers = dict(Uid=non_admin_uid)
+
+ rv = self.app.get(
+ 'v1.0/login',
+ follow_redirects=False,
+ headers=non_admin_headers
+ )
+ self.assert_success(rv)
+
+ non_admin_user = db.session.query(UserModel).filter_by(uid=non_admin_uid).first()
+ self.assertIsNotNone(non_admin_user)
+
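+        # Build a Bearer authorization header from the non-admin user's JWT.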
+ non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode())
+
+ non_admin_study = self._make_fake_study(non_admin_uid)
+
+ rv_add_study = self.app.post(
+ '/v1.0/study',
+ content_type="application/json",
+ headers=non_admin_token_headers,
+ data=json.dumps(StudySchema().dump(non_admin_study))
+ )
+ self.assert_success(rv_add_study, 'Non-admin user should be able to add a study')
+
+ new_non_admin_study = json.loads(rv_add_study.get_data(as_text=True))
+ db_non_admin_study = db.session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first()
+ self.assertIsNotNone(db_non_admin_study)
+
+ rv_non_admin_del_study = self.app.delete(
+ '/v1.0/study/%i' % db_non_admin_study.id,
+ follow_redirects=False,
+ headers=non_admin_token_headers
+ )
+ self.assert_failure(rv_non_admin_del_study, 401)
+
+ # Switch production mode back off
+ app.config['PRODUCTION'] = False
+
+ def _make_fake_study(self, uid):
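+        # A minimal study payload: just the fields these tests need StudySchema to serialize.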
+ return {
+ "title": "blah",
+ "last_updated": datetime.now(tz=timezone.utc),
+ "protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
+ "primary_investigator_id": uid,
+ "user_uid": uid,
+ }
diff --git a/tests/test_complete_template_script.py b/tests/test_complete_template_script.py
index dfac20c6..985c2a87 100644
--- a/tests/test_complete_template_script.py
+++ b/tests/test_complete_template_script.py
@@ -17,7 +17,7 @@ class TestCompleteTemplate(unittest.TestCase):
data = {"name": "Dan"}
data_copy = copy.deepcopy(data)
script.rich_text_update(data_copy)
- self.assertEquals(data, data_copy)
+ self.assertEqual(data, data_copy)
def test_rich_text_update_new_line(self):
script = CompleteTemplate()
diff --git a/tests/test_file_service.py b/tests/test_file_service.py
index 705fef95..1dea810c 100644
--- a/tests/test_file_service.py
+++ b/tests/test_file_service.py
@@ -1,8 +1,9 @@
from tests.base_test import BaseTest
+
+from crc import db
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
-
class TestFileService(BaseTest):
"""Largely tested via the test_file_api, and time is tight, but adding new tests here."""
@@ -22,11 +23,11 @@ class TestFileService(BaseTest):
binary_data=b'5678', irb_doc_code=irb_code)
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
- self.assertEquals(1, len(file_models))
+ self.assertEqual(1, len(file_models))
file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
- self.assertEquals(1, len(file_data))
- self.assertEquals(2, file_data[0].version)
+ self.assertEqual(1, len(file_data))
+ self.assertEqual(2, file_data[0].version)
def test_add_file_from_form_increments_version_and_replaces_on_subsequent_add_with_same_name(self):
@@ -46,12 +47,43 @@ class TestFileService(BaseTest):
name="anything.png", content_type="text",
binary_data=b'5678')
+ def test_replace_archive_file_unarchives_the_file_and_updates(self):
+ self.load_example_data()
+ self.create_reference_document()
+ workflow = self.create_workflow('file_upload_form')
+ processor = WorkflowProcessor(workflow)
+ task = processor.next_task()
+ irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ irb_doc_code=irb_code,
+ name="anything.png", content_type="text",
+ binary_data=b'1234')
+
+ # Archive the file
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
self.assertEquals(1, len(file_models))
+ file_model = file_models[0]
+ file_model.archived = True
+ db.session.add(file_model)
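+        # No commit needed here: with the session's default autoflush, the change
+        # is flushed before the next query runs.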
+
+ # Assure that the file no longer comes back.
+ file_models = FileService.get_workflow_files(workflow_id=workflow.id)
+        self.assertEqual(0, len(file_models))
+
+ # Add the file again with different data
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ irb_doc_code=irb_code,
+ name="anything.png", content_type="text",
+ binary_data=b'5678')
+
+ file_models = FileService.get_workflow_files(workflow_id=workflow.id)
+ self.assertEqual(1, len(file_models))
file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
- self.assertEquals(1, len(file_data))
- self.assertEquals(2, file_data[0].version)
+
+ self.assertEqual(1, len(file_data))
+ self.assertEqual(2, file_data[0].version)
+ self.assertEqual(b'5678', file_data[0].data)
def test_add_file_from_form_allows_multiple_files_with_different_names(self):
self.load_example_data()
@@ -70,4 +102,4 @@ class TestFileService(BaseTest):
name="a_different_thing.png", content_type="text",
binary_data=b'5678')
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
- self.assertEquals(2, len(file_models))
+ self.assertEqual(2, len(file_models))
diff --git a/tests/test_files_api.py b/tests/test_files_api.py
index ecce309c..2d14a8b5 100644
--- a/tests/test_files_api.py
+++ b/tests/test_files_api.py
@@ -3,12 +3,14 @@ import json
from tests.base_test import BaseTest
-from crc import session
+from crc import session, db
from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from example_data import ExampleDataLoader
+from crc.services.approval_service import ApprovalService
+from crc.models.approval import ApprovalModel, ApprovalStatus
class TestFilesApi(BaseTest):
@@ -46,6 +48,7 @@ class TestFilesApi(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(2, len(json_data))
+
def test_create_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
@@ -89,6 +92,39 @@ class TestFilesApi(BaseTest):
self.assert_success(rv)
+ def test_archive_file_no_longer_shows_up(self):
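+        # Upload a file through the form endpoint, archive it directly in the
+        # database, and confirm it drops out of the workflow's file list.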
+ self.load_example_data()
+ self.create_reference_document()
+ workflow = self.create_workflow('file_upload_form')
+ processor = WorkflowProcessor(workflow)
+ task = processor.next_task()
+ data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
+ correct_name = task.task_spec.form.fields[0].id
+
+ data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
+ rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_id=%i&form_field_key=%s' %
+ (workflow.study_id, workflow.id, task.id, correct_name), data=data, follow_redirects=True,
+ content_type='multipart/form-data', headers=self.logged_in_headers())
+
+ self.assert_success(rv)
+ rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
+ self.assert_success(rv)
+        self.assertEqual(1, len(json.loads(rv.get_data(as_text=True))))
+
+ file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all()
+        self.assertEqual(1, len(file_model))
+ file_model[0].archived = True
+ db.session.commit()
+
+ rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
+ self.assert_success(rv)
+        self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))
+
def test_set_reference_file(self):
file_name = "irb_document_types.xls"
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.xls")}
@@ -218,6 +254,41 @@ class TestFilesApi(BaseTest):
rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
+ def test_delete_file_after_approval(self):
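+        # Deleting an approved file should archive it rather than remove it, and
+        # a new approval should only pick up the files that are not archived.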
+ self.create_reference_document()
+ workflow = self.create_workflow("empty_workflow")
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ name="anything.png", content_type="text",
+ binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ name="anotother_anything.png", content_type="text",
+ binary_data=b'1234', irb_doc_code="Study_App_Doc")
+
+ ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
+
+ file = session.query(FileModel).\
+ filter(FileModel.workflow_id == workflow.id).\
+ filter(FileModel.name == "anything.png").first()
+ self.assertFalse(file.archived)
+ rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
+ self.assert_success(rv)
+
+ rv = self.app.delete('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
+ self.assert_success(rv)
+
+ session.refresh(file)
+ self.assertTrue(file.archived)
+
+ ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
+
+ approvals = session.query(ApprovalModel)\
+ .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\
+ .filter(ApprovalModel.study_id == workflow.study_id).all()
+
+        self.assertEqual(1, len(approvals))
+        self.assertEqual(1, len(approvals[0].approval_files))
+
def test_change_primary_bpmn(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
diff --git a/tests/test_ldap_service.py b/tests/test_ldap_service.py
index 4be65960..a6c4b364 100644
--- a/tests/test_ldap_service.py
+++ b/tests/test_ldap_service.py
@@ -1,22 +1,19 @@
-import os
+from tests.base_test import BaseTest
-from crc import app
from crc.api.common import ApiError
from crc.services.ldap_service import LdapService
-from tests.base_test import BaseTest
-from ldap3 import Server, Connection, ALL, MOCK_SYNC
class TestLdapService(BaseTest):
def setUp(self):
- self.ldap_service = LdapService()
+ pass
def tearDown(self):
pass
def test_get_single_user(self):
- user_info = self.ldap_service.user_info("lb3dp")
+ user_info = LdapService.user_info("lb3dp")
self.assertIsNotNone(user_info)
self.assertEqual("lb3dp", user_info.uid)
self.assertEqual("Laura Barnes", user_info.display_name)
@@ -30,7 +27,7 @@ class TestLdapService(BaseTest):
def test_find_missing_user(self):
try:
- user_info = self.ldap_service.user_info("nosuch")
+ user_info = LdapService.user_info("nosuch")
self.assertFalse(True, "An API error should be raised.")
except ApiError as ae:
- self.assertEquals("missing_ldap_record", ae.code)
\ No newline at end of file
+ self.assertEqual("missing_ldap_record", ae.code)
\ No newline at end of file
diff --git a/tests/test_lookup_service.py b/tests/test_lookup_service.py
index c0d72ae9..b61e20e2 100644
--- a/tests/test_lookup_service.py
+++ b/tests/test_lookup_service.py
@@ -31,7 +31,7 @@ class TestLookupService(BaseTest):
self.assertEqual(1, len(lookup_records))
lookup_record = lookup_records[0]
lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
- self.assertEquals(28, len(lookup_data))
+ self.assertEqual(28, len(lookup_data))
def test_updates_to_file_cause_lookup_rebuild(self):
spec = BaseTest.load_test_spec('enum_options_with_search')
@@ -43,7 +43,7 @@ class TestLookupService(BaseTest):
self.assertEqual(1, len(lookup_records))
lookup_record = lookup_records[0]
lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
- self.assertEquals(28, len(lookup_data))
+ self.assertEqual(28, len(lookup_data))
# Update the workflow specification file.
file_path = os.path.join(app.root_path, '..', 'tests', 'data',
@@ -59,7 +59,7 @@ class TestLookupService(BaseTest):
lookup_records = session.query(LookupFileModel).all()
lookup_record = lookup_records[0]
lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
- self.assertEquals(4, len(lookup_data))
+ self.assertEqual(4, len(lookup_data))
@@ -70,49 +70,50 @@ class TestLookupService(BaseTest):
processor.do_engine_steps()
results = LookupService.lookup(workflow, "AllTheNames", "", limit=10)
- self.assertEquals(10, len(results), "Blank queries return everything, to the limit")
+ self.assertEqual(10, len(results), "Blank queries return everything, to the limit")
results = LookupService.lookup(workflow, "AllTheNames", "medicines", limit=10)
- self.assertEquals(1, len(results), "words in the middle of label are detected.")
- self.assertEquals("The Medicines Company", results[0].label)
+ self.assertEqual(1, len(results), "words in the middle of label are detected.")
+ self.assertEqual("The Medicines Company", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "UVA", limit=10)
- self.assertEquals(1, len(results), "Beginning of label is found.")
- self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+ self.assertEqual(1, len(results), "Beginning of label is found.")
+ self.assertEqual("UVA - INTERNAL - GM USE ONLY", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "uva", limit=10)
- self.assertEquals(1, len(results), "case does not matter.")
- self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+ self.assertEqual(1, len(results), "case does not matter.")
+ self.assertEqual("UVA - INTERNAL - GM USE ONLY", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "medici", limit=10)
- self.assertEquals(1, len(results), "partial words are picked up.")
- self.assertEquals("The Medicines Company", results[0].label)
+ self.assertEqual(1, len(results), "partial words are picked up.")
+ self.assertEqual("The Medicines Company", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "Genetics Savings", limit=10)
- self.assertEquals(1, len(results), "multiple terms are picked up..")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+ self.assertEqual(1, len(results), "multiple terms are picked up..")
+ self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "Genetics Sav", limit=10)
- self.assertEquals(1, len(results), "prefix queries still work with partial terms")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+ self.assertEqual(1, len(results), "prefix queries still work with partial terms")
+ self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "Gen Sav", limit=10)
- self.assertEquals(1, len(results), "prefix queries still work with ALL the partial terms")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+ self.assertEqual(1, len(results), "prefix queries still work with ALL the partial terms")
+ self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "Inc", limit=10)
- self.assertEquals(7, len(results), "short terms get multiple correct results.")
- self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+ self.assertEqual(7, len(results), "short terms get multiple correct results.")
+ self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
results = LookupService.lookup(workflow, "AllTheNames", "reaction design", limit=10)
- self.assertEquals(5, len(results), "all results come back for two terms.")
- self.assertEquals("Reaction Design", results[0].label, "Exact matches come first.")
+ self.assertEqual(5, len(results), "all results come back for two terms.")
+ self.assertEqual("Reaction Design", results[0].label, "Exact matches come first.")
results = LookupService.lookup(workflow, "AllTheNames", "1 Something", limit=10)
- self.assertEquals("1 Something", results[0].label, "Exact matches are prefered")
+ self.assertEqual("1 Something", results[0].label, "Exact matches are prefered")
results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10)
- self.assertEquals("1 Something", results[0].label, "special characters don't flake out")
+ self.assertEqual("1 Something", results[0].label, "special characters don't flake out")
+
# 1018 10000 Something Industry
@@ -123,6 +124,6 @@ class TestLookupService(BaseTest):
# Fixme: Stop words are taken into account on the query side, and haven't found a fix yet.
#results = WorkflowService.run_lookup_query(lookup_table.id, "in", limit=10)
- #self.assertEquals(7, len(results), "stop words are not removed.")
- #self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+ #self.assertEqual(7, len(results), "stop words are not removed.")
+ #self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
diff --git a/tests/test_mails.py b/tests/test_mails.py
new file mode 100644
index 00000000..15a01583
--- /dev/null
+++ b/tests/test_mails.py
@@ -0,0 +1,55 @@
+from tests.base_test import BaseTest
+
+from crc.services.mails import (
+ send_ramp_up_submission_email,
+ send_ramp_up_approval_request_email,
+ send_ramp_up_approval_request_first_review_email,
+ send_ramp_up_approved_email,
+ send_ramp_up_denied_email,
+ send_ramp_up_denied_email_to_approver
+)
+
+
+class TestMails(BaseTest):
+
+ def setUp(self):
+ self.sender = 'sender@sartography.com'
+ self.recipients = ['recipient@sartography.com']
+ self.primary_investigator = 'Dr. Bartlett'
+ self.approver_1 = 'Max Approver'
+ self.approver_2 = 'Close Reviewer'
+
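+    # Each test below is a smoke test: the helper should build and send its
+    # message without raising; the assertTrue(True) calls just mark that point.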
+ def test_send_ramp_up_submission_email(self):
+ send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1)
+ self.assertTrue(True)
+
+ send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2)
+ self.assertTrue(True)
+
+ def test_send_ramp_up_approval_request_email(self):
+ send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator)
+ self.assertTrue(True)
+
+ def test_send_ramp_up_approval_request_first_review_email(self):
+ send_ramp_up_approval_request_first_review_email(
+ self.sender, self.recipients, self.primary_investigator
+ )
+ self.assertTrue(True)
+
+ def test_send_ramp_up_approved_email(self):
+ send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1)
+ self.assertTrue(True)
+
+ send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2)
+ self.assertTrue(True)
+
+ def test_send_ramp_up_denied_email(self):
+ send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1)
+ self.assertTrue(True)
+
+    def test_send_ramp_up_denied_email_to_approver(self):
+ send_ramp_up_denied_email_to_approver(
+ self.sender, self.recipients, self.primary_investigator, self.approver_2
+ )
+ self.assertTrue(True)
diff --git a/tests/test_request_approval_script.py b/tests/test_request_approval_script.py
index 2f4ab49e..ebfe8436 100644
--- a/tests/test_request_approval_script.py
+++ b/tests/test_request_approval_script.py
@@ -1,6 +1,6 @@
-from crc.services.file_service import FileService
from tests.base_test import BaseTest
+from crc.services.file_service import FileService
from crc.scripts.request_approval import RequestApproval
from crc.services.workflow_processor import WorkflowProcessor
from crc.api.common import ApiError
@@ -24,7 +24,23 @@ class TestRequestApprovalScript(BaseTest):
binary_data=b'1234')
script = RequestApproval()
script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
- self.assertEquals(2, db.session.query(ApprovalModel).count())
+ self.assertEqual(2, db.session.query(ApprovalModel).count())
+
+ def test_do_task_with_blank_second_approver(self):
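+        # An empty second approver should be skipped, so only one approval is created.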
+ self.load_example_data()
+ self.create_reference_document()
+ workflow = self.create_workflow('empty_workflow')
+ processor = WorkflowProcessor(workflow)
+ task = processor.next_task()
+ task.data = {"study": {"approval1": "dhf8r", 'approval2':''}}
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ irb_doc_code="UVACompl_PRCAppr",
+ name="anything.png", content_type="text",
+ binary_data=b'1234')
+ script = RequestApproval()
+ script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
+ self.assertEqual(1, db.session.query(ApprovalModel).count())
+
def test_do_task_with_incorrect_argument(self):
"""This script should raise an error if it can't figure out the approvers."""
@@ -48,5 +64,5 @@ class TestRequestApprovalScript(BaseTest):
script = RequestApproval()
script.do_task_validate_only(task, workflow.study_id, workflow.id, "study.approval1")
- self.assertEquals(0, db.session.query(ApprovalModel).count())
+ self.assertEqual(0, db.session.query(ApprovalModel).count())
diff --git a/tests/test_study_api.py b/tests/test_study_api.py
index 7282ac10..cdae21c5 100644
--- a/tests/test_study_api.py
+++ b/tests/test_study_api.py
@@ -1,5 +1,6 @@
import json
from tests.base_test import BaseTest
+
from datetime import datetime, timezone
from unittest.mock import patch
@@ -8,8 +9,9 @@ from crc.models.protocol_builder import ProtocolBuilderStatus, \
ProtocolBuilderStudySchema
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, StudySchema
-from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecCategoryModel
-from crc.services.protocol_builder import ProtocolBuilderService
+from crc.models.workflow import WorkflowSpecModel, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.workflow_processor import WorkflowProcessor
class TestStudyApi(BaseTest):
@@ -68,6 +70,34 @@ class TestStudyApi(BaseTest):
self.assertEqual(0, workflow["total_tasks"])
self.assertEqual(0, workflow["completed_tasks"])
+ def test_get_study_has_details_about_files(self):
+
+ # Set up the study and attach a file to it.
+ self.load_example_data()
+ self.create_reference_document()
+ workflow = self.create_workflow('file_upload_form')
+ processor = WorkflowProcessor(workflow)
+ task = processor.next_task()
+ irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ name="anything.png", content_type="png",
+ binary_data=b'1234', irb_doc_code=irb_code)
+
+ api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
+ headers=self.logged_in_headers(), content_type="application/json")
+ self.assert_success(api_response)
+ study = StudySchema().loads(api_response.get_data(as_text=True))
+ self.assertEqual(1, len(study.files))
+ self.assertEqual("UVA Compliance/PRC Approval", study.files[0]["category"])
+ self.assertEqual("Cancer Center's PRC Approval Form", study.files[0]["description"])
+ self.assertEqual("UVA Compliance/PRC Approval.png", study.files[0]["download_name"])
+
+ # TODO: WRITE A TEST FOR STUDY FILES
+
+ def test_get_study_has_details_about_approvals(self):
+ # TODO: WRITE A TEST FOR STUDY APPROVALS
+ pass
+
def test_add_study(self):
self.load_example_data()
study = self.add_test_study()
@@ -150,10 +180,10 @@ class TestStudyApi(BaseTest):
db_studies_after = session.query(StudyModel).all()
num_db_studies_after = len(db_studies_after)
self.assertGreater(num_db_studies_after, num_db_studies_before)
- self.assertEquals(num_abandoned, 1)
- self.assertEquals(num_open, 1)
- self.assertEquals(num_active, 1)
- self.assertEquals(num_incomplete, 1)
+ self.assertEqual(num_abandoned, 1)
+ self.assertEqual(num_open, 1)
+ self.assertEqual(num_active, 1)
+ self.assertEqual(num_incomplete, 1)
self.assertEqual(len(json_data), num_db_studies_after)
self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after)
diff --git a/tests/test_study_service.py b/tests/test_study_service.py
index 52babbb8..1c482bcb 100644
--- a/tests/test_study_service.py
+++ b/tests/test_study_service.py
@@ -153,14 +153,16 @@ class TestStudyService(BaseTest):
self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0])
self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0]['file_id'])
- self.assertEquals(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
+ self.assertEqual(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
def test_get_all_studies(self):
user = self.create_user_with_study_and_workflow()
+ study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
+ self.assertIsNotNone(study)
# Add a document to the study with the correct code.
- workflow1 = self.create_workflow('docx')
- workflow2 = self.create_workflow('empty_workflow')
+ workflow1 = self.create_workflow('docx', study=study)
+ workflow2 = self.create_workflow('empty_workflow', study=study)
# Add files to both workflows.
FileService.add_workflow_file(workflow_id=workflow1.id,
@@ -174,8 +176,8 @@ class TestStudyService(BaseTest):
binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )
studies = StudyService().get_all_studies_with_files()
- self.assertEquals(1, len(studies))
- self.assertEquals(3, len(studies[0].files))
+ self.assertEqual(1, len(studies))
+ self.assertEqual(3, len(studies[0].files))
@@ -191,17 +193,17 @@ class TestStudyService(BaseTest):
workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case.
investigators = StudyService().get_investigators(workflow.study_id)
- self.assertEquals(9, len(investigators))
+ self.assertEqual(9, len(investigators))
# dhf8r is in the ldap mock data.
- self.assertEquals("dhf8r", investigators['PI']['user_id'])
- self.assertEquals("Dan Funk", investigators['PI']['display_name']) # Data from ldap
- self.assertEquals("Primary Investigator", investigators['PI']['label']) # Data from xls file.
- self.assertEquals("Always", investigators['PI']['display']) # Data from xls file.
+ self.assertEqual("dhf8r", investigators['PI']['user_id'])
+ self.assertEqual("Dan Funk", investigators['PI']['display_name']) # Data from ldap
+ self.assertEqual("Primary Investigator", investigators['PI']['label']) # Data from xls file.
+ self.assertEqual("Always", investigators['PI']['display']) # Data from xls file.
# asd3v is not in ldap, so an error should be returned.
- self.assertEquals("asd3v", investigators['DC']['user_id'])
- self.assertEquals("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error']) # Data from ldap
+ self.assertEqual("asd3v", investigators['DC']['user_id'])
+ self.assertEqual("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error']) # Data from ldap
# No value is provided for Department Chair
self.assertIsNone(investigators['DEPT_CH']['user_id'])
diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py
index 67a644ef..654b777e 100644
--- a/tests/test_tasks_api.py
+++ b/tests/test_tasks_api.py
@@ -4,86 +4,14 @@ import random
from unittest.mock import patch
from tests.base_test import BaseTest
-
from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
-from crc.models.stats import TaskEventModel
from crc.models.workflow import WorkflowStatus
-from crc.services.protocol_builder import ProtocolBuilderService
-from crc.services.workflow_service import WorkflowService
class TestTasksApi(BaseTest):
- def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
- rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
- (workflow.id, str(soft_reset), str(hard_reset)),
- headers=self.logged_in_headers(),
- content_type="application/json")
- self.assert_success(rv)
- json_data = json.loads(rv.get_data(as_text=True))
- workflow_api = WorkflowApiSchema().load(json_data)
- self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
- return workflow_api
-
- def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
- prev_completed_task_count = workflow_in.completed_tasks
- if isinstance(task_in, dict):
- task_id = task_in["id"]
- else:
- task_id = task_in.id
- rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
- headers=self.logged_in_headers(),
- content_type="application/json",
- data=json.dumps(dict_data))
- if error_code:
- self.assert_failure(rv, error_code=error_code)
- return
-
- self.assert_success(rv)
- json_data = json.loads(rv.get_data(as_text=True))
-
- # Assure stats are updated on the model
- workflow = WorkflowApiSchema().load(json_data)
- # The total number of tasks may change over time, as users move through gateways
- # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
- self.assertIsNotNone(workflow.total_tasks)
- self.assertEquals(prev_completed_task_count + 1, workflow.completed_tasks)
- # Assure a record exists in the Task Events
- task_events = session.query(TaskEventModel) \
- .filter_by(workflow_id=workflow.id) \
- .filter_by(task_id=task_id) \
- .order_by(TaskEventModel.date.desc()).all()
- self.assertGreater(len(task_events), 0)
- event = task_events[0]
- self.assertIsNotNone(event.study_id)
- self.assertEquals("dhf8r", event.user_uid)
- self.assertEquals(workflow.id, event.workflow_id)
- self.assertEquals(workflow.workflow_spec_id, event.workflow_spec_id)
- self.assertEquals(workflow.spec_version, event.spec_version)
- self.assertEquals(WorkflowService.TASK_ACTION_COMPLETE, event.action)
- self.assertEquals(task_in.id, task_id)
- self.assertEquals(task_in.name, event.task_name)
- self.assertEquals(task_in.title, event.task_title)
- self.assertEquals(task_in.type, event.task_type)
- self.assertEquals("COMPLETED", event.task_state)
- # Not sure what vodoo is happening inside of marshmallow to get me in this state.
- if isinstance(task_in.multi_instance_type, MultiInstanceType):
- self.assertEquals(task_in.multi_instance_type.value, event.mi_type)
- else:
- self.assertEquals(task_in.multi_instance_type, event.mi_type)
-
- self.assertEquals(task_in.multi_instance_count, event.mi_count)
- self.assertEquals(task_in.multi_instance_index, event.mi_index)
- self.assertEquals(task_in.process_name, event.process_name)
- self.assertIsNotNone(event.date)
-
-
- workflow = WorkflowApiSchema().load(json_data)
- return workflow
-
-
def test_get_current_user_tasks(self):
self.load_example_data()
workflow = self.create_workflow('random_fact')
@@ -156,14 +84,14 @@ class TestTasksApi(BaseTest):
self.assertIsNotNone(workflow_api.navigation)
nav = workflow_api.navigation
- self.assertEquals(5, len(nav))
- self.assertEquals("Do You Have Bananas", nav[0]['title'])
- self.assertEquals("Bananas?", nav[1]['title'])
- self.assertEquals("FUTURE", nav[1]['state'])
- self.assertEquals("yes", nav[2]['title'])
- self.assertEquals("NOOP", nav[2]['state'])
- self.assertEquals("no", nav[3]['title'])
- self.assertEquals("NOOP", nav[3]['state'])
+ self.assertEqual(5, len(nav))
+ self.assertEqual("Do You Have Bananas", nav[0]['title'])
+ self.assertEqual("Bananas?", nav[1]['title'])
+ self.assertEqual("FUTURE", nav[1]['state'])
+ self.assertEqual("yes", nav[2]['title'])
+ self.assertEqual("NOOP", nav[2]['state'])
+ self.assertEqual("no", nav[3]['title'])
+ self.assertEqual("NOOP", nav[3]['state'])
def test_navigation_with_exclusive_gateway(self):
self.load_example_data()
@@ -173,19 +101,20 @@ class TestTasksApi(BaseTest):
workflow_api = self.get_workflow_api(workflow)
self.assertIsNotNone(workflow_api.navigation)
nav = workflow_api.navigation
- self.assertEquals(7, len(nav))
- self.assertEquals("Task 1", nav[0]['title'])
- self.assertEquals("Which Branch?", nav[1]['title'])
- self.assertEquals("a", nav[2]['title'])
- self.assertEquals("Task 2a", nav[3]['title'])
- self.assertEquals("b", nav[4]['title'])
- self.assertEquals("Task 2b", nav[5]['title'])
- self.assertEquals("Task 3", nav[6]['title'])
+ self.assertEqual(7, len(nav))
+ self.assertEqual("Task 1", nav[0]['title'])
+ self.assertEqual("Which Branch?", nav[1]['title'])
+ self.assertEqual("a", nav[2]['title'])
+ self.assertEqual("Task 2a", nav[3]['title'])
+ self.assertEqual("b", nav[4]['title'])
+ self.assertEqual("Task 2b", nav[5]['title'])
+ self.assertEqual("Task 3", nav[6]['title'])
def test_document_added_to_workflow_shows_up_in_file_list(self):
self.load_example_data()
self.create_reference_document()
workflow = self.create_workflow('docx')
+
# get the first form in the two form workflow.
task = self.get_workflow_api(workflow).next_task
data = {
@@ -204,12 +133,12 @@ class TestTasksApi(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
files = FileModelSchema(many=True).load(json_data, session=session)
self.assertTrue(len(files) == 1)
+
# Assure we can still delete the study even when there is a file attached to a workflow.
rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
self.assert_success(rv)
-
def test_get_documentation_populated_in_end(self):
self.load_example_data()
workflow = self.create_workflow('random_fact')
@@ -287,8 +216,8 @@ class TestTasksApi(BaseTest):
workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
workflow = self.get_workflow_api(workflow)
- self.assertEquals('Task_Manual_One', workflow.next_task.name)
- self.assertEquals('ManualTask', workflow_api.next_task.type)
+ self.assertEqual('Task_Manual_One', workflow.next_task.name)
+ self.assertEqual('ManualTask', workflow_api.next_task.type)
self.assertTrue('Markdown' in workflow_api.next_task.documentation)
self.assertTrue('Dan' in workflow_api.next_task.documentation)
@@ -298,7 +227,7 @@ class TestTasksApi(BaseTest):
# get the first form in the two form workflow.
task = self.get_workflow_api(workflow).next_task
- self.assertEquals("JustAValue", task.properties['JustAKey'])
+ self.assertEqual("JustAValue", task.properties['JustAKey'])
@patch('crc.services.protocol_builder.requests.get')
@@ -318,13 +247,13 @@ class TestTasksApi(BaseTest):
# get the first form in the two form workflow.
workflow = self.get_workflow_api(workflow)
navigation = self.get_workflow_api(workflow).navigation
- self.assertEquals(4, len(navigation)) # Start task, form_task, multi_task, end task
- self.assertEquals("UserTask", workflow.next_task.type)
- self.assertEquals(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type)
- self.assertEquals(9, workflow.next_task.multi_instance_count)
+ self.assertEqual(4, len(navigation)) # Start task, form_task, multi_task, end task
+ self.assertEqual("UserTask", workflow.next_task.type)
+ self.assertEqual(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type)
+ self.assertEqual(9, workflow.next_task.multi_instance_count)
# Assure that the names for each task are properly updated, so they aren't all the same.
- self.assertEquals("Primary Investigator", workflow.next_task.properties['display_name'])
+ self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name'])
def test_lookup_endpoint_for_task_field_enumerations(self):
@@ -366,18 +295,18 @@ class TestTasksApi(BaseTest):
navigation = workflow_api.navigation
task = workflow_api.next_task
- self.assertEquals(2, len(navigation))
- self.assertEquals("UserTask", task.type)
- self.assertEquals("Activity_A", task.name)
- self.assertEquals("My Sub Process", task.process_name)
+ self.assertEqual(2, len(navigation))
+ self.assertEqual("UserTask", task.type)
+ self.assertEqual("Activity_A", task.name)
+ self.assertEqual("My Sub Process", task.process_name)
workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
task = workflow_api.next_task
self.assertIsNotNone(task)
- self.assertEquals("Activity_B", task.name)
- self.assertEquals("Sub Workflow Example", task.process_name)
+ self.assertEqual("Activity_B", task.name)
+ self.assertEqual("Sub Workflow Example", task.process_name)
workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
- self.assertEquals(WorkflowStatus.complete, workflow_api.status)
+ self.assertEqual(WorkflowStatus.complete, workflow_api.status)
def test_update_task_resets_token(self):
self.load_example_data()
@@ -387,7 +316,7 @@ class TestTasksApi(BaseTest):
first_task = self.get_workflow_api(workflow).next_task
self.complete_form(workflow, first_task, {"has_bananas": True})
workflow = self.get_workflow_api(workflow)
- self.assertEquals('Task_Num_Bananas', workflow.next_task.name)
+ self.assertEqual('Task_Num_Bananas', workflow.next_task.name)
# Trying to re-submit the initial task, and answer differently, should result in an error.
self.complete_form(workflow, first_task, {"has_bananas": False}, error_code="invalid_state")
@@ -408,18 +337,18 @@ class TestTasksApi(BaseTest):
workflow = WorkflowApiSchema().load(json_data)
# Assure the Next Task is the one we just reset the token to be on.
- self.assertEquals("Task_Has_Bananas", workflow.next_task.name)
+ self.assertEqual("Task_Has_Bananas", workflow.next_task.name)
# Go ahead and get that workflow one more time, it should still be right.
workflow = self.get_workflow_api(workflow)
# Assure the Next Task is the one we just reset the token to be on.
- self.assertEquals("Task_Has_Bananas", workflow.next_task.name)
+ self.assertEqual("Task_Has_Bananas", workflow.next_task.name)
# The next task should be a different value.
self.complete_form(workflow, workflow.next_task, {"has_bananas": False})
workflow = self.get_workflow_api(workflow)
- self.assertEquals('Task_Why_No_Bananas', workflow.next_task.name)
+ self.assertEqual('Task_Why_No_Bananas', workflow.next_task.name)
@patch('crc.services.protocol_builder.requests.get')
def test_parallel_multi_instance(self, mock_get):
@@ -434,13 +363,13 @@ class TestTasksApi(BaseTest):
workflow = self.create_workflow('multi_instance_parallel')
workflow_api = self.get_workflow_api(workflow)
- self.assertEquals(12, len(workflow_api.navigation))
+ self.assertEqual(12, len(workflow_api.navigation))
ready_items = [nav for nav in workflow_api.navigation if nav['state'] == "READY"]
- self.assertEquals(9, len(ready_items))
+ self.assertEqual(9, len(ready_items))
- self.assertEquals("UserTask", workflow_api.next_task.type)
- self.assertEquals("MutiInstanceTask",workflow_api.next_task.name)
- self.assertEquals("more information", workflow_api.next_task.title)
+ self.assertEqual("UserTask", workflow_api.next_task.type)
+ self.assertEqual("MutiInstanceTask",workflow_api.next_task.name)
+ self.assertEqual("more information", workflow_api.next_task.title)
for i in random.sample(range(9), 9):
task = TaskSchema().load(ready_items[i]['task'])
@@ -448,5 +377,5 @@ class TestTasksApi(BaseTest):
#tasks = self.get_workflow_api(workflow).user_tasks
workflow = self.get_workflow_api(workflow)
- self.assertEquals(WorkflowStatus.complete, workflow.status)
+ self.assertEqual(WorkflowStatus.complete, workflow.status)
diff --git a/tests/test_tools_api.py b/tests/test_tools_api.py
index 48ac65a7..c6f543c1 100644
--- a/tests/test_tools_api.py
+++ b/tests/test_tools_api.py
@@ -28,7 +28,7 @@ class TestStudyApi(BaseTest):
content_type='multipart/form-data')
self.assert_success(rv)
self.assertIsNotNone(rv.data)
- self.assertEquals('application/octet-stream', rv.content_type)
+ self.assertEqual('application/octet-stream', rv.content_type)
def test_list_scripts(self):
rv = self.app.get('/v1.0/list_scripts')
diff --git a/tests/test_update_study_script.py b/tests/test_update_study_script.py
index ba550a19..df59ffc2 100644
--- a/tests/test_update_study_script.py
+++ b/tests/test_update_study_script.py
@@ -19,5 +19,5 @@ class TestUpdateStudyScript(BaseTest):
script = UpdateStudy()
script.do_task(task, workflow.study_id, workflow.id, "title:details.label", "pi:details.value")
- self.assertEquals("My New Title", workflow.study.title)
- self.assertEquals("dhf8r", workflow.study.primary_investigator_id)
+ self.assertEqual("My New Title", workflow.study.title)
+ self.assertEqual("dhf8r", workflow.study.primary_investigator_id)
diff --git a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py
index 36d23755..b3f6c374 100644
--- a/tests/test_workflow_processor.py
+++ b/tests/test_workflow_processor.py
@@ -25,7 +25,7 @@ class TestWorkflowProcessor(BaseTest):
def _populate_form_with_random_data(self, task):
api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
- WorkflowService.populate_form_with_random_data(task, api_task)
+ WorkflowService.populate_form_with_random_data(task, api_task, required_only=False)
def get_processor(self, study_model, spec_model):
workflow_model = StudyService._create_workflow_model(study_model, spec_model)
diff --git a/tests/test_workflow_processor_multi_instance.py b/tests/test_workflow_processor_multi_instance.py
index 21fc3b43..aefb73f1 100644
--- a/tests/test_workflow_processor_multi_instance.py
+++ b/tests/test_workflow_processor_multi_instance.py
@@ -57,13 +57,13 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
task = next_user_tasks[0]
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
- self.assertEquals("dhf8r", task.data["investigator"]["user_id"])
+ self.assertEqual("dhf8r", task.data["investigator"]["user_id"])
self.assertEqual("MutiInstanceTask", task.get_name())
api_task = WorkflowService.spiff_task_to_api_task(task)
- self.assertEquals(MultiInstanceType.sequential, api_task.multi_instance_type)
- self.assertEquals(3, api_task.multi_instance_count)
- self.assertEquals(1, api_task.multi_instance_index)
+ self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type)
+ self.assertEqual(3, api_task.multi_instance_count)
+ self.assertEqual(1, api_task.multi_instance_index)
task.update_data({"investigator":{"email":"asd3v@virginia.edu"}})
processor.complete_task(task)
processor.do_engine_steps()
@@ -72,8 +72,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
api_task = WorkflowService.spiff_task_to_api_task(task)
self.assertEqual("MutiInstanceTask", api_task.name)
task.update_data({"investigator":{"email":"asdf32@virginia.edu"}})
- self.assertEquals(3, api_task.multi_instance_count)
- self.assertEquals(2, api_task.multi_instance_index)
+ self.assertEqual(3, api_task.multi_instance_count)
+ self.assertEqual(2, api_task.multi_instance_index)
processor.complete_task(task)
processor.do_engine_steps()
@@ -81,8 +81,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
api_task = WorkflowService.spiff_task_to_api_task(task)
self.assertEqual("MutiInstanceTask", task.get_name())
task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
- self.assertEquals(3, api_task.multi_instance_count)
- self.assertEquals(3, api_task.multi_instance_index)
+ self.assertEqual(3, api_task.multi_instance_count)
+ self.assertEqual(3, api_task.multi_instance_index)
processor.complete_task(task)
processor.do_engine_steps()
task = processor.bpmn_workflow.last_task
@@ -91,7 +91,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
expected['PI']['email'] = "asd3v@virginia.edu"
expected['SC_I']['email'] = "asdf32@virginia.edu"
expected['DC']['email'] = "dhf8r@virginia.edu"
- self.assertEquals(expected,
+ self.assertEqual(expected,
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.complete, processor.get_status())
@@ -117,10 +117,10 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
task = next_user_tasks[2]
self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
- self.assertEquals("asd3v", task.data["investigator"]["user_id"]) # The last of the tasks
+ self.assertEqual("asd3v", task.data["investigator"]["user_id"]) # The last of the tasks
api_task = WorkflowService.spiff_task_to_api_task(task)
- self.assertEquals(MultiInstanceType.parallel, api_task.multi_instance_type)
+ self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type)
task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
processor.complete_task(task)
processor.do_engine_steps()
@@ -144,7 +144,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
expected['PI']['email'] = "asd3v@virginia.edu"
expected['SC_I']['email'] = "asdf32@virginia.edu"
expected['DC']['email'] = "dhf8r@virginia.edu"
- self.assertEquals(expected,
+ self.assertEqual(expected,
task.data['StudyInfo']['investigators'])
self.assertEqual(WorkflowStatus.complete, processor.get_status())
diff --git a/tests/test_workflow_service.py b/tests/test_workflow_service.py
index 281d1756..9f3ceda1 100644
--- a/tests/test_workflow_service.py
+++ b/tests/test_workflow_service.py
@@ -66,9 +66,9 @@ class TestWorkflowService(BaseTest):
task = processor.next_task()
WorkflowService.process_options(task, task.task_spec.form.fields[0])
options = task.task_spec.form.fields[0].options
- self.assertEquals(28, len(options))
- self.assertEquals('1000', options[0]['id'])
- self.assertEquals("UVA - INTERNAL - GM USE ONLY", options[0]['name'])
+ self.assertEqual(28, len(options))
+ self.assertEqual('1000', options[0]['id'])
+ self.assertEqual("UVA - INTERNAL - GM USE ONLY", options[0]['name'])
def test_random_data_populate_form_on_auto_complete(self):
self.load_example_data()
@@ -77,5 +77,5 @@ class TestWorkflowService(BaseTest):
processor.do_engine_steps()
task = processor.next_task()
task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
- WorkflowService.populate_form_with_random_data(task, task_api)
+ WorkflowService.populate_form_with_random_data(task, task_api, required_only=False)
self.assertTrue(isinstance(task.data["sponsor"], dict))
\ No newline at end of file
diff --git a/tests/test_workflow_spec_validation_api.py b/tests/test_workflow_spec_validation_api.py
index 9e581874..cb9b6b77 100644
--- a/tests/test_workflow_spec_validation_api.py
+++ b/tests/test_workflow_spec_validation_api.py
@@ -3,17 +3,16 @@ from unittest.mock import patch
from tests.base_test import BaseTest
-from crc.services.protocol_builder import ProtocolBuilderService
from crc import session, app
from crc.api.common import ApiErrorSchema
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.models.workflow import WorkflowSpecModel
+from crc.services.workflow_service import WorkflowService
class TestWorkflowSpecValidation(BaseTest):
def validate_workflow(self, workflow_name):
- self.load_example_data()
spec_model = self.load_test_spec(workflow_name)
rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
self.assert_success(rv)
@@ -22,6 +21,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_successful_validation_of_test_workflows(self):
app.config['PB_ENABLED'] = False # Assure this is disabled.
+ self.load_example_data()
self.assertEqual(0, len(self.validate_workflow("parallel_tasks")))
self.assertEqual(0, len(self.validate_workflow("decision_table")))
self.assertEqual(0, len(self.validate_workflow("docx")))
@@ -49,6 +49,13 @@ class TestWorkflowSpecValidation(BaseTest):
self.load_example_data(use_crc_data=True)
app.config['PB_ENABLED'] = True
+ self.validate_all_loaded_workflows()
+
+ def test_successful_validation_of_rrt_workflows(self):
+ self.load_example_data(use_rrt_data=True)
+ self.validate_all_loaded_workflows()
+
+ def validate_all_loaded_workflows(self):
workflows = session.query(WorkflowSpecModel).all()
errors = []
for w in workflows:
@@ -59,28 +66,54 @@ class TestWorkflowSpecValidation(BaseTest):
errors.extend(ApiErrorSchema(many=True).load(json_data))
self.assertEqual(0, len(errors), json.dumps(errors))
+
def test_invalid_expression(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_expression")
- self.assertEqual(1, len(errors))
- self.assertEqual("workflow_execution_exception", errors[0]['code'])
+ self.assertEqual(2, len(errors))
+ self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
- self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
+ self.assertEqual('When populating all fields ... ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
+ self.assertIsNotNone(errors[0]['task_data'])
+ self.assertIn("has_bananas", errors[0]['task_data'])
def test_validation_error(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_spec")
- self.assertEqual(1, len(errors))
+ self.assertEqual(2, len(errors))
self.assertEqual("workflow_validation_error", errors[0]['code'])
self.assertEqual("StartEvent_1", errors[0]['task_id'])
self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])
def test_invalid_script(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_script")
- self.assertEqual(1, len(errors))
- self.assertEqual("workflow_execution_exception", errors[0]['code'])
+ self.assertEqual(2, len(errors))
+ self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertTrue("NoSuchScript" in errors[0]['message'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
self.assertEqual("invalid_script.bpmn", errors[0]['file_name'])
+
+ def test_repeating_sections_correctly_populated(self):
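+        # WorkflowService.test_spec runs the spec with generated form data and
+        # returns the final task data, which should include the repeating 'cats' section.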
+ self.load_example_data()
+ spec_model = self.load_test_spec('repeat_form')
+ final_data = WorkflowService.test_spec(spec_model.id)
+ self.assertIsNotNone(final_data)
+ self.assertIn('cats', final_data)
+
+ def test_required_fields(self):
+ self.load_example_data()
+ spec_model = self.load_test_spec('required_fields')
+ final_data = WorkflowService.test_spec(spec_model.id)
+ self.assertIsNotNone(final_data)
+ self.assertIn('string_required', final_data)
+ self.assertIn('string_not_required', final_data)
+
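+        # With required_only=True, optional fields should be omitted from the generated data.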
+ final_data = WorkflowService.test_spec(spec_model.id, required_only=True)
+ self.assertIsNotNone(final_data)
+ self.assertIn('string_required', final_data)
+ self.assertNotIn('string_not_required', final_data)