commit 2c0cda1792
Pipfile (2 changes)

@@ -38,6 +38,8 @@ xlrd = "*"
 ldap3 = "*"
 gunicorn = "*"
 werkzeug = "*"
+sentry-sdk = {extras = ["flask"],version = "==0.14.4"}
+flask-mail = "*"
 
 [requires]
 python_version = "3.7"
Pipfile.lock

@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "979f996148ee181e3e0af2a3777aa1d00d0fd5d943d49df65963e694b8a88871"
+            "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -32,10 +32,10 @@
         },
         "amqp": {
             "hashes": [
-                "sha256:6e649ca13a7df3faacdc8bbb280aa9a6602d22fd9d545336077e573a1f4ff3b8",
-                "sha256:77f1aef9410698d20eaeac5b73a87817365f457a507d82edf292e12cbb83b08d"
+                "sha256:24dbaff8ce4f30566bb88976b398e8c4e77637171af3af6f1b9650f48890e60b",
+                "sha256:bb68f8d2bced8f93ccfd07d96c689b716b3227720add971be980accfc2952139"
             ],
-            "version": "==2.5.2"
+            "version": "==2.6.0"
         },
         "aniso8601": {
             "hashes": [
@@ -96,19 +96,25 @@
             ],
             "version": "==3.6.3.0"
         },
+        "blinker": {
+            "hashes": [
+                "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
+            ],
+            "version": "==1.4"
+        },
         "celery": {
             "hashes": [
-                "sha256:108a0bf9018a871620936c33a3ee9f6336a89f8ef0a0f567a9001f4aa361415f",
-                "sha256:5b4b37e276033fe47575107a2775469f0b721646a08c96ec2c61531e4fe45f2a"
+                "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647",
+                "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b"
             ],
-            "version": "==4.4.2"
+            "version": "==4.4.5"
         },
         "certifi": {
             "hashes": [
-                "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304",
-                "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"
+                "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1",
+                "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"
             ],
-            "version": "==2020.4.5.1"
+            "version": "==2020.4.5.2"
         },
         "cffi": {
             "hashes": [
@@ -270,13 +276,20 @@
             "index": "pypi",
             "version": "==3.0.8"
         },
-        "flask-marshmallow": {
+        "flask-mail": {
             "hashes": [
-                "sha256:6e6aec171b8e092e0eafaf035ff5b8637bf3a58ab46f568c4c1bab02f2a3c196",
-                "sha256:a1685536e7ab5abdc712bbc1ac1a6b0b50951a368502f7985e7d1c27b3c21e59"
+                "sha256:22e5eb9a940bf407bcf30410ecc3708f3c56cc44b29c34e1726fe85006935f41"
             ],
             "index": "pypi",
-            "version": "==0.12.0"
+            "version": "==0.9.1"
+        },
+        "flask-marshmallow": {
+            "hashes": [
+                "sha256:1da1e6454a56a3e15107b987121729f152325bdef23f3df2f9b52bbd074af38e",
+                "sha256:aefc1f1d96256c430a409f08241bab75ffe97e5d14ac5d1f000764e39bf4873a"
+            ],
+            "index": "pypi",
+            "version": "==0.13.0"
         },
         "flask-migrate": {
             "hashes": [
@@ -338,18 +351,18 @@
         },
         "importlib-metadata": {
             "hashes": [
-                "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f",
-                "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"
+                "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
+                "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
             ],
             "markers": "python_version < '3.8'",
-            "version": "==1.6.0"
+            "version": "==1.6.1"
         },
         "inflection": {
             "hashes": [
-                "sha256:32a5c3341d9583ec319548b9015b7fbdf8c429cbcb575d326c33ae3a0e90d52c",
-                "sha256:9a15d3598f01220e93f2207c432cfede50daff53137ce660fb8be838ef1ca6cc"
+                "sha256:88b101b2668a1d81d6d72d4c2018e53bc6c7fc544c987849da1c7f77545c3bc9",
+                "sha256:f576e85132d34f5bf7df5183c2c6f94cfb32e528f53065345cf71329ba0b8924"
             ],
-            "version": "==0.4.0"
+            "version": "==0.5.0"
         },
         "itsdangerous": {
             "hashes": [
@@ -381,10 +394,10 @@
         },
         "kombu": {
             "hashes": [
-                "sha256:2d1cda774126a044d91a7ff5fa6d09edf99f46924ab332a810760fe6740e9b76",
-                "sha256:598e7e749d6ab54f646b74b2d2df67755dee13894f73ab02a2a9feb8870c7cb2"
+                "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a",
+                "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3"
             ],
-            "version": "==4.6.8"
+            "version": "==4.6.10"
         },
         "ldap3": {
             "hashes": [
@@ -428,10 +441,10 @@
         },
         "mako": {
             "hashes": [
-                "sha256:3139c5d64aa5d175dbafb95027057128b5fbd05a40c53999f3905ceb53366d9d",
-                "sha256:8e8b53c71c7e59f3de716b6832c4e401d903af574f6962edbbbf6ecc2a5fe6c9"
+                "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27",
+                "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9"
             ],
-            "version": "==1.1.2"
+            "version": "==1.1.3"
         },
         "markupsafe": {
             "hashes": [
@@ -473,11 +486,11 @@
         },
         "marshmallow": {
             "hashes": [
-                "sha256:c2673233aa21dde264b84349dc2fd1dce5f30ed724a0a00e75426734de5b84ab",
-                "sha256:f88fe96434b1f0f476d54224d59333eba8ca1a203a2695683c1855675c4049a7"
+                "sha256:35ee2fb188f0bd9fc1cf9ac35e45fd394bd1c153cee430745a465ea435514bd5",
+                "sha256:9aa20f9b71c992b4782dad07c51d92884fd0f7c5cb9d3c737bea17ec1bad765f"
             ],
             "index": "pypi",
-            "version": "==3.6.0"
+            "version": "==3.6.1"
         },
         "marshmallow-enum": {
             "hashes": [
@@ -489,37 +502,37 @@
         },
         "marshmallow-sqlalchemy": {
             "hashes": [
-                "sha256:3247e41e424146340b03a369f2b7c6f0364477ccedc4e2481e84d5f3a8d3c67f",
-                "sha256:dbbe51d28bb28e7ee2782e51310477f7a2c5a111a301f6dd8e264e11ab820427"
+                "sha256:03a555b610bb307689b821b64e2416593ec21a85925c8c436c2cd08ebc6bb85e",
+                "sha256:0ef59c8da8da2e18e808e3880158049e9d72f3031c84cc804b6c533a0eb668a9"
             ],
             "index": "pypi",
-            "version": "==0.23.0"
+            "version": "==0.23.1"
         },
         "numpy": {
             "hashes": [
-                "sha256:00d7b54c025601e28f468953d065b9b121ddca7fff30bed7be082d3656dd798d",
-                "sha256:02ec9582808c4e48be4e93cd629c855e644882faf704bc2bd6bbf58c08a2a897",
-                "sha256:0e6f72f7bb08f2f350ed4408bb7acdc0daba637e73bce9f5ea2b207039f3af88",
-                "sha256:1be2e96314a66f5f1ce7764274327fd4fb9da58584eaff00b5a5221edefee7d6",
-                "sha256:2466fbcf23711ebc5daa61d28ced319a6159b260a18839993d871096d66b93f7",
-                "sha256:2b573fcf6f9863ce746e4ad00ac18a948978bb3781cffa4305134d31801f3e26",
-                "sha256:3f0dae97e1126f529ebb66f3c63514a0f72a177b90d56e4bce8a0b5def34627a",
-                "sha256:50fb72bcbc2cf11e066579cb53c4ca8ac0227abb512b6cbc1faa02d1595a2a5d",
-                "sha256:57aea170fb23b1fd54fa537359d90d383d9bf5937ee54ae8045a723caa5e0961",
-                "sha256:709c2999b6bd36cdaf85cf888d8512da7433529f14a3689d6e37ab5242e7add5",
-                "sha256:7d59f21e43bbfd9a10953a7e26b35b6849d888fc5a331fa84a2d9c37bd9fe2a2",
-                "sha256:904b513ab8fbcbdb062bed1ce2f794ab20208a1b01ce9bd90776c6c7e7257032",
-                "sha256:96dd36f5cdde152fd6977d1bbc0f0561bccffecfde63cd397c8e6033eb66baba",
-                "sha256:9933b81fecbe935e6a7dc89cbd2b99fea1bf362f2790daf9422a7bb1dc3c3085",
-                "sha256:bbcc85aaf4cd84ba057decaead058f43191cc0e30d6bc5d44fe336dc3d3f4509",
-                "sha256:dccd380d8e025c867ddcb2f84b439722cf1f23f3a319381eac45fd077dee7170",
-                "sha256:e22cd0f72fc931d6abc69dc7764484ee20c6a60b0d0fee9ce0426029b1c1bdae",
-                "sha256:ed722aefb0ebffd10b32e67f48e8ac4c5c4cf5d3a785024fdf0e9eb17529cd9d",
-                "sha256:efb7ac5572c9a57159cf92c508aad9f856f1cb8e8302d7fdb99061dbe52d712c",
-                "sha256:efdba339fffb0e80fcc19524e4fdbda2e2b5772ea46720c44eaac28096d60720",
-                "sha256:f22273dd6a403ed870207b853a856ff6327d5cbce7a835dfa0645b3fc00273ec"
+                "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233",
+                "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b",
+                "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7",
+                "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f",
+                "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5",
+                "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb",
+                "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583",
+                "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1",
+                "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a",
+                "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271",
+                "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824",
+                "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3",
+                "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc",
+                "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161",
+                "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f",
+                "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f",
+                "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf",
+                "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b",
+                "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0",
+                "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675",
+                "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"
             ],
-            "version": "==1.18.4"
+            "version": "==1.18.5"
         },
         "openapi-spec-validator": {
             "hashes": [
@@ -704,6 +717,17 @@
             "index": "pypi",
             "version": "==2.23.0"
         },
+        "sentry-sdk": {
+            "extras": [
+                "flask"
+            ],
+            "hashes": [
+                "sha256:0e5e947d0f7a969314aa23669a94a9712be5a688ff069ff7b9fc36c66adc160c",
+                "sha256:799a8bf76b012e3030a881be00e97bc0b922ce35dde699c6537122b751d80e2c"
+            ],
+            "index": "pypi",
+            "version": "==0.14.4"
+        },
         "six": {
             "hashes": [
                 "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
@@ -727,11 +751,11 @@
         },
         "sphinx": {
             "hashes": [
-                "sha256:779a519adbd3a70fc7c468af08c5e74829868b0a5b34587b33340e010291856c",
-                "sha256:ea64df287958ee5aac46be7ac2b7277305b0381d213728c3a49d8bb9b8415807"
+                "sha256:1c445320a3310baa5ccb8d957267ef4a0fc930dc1234db5098b3d7af14fbb242",
+                "sha256:7d3d5087e39ab5a031b75588e9859f011de70e213cd0080ccbc28079fb0786d1"
             ],
             "index": "pypi",
-            "version": "==3.0.4"
+            "version": "==3.1.0"
         },
         "sphinxcontrib-applehelp": {
             "hashes": [
@@ -778,7 +802,7 @@
         "spiffworkflow": {
             "editable": true,
             "git": "https://github.com/sartography/SpiffWorkflow.git",
-            "ref": "c8d87826d496af825a184bdc3f0a751e603cfe44"
+            "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0"
         },
         "sqlalchemy": {
             "hashes": [
@@ -838,10 +862,10 @@
         },
         "waitress": {
             "hashes": [
-                "sha256:045b3efc3d97c93362173ab1dfc159b52cfa22b46c3334ffc805dbdbf0e4309e",
-                "sha256:77ff3f3226931a1d7d8624c5371de07c8e90c7e5d80c5cc660d72659aaf23f38"
+                "sha256:1bb436508a7487ac6cb097ae7a7fe5413aefca610550baf58f0940e51ecfb261",
+                "sha256:3d633e78149eb83b60a07dfabb35579c29aac2d24bb803c18b26fb2ab1a584db"
            ],
-            "version": "==1.4.3"
+            "version": "==1.4.4"
         },
         "webob": {
             "hashes": [
@@ -876,11 +900,11 @@
         },
         "xlsxwriter": {
             "hashes": [
-                "sha256:488e1988ab16ff3a9cd58c7656d0a58f8abe46ee58b98eecea78c022db28656b",
-                "sha256:97ab487b81534415c5313154203f3e8a637d792b1e6a8201e8f7f71da0203c2a"
+                "sha256:828b3285fc95105f5b1946a6a015b31cf388bd5378fdc6604e4d1b7839df2e77",
+                "sha256:82a3b0e73e3913483da23791d1a25e4d2dbb3837d1be4129473526b9a270a5cc"
             ],
             "index": "pypi",
-            "version": "==1.2.8"
+            "version": "==1.2.9"
         },
         "zipp": {
             "hashes": [
@@ -900,11 +924,11 @@
         },
         "importlib-metadata": {
             "hashes": [
-                "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f",
-                "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"
+                "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
+                "sha256:15ec6c0fd909e893e3a08b3a7c76ecb149122fb14b7efe1199ddd4c7c57ea958"
             ],
             "markers": "python_version < '3.8'",
-            "version": "==1.6.0"
+            "version": "==1.6.1"
         },
         "more-itertools": {
             "hashes": [
@@ -951,11 +975,11 @@
         },
         "pytest": {
             "hashes": [
-                "sha256:95c710d0a72d91c13fae35dce195633c929c3792f54125919847fdcdf7caa0d3",
-                "sha256:eb2b5e935f6a019317e455b6da83dd8650ac9ffd2ee73a7b657a30873d67a698"
+                "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1",
+                "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"
             ],
             "index": "pypi",
-            "version": "==5.4.2"
+            "version": "==5.4.3"
         },
         "six": {
             "hashes": [
@@ -966,10 +990,10 @@
         },
         "wcwidth": {
             "hashes": [
-                "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1",
-                "sha256:ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1"
+                "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f",
+                "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"
             ],
-            "version": "==0.1.9"
+            "version": "==0.2.4"
         },
         "zipp": {
             "hashes": [
config/default.py

@@ -9,9 +9,13 @@ JSON_SORT_KEYS = False  # CRITICAL. Do not sort the data when returning values
 NAME = "CR Connect Workflow"
 FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default="5000")
 CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default="localhost:4200, localhost:5002"))
-DEVELOPMENT = environ.get('DEVELOPMENT', default="true") == "true"
 TESTING = environ.get('TESTING', default="false") == "true"
-PRODUCTION = (environ.get('PRODUCTION', default="false") == "true") or (not DEVELOPMENT and not TESTING)
+PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
+TEST_UID = environ.get('TEST_UID', default="dhf8r")
+ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah13us,cl3wf"))
+
+# Sentry flag
+ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"
 
 # Add trailing slash to base path
 APPLICATION_ROOT = re.sub(r'//', '/', '/%s/' % environ.get('APPLICATION_ROOT', default="/").strip('/'))
@@ -25,7 +29,7 @@ SQLALCHEMY_DATABASE_URI = environ.get(
     'SQLALCHEMY_DATABASE_URI',
     default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
 )
-TOKEN_AUTH_TTL_HOURS = int(environ.get('TOKEN_AUTH_TTL_HOURS', default=4))
+TOKEN_AUTH_TTL_HOURS = float(environ.get('TOKEN_AUTH_TTL_HOURS', default=24))
 TOKEN_AUTH_SECRET_KEY = environ.get('TOKEN_AUTH_SECRET_KEY', default="Shhhh!!! This is secret! And better darn well not show up in prod.")
 FRONTEND_AUTH_CALLBACK = environ.get('FRONTEND_AUTH_CALLBACK', default="http://localhost:4200/session")
 SWAGGER_AUTH_KEY = environ.get('SWAGGER_AUTH_KEY', default="SWAGGER")
@@ -39,6 +43,14 @@ PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL +
 PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + "study?studyid=%i")
 
 LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No trailing slash or http://
-LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=3))
+LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))
 
+
+# Email configuration
+FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com']
+MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
+MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
+MAIL_PORT = environ.get('MAIL_PORT', default=2525)
+MAIL_USE_SSL = environ.get('MAIL_USE_SSL', default=False)
+MAIL_USE_TLS = environ.get('MAIL_USE_TLS', default=True)
+MAIL_USERNAME = environ.get('MAIL_USERNAME', default='')
+MAIL_PASSWORD = environ.get('MAIL_PASSWORD', default='')
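Note on the MAIL_* keys above: Flask-Mail reads them straight off app.config, so nothing beyond constructing Mail(app) (done in crc/__init__.py below) is needed. A minimal sketch of how a message would be sent with these settings; the sender and recipient addresses here are placeholders, not values from the commit:

    # Sketch: sending mail via Flask-Mail, which picks up MAIL_SERVER,
    # MAIL_PORT, MAIL_USE_TLS, etc. from app.config.
    from flask import Flask
    from flask_mail import Mail, Message

    app = Flask(__name__)
    app.config.update(
        MAIL_SERVER='smtp.mailtrap.io',  # same defaults as the config above
        MAIL_PORT=2525,
        MAIL_USE_TLS=True,
        MAIL_USERNAME='',
        MAIL_PASSWORD='',
    )
    mail = Mail(app)

    with app.app_context():
        msg = Message(subject="Test",
                      sender="crc@example.edu",            # placeholder sender
                      recipients=["someone@example.edu"])  # placeholder recipient
        msg.body = "Hello from CR Connect."
        mail.send(msg)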
config/testing.py

@@ -4,7 +4,6 @@ from os import environ
 basedir = os.path.abspath(os.path.dirname(__file__))
 
 NAME = "CR Connect Workflow"
-DEVELOPMENT = True
 TESTING = True
 TOKEN_AUTH_SECRET_KEY = "Shhhh!!! This is secret! And better darn well not show up in prod."
 PB_ENABLED = False
@@ -23,8 +22,8 @@ SQLALCHEMY_DATABASE_URI = environ.get(
     'SQLALCHEMY_DATABASE_URI',
     default="postgresql://%s:%s@%s:%s/%s" % (DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME)
 )
+ADMIN_UIDS = ['dhf8r']
 
 print('### USING TESTING CONFIG: ###')
 print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
-print('DEVELOPMENT = ', DEVELOPMENT)
 print('TESTING = ', TESTING)
config/travis-testing.py

@@ -2,7 +2,6 @@ import os
 basedir = os.path.abspath(os.path.dirname(__file__))
 
 NAME = "CR Connect Workflow"
-DEVELOPMENT = True
 TESTING = True
 SQLALCHEMY_DATABASE_URI = "postgresql://postgres:@localhost:5432/crc_test"
 TOKEN_AUTH_TTL_HOURS = 2
@@ -12,6 +11,5 @@ PB_ENABLED = False
 
 print('+++ USING TRAVIS TESTING CONFIG: +++')
 print('SQLALCHEMY_DATABASE_URI = ', SQLALCHEMY_DATABASE_URI)
-print('DEVELOPMENT = ', DEVELOPMENT)
 print('TESTING = ', TESTING)
 print('FRONTEND_AUTH_CALLBACK = ', FRONTEND_AUTH_CALLBACK)
crc/__init__.py

@@ -1,11 +1,15 @@
 import logging
 import os
+import sentry_sdk
 
 import connexion
+from jinja2 import Environment, FileSystemLoader
 from flask_cors import CORS
 from flask_marshmallow import Marshmallow
+from flask_mail import Mail
 from flask_migrate import Migrate
 from flask_sqlalchemy import SQLAlchemy
+from sentry_sdk.integrations.flask import FlaskIntegration
 
 logging.basicConfig(level=logging.INFO)
 
@@ -40,16 +44,29 @@ connexion_app.add_api('api.yml', base_path='/v1.0')
 origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
 cors = CORS(connexion_app.app, origins=origins_re)
 
+if app.config['ENABLE_SENTRY']:
+    sentry_sdk.init(
+        dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915",
+        integrations=[FlaskIntegration()]
+    )
+
+# Jinja environment definition, used to render mail templates
+template_dir = os.getcwd() + '/crc/static/templates/mails'
+env = Environment(loader=FileSystemLoader(template_dir))
+# Mail settings
+mail = Mail(app)
+
 print('=== USING THESE CONFIG SETTINGS: ===')
-print('DB_HOST = ', )
-print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
-print('DEVELOPMENT = ', app.config['DEVELOPMENT'])
-print('TESTING = ', app.config['TESTING'])
-print('PRODUCTION = ', app.config['PRODUCTION'])
-print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
-print('LDAP_URL = ', app.config['LDAP_URL'])
 print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
+print('CORS_ALLOW_ORIGINS = ', app.config['CORS_ALLOW_ORIGINS'])
+print('DB_HOST = ', app.config['DB_HOST'])
+print('LDAP_URL = ', app.config['LDAP_URL'])
+print('PB_BASE_URL = ', app.config['PB_BASE_URL'])
 print('PB_ENABLED = ', app.config['PB_ENABLED'])
+print('PRODUCTION = ', app.config['PRODUCTION'])
+print('TESTING = ', app.config['TESTING'])
+print('TEST_UID = ', app.config['TEST_UID'])
+print('ADMIN_UIDS = ', app.config['ADMIN_UIDS'])
 
 @app.cli.command()
 def load_example_data():
@@ -65,3 +82,9 @@ def load_example_rrt_data():
     from example_data import ExampleDataLoader
     ExampleDataLoader.clean_db()
     ExampleDataLoader().load_rrt()
+
+@app.cli.command()
+def clear_db():
+    """Load example data into the database."""
+    from example_data import ExampleDataLoader
+    ExampleDataLoader.clean_db()
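A quick sketch of how the new Jinja environment and Mail instance plug together when sending a templated message. This is assumed usage, not code from the commit; the template name and context variables are hypothetical:

    # Sketch: render a mail template from crc/static/templates/mails and send
    # it through the Flask-Mail instance created above.
    from crc import app, env, mail
    from flask_mail import Message

    def send_templated_mail(recipients, context):
        template = env.get_template('example.html')  # hypothetical template name
        body = template.render(**context)            # fill in template variables
        msg = Message(subject="CR Connect notification",
                      recipients=recipients, html=body)
        with app.app_context():
            mail.send(msg)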
crc/api.yml (202 changes)

@@ -9,54 +9,18 @@ servers:
 security:
   - jwt: ['secret']
 paths:
-  /sso_backdoor:
+  /login:
     get:
-      operationId: crc.api.user.backdoor
-      summary: A backdoor that allows someone to log in as a specific user, if they
-        are in a staging environment.
+      operationId: crc.api.user.login
+      summary: In production, logs the user in via SSO. If not in production, logs in as a specific user for testing.
       security: [] # Disable security for this endpoint only.
       parameters:
         - name: uid
-          in: query
-          required: true
-          schema:
-            type: string
-        - name: email_address
           in: query
           required: false
           schema:
             type: string
-        - name: display_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: affiliation
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: eppn
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: first_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: last_name
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: title
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: redirect
+        - name: redirect_url
           in: query
           required: false
           schema:
@@ -150,6 +114,8 @@ paths:
               $ref: "#/components/schemas/Study"
     delete:
       operationId: crc.api.study.delete_study
+      security:
+        - auth_admin: ['secret']
       summary: Removes the given study completely.
       tags:
         - Studies
@@ -173,6 +139,30 @@ paths:
           application/json:
             schema:
               $ref: "#/components/schemas/Study"
+  /study/{study_id}/approvals:
+    parameters:
+      - name: study_id
+        in: path
+        required: true
+        description: The id of the study for which workflows should be returned.
+        schema:
+          type: integer
+          format: int32
+    get:
+      operationId: crc.api.approval.get_approvals_for_study
+      summary: Returns approvals for a single study
+      tags:
+        - Studies
+        - Approvals
+      responses:
+        '200':
+          description: An array of approvals
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/Approval"
   /workflow-specification:
     get:
       operationId: crc.api.workflow.all_specifications
@@ -227,6 +217,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpec"
     put:
       operationId: crc.api.workflow.update_workflow_specification
+      security:
+        - auth_admin: ['secret']
       summary: Modifies an existing workflow specification with the given parameters.
       tags:
         - Workflow Specifications
@@ -244,6 +236,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpec"
     delete:
       operationId: crc.api.workflow.delete_workflow_specification
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow specification
       tags:
         - Workflow Specifications
@@ -289,6 +283,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     post:
       operationId: crc.api.workflow.add_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Creates a new workflow spec category with the given parameters.
       tags:
         - Workflow Specification Category
@@ -326,6 +322,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     put:
       operationId: crc.api.workflow.update_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Modifies an existing workflow spec category with the given parameters.
       tags:
         - Workflow Specification Category
@@ -343,6 +341,8 @@ paths:
               $ref: "#/components/schemas/WorkflowSpecCategory"
     delete:
      operationId: crc.api.workflow.delete_workflow_spec_category
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow spec category
       tags:
         - Workflow Specification Category
@@ -444,7 +444,7 @@ paths:
               $ref: "#/components/schemas/File"
     delete:
       operationId: crc.api.file.delete_file
-      summary: Removes an existing file
+      summary: Removes an existing file. In the event the file can not be deleted, it is marked as "archived" in the database and is no longer returned unless specifically requested by id.
       tags:
         - Files
       responses:
@@ -542,6 +542,8 @@ paths:
               example: '<?xml version="1.0" encoding="UTF-8"?><bpmn:definitions></bpmn:definitions>'
     put:
       operationId: crc.api.file.set_reference_file
+      security:
+        - auth_admin: ['secret']
       summary: Update the contents of a named reference file.
       tags:
         - Files
@@ -600,6 +602,8 @@ paths:
               $ref: "#/components/schemas/Workflow"
     delete:
       operationId: crc.api.workflow.delete_workflow
+      security:
+        - auth_admin: ['secret']
       summary: Removes an existing workflow
       tags:
         - Workflows and Tasks
@@ -738,6 +742,26 @@ paths:
             text/plain:
               schema:
                 type: string
+  /send_email:
+    parameters:
+      - name: address
+        in: query
+        required: true
+        description: The address to send a test email to.
+        schema:
+          type: string
+    get:
+      operationId: crc.api.tools.send_email
+      summary: Sends an email so we can see if things work or not.
+      tags:
+        - Configurator Tools
+      responses:
+        '201':
+          description: Returns any error messages that might come back from sending the email.
+          content:
+            text/plain:
+              schema:
+                type: string
   /render_docx:
     put:
       operationId: crc.api.tools.render_docx
@@ -782,12 +806,62 @@ paths:
                 type: array
                 items:
                   $ref: "#/components/schemas/Script"
-  /approval:
+  /approval-counts:
     parameters:
-      - name: approver_uid
+      - name: as_user
         in: query
         required: false
-        description: Restrict results to a given approver uid, maybe we restrict the use of this at somepoint.
+        description: If provided, returns the approval counts for that user.
+        schema:
+          type: string
+    get:
+      operationId: crc.api.approval.get_approval_counts
+      summary: Provides counts for approvals by status for the given user, or all users if no user is provided
+      tags:
+        - Approvals
+      responses:
+        '200':
+          description: An dictionary of Approval Statuses and the counts for each
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/ApprovalCounts"
+  /all_approvals:
+    parameters:
+      - name: status
+        in: query
+        required: false
+        description: If set to true, returns all the approvals with any status. Defaults to false, leaving out canceled approvals.
+        schema:
+          type: boolean
+    get:
+      operationId: crc.api.approval.get_all_approvals
+      summary: Provides a list of all workflows approvals
+      tags:
+        - Approvals
+      responses:
+        '200':
+          description: An array of approvals
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: "#/components/schemas/Approval"
+  /approval:
+    parameters:
+      - name: status
+        in: query
+        required: false
+        description: If provided, returns just approvals for the given status.
+        schema:
+          type: string
+      - name: as_user
+        in: query
+        required: false
+        description: If provided, returns the approval results as they would appear for that user.
         schema:
           type: string
     get:
@@ -830,6 +904,19 @@ paths:
             application/json:
               schema:
                 $ref: "#/components/schemas/Approval"
+  /approval/csv:
+    get:
+      operationId: crc.api.approval.get_csv
+      summary: Provides a list of all users for all approved studies
+      tags:
+        - Approvals
+      responses:
+        '200':
+          description: An array of approvals
+          content:
+            application/json:
+              schema:
+                type: object
 components:
   securitySchemes:
     jwt:
@@ -837,6 +924,11 @@ components:
       scheme: bearer
       bearerFormat: JWT
       x-bearerInfoFunc: crc.api.user.verify_token
+    auth_admin:
+      type: http
+      scheme: bearer
+      bearerFormat: JWT
+      x-bearerInfoFunc: crc.api.user.verify_token_admin
   schemas:
     User:
       properties:
@@ -1243,4 +1335,26 @@ components:
         type: number
         format: integer
         example: 5
+    ApprovalCounts:
+      properties:
+        PENDING:
+          type: number
+          format: integer
+          example: 5
+        APPROVED:
+          type: number
+          format: integer
+          example: 5
+        DECLINED:
+          type: number
+          format: integer
+          example: 5
+        CANCELED:
+          type: number
+          format: integer
+          example: 5
+        AWAITING:
+          type: number
+          format: integer
+          example: 5
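To make the new approval endpoints concrete, here is a sketch of how a client might call them. The host, port, study id, and token are placeholders, not values from the commit:

    # Sketch (hypothetical client): exercising the new endpoints defined above.
    import requests

    BASE = "http://localhost:5000/v1.0"          # placeholder host/port
    HEADERS = {"Authorization": "Bearer <JWT>"}  # placeholder token

    # Counts by status for the current user (or another user via as_user)
    counts = requests.get(f"{BASE}/approval-counts", headers=HEADERS).json()

    # All approvals, including canceled ones when status=true
    all_approvals = requests.get(f"{BASE}/all_approvals",
                                 params={"status": "true"}, headers=HEADERS).json()

    # Approvals for a single study (study id 42 is made up)
    study_approvals = requests.get(f"{BASE}/study/42/approvals", headers=HEADERS).json()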
crc/api/approval.py

@@ -1,19 +1,161 @@
-from crc import app, db, session
-from crc.api.common import ApiError, ApiErrorSchema
-from crc.models.approval import Approval, ApprovalModel, ApprovalSchema
+import json
+import pickle
+from base64 import b64decode
+from datetime import datetime
+
+from flask import g
+
+from crc import db, session
+from crc.api.common import ApiError
+from crc.models.approval import Approval, ApprovalModel, ApprovalSchema, ApprovalStatus
+from crc.models.workflow import WorkflowModel
 from crc.services.approval_service import ApprovalService
+from crc.services.ldap_service import LdapService
 
 
-def get_approvals(approver_uid = None):
-    if not approver_uid:
-        db_approvals = ApprovalService.get_all_approvals()
-    else:
-        db_approvals = ApprovalService.get_approvals_per_user(approver_uid)
+# Returns counts of approvals in each status group assigned to the given user.
+# The goal is to return results as quickly as possible.
+def get_approval_counts(as_user=None):
+    uid = as_user or g.user.uid
+
+    db_user_approvals = db.session.query(ApprovalModel)\
+        .filter_by(approver_uid=uid)\
+        .filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
+        .all()
+
+    study_ids = [a.study_id for a in db_user_approvals]
+
+    db_other_approvals = db.session.query(ApprovalModel)\
+        .filter(ApprovalModel.study_id.in_(study_ids))\
+        .filter(ApprovalModel.approver_uid != uid)\
+        .filter(ApprovalModel.status != ApprovalStatus.CANCELED.name)\
+        .all()
+
+    # Make a dict of the other approvals where the key is the study id and the value is the approval
+    # TODO: This won't work if there are more than 2 approvals with the same study_id
+    other_approvals = {}
+    for approval in db_other_approvals:
+        other_approvals[approval.study_id] = approval
+
+    counts = {}
+    for name, value in ApprovalStatus.__members__.items():
+        counts[name] = 0
+
+    for approval in db_user_approvals:
+        # Check if another approval has the same study id
+        if approval.study_id in other_approvals:
+            other_approval = other_approvals[approval.study_id]
+
+            # Other approval takes precedence over this one
+            if other_approval.id < approval.id:
+                if other_approval.status == ApprovalStatus.PENDING.name:
+                    counts[ApprovalStatus.AWAITING.name] += 1
+                elif other_approval.status == ApprovalStatus.DECLINED.name:
+                    counts[ApprovalStatus.DECLINED.name] += 1
+                elif other_approval.status == ApprovalStatus.CANCELED.name:
+                    counts[ApprovalStatus.CANCELED.name] += 1
+                elif other_approval.status == ApprovalStatus.APPROVED.name:
+                    counts[approval.status] += 1
+            else:
+                counts[approval.status] += 1
+        else:
+            counts[approval.status] += 1
+
+    return counts
+
+
+def get_all_approvals(status=None):
+    approvals = ApprovalService.get_all_approvals(include_cancelled=status is True)
+    results = ApprovalSchema(many=True).dump(approvals)
+    return results
+
+
+def get_approvals(status=None, as_user=None):
+    #status = ApprovalStatus.PENDING.value
+    user = g.user.uid
+    if as_user:
+        user = as_user
+    approvals = ApprovalService.get_approvals_per_user(user, status,
+                                                       include_cancelled=False)
+    results = ApprovalSchema(many=True).dump(approvals)
+    return results
+
+
+def get_approvals_for_study(study_id=None):
+    db_approvals = ApprovalService.get_approvals_for_study(study_id)
     approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
     results = ApprovalSchema(many=True).dump(approvals)
     return results
+
+
+# ----- Begin descent into madness ---- #
+def get_csv():
+    """A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a
+    man to do just about anything"""
+    approvals = ApprovalService.get_all_approvals(include_cancelled=False)
+    output = []
+    errors = []
+    for approval in approvals:
+        try:
+            if approval.status != ApprovalStatus.APPROVED.value:
+                continue
+            for related_approval in approval.related_approvals:
+                if related_approval.status != ApprovalStatus.APPROVED.value:
+                    continue
+            workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first()
+            data = json.loads(workflow.bpmn_workflow_json)
+            last_task = find_task(data['last_task']['__uuid__'], data['task_tree'])
+            personnel = extract_value(last_task, 'personnel')
+            training_val = extract_value(last_task, 'RequiredTraining')
+            pi_supervisor = extract_value(last_task, 'PISupervisor')['value']
+            review_complete = 'AllRequiredTraining' in training_val
+            pi_uid = workflow.study.primary_investigator_id
+            pi_details = LdapService.user_info(pi_uid)
+            details = []
+            details.append(pi_details)
+            for person in personnel:
+                uid = person['PersonnelComputingID']['value']
+                details.append(LdapService.user_info(uid))
+
+            for person in details:
+                record = {
+                    "study_id": approval.study_id,
+                    "pi_uid": pi_details.uid,
+                    "pi": pi_details.display_name,
+                    "name": person.display_name,
+                    "uid": person.uid,
+                    "email": person.email_address,
+                    "supervisor": "",
+                    "review_complete": review_complete,
+                }
+                # We only know the PI's supervisor.
+                if person.uid == pi_details.uid:
+                    record["supervisor"] = pi_supervisor
+
+                output.append(record)
+
+        except Exception as e:
+            errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))
+    return {"results": output, "errors": errors }
+
+
+def extract_value(task, key):
+    if key in task['data']:
+        return pickle.loads(b64decode(task['data'][key]['__bytes__']))
+    else:
+        return ""
+
+
+def find_task(uuid, task):
+    if task['id']['__uuid__'] == uuid:
+        return task
+    for child in task['children']:
+        task = find_task(uuid, child)
+        if task:
+            return task
+# ----- come back to the world of the living ---- #
 
 
 def update_approval(approval_id, body):
     if approval_id is None:
         raise ApiError('unknown_approval', 'Please provide a valid Approval ID.')
@@ -22,9 +164,18 @@ def update_approval(approval_id, body):
     if approval_model is None:
         raise ApiError('unknown_approval', 'The approval "' + str(approval_id) + '" is not recognized.')
 
-    approval: Approval = ApprovalSchema().load(body)
-    approval.update_model(approval_model)
+    if approval_model.approver_uid != g.user.uid:
+        raise ApiError("not_your_approval", "You may not modify this approval. It belongs to another user.")
+
+    approval_model.status = body['status']
+    approval_model.message = body['message']
+    approval_model.date_approved = datetime.now()
+    session.add(approval_model)
     session.commit()
 
-    result = ApprovalSchema().dump(approval)
+    # Called only to send emails
+    approver = body['approver']['uid']
+    ApprovalService.update_approval(approval_id, approver)
+
+    result = ApprovalSchema().dump(approval_model)
     return result
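The precedence rule in get_approval_counts is easier to see with a tiny worked example. This is an illustration only, with made-up ids and no database; it mirrors the branch logic above:

    # Illustration (made-up data): how a user's approval gets bucketed when an
    # earlier approval exists on the same study.
    from enum import Enum

    class ApprovalStatus(Enum):  # mirrors the status names used above
        PENDING = 1
        APPROVED = 2
        DECLINED = 3
        CANCELED = 4
        AWAITING = 5

    def bucket(my_id, my_status, other_id, other_status):
        """Return the status name my approval is counted under."""
        if other_id < my_id:  # the other approval takes precedence
            if other_status == ApprovalStatus.PENDING:
                return ApprovalStatus.AWAITING.name
            if other_status in (ApprovalStatus.DECLINED, ApprovalStatus.CANCELED):
                return other_status.name
        return my_status.name  # otherwise my own status drives the count

    # My PENDING approval (id=7) waits on an earlier PENDING approval (id=3):
    print(bucket(7, ApprovalStatus.PENDING, 3, ApprovalStatus.PENDING))  # AWAITING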
|
@ -1,9 +1,12 @@
|
||||||
|
from SpiffWorkflow import WorkflowException
|
||||||
|
from SpiffWorkflow.exceptions import WorkflowTaskExecException
|
||||||
|
|
||||||
from crc import ma, app
|
from crc import ma, app
|
||||||
|
|
||||||
|
|
||||||
class ApiError(Exception):
|
class ApiError(Exception):
|
||||||
def __init__(self, code, message, status_code=400,
|
def __init__(self, code, message, status_code=400,
|
||||||
file_name="", task_id="", task_name="", tag=""):
|
file_name="", task_id="", task_name="", tag="", task_data = {}):
|
||||||
self.status_code = status_code
|
self.status_code = status_code
|
||||||
self.code = code # a short consistent string describing the error.
|
self.code = code # a short consistent string describing the error.
|
||||||
self.message = message # A detailed message that provides more information.
|
self.message = message # A detailed message that provides more information.
|
||||||
|
@ -11,6 +14,7 @@ class ApiError(Exception):
|
||||||
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
|
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
|
||||||
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
|
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
|
||||||
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
|
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
|
||||||
|
self.task_data = task_data or "" # OPTIONAL: A snapshot of data connected to the task when error ocurred.
|
||||||
Exception.__init__(self, self.message)
|
Exception.__init__(self, self.message)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@ -20,6 +24,7 @@ class ApiError(Exception):
|
||||||
instance.task_id = task.task_spec.name or ""
|
instance.task_id = task.task_spec.name or ""
|
||||||
instance.task_name = task.task_spec.description or ""
|
instance.task_name = task.task_spec.description or ""
|
||||||
instance.file_name = task.workflow.spec.file or ""
|
instance.file_name = task.workflow.spec.file or ""
|
||||||
|
instance.task_data = task.data
|
||||||
return instance
|
return instance
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@ -32,10 +37,21 @@ class ApiError(Exception):
|
||||||
instance.file_name = task_spec._wf_spec.file
|
instance.file_name = task_spec._wf_spec.file
|
||||||
return instance
|
return instance
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_workflow_exception(cls, code, message, exp: WorkflowException):
|
||||||
|
"""We catch a lot of workflow exception errors,
|
||||||
|
so consolidating the code, and doing the best things
|
||||||
|
we can with the data we have."""
|
||||||
|
if isinstance(exp, WorkflowTaskExecException):
|
||||||
|
return ApiError.from_task(code, message, exp.task)
|
||||||
|
else:
|
||||||
|
return ApiError.from_task_spec(code, message, exp.sender)
|
||||||
|
|
||||||
|
|
||||||
class ApiErrorSchema(ma.Schema):
|
class ApiErrorSchema(ma.Schema):
|
||||||
class Meta:
|
class Meta:
|
||||||
fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id")
|
fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
|
||||||
|
"task_data")
|
||||||
|
|
||||||
|
|
||||||
@app.errorhandler(ApiError)
|
@app.errorhandler(ApiError)
|
||||||
|
|
|
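A sketch of how the new from_workflow_exception helper would look at a call site. The surrounding workflow-processing function is assumed for illustration and is not part of this commit:

    # Sketch (assumed call site): converting SpiffWorkflow errors into ApiErrors.
    from SpiffWorkflow import WorkflowException
    from crc.api.common import ApiError

    def complete_task(workflow, task):  # hypothetical processing function
        try:
            workflow.complete_task_from_id(task.id)
        except WorkflowException as we:
            # Picks from_task or from_task_spec depending on the exception type,
            # so task_data is captured whenever an actual task is available.
            raise ApiError.from_workflow_exception("task_error", str(we), we)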
crc/api/file.py

@@ -12,8 +12,9 @@ from crc.services.file_service import FileService
 
 
 def to_file_api(file_model):
-    """Converts a FileModel object to something we can return via the aip"""
-    return File.from_models(file_model, FileService.get_file_data(file_model.id))
+    """Converts a FileModel object to something we can return via the api"""
+    return File.from_models(file_model, FileService.get_file_data(file_model.id),
+                            FileService.get_doc_dictionary())
 
 
 def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None):
@@ -121,7 +122,7 @@ def get_file_info(file_id):
 
 def update_file_info(file_id, body):
     if file_id is None:
-        raise ApiError('unknown_file', 'Please provide a valid File ID.')
+        raise ApiError('no_such_file', 'Please provide a valid File ID.')
 
     file_model = session.query(FileModel).filter_by(id=file_id).first()
 
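The new delete_file summary in api.yml above describes a soft-delete fallback: archive the row when a hard delete is impossible. The implementation is not shown in this commit, so the following is only a sketch of that pattern; the `archived` column and the function shape are assumptions for illustration:

    # Sketch of the soft-delete pattern described by the api.yml summary.
    from sqlalchemy.exc import IntegrityError

    def delete_file(session, FileModel, file_id):
        file_model = session.query(FileModel).filter_by(id=file_id).first()
        try:
            session.delete(file_model)  # try a hard delete first
            session.commit()
        except IntegrityError:
            session.rollback()          # still referenced elsewhere
            file_model.archived = True  # assumed flag: hide from normal listings
            session.commit()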
|
@ -48,12 +48,10 @@ def update_study(study_id, body):
|
||||||
|
|
||||||
|
|
||||||
def get_study(study_id):
|
def get_study(study_id):
|
||||||
study_service = StudyService()
|
study = StudyService.get_study(study_id)
|
||||||
study = study_service.get_study(study_id)
|
|
||||||
if (study is None):
|
if (study is None):
|
||||||
raise ApiError("Study not found", status_code=404)
|
raise ApiError("unknown_study", 'The study "' + study_id + '" is not recognized.', status_code=404)
|
||||||
schema = StudySchema()
|
return StudySchema().dump(study)
|
||||||
return schema.dump(study)
|
|
||||||
|
|
||||||
|
|
||||||
def delete_study(study_id):
|
def delete_study(study_id):
|
||||||
|
|
|
crc/api/tools.py

@@ -9,6 +9,8 @@ from crc.api.common import ApiError
 from crc.scripts.complete_template import CompleteTemplate
 from crc.scripts.script import Script
 import crc.scripts
+from crc.services.mails import send_test_email
+
 
 
 def render_markdown(data, template):
     """
@@ -20,9 +22,9 @@ def render_markdown(data, template):
         data = json.loads(data)
         return template.render(**data)
     except UndefinedError as ue:
-        raise ApiError(code="undefined field", message=ue.message)
+        raise ApiError(code="undefined_field", message=ue.message)
     except Exception as e:
-        raise ApiError(code="invalid", message=str(e))
+        raise ApiError(code="invalid_render", message=str(e))
 
 
 def render_docx():
@@ -42,9 +44,9 @@ def render_docx():
             cache_timeout=-1  # Don't cache these files on the browser.
         )
     except ValueError as e:
-        raise ApiError(code="invalid", message=str(e))
+        raise ApiError(code="undefined_field", message=str(e))
     except Exception as e:
-        raise ApiError(code="invalid", message=str(e))
+        raise ApiError(code="invalid_render", message=str(e))
 
 
 def list_scripts():
@@ -59,3 +61,8 @@ def list_scripts():
         })
     return script_meta
+
+def send_email(address):
+    """Just sends a quick test email to assure the system is working."""
+    if not address:
+        address = "dan@sartography.com"
+    return send_test_email(address, [address])
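Together with the /send_email path added to api.yml, this gives configurators a one-call smoke test for the mail setup. A hypothetical invocation, with placeholder host, token, and address:

    # Sketch (placeholder values): hitting the new test-email endpoint.
    import requests

    resp = requests.get("http://localhost:5000/v1.0/send_email",
                        params={"address": "configurator@example.edu"},
                        headers={"Authorization": "Bearer <JWT>"})
    print(resp.status_code, resp.text)  # 201 plus any mailer error messages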
crc/api/user.py (258 changes)

@@ -1,41 +1,122 @@
-import json
-
-import connexion
 import flask
-from flask import redirect, g, request
+from flask import g, request
 
 from crc import app, db
 from crc.api.common import ApiError
 from crc.models.user import UserModel, UserModelSchema
-from crc.services.ldap_service import LdapService, LdapUserInfo
+from crc.services.ldap_service import LdapService, LdapModel
 
 """
 .. module:: crc.api.user
   :synopsis: Single Sign On (SSO) user login and session handlers
 """
-def verify_token(token):
-    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate", status_code=403)
-    if (not 'PRODUCTION' in app.config or not app.config['PRODUCTION']) and token == app.config["SWAGGER_AUTH_KEY"]:
+
+
+def verify_token(token=None):
+    """
+    Verifies the token for the user (if provided). If in production environment and token is not provided,
+    gets user from the SSO headers and returns their token.
+
+    Args:
+        token: Optional[str]
+
+    Returns:
+        token: str
+
+    Raises:
+        ApiError.  If not on production and token is not valid, returns an 'invalid_token' 403 error.
+        If on production and user is not authenticated, returns a 'no_user' 403 error.
+    """
+
+    failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
+                             status_code=403)
+
+    if not _is_production() and (token is None or 'user' not in g):
         g.user = UserModel.query.first()
         token = g.user.encode_auth_token()
-    try:
-        token_info = UserModel.decode_auth_token(token)
-        g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
-    except:
-        raise failure_error
-    if g.user is not None:
-        return token_info
-    else:
-        raise failure_error
+
+    if token:
+        try:
+            token_info = UserModel.decode_auth_token(token)
+            g.user = UserModel.query.filter_by(uid=token_info['sub']).first()
+        except:
+            raise failure_error
+        if g.user is not None:
+            return token_info
+        else:
+            raise failure_error
+
+    # If there's no token and we're in production, get the user from the SSO headers and return their token
+    if not token and _is_production():
+        uid = _get_request_uid(request)
+
+        if uid is not None:
+            db_user = UserModel.query.filter_by(uid=uid).first()
+
+            if db_user is not None:
+                g.user = db_user
+                token = g.user.encode_auth_token().decode()
+                token_info = UserModel.decode_auth_token(token)
+                return token_info
+
+            else:
+                raise ApiError("no_user", "User not found. Please login via the frontend app before accessing this feature.",
+                               status_code=403)
+
+
+def verify_token_admin(token=None):
+    """
+    Verifies the token for the user (if provided) in non-production environment. If in production environment,
+    checks that the user is in the list of authorized admins
+
+    Args:
+        token: Optional[str]
+
+    Returns:
+        token: str
+    """
+
+    # If this is production, check that the user is in the list of admins
+    if _is_production():
+        uid = _get_request_uid(request)
+
+        if uid is not None and uid in app.config['ADMIN_UIDS']:
+            return verify_token()
+
+    # If we're not in production, just use the normal verify_token method
+    else:
+        return verify_token(token)
 
 
 def get_current_user():
     return UserModelSchema().dump(g.user)
 
-@app.route('/v1.0/login')
-def sso_login():
-    # This what I see coming back:
+
+def login(
+        uid=None,
+        redirect_url=None,
+):
+    """
+    In non-production environment, provides an endpoint for end-to-end system testing that allows the system
+    to simulate logging in as a specific user. In production environment, simply logs user in via single-sign-on
+    (SSO) Shibboleth authentication headers.
+
+    Args:
+        uid: Optional[str]
+        redirect_url: Optional[str]
||||||
|
redirect_url: Optional[str]
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str. If not on production, returns the frontend auth callback URL, with auth token appended.
|
||||||
|
If on production and user is authenticated via SSO, returns the frontend auth callback URL,
|
||||||
|
with auth token appended.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ApiError. If on production and user is not authenticated, returns a 404 error.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# ----------------------------------------
|
||||||
|
# Shibboleth Authentication Headers
|
||||||
|
# ----------------------------------------
|
||||||
# X-Remote-Cn: Daniel Harold Funk (dhf8r)
|
# X-Remote-Cn: Daniel Harold Funk (dhf8r)
|
||||||
# X-Remote-Sn: Funk
|
# X-Remote-Sn: Funk
|
||||||
# X-Remote-Givenname: Daniel
|
# X-Remote-Givenname: Daniel
|
||||||
|
@ -50,62 +131,52 @@ def sso_login():
|
||||||
# X-Forwarded-Host: dev.crconnect.uvadcos.io
|
# X-Forwarded-Host: dev.crconnect.uvadcos.io
|
||||||
# X-Forwarded-Server: dev.crconnect.uvadcos.io
|
# X-Forwarded-Server: dev.crconnect.uvadcos.io
|
||||||
# Connection: Keep-Alive
|
# Connection: Keep-Alive
|
||||||
uid = request.headers.get("Uid")
|
|
||||||
if not uid:
|
|
||||||
uid = request.headers.get("X-Remote-Uid")
|
|
||||||
|
|
||||||
if not uid:
|
|
||||||
raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
|
|
||||||
% str(request.headers))
|
|
||||||
|
|
||||||
redirect = request.args.get('redirect')
|
# If we're in production, override any uid with the uid from the SSO request headers
|
||||||
app.logger.info("SSO_LOGIN: Full URL: " + request.url)
|
if _is_production():
|
||||||
app.logger.info("SSO_LOGIN: User Id: " + uid)
|
uid = _get_request_uid(request)
|
||||||
app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect))
|
|
||||||
|
|
||||||
ldap_service = LdapService()
|
if uid:
|
||||||
info = ldap_service.user_info(uid)
|
app.logger.info("SSO_LOGIN: Full URL: " + request.url)
|
||||||
|
app.logger.info("SSO_LOGIN: User Id: " + uid)
|
||||||
|
app.logger.info("SSO_LOGIN: Will try to redirect to : " + str(redirect_url))
|
||||||
|
|
||||||
|
ldap_info = LdapService().user_info(uid)
|
||||||
|
|
||||||
|
if ldap_info:
|
||||||
|
return _handle_login(ldap_info, redirect_url)
|
||||||
|
|
||||||
|
raise ApiError('404', 'unknown')
|
||||||
|
|
||||||
return _handle_login(info, redirect)
|
|
||||||
|
|
||||||
@app.route('/sso')
|
@app.route('/sso')
|
||||||
def sso():
|
def sso():
|
||||||
response = ""
|
response = ""
|
||||||
response += "<h1>Headers</h1>"
|
response += "<h1>Headers</h1>"
|
||||||
response += "<ul>"
|
response += "<ul>"
|
||||||
for k,v in request.headers:
|
for k, v in request.headers:
|
||||||
response += "<li><b>%s</b> %s</li>\n" % (k, v)
|
response += "<li><b>%s</b> %s</li>\n" % (k, v)
|
||||||
response += "<h1>Environment</h1>"
|
response += "<h1>Environment</h1>"
|
||||||
for k,v in request.environ:
|
for k, v in request.environ:
|
||||||
response += "<li><b>%s</b> %s</li>\n" % (k, v)
|
response += "<li><b>%s</b> %s</li>\n" % (k, v)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUTH_CALLBACK']):
|
def _handle_login(user_info: LdapModel, redirect_url=None):
|
||||||
"""On successful login, adds user to database if the user is not already in the system,
|
"""
|
||||||
then returns the frontend auth callback URL, with auth token appended.
|
On successful login, adds user to database if the user is not already in the system,
|
||||||
|
then returns the frontend auth callback URL, with auth token appended.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
user_info - an ldap user_info object.
|
user_info - an ldap user_info object.
|
||||||
redirect_url: Optional[str]
|
redirect_url: Optional[str]
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
|
Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
|
||||||
"""
|
"""
|
||||||
user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
|
user = _upsert_user(user_info)
|
||||||
|
g.user = user
|
||||||
if user is None:
|
|
||||||
# Add new user
|
|
||||||
user = UserModel()
|
|
||||||
|
|
||||||
user.uid = user_info.uid
|
|
||||||
user.display_name = user_info.display_name
|
|
||||||
user.email_address = user_info.email_address
|
|
||||||
user.affiliation = user_info.affiliation
|
|
||||||
user.title = user_info.title
|
|
||||||
|
|
||||||
db.session.add(user)
|
|
||||||
db.session.commit()
|
|
||||||
|
|
||||||
# Return the frontend auth callback URL, with auth token appended.
|
# Return the frontend auth callback URL, with auth token appended.
|
||||||
auth_token = user.encode_auth_token().decode()
|
auth_token = user.encode_auth_token().decode()
|
||||||
|
@ -120,41 +191,44 @@ def _handle_login(user_info: LdapUserInfo, redirect_url=app.config['FRONTEND_AUT
|
||||||
return auth_token
|
return auth_token
|
||||||
|
|
||||||
|
|
||||||
|
def _upsert_user(user_info):
|
||||||
|
user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).first()
|
||||||
|
|
||||||
def backdoor(
|
if user is None:
|
||||||
uid=None,
|
# Add new user
|
||||||
affiliation=None,
|
user = UserModel()
|
||||||
display_name=None,
|
|
||||||
email_address=None,
|
|
||||||
eppn=None,
|
|
||||||
first_name=None,
|
|
||||||
last_name=None,
|
|
||||||
title=None,
|
|
||||||
redirect=None,
|
|
||||||
):
|
|
||||||
"""A backdoor for end-to-end system testing that allows the system to simulate logging in as a specific user.
|
|
||||||
Only works if the application is running in a non-production environment.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
uid: str
|
|
||||||
affiliation: Optional[str]
|
|
||||||
display_name: Optional[str]
|
|
||||||
email_address: Optional[str]
|
|
||||||
eppn: Optional[str]
|
|
||||||
first_name: Optional[str]
|
|
||||||
last_name: Optional[str]
|
|
||||||
title: Optional[str]
|
|
||||||
redirect_url: Optional[str]
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
str. If not on production, returns the frontend auth callback URL, with auth token appended.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
ApiError. If on production, returns a 404 error.
|
|
||||||
"""
|
|
||||||
if not 'PRODUCTION' in app.config or not app.config['PRODUCTION']:
|
|
||||||
|
|
||||||
ldap_info = LdapService().user_info(uid)
|
|
||||||
return _handle_login(ldap_info, redirect)
|
|
||||||
else:
|
else:
|
||||||
raise ApiError('404', 'unknown')
|
user = db.session.query(UserModel).filter(UserModel.uid == user_info.uid).with_for_update().first()
|
||||||
|
|
||||||
|
user.uid = user_info.uid
|
||||||
|
user.display_name = user_info.display_name
|
||||||
|
user.email_address = user_info.email_address
|
||||||
|
user.affiliation = user_info.affiliation
|
||||||
|
user.title = user_info.title
|
||||||
|
|
||||||
|
db.session.add(user)
|
||||||
|
db.session.commit()
|
||||||
|
return user
|
||||||
|
|
||||||
|
|
||||||
|
def _get_request_uid(req):
|
||||||
|
uid = None
|
||||||
|
|
||||||
|
if _is_production():
|
||||||
|
|
||||||
|
if 'user' in g and g.user is not None:
|
||||||
|
return g.user.uid
|
||||||
|
|
||||||
|
uid = req.headers.get("Uid")
|
||||||
|
if not uid:
|
||||||
|
uid = req.headers.get("X-Remote-Uid")
|
||||||
|
|
||||||
|
if not uid:
|
||||||
|
raise ApiError("invalid_sso_credentials", "'Uid' nor 'X-Remote-Uid' were present in the headers: %s"
|
||||||
|
% str(req.headers))
|
||||||
|
|
||||||
|
return uid
|
||||||
|
|
||||||
|
|
||||||
|
def _is_production():
|
||||||
|
return 'PRODUCTION' in app.config and app.config['PRODUCTION']
|
||||||
|
|
|
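The token round trip behind verify_token can be sketched standalone with PyJWT; this is a minimal illustration, not the project's UserModel methods, and the secret and TTL are made up for the demo:

    import datetime
    import jwt  # PyJWT, the same library UserModel uses

    SECRET = "demo-secret"  # stand-in for app.config['TOKEN_AUTH_SECRET_KEY']

    token = jwt.encode(
        {
            "exp": datetime.datetime.utcnow() + datetime.timedelta(hours=2),
            "iat": datetime.datetime.utcnow(),
            "sub": "dhf8r",  # verify_token() reads this back as token_info['sub']
        },
        SECRET,
        algorithm="HS256",
    )

    try:
        token_info = jwt.decode(token, SECRET, algorithms=["HS256"])
        print(token_info["sub"])  # -> dhf8r
    except jwt.ExpiredSignatureError:
        print("expired; re-authenticate")  # the failure path verify_token maps to a 403

Under PyJWT 1.x, jwt.encode returns bytes, which is why the diff calls .decode() on the token before handing it back to the client.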
crc/api/workflow.py
@@ -1,6 +1,8 @@
 import uuid

-from crc import session
+from flask import g
+
+from crc import session, app
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
 from crc.models.file import FileModel, LookupDataSchema
@@ -44,6 +46,13 @@ def validate_workflow_specification(spec_id):
     try:
         WorkflowService.test_spec(spec_id)
     except ApiError as ae:
+        ae.message = "When populating all fields ... " + ae.message
+        errors.append(ae)
+    try:
+        # Run the validation twice, the second time, just populate the required fields.
+        WorkflowService.test_spec(spec_id, required_only=True)
+    except ApiError as ae:
+        ae.message = "When populating only required fields ... " + ae.message
         errors.append(ae)
     return ApiErrorSchema(many=True).dump(errors)

@@ -112,6 +121,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):

         navigation.append(NavigationItem(**nav_item))
         NavigationItemSchema().dump(nav_item)
+
+    spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
     workflow_api = WorkflowApi(
         id=processor.get_workflow_id(),
         status=processor.get_status(),
@@ -120,9 +131,10 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
         workflow_spec_id=processor.workflow_spec_id,
         spec_version=processor.get_version_string(),
         is_latest_spec=processor.is_latest_spec,
-        total_tasks=processor.workflow_model.total_tasks,
+        total_tasks=len(navigation),
         completed_tasks=processor.workflow_model.completed_tasks,
-        last_updated=processor.workflow_model.last_updated
+        last_updated=processor.workflow_model.last_updated,
+        title=spec.display_name
     )
     if not next_task:  # The Next Task can be requested to be a certain task, useful for parallel tasks.
         # This may or may not work, sometimes there is no next task to complete.
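The total_tasks=len(navigation) change means the reported task count is now derived from the navigation items built just above, rather than the counter stored on the workflow model; completed_tasks still comes from the model. In miniature:

    navigation = ["task_a", "task_b", "task_c", "task_d"]  # stand-ins for NavigationItem objects
    completed_tasks = 1  # still processor.workflow_model.completed_tasks

    total_tasks = len(navigation)  # was processor.workflow_model.total_tasks
    print(f"{completed_tasks}/{total_tasks} tasks complete")  # -> 1/4 tasks complete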
@@ -146,6 +158,7 @@ def delete_workflow(workflow_id):

 def set_current_task(workflow_id, task_id):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -157,13 +170,21 @@ def set_current_task(workflow_id, task_id):
     if task.state == task.COMPLETED:
         task.reset_token(reset_data=False)  # we could optionally clear the previous data.
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
     workflow_api_model = __get_workflow_api_model(processor, task)
     return WorkflowApiSchema().dump(workflow_api_model)


 def update_task(workflow_id, task_id, body):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+
+    if workflow_model is None:
+        raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
+
+    elif workflow_model.study is None:
+        raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
+
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -174,7 +195,7 @@ def update_task(workflow_id, task_id, body):
     processor.complete_task(task)
     processor.do_engine_steps()
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)

     workflow_api_model = __get_workflow_api_model(processor)
     return WorkflowApiSchema().dump(workflow_api_model)
@@ -228,4 +249,15 @@ def lookup(workflow_id, field_id, query, limit):
     """
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
     lookup_data = LookupService.lookup(workflow, field_id, query, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
+
+
+def __get_user_uid(user_uid):
+    if 'user' in g:
+        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
+            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+        else:
+            return g.user.uid
+
+    else:
+        raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
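The rule __get_user_uid enforces reduces to: admins may act on any workflow, everyone else only on workflows belonging to their own study. Restated as a pure function (names invented for the sketch):

    ADMIN_UIDS = ["dhf8r"]  # stand-in for app.config['ADMIN_UIDS']

    def may_edit(current_uid, study_owner_uid):
        """True when the caller is an admin or owns the study behind the workflow."""
        return current_uid in ADMIN_UIDS or current_uid == study_owner_uid

    assert may_edit("dhf8r", "lb3dp")      # admins may edit anyone's workflow
    assert may_edit("lb3dp", "lb3dp")      # owners may edit their own
    assert not may_edit("lb3dp", "ajl2j")  # everyone else gets the 403 above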
crc/models/api_models.py
@@ -31,10 +31,12 @@ class NavigationItem(object):

 class Task(object):
+
+    PROP_OPTIONS_REPEAT = "repeat"
     PROP_OPTIONS_FILE = "spreadsheet.name"
     PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column"
     PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column"
     PROP_LDAP_LOOKUP = "ldap.lookup"
+    VALIDATION_REQUIRED = "required"
     FIELD_TYPE_AUTO_COMPLETE = "autocomplete"


@@ -117,7 +119,7 @@ class NavigationItemSchema(ma.Schema):

 class WorkflowApi(object):
     def __init__(self, id, status, next_task, navigation,
-                 spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated):
+                 spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title):
         self.id = id
         self.status = status
         self.next_task = next_task  # The next task that requires user input.
@@ -128,13 +130,14 @@ class WorkflowApi(object):
         self.total_tasks = total_tasks
         self.completed_tasks = completed_tasks
         self.last_updated = last_updated
+        self.title = title

 class WorkflowApiSchema(ma.Schema):
     class Meta:
         model = WorkflowApi
         fields = ["id", "status", "next_task", "navigation",
                   "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks",
-                  "last_updated"]
+                  "last_updated", "title"]
         unknown = INCLUDE

     status = EnumField(WorkflowStatus)
@@ -145,7 +148,7 @@ class WorkflowApiSchema(ma.Schema):
     def make_workflow(self, data, **kwargs):
         keys = ['id', 'status', 'next_task', 'navigation',
                 'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks",
-                "last_updated"]
+                "last_updated", "title"]
         filtered_fields = {key: data[key] for key in keys}
         filtered_fields['next_task'] = TaskSchema().make_task(data['next_task'])
         return WorkflowApi(**filtered_fields)
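Note that title had to be added in three places: the WorkflowApi constructor, Meta.fields, and the keys list in make_workflow. If the lists drift apart, the dict comprehension in make_workflow fails loudly; a miniature of that failure mode:

    data = {"id": 1, "status": "waiting"}  # pretend dump output, missing 'title'
    keys = ["id", "status", "title"]

    try:
        filtered_fields = {key: data[key] for key in keys}
    except KeyError as missing:
        print("field missing from dump:", missing)  # -> field missing from dump: 'title'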
crc/models/approval.py
@@ -1,24 +1,28 @@
 import enum

 import marshmallow
-from ldap3.core.exceptions import LDAPSocketOpenError
-from marshmallow import INCLUDE
+from marshmallow import INCLUDE, fields
 from sqlalchemy import func

-from crc import db, ma
+from crc import db, ma, app
 from crc.api.common import ApiError
 from crc.models.file import FileDataModel
+from crc.models.ldap import LdapSchema
 from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowModel
+from crc.services.file_service import FileService
 from crc.services.ldap_service import LdapService


 class ApprovalStatus(enum.Enum):
-    WAITING = "WAITING"  # no one has done jack.
+    PENDING = "PENDING"  # no one has done jack.
     APPROVED = "APPROVED"  # approved by the reviewer
     DECLINED = "DECLINED"  # rejected by the reviewer
     CANCELED = "CANCELED"  # The document was replaced with a new version and this review is no longer needed.

+    # Used for overall status only, never set on a task.
+    AWAITING = "AWAITING"  # awaiting another approval


 class ApprovalFile(db.Model):
     file_data_id = db.Column(db.Integer, db.ForeignKey(FileDataModel.id), primary_key=True)
@@ -32,13 +36,14 @@ class ApprovalModel(db.Model):
     __tablename__ = 'approval'
     id = db.Column(db.Integer, primary_key=True)
     study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=False)
-    study = db.relationship(StudyModel, backref='approval', cascade='all,delete')
+    study = db.relationship(StudyModel)
     workflow_id = db.Column(db.Integer, db.ForeignKey(WorkflowModel.id), nullable=False)
     workflow = db.relationship(WorkflowModel)
     approver_uid = db.Column(db.String)  # Not linked to user model, as they may not have logged in yet.
     status = db.Column(db.String)
     message = db.Column(db.String, default='')
     date_created = db.Column(db.DateTime(timezone=True), default=func.now())
+    date_approved = db.Column(db.DateTime(timezone=True), default=None)
     version = db.Column(db.Integer)  # Incremented integer, so 1,2,3 as requests are made.
     approval_files = db.relationship(ApprovalFile, back_populates="approval",
                                      cascade="all, delete, delete-orphan",
@@ -62,33 +67,37 @@ class Approval(object):
         instance.status = model.status
         instance.message = model.message
         instance.date_created = model.date_created
+        instance.date_approved = model.date_approved
         instance.version = model.version
         instance.title = ''
+        instance.related_approvals = []

         if model.study:
             instance.title = model.study.title

-        instance.approver = {}
         try:
-            ldap_service = LdapService()
-            principal_investigator_id = model.study.primary_investigator_id
-            user_info = ldap_service.user_info(principal_investigator_id)
-        except (ApiError, LDAPSocketOpenError) as exception:
-            user_info = None
-            instance.approver['display_name'] = 'Primary Investigator details'
-            instance.approver['department'] = 'currently not available'
-
-        if user_info:
-            # TODO: Rename approver to primary investigator
-            instance.approver['uid'] = model.approver_uid
-            instance.approver['display_name'] = user_info.display_name
-            instance.approver['title'] = user_info.title
-            instance.approver['department'] = user_info.department
+            instance.approver = LdapService.user_info(model.approver_uid)
+            instance.primary_investigator = LdapService.user_info(model.study.primary_investigator_id)
+        except ApiError as ae:
+            app.logger.error("Ldap lookup failed for approval record %i" % model.id)

+        doc_dictionary = FileService.get_doc_dictionary()
         instance.associated_files = []
         for approval_file in model.approval_files:
+            try:
+                # fixme: This is slow because we are doing a ton of queries to find the irb code.
+                extra_info = doc_dictionary[approval_file.file_data.file_model.irb_doc_code]
+            except:
+                extra_info = None
             associated_file = {}
             associated_file['id'] = approval_file.file_data.file_model.id
-            associated_file['name'] = approval_file.file_data.file_model.name
+            if extra_info:
+                associated_file['name'] = '_'.join((extra_info['category1'],
+                                                    approval_file.file_data.file_model.name))
+                associated_file['description'] = extra_info['description']
+            else:
+                associated_file['name'] = approval_file.file_data.file_model.name
+                associated_file['description'] = 'No description available'
+            associated_file['name'] = '(' + model.study.primary_investigator_id + ')' + associated_file['name']
             associated_file['content_type'] = approval_file.file_data.file_model.content_type
             instance.associated_files.append(associated_file)

@@ -100,10 +109,17 @@ class Approval(object):


 class ApprovalSchema(ma.Schema):
+
+    approver = fields.Nested(LdapSchema, dump_only=True)
+    primary_investigator = fields.Nested(LdapSchema, dump_only=True)
+    related_approvals = fields.List(fields.Nested('ApprovalSchema', allow_none=True, dump_only=True))
+
     class Meta:
         model = Approval
         fields = ["id", "study_id", "workflow_id", "version", "title",
-                  "version", "status", "message", "approver", "associated_files"]
+                  "status", "message", "approver", "primary_investigator",
+                  "associated_files", "date_created", "date_approved",
+                  "related_approvals"]
         unknown = INCLUDE

     @marshmallow.post_load
@@ -111,30 +127,4 @@ class ApprovalSchema(ma.Schema):
         """Loads the basic approval data for updates to the database"""
         return Approval(**data)
-
-# Carlos:  Here is the data structure I was trying to imagine.
-# If I were to continue down my current traing of thought, I'd create
-# another class called just "Approval" that can take an ApprovalModel from the
-# database and construct a data structure like this one, that can
-# be provided to the API at an /approvals endpoint with GET and PUT
-# dat = { "approvals": [
-#   {"id": 1,
-#    "study_id": 20,
-#    "workflow_id": 454,
-#    "study_title": "Dan Funk (dhf8r)",  # Really it's just the name of the Principal Investigator
-#    "workflow_version": "21",
-#    "approver": {  # Pulled from ldap
-#        "uid": "bgb22",
-#        "display_name": "Billy Bob (bgb22)",
-#        "title": "E42:He's a hoopy frood",
-#        "department": "E0:EN-Eng Study of Parallel Universes",
-#    },
-#    "files": [
-#        {
-#            "id": 124,
-#            "name": "ResearchRestart.docx",
-#            "content_type": "docx-something-whatever"
-#        }
-#    ]
-#   }
-#   ...
-# ]
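The associated-file naming above composes the download name from the IRB document category, the stored file name, and the primary investigator's uid. Worked through with invented values:

    extra_info = {"category1": "Ancillary Document", "description": "PRC Approval Form"}
    file_name = "ResearchRestart.docx"
    pi_uid = "dhf8r"

    name = "_".join((extra_info["category1"], file_name))  # category-prefixed name
    name = "(" + pi_uid + ")" + name                        # then the PI uid is prepended
    print(name)  # -> (dhf8r)Ancillary Document_ResearchRestart.docx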
crc/models/file.py
@@ -82,11 +82,14 @@ class FileModel(db.Model):
     workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
     workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
     irb_doc_code = db.Column(db.String, nullable=True)  # Code reference to the irb_documents.xlsx reference file.
+    # A request was made to delete the file, but we can't because there are
+    # active approvals or running workflows that depend on it.  So we archive
+    # it instead, hide it in the interface.
+    archived = db.Column(db.Boolean, default=False, nullable=False)

 class File(object):
     @classmethod
-    def from_models(cls, model: FileModel, data_model: FileDataModel):
+    def from_models(cls, model: FileModel, data_model: FileDataModel, doc_dictionary):
         instance = cls()
         instance.id = model.id
         instance.name = model.name
@@ -99,6 +102,15 @@ class File(object):
         instance.workflow_id = model.workflow_id
         instance.irb_doc_code = model.irb_doc_code
         instance.type = model.type
+        if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
+            instance.category = "/".join(filter(None, [doc_dictionary[model.irb_doc_code]['category1'],
+                                                       doc_dictionary[model.irb_doc_code]['category2'],
+                                                       doc_dictionary[model.irb_doc_code]['category3']]))
+            instance.description = doc_dictionary[model.irb_doc_code]['description']
+            instance.download_name = ".".join([instance.category, model.type.value])
+        else:
+            instance.category = ""
+            instance.description = ""
         if data_model:
             instance.last_modified = data_model.date_created
             instance.latest_version = data_model.version
@@ -122,7 +134,8 @@ class FileSchema(ma.Schema):
         model = File
         fields = ["id", "name", "is_status", "is_reference", "content_type",
                   "primary", "primary_process_id", "workflow_spec_id", "workflow_id",
-                  "irb_doc_code", "last_modified", "latest_version", "type"]
+                  "irb_doc_code", "last_modified", "latest_version", "type", "categories",
+                  "description", "category", "description", "download_name"]
         unknown = INCLUDE
         type = EnumField(FileType)
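filter(None, ...) drops empty category levels before joining, so a document with only some categories set yields a clean path instead of trailing slashes. With invented values:

    doc_entry = {"category1": "UVA Compliance", "category2": "PRC Approval", "category3": ""}

    category = "/".join(filter(None, [doc_entry["category1"],
                                      doc_entry["category2"],
                                      doc_entry["category3"]]))
    print(category)                      # -> UVA Compliance/PRC Approval
    print(".".join([category, "docx"]))  # the download_name shape built above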
crc/models/ldap.py (new file)
@@ -0,0 +1,39 @@
+from flask_marshmallow.sqla import SQLAlchemyAutoSchema
+from marshmallow import EXCLUDE
+from sqlalchemy import func
+
+from crc import db
+
+
+class LdapModel(db.Model):
+    uid = db.Column(db.String, primary_key=True)
+    display_name = db.Column(db.String)
+    given_name = db.Column(db.String)
+    email_address = db.Column(db.String)
+    telephone_number = db.Column(db.String)
+    title = db.Column(db.String)
+    department = db.Column(db.String)
+    affiliation = db.Column(db.String)
+    sponsor_type = db.Column(db.String)
+    date_cached = db.Column(db.DateTime(timezone=True), default=func.now())
+
+    @classmethod
+    def from_entry(cls, entry):
+        return LdapModel(uid=entry.uid.value,
+                         display_name=entry.displayName.value,
+                         given_name=", ".join(entry.givenName),
+                         email_address=entry.mail.value,
+                         telephone_number=entry.telephoneNumber.value,
+                         title=", ".join(entry.title),
+                         department=", ".join(entry.uvaDisplayDepartment),
+                         affiliation=", ".join(entry.uvaPersonIAMAffiliation),
+                         sponsor_type=", ".join(entry.uvaPersonSponsoredType))
+
+
+class LdapSchema(SQLAlchemyAutoSchema):
+    class Meta:
+        model = LdapModel
+        load_instance = True
+        include_relationships = True
+        include_fk = True  # Includes foreign keys
+        unknown = EXCLUDE
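unknown = EXCLUDE makes the schema silently drop unrecognized keys on load rather than raising. The SQLAlchemy-backed LdapSchema needs an app context to run, so here is the same behavior on a plain marshmallow schema:

    from marshmallow import EXCLUDE, Schema, fields

    class DemoSchema(Schema):
        uid = fields.String()

        class Meta:
            unknown = EXCLUDE  # unexpected keys are dropped, not errors

    print(DemoSchema().load({"uid": "dhf8r", "unexpected": 42}))  # -> {'uid': 'dhf8r'}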
crc/models/study.py
@@ -5,7 +5,7 @@ from sqlalchemy import func

 from crc import db, ma
 from crc.api.common import ApiErrorSchema
-from crc.models.file import FileModel, SimpleFileSchema
+from crc.models.file import FileModel, SimpleFileSchema, FileSchema
 from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
 from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \
     WorkflowModel
@@ -106,7 +106,8 @@ class Study(object):
     def __init__(self, title, last_updated, primary_investigator_id, user_uid,
                  id=None,
                  protocol_builder_status=None,
-                 sponsor="", hsr_number="", ind_number="", categories=[], **argsv):
+                 sponsor="", hsr_number="", ind_number="", categories=[],
+                 files=[], approvals=[], **argsv):
         self.id = id
         self.user_uid = user_uid
         self.title = title
@@ -117,8 +118,9 @@ class Study(object):
         self.hsr_number = hsr_number
         self.ind_number = ind_number
         self.categories = categories
+        self.approvals = approvals
         self.warnings = []
-        self.files = []
+        self.files = files

     @classmethod
     def from_model(cls, study_model: StudyModel):
@@ -149,12 +151,13 @@ class StudySchema(ma.Schema):
     hsr_number = fields.String(allow_none=True)
     sponsor = fields.String(allow_none=True)
     ind_number = fields.String(allow_none=True)
-    files = fields.List(fields.Nested(SimpleFileSchema), dump_only=True)
+    files = fields.List(fields.Nested(FileSchema), dump_only=True)
+    approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True)

     class Meta:
         model = Study
         additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid",
-                      "sponsor", "ind_number"]
+                      "sponsor", "ind_number", "approvals", "files"]
         unknown = INCLUDE

     @marshmallow.post_load
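One caution on the new signature: categories=[], files=[], and approvals=[] are mutable default arguments, evaluated once at definition time. That is harmless as long as the defaults are never mutated in place, but the classic pitfall is easy to reproduce:

    def risky(items=[]):  # the default list is shared across calls
        items.append(1)
        return items

    print(risky())  # -> [1]
    print(risky())  # -> [1, 1]   state leaks between calls

    def safe(items=None):  # the usual idiom
        items = items if items is not None else []
        items.append(1)
        return items

    print(safe())  # -> [1]
    print(safe())  # -> [1]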
crc/models/user.py
@@ -19,7 +19,7 @@ class UserModel(db.Model):
     last_name = db.Column(db.String, nullable=True)
     title = db.Column(db.String, nullable=True)

-    # Add Department and School
+    # TODO: Add Department and School


     def encode_auth_token(self):
@@ -27,7 +27,7 @@ class UserModel(db.Model):
         Generates the Auth Token
         :return: string
         """
-        hours = int(app.config['TOKEN_AUTH_TTL_HOURS'])
+        hours = float(app.config['TOKEN_AUTH_TTL_HOURS'])
         payload = {
             'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=hours, minutes=0, seconds=0),
             'iat': datetime.datetime.utcnow(),
@@ -36,7 +36,7 @@ class UserModel(db.Model):
         return jwt.encode(
             payload,
             app.config.get('TOKEN_AUTH_SECRET_KEY'),
-            algorithm='HS256'
+            algorithm='HS256',
         )

     @staticmethod
@@ -50,9 +50,9 @@ class UserModel(db.Model):
             payload = jwt.decode(auth_token, app.config.get('TOKEN_AUTH_SECRET_KEY'), algorithms='HS256')
             return payload
         except jwt.ExpiredSignatureError:
-            raise ApiError('token_expired', 'The Authentication token you provided expired, and must be renewed.')
+            raise ApiError('token_expired', 'The Authentication token you provided expired and must be renewed.')
         except jwt.InvalidTokenError:
-            raise ApiError('token_invalid', 'The Authentication token you provided. You need a new token. ')
+            raise ApiError('token_invalid', 'The Authentication token you provided is invalid. You need a new token. ')


 class UserModelSchema(SQLAlchemyAutoSchema):
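Switching int() to float() lets TOKEN_AUTH_TTL_HOURS express fractional hours, since timedelta accepts floats directly:

    import datetime

    hours = float("0.25")  # stand-in for app.config['TOKEN_AUTH_TTL_HOURS']; int("0.25") would raise ValueError
    print(datetime.timedelta(hours=hours))  # -> 0:15:00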
crc/scripts/complete_template.py
@@ -29,7 +29,8 @@ Takes two arguments:

     def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
         """For validation only, process the template, but do not store it in the database."""
-        self.process_template(task, study_id, None, *args, **kwargs)
+        workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
+        self.process_template(task, study_id, workflow, *args, **kwargs)

     def do_task(self, task, study_id, workflow_id, *args, **kwargs):
         workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
@@ -62,13 +63,13 @@ Takes two arguments:
         # Get the workflow specification file with the given name.
         file_data_models = FileService.get_spec_data_files(
             workflow_spec_id=workflow.workflow_spec_id,
-            workflow_id=workflow.id)
-        for file_data in file_data_models:
-            if file_data.file_model.name == file_name:
-                file_data_model = file_data
-
-        if workflow is None or file_data_model is None:
-            file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)
+            workflow_id=workflow.id,
+            name=file_name)
+        if len(file_data_models) > 0:
+            file_data_model = file_data_models[0]
+        else:
+            raise ApiError(code="invalid_argument",
+                           message="Uable to locate a file with the given name.")

         # Get images from file/files fields
         if len(args) == 3:
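With the name filter pushed into get_spec_data_files, the caller shrinks to a take-the-first-or-raise pattern (note the committed message misspells "Unable" as "Uable"). The pattern in miniature:

    def first_or_error(models, file_name):
        """Return the first match, or raise the way process_template now does."""
        if len(models) > 0:
            return models[0]
        raise ValueError("Unable to locate a file with the given name: %s" % file_name)

    print(first_or_error(["template.docx"], "template.docx"))  # -> template.docx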
crc/scripts/request_approval.py
@@ -11,7 +11,8 @@ class RequestApproval(Script):
         return """
 Creates an approval request on this workflow, by the given approver_uid(s),"
 Takes multiple arguments, which should point to data located in current task
-or be quoted strings.
+or be quoted strings. The order is important. Approvals will be processed
+in this order.

 Example:
 RequestApproval approver1 "dhf8r"
@@ -26,7 +27,8 @@ RequestApproval approver1 "dhf8r"
             ApprovalService.add_approval(study_id, workflow_id, args)
         elif isinstance(uids, list):
             for id in uids:
-                ApprovalService.add_approval(study_id, workflow_id, id)
+                if id:  ## Assure it's not empty or null
+                    ApprovalService.add_approval(study_id, workflow_id, id)

     def get_uids(self, task, args):
         if len(args) < 1:
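The new `if id:` guard skips blank or null entries, so an empty cell in the task data no longer creates a phantom approval request:

    uids = ["dhf8r", "", None, "lb3dp"]

    for uid in uids:
        if uid:  # assure it's not empty or null
            print("requesting approval from", uid)  # only dhf8r and lb3dp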
@ -2,37 +2,168 @@ from datetime import datetime
|
||||||
|
|
||||||
from sqlalchemy import desc
|
from sqlalchemy import desc
|
||||||
|
|
||||||
from crc import db, session
|
from crc import app, db, session
|
||||||
from crc.api.common import ApiError
|
from crc.api.common import ApiError
|
||||||
|
|
||||||
from crc.models.approval import ApprovalModel, ApprovalStatus, ApprovalFile
|
from crc.models.approval import ApprovalModel, ApprovalStatus, ApprovalFile, Approval
|
||||||
|
from crc.models.study import StudyModel
|
||||||
from crc.models.workflow import WorkflowModel
|
from crc.models.workflow import WorkflowModel
|
||||||
from crc.services.file_service import FileService
|
from crc.services.file_service import FileService
|
||||||
|
from crc.services.ldap_service import LdapService
|
||||||
|
from crc.services.mails import (
|
||||||
|
send_ramp_up_submission_email,
|
||||||
|
send_ramp_up_approval_request_email,
|
||||||
|
send_ramp_up_approval_request_first_review_email,
|
||||||
|
send_ramp_up_approved_email,
|
||||||
|
send_ramp_up_denied_email,
|
||||||
|
send_ramp_up_denied_email_to_approver
|
||||||
|
)
|
||||||
|
|
||||||
class ApprovalService(object):
|
class ApprovalService(object):
|
||||||
"""Provides common tools for working with an Approval"""
|
"""Provides common tools for working with an Approval"""
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_approvals_per_user(approver_uid):
|
def __one_approval_from_study(study, approver_uid = None, status=None,
|
||||||
"""Returns a list of all approvals for the given user (approver)"""
|
include_cancelled=True):
|
||||||
db_approvals = session.query(ApprovalModel).filter_by(approver_uid=approver_uid).all()
|
"""Returns one approval, with all additional approvals as 'related_approvals',
|
||||||
return db_approvals
|
the main approval can be pinned to an approver with an optional argument.
|
||||||
|
Will return null if no approvals exist on the study."""
|
||||||
|
main_approval = None
|
||||||
|
related_approvals = []
|
||||||
|
query = db.session.query(ApprovalModel).filter(ApprovalModel.study_id == study.id)
|
||||||
|
if not include_cancelled:
|
||||||
|
query=query.filter(ApprovalModel.status != ApprovalStatus.CANCELED.value)
|
||||||
|
approvals = query.all() # All non-cancelled approvals.
|
||||||
|
|
||||||
|
for approval_model in approvals:
|
||||||
|
if approval_model.approver_uid == approver_uid:
|
||||||
|
main_approval = approval_model
|
||||||
|
else:
|
||||||
|
related_approvals.append(approval_model)
|
||||||
|
|
||||||
|
# IF WE ARE JUST RETURNING ALL OF THE APPROVALS PER STUDY
|
||||||
|
if not main_approval and len(related_approvals) > 0:
|
||||||
|
main_approval = related_approvals[0]
|
||||||
|
related_approvals = related_approvals[1:]
|
||||||
|
|
||||||
|
if main_approval is not None: # May be null if the study has no approvals.
|
||||||
|
final_status = ApprovalService.__calculate_overall_approval_status(main_approval, related_approvals)
|
||||||
|
if status and final_status != status: return # Now that we are certain of the status, filter on it.
|
||||||
|
|
||||||
|
main_approval = Approval.from_model(main_approval)
|
||||||
|
main_approval.status = final_status
|
||||||
|
for ra in related_approvals:
|
||||||
|
main_approval.related_approvals.append(Approval.from_model(ra))
|
||||||
|
|
||||||
|
return main_approval
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_all_approvals():
|
def __calculate_overall_approval_status(approval, related):
|
||||||
"""Returns a list of all approvlas"""
|
# In the case of pending approvals, check to see if there is a related approval
|
||||||
db_approvals = session.query(ApprovalModel).all()
|
# that proceeds this approval - and if it is declined, or still pending, then change
|
||||||
return db_approvals
|
# the state of the approval to be Declined, or Waiting respectively.
|
||||||
|
if approval.status == ApprovalStatus.PENDING.value:
|
||||||
|
for ra in related:
|
||||||
|
if ra.id < approval.id:
|
||||||
|
if ra.status == ApprovalStatus.DECLINED.value or ra.status == ApprovalStatus.CANCELED.value:
|
||||||
|
return ra.status # If any prior approval id declined or cancelled so is this approval.
|
||||||
|
elif ra.status == ApprovalStatus.PENDING.value:
|
||||||
|
return ApprovalStatus.AWAITING.value # if any prior approval is pending, then this is waiting.
|
||||||
|
return approval.status
|
||||||
|
else:
|
||||||
|
return approval.status
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def update_approval(approval_id, approver_uid, status):
|
def get_approvals_per_user(approver_uid, status=None, include_cancelled=False):
|
||||||
|
"""Returns a list of approval objects (not db models) for the given
|
||||||
|
approver. """
|
||||||
|
studies = db.session.query(StudyModel).join(ApprovalModel).\
|
||||||
|
filter(ApprovalModel.approver_uid == approver_uid).all()
|
||||||
|
approvals = []
|
||||||
|
for study in studies:
|
||||||
|
approval = ApprovalService.__one_approval_from_study(study, approver_uid,
|
||||||
|
status, include_cancelled)
|
||||||
|
if approval:
|
||||||
|
approvals.append(approval)
|
||||||
|
return approvals
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_all_approvals(include_cancelled=True):
|
||||||
|
"""Returns a list of all approval objects (not db models), one record
|
||||||
|
per study, with any associated approvals grouped under the first approval."""
|
||||||
|
studies = db.session.query(StudyModel).all()
|
||||||
|
approvals = []
|
||||||
|
for study in studies:
|
||||||
|
approval = ApprovalService.__one_approval_from_study(study, include_cancelled=include_cancelled)
|
||||||
|
if approval:
|
||||||
|
approvals.append(approval)
|
||||||
|
return approvals
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_approvals_for_study(study_id, include_cancelled=True):
|
||||||
|
"""Returns an array of Approval objects for the study, it does not
|
||||||
|
compute the related approvals."""
|
||||||
|
query = session.query(ApprovalModel).filter_by(study_id=study_id)
|
||||||
|
if not include_cancelled:
|
||||||
|
query = query.filter(ApprovalModel.status != ApprovalStatus.CANCELED.value)
|
||||||
|
db_approvals = query.all()
|
||||||
|
return [Approval.from_model(approval_model) for approval_model in db_approvals]
|
||||||
|
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def update_approval(approval_id, approver_uid):
|
||||||
"""Update a specific approval"""
|
"""Update a specific approval"""
|
||||||
db_approval = session.query(ApprovalModel).get(approval_id)
|
db_approval = session.query(ApprovalModel).get(approval_id)
|
||||||
|
status = db_approval.status
|
||||||
if db_approval:
|
if db_approval:
|
||||||
db_approval.status = status
|
# db_approval.status = status
|
||||||
session.add(db_approval)
|
# session.add(db_approval)
|
||||||
session.commit()
|
# session.commit()
|
||||||
|
if status == ApprovalStatus.APPROVED.value:
|
||||||
|
# second_approval = ApprovalModel().query.filter_by(
|
||||||
|
# study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
|
||||||
|
# status=ApprovalStatus.PENDING.value, version=db_approval.version).first()
|
||||||
|
# if second_approval:
|
||||||
|
# send rrp approval request for second approver
|
||||||
|
ldap_service = LdapService()
|
||||||
|
pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
|
||||||
|
approver_info = ldap_service.user_info(approver_uid)
|
||||||
|
# send rrp submission
|
||||||
|
mail_result = send_ramp_up_approved_email(
|
||||||
|
'askresearch@virginia.edu',
|
||||||
|
[pi_user_info.email_address],
|
||||||
|
f'{approver_info.display_name} - ({approver_info.uid})'
|
||||||
|
)
|
||||||
|
if mail_result:
|
||||||
|
app.logger.error(mail_result)
|
||||||
|
elif status == ApprovalStatus.DECLINED.value:
|
||||||
|
ldap_service = LdapService()
|
||||||
|
pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)
|
||||||
|
approver_info = ldap_service.user_info(approver_uid)
|
||||||
|
# send rrp submission
|
||||||
|
mail_result = send_ramp_up_denied_email(
|
||||||
|
'askresearch@virginia.edu',
|
||||||
|
[pi_user_info.email_address],
|
||||||
|
f'{approver_info.display_name} - ({approver_info.uid})'
|
||||||
|
)
|
||||||
|
if mail_result:
|
||||||
|
app.logger.error(mail_result)
|
||||||
|
first_approval = ApprovalModel().query.filter_by(
|
||||||
|
study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
|
||||||
|
status=ApprovalStatus.APPROVED.value, version=db_approval.version).first()
|
||||||
|
if first_approval:
|
||||||
|
# Second approver denies
|
||||||
|
first_approver_info = ldap_service.user_info(first_approval.approver_uid)
|
||||||
|
approver_email = [first_approver_info.email_address] if first_approver_info.email_address else app.config['FALLBACK_EMAILS']
|
||||||
|
# send rrp denied by second approver email to first approver
|
||||||
|
mail_result = send_ramp_up_denied_email_to_approver(
|
||||||
|
'askresearch@virginia.edu',
|
||||||
|
approver_email,
|
||||||
|
f'{pi_user_info.display_name} - ({pi_user_info.uid})',
|
||||||
|
f'{approver_info.display_name} - ({approver_info.uid})'
|
||||||
|
)
|
||||||
|
if mail_result:
|
||||||
|
app.logger.error(mail_result)
|
||||||
# TODO: Log update action by approver_uid - maybe ?
|
# TODO: Log update action by approver_uid - maybe ?
|
||||||
return db_approval
|
return db_approval
|
||||||
|
|
||||||
|
@@ -78,14 +209,43 @@ class ApprovalService(object):
             version = 1
 
         model = ApprovalModel(study_id=study_id, workflow_id=workflow_id,
-                              approver_uid=approver_uid, status=ApprovalStatus.WAITING.value,
+                              approver_uid=approver_uid, status=ApprovalStatus.PENDING.value,
                               message="", date_created=datetime.now(),
                               version=version)
         approval_files = ApprovalService._create_approval_files(workflow_data_files, model)
+
+        # Check approvals count
+        approvals_count = ApprovalModel().query.filter_by(study_id=study_id, workflow_id=workflow_id,
+                                                          version=version).count()
+
         db.session.add(model)
         db.session.add_all(approval_files)
         db.session.commit()
+
+        # Send first email
+        if approvals_count == 0:
+            ldap_service = LdapService()
+            pi_user_info = ldap_service.user_info(model.study.primary_investigator_id)
+            approver_info = ldap_service.user_info(approver_uid)
+            # send rrp submission
+            mail_result = send_ramp_up_submission_email(
+                'askresearch@virginia.edu',
+                [pi_user_info.email_address],
+                f'{approver_info.display_name} - ({approver_info.uid})'
+            )
+            if mail_result:
+                app.logger.error(mail_result)
+            # send rrp approval request for first approver
+            # enhance the second part in case it bombs
+            approver_email = [approver_info.email_address] if approver_info.email_address else app.config['FALLBACK_EMAILS']
+            mail_result = send_ramp_up_approval_request_first_review_email(
+                'askresearch@virginia.edu',
+                approver_email,
+                f'{pi_user_info.display_name} - ({pi_user_info.uid})'
+            )
+            if mail_result:
+                app.logger.error(mail_result)
 
     @staticmethod
     def _create_approval_files(workflow_data_files, approval):
         """Currently based exclusively on the status of files associated with a workflow."""
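
A note on the calling convention visible in both hunks above: the send_* helpers added in crc/services/mails.py (later in this diff) return None when the message goes out and the exception text when sending fails, so callers treat a truthy return value as an error to log rather than wrapping each call in try/except. A minimal sketch of the pattern, with send_example standing in for any of the real helpers:

    mail_result = send_example(sender, recipients, approver_display)  # hypothetical helper name
    if mail_result:                    # truthy only when the helper caught an exception
        app.logger.error(mail_result)  # the helper returned str(e)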
@@ -5,11 +5,13 @@ from datetime import datetime
 from uuid import UUID
 from xml.etree import ElementTree
 
+import flask
 from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
 from pandas import ExcelFile
 from sqlalchemy import desc
+from sqlalchemy.exc import IntegrityError
 
-from crc import session
+from crc import session, app
 from crc.api.common import ApiError
 from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
 from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile
@@ -20,6 +22,14 @@ class FileService(object):
     DOCUMENT_LIST = "irb_documents.xlsx"
     INVESTIGATOR_LIST = "investigators.xlsx"
 
+    __doc_dictionary = None
+
+    @staticmethod
+    def get_doc_dictionary():
+        if not FileService.__doc_dictionary:
+            FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
+        return FileService.__doc_dictionary
+
     @staticmethod
     def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
                                name, content_type, binary_data, primary=False, is_status=False):
@@ -35,10 +45,8 @@ class FileService(object):
 
     @staticmethod
     def is_allowed_document(code):
-        data_model = FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
-        xls = ExcelFile(data_model.data)
-        df = xls.parse(xls.sheet_names[0])
-        return code in df['code'].values
+        doc_dict = FileService.get_doc_dictionary()
+        return code in doc_dict
 
     @staticmethod
     def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data):
@@ -86,6 +94,7 @@ class FileService(object):
     def get_workflow_files(workflow_id):
         """Returns all the file models associated with a running workflow."""
         return session.query(FileModel).filter(FileModel.workflow_id == workflow_id).\
+            filter(FileModel.archived == False).\
             order_by(FileModel.id).all()
 
     @staticmethod
@@ -117,7 +126,11 @@ class FileService(object):
 
         md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
         if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
-            # This file does not need to be updated, it's the same file.
+            # This file does not need to be updated, it's the same file. If it is archived,
+            # then de-archive it.
+            file_model.archived = False
+            session.add(file_model)
+            session.commit()
             return file_model
 
         # Verify the extension
@@ -129,6 +142,7 @@ class FileService(object):
         else:
             file_model.type = FileType[file_extension]
             file_model.content_type = content_type
+            file_model.archived = False  # Unarchive the file if it is archived.
 
         if latest_data_model is None:
             version = 1
@@ -178,7 +192,8 @@ class FileService(object):
     def get_files_for_study(study_id, irb_doc_code=None):
         query = session.query(FileModel).\
             join(WorkflowModel).\
-            filter(WorkflowModel.study_id == study_id)
+            filter(WorkflowModel.study_id == study_id).\
+            filter(FileModel.archived == False)
         if irb_doc_code:
             query = query.filter(FileModel.irb_doc_code == irb_doc_code)
         return query.all()
@@ -198,6 +213,9 @@ class FileService(object):
 
         if name:
             query = query.filter_by(name=name)
+
+        query = query.filter(FileModel.archived == False)
+
         query = query.order_by(FileModel.id)
 
         results = query.all()
@@ -260,11 +278,12 @@ class FileService(object):
 
     @staticmethod
     def get_workflow_file_data(workflow, file_name):
-        """Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
+        """This method should be deleted, find where it is used, and remove this method.
+        Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
         workflow_spec_model = FileService.find_spec_model_in_db(workflow)
 
         if workflow_spec_model is None:
-            raise ApiError(code="workflow_model_error",
+            raise ApiError(code="unknown_workflow",
                            message="Something is wrong. I can't find the workflow you are using.")
 
         file_data_model = session.query(FileDataModel) \
@@ -295,12 +314,21 @@ class FileService(object):
 
     @staticmethod
     def delete_file(file_id):
-        data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
-        for dm in data_models:
-            lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
-            for lf in lookup_files:
-                session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
-                session.query(LookupFileModel).filter_by(id=lf.id).delete()
-        session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
-        session.query(FileModel).filter_by(id=file_id).delete()
-        session.commit()
+        try:
+            data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
+            for dm in data_models:
+                lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
+                for lf in lookup_files:
+                    session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
+                    session.query(LookupFileModel).filter_by(id=lf.id).delete()
+            session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
+            session.query(FileModel).filter_by(id=file_id).delete()
+            session.commit()
+        except IntegrityError as ie:
+            # We can't delete the file or file data, because it is referenced elsewhere,
+            # but we can at least mark it as deleted on the table.
+            session.rollback()
+            file_model = session.query(FileModel).filter_by(id=file_id).first()
+            file_model.archived = True
+            session.commit()
+            app.logger.info("Failed to delete file, so archiving it instead. %i, due to %s" % (file_id, str(ie)))
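
The delete_file rewrite above is a soft-delete fallback: hard deletion is attempted first, and only when the database reports the row is still referenced does the code archive it instead. A minimal sketch of the same pattern, assuming any SQLAlchemy model with an archived flag (delete_or_archive is a hypothetical helper, not part of the commit):

    from sqlalchemy.exc import IntegrityError

    def delete_or_archive(session, model_cls, row_id):
        try:
            session.query(model_cls).filter_by(id=row_id).delete()
            session.commit()
        except IntegrityError:
            # Referenced elsewhere; keep the row but hide it from queries.
            session.rollback()
            row = session.query(model_cls).filter_by(id=row_id).first()
            row.archived = True
            session.commit()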
@@ -1,84 +1,90 @@
 import os
 
-from crc import app
-from ldap3 import Connection, Server, MOCK_SYNC
+from attr import asdict
+from ldap3.core.exceptions import LDAPExceptionError
+
+from crc import app, db
+from ldap3 import Connection, Server, MOCK_SYNC, RESTARTABLE
 
 from crc.api.common import ApiError
+from crc.models.ldap import LdapModel, LdapSchema
 
 
-class LdapUserInfo(object):
-
-    def __init__(self):
-        self.display_name = ''
-        self.given_name = ''
-        self.email_address = ''
-        self.telephone_number = ''
-        self.title = ''
-        self.department = ''
-        self.affiliation = ''
-        self.sponsor_type = ''
-        self.uid = ''
-
-    @classmethod
-    def from_entry(cls, entry):
-        instance = cls()
-        instance.display_name = entry.displayName.value
-        instance.given_name = ", ".join(entry.givenName)
-        instance.email_address = entry.mail.value
-        instance.telephone_number = ", ".join(entry.telephoneNumber)
-        instance.title = ", ".join(entry.title)
-        instance.department = ", ".join(entry.uvaDisplayDepartment)
-        instance.affiliation = ", ".join(entry.uvaPersonIAMAffiliation)
-        instance.sponsor_type = ", ".join(entry.uvaPersonSponsoredType)
-        instance.uid = entry.uid.value
-        return instance
-
-
 class LdapService(object):
     search_base = "ou=People,o=University of Virginia,c=US"
     attributes = ['uid', 'cn', 'sn', 'displayName', 'givenName', 'mail', 'objectClass', 'UvaDisplayDepartment',
                   'telephoneNumber', 'title', 'uvaPersonIAMAffiliation', 'uvaPersonSponsoredType']
     uid_search_string = "(&(objectclass=person)(uid=%s))"
-    user_or_last_name_search_string = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"
+    user_or_last_name_search = "(&(objectclass=person)(|(uid=%s*)(sn=%s*)))"
+    cn_single_search = '(&(objectclass=person)(cn=%s*))'
+    cn_double_search = '(&(objectclass=person)(&(cn=%s*)(cn=*%s*)))'
+    temp_cache = {}
+    conn = None
 
-    def __init__(self):
-        if app.config['TESTING']:
-            server = Server('my_fake_server')
-            self.conn = Connection(server, client_strategy=MOCK_SYNC)
-            file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
-            self.conn.strategy.entries_from_json(file_path)
-            self.conn.bind()
-        else:
-            server = Server(app.config['LDAP_URL'], connect_timeout=app.config['LDAP_TIMEOUT_SEC'])
-            self.conn = Connection(server,
-                                   auto_bind=True,
-                                   receive_timeout=app.config['LDAP_TIMEOUT_SEC'],
-                                   )
-
-    def __del__(self):
-        if self.conn:
-            self.conn.unbind()
-
-    def user_info(self, uva_uid):
-        search_string = LdapService.uid_search_string % uva_uid
-        self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
-        if len(self.conn.entries) < 1:
-            raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
-        entry = self.conn.entries[0]
-        return LdapUserInfo.from_entry(entry)
-
-    def search_users(self, query, limit):
-        if len(query) < 3: return []
-        search_string = LdapService.user_or_last_name_search_string % (query, query)
-        self.conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
-
-        # Entries are returned as a generator, accessing entries
-        # can make subsequent calls to the ldap service, so limit
-        # those here.
-        count = 0
-        results = []
-        for entry in self.conn.entries:
-            if count > limit:
-                break
-            results.append(LdapUserInfo.from_entry(entry))
-            count += 1
-        return results
+    @staticmethod
+    def __get_conn():
+        if not LdapService.conn:
+            if app.config['TESTING']:
+                server = Server('my_fake_server')
+                conn = Connection(server, client_strategy=MOCK_SYNC)
+                file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
+                conn.strategy.entries_from_json(file_path)
+                conn.bind()
+            else:
+                server = Server(app.config['LDAP_URL'], connect_timeout=app.config['LDAP_TIMEOUT_SEC'])
+                conn = Connection(server, auto_bind=True,
+                                  receive_timeout=app.config['LDAP_TIMEOUT_SEC'],
+                                  client_strategy=RESTARTABLE)
+            LdapService.conn = conn
+        return LdapService.conn
+
+    @staticmethod
+    def user_info(uva_uid):
+        user_info = db.session.query(LdapModel).filter(LdapModel.uid == uva_uid).first()
+        if not user_info:
+            app.logger.info("No cache for " + uva_uid)
+            search_string = LdapService.uid_search_string % uva_uid
+            conn = LdapService.__get_conn()
+            conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
+            if len(conn.entries) < 1:
+                raise ApiError("missing_ldap_record", "Unable to locate a user with id %s in LDAP" % uva_uid)
+            entry = conn.entries[0]
+            user_info = LdapModel.from_entry(entry)
+            db.session.add(user_info)
+            db.session.commit()
+        return user_info
+
+    @staticmethod
+    def search_users(query, limit):
+        if len(query.strip()) < 3:
+            return []
+        elif query.endswith(' '):
+            search_string = LdapService.cn_single_search % (query.strip())
+        elif query.strip().count(',') == 1:
+            f, l = query.split(",")
+            search_string = LdapService.cn_double_search % (l.strip(), f.strip())
+        elif query.strip().count(' ') == 1:
+            f, l = query.split(" ")
+            search_string = LdapService.cn_double_search % (f, l)
+        else:
+            # Search by user_id or last name
+            search_string = LdapService.user_or_last_name_search % (query, query)
+
+        results = []
+        app.logger.info(search_string)
+        try:
+            conn = LdapService.__get_conn()
+            conn.search(LdapService.search_base, search_string, attributes=LdapService.attributes)
+            # Entries are returned as a generator, accessing entries
+            # can make subsequent calls to the ldap service, so limit
+            # those here.
+            count = 0
+            for entry in conn.entries:
+                if count > limit:
+                    break
+                results.append(LdapSchema().dump(LdapModel.from_entry(entry)))
+                count += 1
+        except LDAPExceptionError as le:
+            app.logger.info("Failed to execute ldap search. %s", str(le))
+        return results
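
The rewritten LdapService is now a write-through cache: user_info first checks the ldap_model table and only falls back to a live LDAP search (persisting the result) on a miss, while the class-level conn is created lazily and reused. Illustrative use, a sketch assuming a uid such as the dhf8r test fixture seen elsewhere in this diff:

    from crc.services.ldap_service import LdapService

    info = LdapService.user_info('dhf8r')  # first call searches LDAP and caches an LdapModel row
    info = LdapService.user_info('dhf8r')  # subsequent calls are served from the database
    matches = LdapService.search_users('Funk', 10)  # returns LdapSchema dicts, not model objects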
@@ -103,7 +103,7 @@ class LookupService(object):
                                                           workflow_id=workflow_model.id,
                                                           name=file_name)
         if len(latest_files) < 1:
-            raise ApiError("missing_file", "Unable to locate the lookup data file '%s'" % file_name)
+            raise ApiError("invalid_enum", "Unable to locate the lookup data file '%s'" % file_name)
         else:
             data_model = latest_files[0]
@@ -189,15 +189,15 @@ class LookupService(object):
 
     @staticmethod
     def _run_ldap_query(query, limit):
-        users = LdapService().search_users(query, limit)
+        users = LdapService.search_users(query, limit)
 
         """Converts the user models into something akin to the
         LookupModel in models/file.py, so this can be returned in the same way
         we return a lookup data model."""
         user_list = []
         for user in users:
-            user_list.append( {"value": user.uid,
-                               "label": user.display_name + " (" + user.uid + ")",
-                               "data": user.__dict__
+            user_list.append( {"value": user['uid'],
+                               "label": user['display_name'] + " (" + user['uid'] + ")",
+                               "data": user
                                })
         return user_list
@@ -0,0 +1,128 @@
+import os
+
+from flask import render_template, render_template_string
+from flask_mail import Message
+
+
+# TODO: Extract common mailing code into its own function
+def send_test_email(sender, recipients):
+    try:
+        msg = Message('Research Ramp-up Plan test',
+                      sender=sender,
+                      recipients=recipients)
+        from crc import env, mail
+        template = env.get_template('ramp_up_approval_request_first_review.txt')
+        template_vars = {'primary_investigator': "test"}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_approval_request_first_review.html')
+        msg.html = template.render(template_vars)
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None):
+    try:
+        msg = Message('Research Ramp-up Plan Submitted',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+        from crc import env, mail
+        template = env.get_template('ramp_up_submission.txt')
+        template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_submission.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_approval_request_email(sender, recipients, primary_investigator):
+    try:
+        msg = Message('Research Ramp-up Plan Approval Request',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+        from crc import env, mail
+        template = env.get_template('ramp_up_approval_request.txt')
+        template_vars = {'primary_investigator': primary_investigator}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_approval_request.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator):
+    try:
+        msg = Message('Research Ramp-up Plan Approval Request',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+        from crc import env, mail
+        template = env.get_template('ramp_up_approval_request_first_review.txt')
+        template_vars = {'primary_investigator': primary_investigator}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_approval_request_first_review.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None):
+    try:
+        msg = Message('Research Ramp-up Plan Approved',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+
+        from crc import env, mail
+        template = env.get_template('ramp_up_approved.txt')
+        template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_approved.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_denied_email(sender, recipients, approver):
+    try:
+        msg = Message('Research Ramp-up Plan Denied',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+
+        from crc import env, mail
+        template = env.get_template('ramp_up_denied.txt')
+        template_vars = {'approver': approver}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_denied.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
+
+
+def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2):
+    try:
+        msg = Message('Research Ramp-up Plan Denied',
+                      sender=sender,
+                      recipients=recipients,
+                      bcc=['rrt_emails@googlegroups.com'])
+
+        from crc import env, mail
+        template = env.get_template('ramp_up_denied_first_approver.txt')
+        template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2}
+        msg.body = template.render(template_vars)
+        template = env.get_template('ramp_up_denied_first_approver.html')
+        msg.html = template.render(template_vars)
+
+        mail.send(msg)
+    except Exception as e:
+        return str(e)
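
Every sender above repeats the same render-and-send block, which is what the TODO at the top of the file points at. One way the common code could be factored out (a sketch only, not part of this commit; send_mail is a hypothetical helper name):

    def send_mail(subject, sender, recipients, template_base, template_vars, bcc=None):
        try:
            from crc import env, mail
            msg = Message(subject, sender=sender, recipients=recipients, bcc=bcc)
            msg.body = env.get_template(template_base + '.txt').render(template_vars)
            msg.html = env.get_template(template_base + '.html').render(template_vars)
            mail.send(msg)
        except Exception as e:
            # Keep the existing convention: return the error text on failure, None on success.
            return str(e)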
@@ -25,7 +25,7 @@ class ProtocolBuilderService(object):
     def get_studies(user_id) -> {}:
         ProtocolBuilderService.__enabled_or_raise()
         if not isinstance(user_id, str):
-            raise ApiError("invalid_user_id", "This user id is invalid: " + str(user_id))
+            raise ApiError("protocol_builder_error", "This user id is invalid: " + str(user_id))
         response = requests.get(ProtocolBuilderService.STUDY_URL % user_id)
         if response.ok and response.text:
             pb_studies = ProtocolBuilderStudySchema(many=True).loads(response.text)
@@ -4,11 +4,13 @@ from typing import List
 
 import requests
 from SpiffWorkflow import WorkflowException
+from SpiffWorkflow.exceptions import WorkflowTaskExecException
 from ldap3.core.exceptions import LDAPSocketOpenError
 
 from crc import db, session, app
 from crc.api.common import ApiError
-from crc.models.file import FileModel, FileModelSchema
+from crc.models.file import FileModel, FileModelSchema, File
+from crc.models.ldap import LdapSchema
 from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
 from crc.models.stats import TaskEventModel
 from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
@@ -18,6 +20,8 @@ from crc.services.file_service import FileService
 from crc.services.ldap_service import LdapService
 from crc.services.protocol_builder import ProtocolBuilderService
 from crc.services.workflow_processor import WorkflowProcessor
+from crc.services.approval_service import ApprovalService
+from crc.models.approval import Approval
 
 
 class StudyService(object):
@@ -53,7 +57,11 @@ class StudyService(object):
         study = Study.from_model(study_model)
         study.categories = StudyService.get_categories()
         workflow_metas = StudyService.__get_workflow_metas(study_id)
-        study.files = FileService.get_files_for_study(study.id)
+        study.approvals = ApprovalService.get_approvals_for_study(study.id)
+        files = FileService.get_files_for_study(study.id)
+        files = (File.from_models(model, FileService.get_file_data(model.id),
+                                  FileService.get_doc_dictionary()) for model in files)
+        study.files = list(files)
 
         # Calling this line repeatedly is very very slow. It creates the
         # master spec and runs it.
@@ -78,8 +86,8 @@ class StudyService(object):
     def delete_workflow(workflow):
         for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
             FileService.delete_file(file.id)
-        for deb in workflow.dependencies:
-            session.delete(deb)
+        for dep in workflow.dependencies:
+            session.delete(dep)
         session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
         session.query(WorkflowModel).filter_by(id=workflow.id).delete()
@@ -174,6 +182,7 @@ class StudyService(object):
         return documents
 
+
     @staticmethod
     def get_investigators(study_id):
@@ -197,8 +206,7 @@ class StudyService(object):
     @staticmethod
     def get_ldap_dict_if_available(user_id):
         try:
-            ldap_service = LdapService()
-            return ldap_service.user_info(user_id).__dict__
+            return LdapSchema().dump(LdapService().user_info(user_id))
         except ApiError as ae:
             app.logger.info(str(ae))
             return {"error": str(ae)}
@@ -309,8 +317,10 @@ class StudyService(object):
         for workflow_spec in new_specs:
             try:
                 StudyService._create_workflow_model(study_model, workflow_spec)
+            except WorkflowTaskExecException as wtee:
+                errors.append(ApiError.from_task("workflow_startup_exception", str(wtee), wtee.task))
             except WorkflowException as we:
-                errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
+                errors.append(ApiError.from_task_spec("workflow_startup_exception", str(we), we.sender))
         return errors
 
     @staticmethod
@@ -299,21 +299,27 @@ class WorkflowProcessor(object):
         return WorkflowStatus.waiting
 
     def hard_reset(self):
-        """Recreate this workflow, but keep the data from the last completed task and add it back into the first task.
-        This may be useful when a workflow specification changes, and users need to review all the
-        prior steps, but don't need to reenter all the previous data.
+        """Recreate this workflow, but keep the data from the last completed task and add
+        it back into the first task. This may be useful when a workflow specification changes,
+        and users need to review all the prior steps, but they don't need to reenter all the previous data.
 
         Returns the new version.
         """
+
+        # Create a new workflow based on the latest specs.
         self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id)
-        spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
-        # spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
-        bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
-        bpmn_workflow.data = self.bpmn_workflow.data
-        for task in bpmn_workflow.get_tasks(SpiffTask.READY):
-            task.data = self.bpmn_workflow.last_task.data
-        bpmn_workflow.do_engine_steps()
-        self.bpmn_workflow = bpmn_workflow
+        new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
+        new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine)
+        new_bpmn_workflow.data = self.bpmn_workflow.data
+
+        # Reset the current workflow to the beginning - which we will consider to be the first task after the root
+        # element. This feels a little sketchy, but I think it is safe to assume root will have one child.
+        first_task = self.bpmn_workflow.task_tree.children[0]
+        first_task.reset_token(reset_data=False)
+        for task in new_bpmn_workflow.get_tasks(SpiffTask.READY):
+            task.data = first_task.data
+        new_bpmn_workflow.do_engine_steps()
+        self.bpmn_workflow = new_bpmn_workflow
 
     def get_status(self):
         return self.status_of(self.bpmn_workflow)
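
For orientation: hard_reset is called on a processor that already wraps a running workflow. The reworked version rebuilds the BpmnWorkflow from the latest spec files and, instead of copying the last completed task's data into every ready task, resets the token to the first task and carries that task's data forward. A sketch of typical use, assuming a workflow_model loaded from the database as elsewhere in this diff:

    processor = WorkflowProcessor(workflow_model)
    processor.hard_reset()  # recreate from the latest spec, keeping previously entered data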
@@ -7,7 +7,6 @@ from SpiffWorkflow import Task as SpiffTask, WorkflowException
 from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
 from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
 from SpiffWorkflow.bpmn.specs.UserTask import UserTask
-from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
 from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
 from SpiffWorkflow.specs import CancelTask, StartTask
 from flask import g
@@ -17,7 +16,6 @@ from crc import db, app
 from crc.api.common import ApiError
 from crc.models.api_models import Task, MultiInstanceType
 from crc.models.file import LookupDataModel
-from crc.models.protocol_builder import ProtocolBuilderStatus
 from crc.models.stats import TaskEventModel
 from crc.models.study import StudyModel
 from crc.models.user import UserModel
@@ -39,7 +37,9 @@ class WorkflowService(object):
     the workflow Processor should be hidden behind this service.
     This will help maintain a structure that avoids circular dependencies.
     But for now, this contains tools for converting spiff-workflow models into our
-    own API models with additional information and capabilities."""
+    own API models with additional information and capabilities and
+    handles the testing of a workflow specification by completing it with
+    random selections, attempting to mimic a front end as much as possible. """
 
     @staticmethod
     def make_test_workflow(spec_id):
@@ -58,15 +58,23 @@ class WorkflowService(object):
 
     @staticmethod
     def delete_test_data():
-        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
+        for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
             StudyService.delete_study(study.id)
             db.session.commit()
-        db.session.query(UserModel).filter_by(uid="test").delete()
+
+        user = db.session.query(UserModel).filter_by(uid="test").first()
+        if user:
+            db.session.delete(user)
 
     @staticmethod
-    def test_spec(spec_id):
-        """Runs a spec through it's paces to see if it results in any errors. Not fool-proof, but a good
-        sanity check."""
+    def test_spec(spec_id, required_only=False):
+        """Runs a spec through its paces to see if it results in any errors.
+        Not fool-proof, but a good sanity check. Returns the final data
+        output from the last task if successful.
+
+        required_only can be set to true, in which case this will run the
+        spec, only completing the required fields, rather than everything.
+        """
 
         workflow_model = WorkflowService.make_test_workflow(spec_id)
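
The new required_only flag effectively gives each specification two validation passes. A sketch of how a caller might exercise both (the spec id 'my_spec' is hypothetical):

    # Fill every field with random data, then repeat filling only required fields.
    final_data = WorkflowService.test_spec('my_spec')
    final_data_required = WorkflowService.test_spec('my_spec', required_only=True)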
@@ -74,8 +82,7 @@ class WorkflowService(object):
             processor = WorkflowProcessor(workflow_model, validate_only=True)
         except WorkflowException as we:
             WorkflowService.delete_test_data()
-            raise ApiError.from_task_spec("workflow_execution_exception", str(we),
-                                          we.sender)
+            raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
 
         while not processor.bpmn_workflow.is_completed():
             try:
@@ -85,38 +92,57 @@ class WorkflowService(object):
                 task_api = WorkflowService.spiff_task_to_api_task(
                     task,
                     add_docs_and_forms=True)  # Assure we try to process the documentation, and raise those errors.
-                WorkflowService.populate_form_with_random_data(task, task_api)
+                WorkflowService.populate_form_with_random_data(task, task_api, required_only)
                 task.complete()
             except WorkflowException as we:
                 WorkflowService.delete_test_data()
-                raise ApiError.from_task_spec("workflow_execution_exception", str(we),
-                                              we.sender)
+                raise ApiError.from_workflow_exception("workflow_validation_exception", str(we), we)
         WorkflowService.delete_test_data()
+        return processor.bpmn_workflow.last_task.data
 
     @staticmethod
-    def populate_form_with_random_data(task, task_api):
+    def populate_form_with_random_data(task, task_api, required_only):
         """populates a task with random data - useful for testing a spec."""
 
         if not hasattr(task.task_spec, 'form'): return
 
-        form_data = {}
+        form_data = task.data  # Just like with the front end, we start with what was already there, and modify it.
         for field in task_api.form.fields:
-            if field.type == "enum":
-                if len(field.options) > 0:
-                    random_choice = random.choice(field.options)
-                    if isinstance(random_choice, dict):
-                        form_data[field.id] = random.choice(field.options)['id']
-                    else:
-                        # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
-                        form_data[field.id] = random_choice.id  ## Assume it is an EnumFormFieldOption
-                else:
-                    raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
-                                             " with no options" % field.id,
-                                             task)
-            elif field.type == "autocomplete":
-                lookup_model = LookupService.get_lookup_model(task, field)
-                if field.has_property(Task.PROP_LDAP_LOOKUP):
-                    form_data[field.id] = {
+            if required_only and (not field.has_validation(Task.VALIDATION_REQUIRED) or
+                                  field.get_validation(Task.VALIDATION_REQUIRED).lower().strip() != "true"):
+                continue  # Don't include any fields that aren't specifically marked as required.
+            if field.has_property("read_only") and field.get_property("read_only").lower().strip() == "true":
+                continue  # Don't mess about with read only fields.
+            if field.has_property(Task.PROP_OPTIONS_REPEAT):
+                group = field.get_property(Task.PROP_OPTIONS_REPEAT)
+                if group not in form_data:
+                    form_data[group] = [{}, {}, {}]
+                for i in range(3):
+                    form_data[group][i][field.id] = WorkflowService.get_random_data_for_field(field, task)
+            else:
+                form_data[field.id] = WorkflowService.get_random_data_for_field(field, task)
+        if task.data is None:
+            task.data = {}
+        task.data.update(form_data)
+
+    @staticmethod
+    def get_random_data_for_field(field, task):
+        if field.type == "enum":
+            if len(field.options) > 0:
+                random_choice = random.choice(field.options)
+                if isinstance(random_choice, dict):
+                    return random.choice(field.options)['id']
+                else:
+                    # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
+                    return random_choice.id  ## Assume it is an EnumFormFieldOption
+            else:
+                raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
+                                         " with no options" % field.id, task)
+        elif field.type == "autocomplete":
+            lookup_model = LookupService.get_lookup_model(task, field)
+            if field.has_property(Task.PROP_LDAP_LOOKUP):  # All ldap records get the same person.
+                return {
                     "label": "dhf8r",
                     "value": "Dan Funk",
                     "data": {
@@ -126,32 +152,30 @@ class WorkflowService(object):
                         "email_address": "dhf8r@virginia.edu",
                         "department": "Depertment of Psychocosmographictology",
                         "affiliation": "Rousabout",
-                        "sponsor_type": "Staff"
-                    }
-                }
-            elif lookup_model:
-                data = db.session.query(LookupDataModel).filter(
-                    LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
-                options = []
-                for d in data:
-                    options.append({"id": d.value, "name": d.label})
-                form_data[field.id] = random.choice(options)
-            else:
-                raise ApiError.from_task("invalid_autocomplete", "The settings for this auto complete field "
-                                         "are incorrect: %s " % field.id, task)
-            elif field.type == "long":
-                form_data[field.id] = random.randint(1, 1000)
-            elif field.type == 'boolean':
-                form_data[field.id] = random.choice([True, False])
-            elif field.type == 'file':
-                form_data[field.id] = random.randint(1, 100)
-            elif field.type == 'files':
-                form_data[field.id] = random.randrange(1, 100)
-            else:
-                form_data[field.id] = WorkflowService._random_string()
-        if task.data is None:
-            task.data = {}
-        task.data.update(form_data)
+                        "sponsor_type": "Staff"}
+                }
+            elif lookup_model:
+                data = db.session.query(LookupDataModel).filter(
+                    LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
+                options = []
+                for d in data:
+                    options.append({"id": d.value, "label": d.label})
+                return random.choice(options)
+            else:
+                raise ApiError.from_task("unknown_lookup_option", "The settings for this auto complete field "
+                                         "are incorrect: %s " % field.id, task)
+        elif field.type == "long":
+            return random.randint(1, 1000)
+        elif field.type == 'boolean':
+            return random.choice([True, False])
+        elif field.type == 'file':
+            # fixme: produce something sensible for files.
+            return random.randint(1, 100)
+        # fixme: produce something sensible for files.
+        elif field.type == 'files':
+            return random.randrange(1, 100)
+        else:
+            return WorkflowService._random_string()
 
     def __get_options(self):
         pass
@@ -272,10 +296,11 @@ class WorkflowService(object):
             template = Template(raw_doc)
             return template.render(**spiff_task.data)
         except jinja2.exceptions.TemplateError as ue:
-            # return "Error processing template. %s" % ue.message
-            raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
-                                                          (spiff_task.task_spec.name, str(ue)), status_code=500)
+            raise ApiError.from_task(code="template_error", message="Error processing template for task %s: %s" %
+                                                                    (spiff_task.task_spec.name, str(ue)), task=spiff_task)
+        except TypeError as te:
+            raise ApiError.from_task(code="template_error", message="Error processing template for task %s: %s" %
+                                                                    (spiff_task.task_spec.name, str(te)), task=spiff_task)
         # TODO: Catch additional errors and report back.
 
     @staticmethod
@@ -293,12 +318,12 @@ class WorkflowService(object):
             field.options.append({"id": d.value, "name": d.label})
 
     @staticmethod
-    def log_task_action(processor, spiff_task, action):
+    def log_task_action(user_uid, processor, spiff_task, action):
         task = WorkflowService.spiff_task_to_api_task(spiff_task)
         workflow_model = processor.workflow_model
         task_event = TaskEventModel(
             study_id=workflow_model.study_id,
-            user_uid=g.user.uid,
+            user_uid=user_uid,
             workflow_id=workflow_model.id,
             workflow_spec_id=workflow_model.workflow_spec_id,
             spec_version=processor.get_version_string(),
Binary file not shown.
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" id="Definitions_06veek1" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
+  <decision id="Decision_ExclusiveAMCheck" name="Exclusive AM Check">
+    <decisionTable id="decisionTable_1">
+      <input id="InputClause_1z0jy2o" label="How Many Exclusive Spaces?">
+        <inputExpression id="LiteralExpression_0tvij2j" typeRef="integer" expressionLanguage="python">
+          <text>len(exclusive)</text>
+        </inputExpression>
+      </input>
+      <input id="input_1" label="Number Without Area Monitor">
+        <inputExpression id="inputExpression_1" typeRef="integer" expressionLanguage="python">
+          <text>sum([1 for x in exclusive if x.get('ExclusiveSpaceAMComputingID',None) == None])</text>
+        </inputExpression>
+      </input>
+      <output id="output_1" label="All Possible Area Monitors Entered" name="isAllExclusiveAreaMonitors" typeRef="boolean" />
+      <rule id="DecisionRule_07162mr">
+        <description>No exclusive spaces without Area Monitor</description>
+        <inputEntry id="UnaryTests_1892rx8">
+          <text>>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_1jqxc3u">
+          <text>0</text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_16l50ps">
+          <text>true</text>
+        </outputEntry>
+      </rule>
+      <rule id="DecisionRule_0ifa4wu">
+        <description>One or more exclusive spaces without an Area Monitor</description>
+        <inputEntry id="UnaryTests_1jakyab">
+          <text>>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_0szbwxc">
+          <text>> 0</text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_0td8sa6">
+          <text>false</text>
+        </outputEntry>
+      </rule>
+      <rule id="DecisionRule_026r0im">
+        <description>No exclusive spaces entered</description>
+        <inputEntry id="UnaryTests_0c670b6">
+          <text>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_0j06ysc">
+          <text></text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_1apwzvv">
+          <text>true</text>
+        </outputEntry>
+      </rule>
+    </decisionTable>
+  </decision>
+</definitions>
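
The DMN table above encodes a completeness check whose input expressions are plain Python over the workflow data. The same decision written out directly, as a sketch (the exclusive list mirrors the task data the workflow supplies):

    exclusive = [{'ExclusiveSpaceAMComputingID': 'ab1cd'}, {}]  # hypothetical task data
    missing = sum([1 for x in exclusive if x.get('ExclusiveSpaceAMComputingID', None) == None])
    # Output is true when no spaces were entered, or when no entered space lacks an Area Monitor.
    isAllExclusiveAreaMonitors = len(exclusive) == 0 or missing == 0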
File diff suppressed because it is too large
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" id="Definitions_06veek1" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
+  <decision id="Decision_SharedAMCheck" name="Shared AM Check">
+    <decisionTable id="decisionTable_1">
+      <input id="InputClause_1koybx6" label="How Many Shared Spaces">
+        <inputExpression id="LiteralExpression_1mjo0y4" typeRef="integer" expressionLanguage="python">
+          <text>len(shared)</text>
+        </inputExpression>
+      </input>
+      <input id="input_1" label="Number Without Area Monitor">
+        <inputExpression id="inputExpression_1" typeRef="integer" expressionLanguage="python">
+          <text>sum([1 for x in shared if x.get('SharedSpaceAMComputingID',None) == None])</text>
+        </inputExpression>
+      </input>
+      <output id="output_1" label="All Possible Shared Area Monitors Entered" name="isAllSharedAreaMonitors" typeRef="boolean" />
+      <rule id="DecisionRule_07162mr">
+        <description>No shared spaces without Area Monitor</description>
+        <inputEntry id="UnaryTests_1p4ab2l">
+          <text>>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_1jqxc3u">
+          <text>0</text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_16l50ps">
+          <text>true</text>
+        </outputEntry>
+      </rule>
+      <rule id="DecisionRule_0ifa4wu">
+        <description>One or more shared spaces without an Area Monitor</description>
+        <inputEntry id="UnaryTests_06bujee">
+          <text>>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_0szbwxc">
+          <text>> 0</text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_0td8sa6">
+          <text>false</text>
+        </outputEntry>
+      </rule>
+      <rule id="DecisionRule_1uh85sk">
+        <description>No shared spaces entered</description>
+        <inputEntry id="UnaryTests_15grk62">
+          <text>0</text>
+        </inputEntry>
+        <inputEntry id="UnaryTests_1gaiomm">
+          <text></text>
+        </inputEntry>
+        <outputEntry id="LiteralExpression_1iep8ai">
+          <text>true</text>
+        </outputEntry>
+      </rule>
+    </decisionTable>
+  </decision>
+</definitions>
Binary file not shown.
@@ -0,0 +1,2 @@
+<p>A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+<a href="https://rrt.uvadcos.io/app/approvals">Research Ramp-up Toolkit</a></p>

@@ -0,0 +1,2 @@
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit: https://rrt.uvadcos.io/app/approvals.

@@ -0,0 +1,2 @@
+<p>A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+<a href="https://rrt.uvadcos.io/app/approvals">Research Ramp-up Toolkit</a>.</p>

@@ -0,0 +1,2 @@
+A Research Ramp-up approval request from {{ primary_investigator }} is now available for your review in your
+Research Ramp-up Toolkit at https://rrt.uvadcos.io/app/approvals.

@@ -0,0 +1 @@
+<p>Your Research Ramp-up Plan has been approved by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}</p>

@@ -0,0 +1 @@
+Your Research Ramp-up Plan has been approved by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}

@@ -0,0 +1 @@
+<p>Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit it for approval.</p>

@@ -0,0 +1 @@
+Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps, saving your work along the way. Review your revised Research Ramp-up Plan and re-submit it for approval.

@@ -0,0 +1 @@
+<p>The Research Ramp-up Plan submitted by {{ primary_investigator }} was denied by {{ approver_2 }} and returned for requested updates. You may see comments related to this denial on your Research Ramp-up Toolkit Approval dashboard.</p>

@@ -0,0 +1 @@
+The Research Ramp-up Plan submitted by {{ primary_investigator }} was denied by {{ approver_2 }} and returned for requested updates. You may see comments related to this denial on your Research Ramp-up Toolkit Approval dashboard.

@@ -0,0 +1,5 @@
+<p>Your Research Ramp-up Plan (RRP) has been submitted for review by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}. After completion of the review step you will receive email notification of its approval, or, if additional information and/or modifications are required, instructions on how to proceed. Return to the Research Ramp-up Plan application to proceed as instructed.</p>
+
+<p>In the meantime, please make sure all required training has been completed and needed supplies secured. You will be asked to confirm that both of these requirements have been met before reopening the research space approved in your RRP.</p>
+
+<p>Additionally, if there are any unknown Area Monitors for the spaces listed in your RRP, please contact your approvers to determine either who they are or how you can find out. Missing Area Monitors will need to be entered before proceeding as well.</p>

@@ -0,0 +1,5 @@
+Your Research Ramp-up Plan (RRP) has been submitted for review by {{ approver_1 }} {% if approver_2 %}and {{ approver_2 }} {% endif %}. After completion of the review step you will receive email notification of its approval, or, if additional information and/or modifications are required, instructions on how to proceed. Return to the Research Ramp-up Plan application to proceed as instructed.
+
+In the meantime, please make sure all required training has been completed and needed supplies secured. You will be asked to confirm that both of these requirements have been met before reopening the research space approved in your RRP.
+
+Additionally, if there are any unknown Area Monitors for the spaces listed in your RRP, please contact your approvers to determine either who they are or how you can find out. Missing Area Monitors will need to be entered before proceeding as well.
@@ -0,0 +1,42 @@
+"""empty message
+
+Revision ID: 13424d5a6de8
+Revises: 5064b72284b7
+Create Date: 2020-06-02 18:17:29.990159
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '13424d5a6de8'
+down_revision = '5064b72284b7'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table('ldap_model',
+                    sa.Column('uid', sa.String(), nullable=False),
+                    sa.Column('display_name', sa.String(), nullable=True),
+                    sa.Column('given_name', sa.String(), nullable=True),
+                    sa.Column('email_address', sa.String(), nullable=True),
+                    sa.Column('telephone_number', sa.String(), nullable=True),
+                    sa.Column('title', sa.String(), nullable=True),
+                    sa.Column('department', sa.String(), nullable=True),
+                    sa.Column('affiliation', sa.String(), nullable=True),
+                    sa.Column('sponsor_type', sa.String(), nullable=True),
+                    sa.Column('date_cached', sa.DateTime(timezone=True), nullable=True),
+                    sa.PrimaryKeyConstraint('uid')
+                    )
+    op.add_column('approval', sa.Column('date_approved', sa.DateTime(timezone=True), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('approval', 'date_approved')
+    op.drop_table('ldap_model')
+    # ### end Alembic commands ###
@@ -0,0 +1,28 @@
+"""empty message
+
+Revision ID: 17597692d0b0
+Revises: 13424d5a6de8
+Create Date: 2020-06-03 17:33:56.454339
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '17597692d0b0'
+down_revision = '13424d5a6de8'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('file', sa.Column('archived', sa.Boolean(), nullable=True, default=False))
+    op.execute("UPDATE file SET archived = false")
+    op.alter_column('file', 'archived', nullable=False)
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('file', 'archived')
+    # ### end Alembic commands ###

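The second revision uses the usual three-step pattern for introducing a NOT NULL column on a populated table: add it as nullable, backfill existing rows, then tighten the constraint. Both revisions apply in order with the standard Alembic API; a sketch, assuming an alembic.ini at the project root wired to the app's database:

    # Sketch: apply (or roll back) these revisions programmatically.
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")      # assumed location of the Alembic config
    command.upgrade(cfg, "head")     # runs 13424d5a6de8, then 17597692d0b0
    command.downgrade(cfg, "-1")     # rolls back the most recent revision
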
@@ -0,0 +1,3 @@
+{
+  "lockfileVersion": 1
+}

@@ -2,24 +2,27 @@
 # IMPORTANT - Environment must be loaded before app, models, etc....
 import os
 
-from sqlalchemy import Sequence
 
 os.environ["TESTING"] = "true"
 
 import json
 import unittest
 import urllib.parse
 import datetime
-from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import StudyModel
-from crc.services.file_service import FileService
-from crc.services.study_service import StudyService
-from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
-from crc.models.user import UserModel
+
+from flask import g
+from sqlalchemy import Sequence
 
 from crc import app, db, session
+from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_service import WorkflowService
 from example_data import ExampleDataLoader
 
 #UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@@ -95,7 +98,7 @@ class BaseTest(unittest.TestCase):
 
     def tearDown(self):
         ExampleDataLoader.clean_db()
-        session.flush()
+        g.user = None
        self.auths = {}
 
     def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
@@ -107,23 +110,28 @@ class BaseTest(unittest.TestCase):
         user_info = {'uid': user.uid}
 
         query_string = self.user_info_to_query_string(user_info, redirect_url)
-        rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)
+        rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
         self.assertTrue(rv.status_code == 302)
         self.assertTrue(str.startswith(rv.location, redirect_url))
 
         user_model = session.query(UserModel).filter_by(uid=uid).first()
         self.assertIsNotNone(user_model.display_name)
+        self.assertEqual(user_model.uid, uid)
+        self.assertTrue('user' in g, 'User should be in Flask globals')
+        self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid')
 
         return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
 
-    def load_example_data(self, use_crc_data=False):
-        """use_crc_data will cause this to load the mammoth collection of documents
-        we built up developing crc, otherwise it depends on a small setup for
-        running tests."""
+    def load_example_data(self, use_crc_data=False, use_rrt_data=False):
+        """use_crc_data will cause this to load the mammoth collection of documents
+        we built up developing crc, use_rrt_data will do the same for the rrt project,
+        otherwise it depends on a small setup for running tests."""
 
         from example_data import ExampleDataLoader
         ExampleDataLoader.clean_db()
-        if(use_crc_data):
+        if use_crc_data:
             ExampleDataLoader().load_all()
+        elif use_rrt_data:
+            ExampleDataLoader().load_rrt()
         else:
             ExampleDataLoader().load_test_data()
 
@@ -158,6 +166,7 @@ class BaseTest(unittest.TestCase):
     @staticmethod
     def load_test_spec(dir_name, master_spec=False, category_id=None):
         """Loads a spec into the database based on a directory in /tests/data"""
 
         if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
             return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
         filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
@@ -197,7 +206,7 @@ class BaseTest(unittest.TestCase):
         for key, value in items:
             query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))
 
-        query_string_list.append('redirect=%s' % redirect_url)
+        query_string_list.append('redirect_url=%s' % redirect_url)
 
         return '?%s' % '&'.join(query_string_list)
 
@@ -221,12 +230,12 @@ class BaseTest(unittest.TestCase):
             db.session.commit()
         return user
 
-    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
-        study = session.query(StudyModel).first()
+    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
+        study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
         if study is None:
             user = self.create_user(uid=uid)
             study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
-                               user_uid=user.uid)
+                               user_uid=user.uid, primary_investigator_id=primary_investigator_id)
             db.session.add(study)
             db.session.commit()
         return study
@@ -248,3 +257,97 @@ class BaseTest(unittest.TestCase):
                                  binary_data=file.read(),
                                  content_type=CONTENT_TYPES['xls'])
         file.close()
+
+    def create_approval(
+            self,
+            study=None,
+            workflow=None,
+            approver_uid=None,
+            status=None,
+            version=None,
+    ):
+        study = study or self.create_study()
+        workflow = workflow or self.create_workflow()
+        approver_uid = approver_uid or self.test_uid
+        status = status or ApprovalStatus.PENDING.value
+        version = version or 1
+        approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version)
+        db.session.add(approval)
+        db.session.commit()
+        return approval
+
+    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
+                          (workflow.id, str(soft_reset), str(hard_reset)),
+                          headers=self.logged_in_headers(user),
+                          content_type="application/json")
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+        workflow_api = WorkflowApiSchema().load(json_data)
+        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
+        return workflow_api
+
+    def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
+        prev_completed_task_count = workflow_in.completed_tasks
+        if isinstance(task_in, dict):
+            task_id = task_in["id"]
+        else:
+            task_id = task_in.id
+
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
+                          headers=self.logged_in_headers(user=user),
+                          content_type="application/json",
+                          data=json.dumps(dict_data))
+        if error_code:
+            self.assert_failure(rv, error_code=error_code)
+            return
+
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+
+        # Assure stats are updated on the model
+        workflow = WorkflowApiSchema().load(json_data)
+        # The total number of tasks may change over time, as users move through gateways
+        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
+        self.assertIsNotNone(workflow.total_tasks)
+        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+
+        # Assure a record exists in the Task Events
+        task_events = session.query(TaskEventModel) \
+            .filter_by(workflow_id=workflow.id) \
+            .filter_by(task_id=task_id) \
+            .order_by(TaskEventModel.date.desc()).all()
+        self.assertGreater(len(task_events), 0)
+        event = task_events[0]
+        self.assertIsNotNone(event.study_id)
+        self.assertEqual(user_uid, event.user_uid)
+        self.assertEqual(workflow.id, event.workflow_id)
+        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+        self.assertEqual(workflow.spec_version, event.spec_version)
+        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+        self.assertEqual(task_in.id, task_id)
+        self.assertEqual(task_in.name, event.task_name)
+        self.assertEqual(task_in.title, event.task_title)
+        self.assertEqual(task_in.type, event.task_type)
+        self.assertEqual("COMPLETED", event.task_state)
+
+        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
+        if isinstance(task_in.multi_instance_type, MultiInstanceType):
+            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+        else:
+            self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+        self.assertEqual(task_in.multi_instance_count, event.mi_count)
+        self.assertEqual(task_in.multi_instance_index, event.mi_index)
+        self.assertEqual(task_in.process_name, event.process_name)
+        self.assertIsNotNone(event.date)
+
+        workflow = WorkflowApiSchema().load(json_data)
+        return workflow

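Taken together, the new create_approval, get_workflow_api and complete_form helpers let approval tests drive a workflow end to end. A minimal sketch of a test built on them (the workflow name and form payload here are placeholders, not taken from this commit):

    # Sketch: drive one task through a workflow and create an approval for it.
    def test_workflow_round_trip(self):
        self.load_example_data()
        workflow = self.create_workflow('random_fact')
        workflow_api = self.get_workflow_api(workflow)
        # Submit the first task's form; complete_form re-fetches and asserts stats.
        self.complete_form(workflow, workflow_api.next_task, {'type': 'norris'})
        approval = self.create_approval(workflow=workflow)
        self.assertEqual(ApprovalStatus.PENDING.value, approval.status)
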
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1elv5t1" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1elv5t1" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
   <bpmn:process id="Process_15vbyda" isExecutable="true">
     <bpmn:startEvent id="StartEvent_1">
       <bpmn:outgoing>SequenceFlow_1ma1wxb</bpmn:outgoing>
@@ -8,7 +8,11 @@
     <bpmn:userTask id="get_num_presents" name="Get number of presents" camunda:formKey="present_question">
       <bpmn:extensionElements>
         <camunda:formData>
-          <camunda:formField id="num_presents" label="How many presents will my dog Ginger leave for me today?" type="long" defaultValue="0" />
+          <camunda:formField id="num_presents" label="How many presents will my dog Ginger leave for me today?" type="long" defaultValue="0">
+            <camunda:validation>
+              <camunda:constraint name="required" config="true" />
+            </camunda:validation>
+          </camunda:formField>
         </camunda:formData>
       </bpmn:extensionElements>
       <bpmn:incoming>SequenceFlow_1ma1wxb</bpmn:incoming>
@@ -26,38 +30,37 @@ Based on the information you provided (Ginger left {{num_presents}}, we recommen
 
 ## {{message}}
 
-We hope you both have an excellent day!
-</bpmn:documentation>
+We hope you both have an excellent day!</bpmn:documentation>
       <bpmn:incoming>SequenceFlow_0grui6f</bpmn:incoming>
     </bpmn:endEvent>
     <bpmn:sequenceFlow id="SequenceFlow_0grui6f" sourceRef="Task_0sgafty" targetRef="EndEvent_0tsqkyu" />
   </bpmn:process>
   <bpmndi:BPMNDiagram id="BPMNDiagram_1">
     <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_15vbyda">
-      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
-        <dc:Bounds x="179" y="99" width="36" height="36" />
-      </bpmndi:BPMNShape>
-      <bpmndi:BPMNEdge id="SequenceFlow_1ma1wxb_di" bpmnElement="SequenceFlow_1ma1wxb">
-        <di:waypoint x="215" y="117" />
-        <di:waypoint x="270" y="117" />
+      <bpmndi:BPMNEdge id="SequenceFlow_0grui6f_di" bpmnElement="SequenceFlow_0grui6f">
+        <di:waypoint x="530" y="117" />
+        <di:waypoint x="592" y="117" />
       </bpmndi:BPMNEdge>
-      <bpmndi:BPMNShape id="UserTask_15w5gb3_di" bpmnElement="get_num_presents">
-        <dc:Bounds x="270" y="77" width="100" height="80" />
-      </bpmndi:BPMNShape>
       <bpmndi:BPMNEdge id="SequenceFlow_1uxaqwp_di" bpmnElement="SequenceFlow_1uxaqwp">
         <di:waypoint x="370" y="117" />
         <di:waypoint x="430" y="117" />
       </bpmndi:BPMNEdge>
+      <bpmndi:BPMNEdge id="SequenceFlow_1ma1wxb_di" bpmnElement="SequenceFlow_1ma1wxb">
+        <di:waypoint x="215" y="117" />
+        <di:waypoint x="270" y="117" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
+        <dc:Bounds x="179" y="99" width="36" height="36" />
+      </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="UserTask_15w5gb3_di" bpmnElement="get_num_presents">
+        <dc:Bounds x="270" y="77" width="100" height="80" />
+      </bpmndi:BPMNShape>
       <bpmndi:BPMNShape id="BusinessRuleTask_10c5wgr_di" bpmnElement="Task_0sgafty">
         <dc:Bounds x="430" y="77" width="100" height="80" />
       </bpmndi:BPMNShape>
       <bpmndi:BPMNShape id="EndEvent_0tsqkyu_di" bpmnElement="EndEvent_0tsqkyu">
         <dc:Bounds x="592" y="99" width="36" height="36" />
       </bpmndi:BPMNShape>
-      <bpmndi:BPMNEdge id="SequenceFlow_0grui6f_di" bpmnElement="SequenceFlow_0grui6f">
-        <di:waypoint x="530" y="117" />
-        <di:waypoint x="592" y="117" />
-      </bpmndi:BPMNEdge>
     </bpmndi:BPMNPlane>
   </bpmndi:BPMNDiagram>
 </bpmn:definitions>

@@ -8,7 +8,11 @@
     <bpmn:userTask id="Task_Has_Bananas" name="Enter Do You Have Bananas" camunda:formKey="bananas_form">
       <bpmn:extensionElements>
         <camunda:formData>
-          <camunda:formField id="has_bananas" label="Do you have bananas?" type="boolean" />
+          <camunda:formField id="has_bananas" label="Do you have bananas?" type="boolean">
+            <camunda:validation>
+              <camunda:constraint name="required" config="true" />
+            </camunda:validation>
+          </camunda:formField>
         </camunda:formData>
       </bpmn:extensionElements>
       <bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1gjhqt9" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1gjhqt9" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
   <bpmn:process id="Process_1ds61df" isExecutable="true">
     <bpmn:startEvent id="StartEvent_1">
       <bpmn:outgoing>SequenceFlow_0c7wlth</bpmn:outgoing>
@@ -108,6 +108,9 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
       <bpmn:extensionElements>
         <camunda:formData>
           <camunda:formField id="type" label="Type" type="enum" defaultValue="cat">
+            <camunda:validation>
+              <camunda:constraint name="required" config="true" />
+            </camunda:validation>
             <camunda:value id="norris" name="Chuck Norris" />
             <camunda:value id="cat" name="Cat Fact" />
             <camunda:value id="buzzword" name="Business Buzzword" />
@@ -121,8 +124,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
       <bpmn:outgoing>SequenceFlow_0641sh6</bpmn:outgoing>
     </bpmn:userTask>
     <bpmn:scriptTask id="Task_Get_Fact_From_API" name="Display Fact">
-      <bpmn:documentation>
-      </bpmn:documentation>
+      <bpmn:documentation />
       <bpmn:extensionElements>
         <camunda:inputOutput>
           <camunda:inputParameter name="Fact.type" />
@@ -155,6 +157,18 @@ Your random fact is:
   </bpmn:process>
   <bpmndi:BPMNDiagram id="BPMNDiagram_1">
     <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_1ds61df">
+      <bpmndi:BPMNEdge id="SequenceFlow_0t29gjo_di" bpmnElement="SequenceFlow_0t29gjo">
+        <di:waypoint x="570" y="250" />
+        <di:waypoint x="692" y="250" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNEdge id="SequenceFlow_0641sh6_di" bpmnElement="SequenceFlow_0641sh6">
+        <di:waypoint x="370" y="250" />
+        <di:waypoint x="470" y="250" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNEdge id="SequenceFlow_0c7wlth_di" bpmnElement="SequenceFlow_0c7wlth">
+        <di:waypoint x="188" y="250" />
+        <di:waypoint x="270" y="250" />
+      </bpmndi:BPMNEdge>
       <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
         <dc:Bounds x="152" y="232" width="36" height="36" />
       </bpmndi:BPMNShape>
@@ -164,35 +178,23 @@ Your random fact is:
       <bpmndi:BPMNShape id="ScriptTask_10keafb_di" bpmnElement="Task_Get_Fact_From_API">
         <dc:Bounds x="470" y="210" width="100" height="80" />
       </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="EndEvent_0u1cgrf_di" bpmnElement="EndEvent_0u1cgrf">
+        <dc:Bounds x="692" y="232" width="36" height="36" />
+      </bpmndi:BPMNShape>
       <bpmndi:BPMNShape id="TextAnnotation_09fq7kh_di" bpmnElement="TextAnnotation_09fq7kh">
         <dc:Bounds x="330" y="116" width="99.99202297383536" height="68.28334396936822" />
       </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="TextAnnotation_1234e5n_di" bpmnElement="TextAnnotation_1234e5n">
+        <dc:Bounds x="570" y="120" width="100" height="68" />
+      </bpmndi:BPMNShape>
       <bpmndi:BPMNEdge id="Association_1cfasjp_di" bpmnElement="Association_1cfasjp">
         <di:waypoint x="344" y="210" />
         <di:waypoint x="359" y="184" />
       </bpmndi:BPMNEdge>
-      <bpmndi:BPMNShape id="TextAnnotation_1234e5n_di" bpmnElement="TextAnnotation_1234e5n">
-        <dc:Bounds x="570" y="120" width="100" height="68" />
-      </bpmndi:BPMNShape>
       <bpmndi:BPMNEdge id="Association_1qirnyy_di" bpmnElement="Association_1qirnyy">
         <di:waypoint x="561" y="210" />
         <di:waypoint x="584" y="188" />
       </bpmndi:BPMNEdge>
-      <bpmndi:BPMNShape id="EndEvent_0u1cgrf_di" bpmnElement="EndEvent_0u1cgrf">
-        <dc:Bounds x="692" y="232" width="36" height="36" />
-      </bpmndi:BPMNShape>
-      <bpmndi:BPMNEdge id="SequenceFlow_0c7wlth_di" bpmnElement="SequenceFlow_0c7wlth">
-        <di:waypoint x="188" y="250" />
-        <di:waypoint x="270" y="250" />
-      </bpmndi:BPMNEdge>
-      <bpmndi:BPMNEdge id="SequenceFlow_0641sh6_di" bpmnElement="SequenceFlow_0641sh6">
-        <di:waypoint x="370" y="250" />
-        <di:waypoint x="470" y="250" />
-      </bpmndi:BPMNEdge>
-      <bpmndi:BPMNEdge id="SequenceFlow_0t29gjo_di" bpmnElement="SequenceFlow_0t29gjo">
-        <di:waypoint x="570" y="250" />
-        <di:waypoint x="692" y="250" />
-      </bpmndi:BPMNEdge>
     </bpmndi:BPMNPlane>
   </bpmndi:BPMNDiagram>
 </bpmn:definitions>

@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1v1rp1q" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
+  <bpmn:process id="Repeat" isExecutable="true">
+    <bpmn:startEvent id="StartEvent_1">
+      <bpmn:outgoing>SequenceFlow_0lvudp8</bpmn:outgoing>
+    </bpmn:startEvent>
+    <bpmn:sequenceFlow id="SequenceFlow_0lvudp8" sourceRef="StartEvent_1" targetRef="Task_14svgcu" />
+    <bpmn:endEvent id="EndEvent_0q4qzl9">
+      <bpmn:incoming>SequenceFlow_02vev7n</bpmn:incoming>
+    </bpmn:endEvent>
+    <bpmn:sequenceFlow id="SequenceFlow_02vev7n" sourceRef="Task_14svgcu" targetRef="EndEvent_0q4qzl9" />
+    <bpmn:userTask id="Task_14svgcu" name="Repeating Form" camunda:formKey="RepeatForm">
+      <bpmn:extensionElements>
+        <camunda:formData>
+          <camunda:formField id="name" label="Add a cat name" type="string" defaultValue="couger buttons">
+            <camunda:properties>
+              <camunda:property id="repeat" value="cats" />
+            </camunda:properties>
+          </camunda:formField>
+        </camunda:formData>
+      </bpmn:extensionElements>
+      <bpmn:incoming>SequenceFlow_0lvudp8</bpmn:incoming>
+      <bpmn:outgoing>SequenceFlow_02vev7n</bpmn:outgoing>
+    </bpmn:userTask>
+  </bpmn:process>
+  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
+    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Repeat">
+      <bpmndi:BPMNEdge id="SequenceFlow_02vev7n_di" bpmnElement="SequenceFlow_02vev7n">
+        <di:waypoint x="370" y="117" />
+        <di:waypoint x="432" y="117" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNEdge id="SequenceFlow_0lvudp8_di" bpmnElement="SequenceFlow_0lvudp8">
+        <di:waypoint x="215" y="117" />
+        <di:waypoint x="270" y="117" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
+        <dc:Bounds x="179" y="99" width="36" height="36" />
+      </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="EndEvent_0q4qzl9_di" bpmnElement="EndEvent_0q4qzl9">
+        <dc:Bounds x="432" y="99" width="36" height="36" />
+      </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="UserTask_18ly1yq_di" bpmnElement="Task_14svgcu">
+        <dc:Bounds x="270" y="77" width="100" height="80" />
+      </bpmndi:BPMNShape>
+    </bpmndi:BPMNPlane>
+  </bpmndi:BPMNDiagram>
+</bpmn:definitions>

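The `repeat` property above is what groups copies of the `name` field under a single key. Purely as an illustration (the exact payload shape the app expects is an assumption, not shown in this diff), a test might submit the repeating group as a list of objects:

    # Hypothetical task-data payload for the Repeating Form spec above.
    dict_data = {"cats": [{"name": "couger buttons"}, {"name": "mr. big stuff"}]}
    # self.complete_form(workflow, workflow_api.next_task, dict_data)
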
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1v1rp1q" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
+  <bpmn:process id="Required" isExecutable="true">
+    <bpmn:startEvent id="StartEvent_1">
+      <bpmn:outgoing>SequenceFlow_0lvudp8</bpmn:outgoing>
+    </bpmn:startEvent>
+    <bpmn:sequenceFlow id="SequenceFlow_0lvudp8" sourceRef="StartEvent_1" targetRef="Task_Required_Fields" />
+    <bpmn:endEvent id="EndEvent_0q4qzl9">
+      <bpmn:incoming>SequenceFlow_02vev7n</bpmn:incoming>
+    </bpmn:endEvent>
+    <bpmn:sequenceFlow id="SequenceFlow_02vev7n" sourceRef="Task_Required_Fields" targetRef="EndEvent_0q4qzl9" />
+    <bpmn:userTask id="Task_Required_Fields" name="Required fields" camunda:formKey="RequiredForm">
+      <bpmn:extensionElements>
+        <camunda:formData>
+          <camunda:formField id="string_required" label="String" type="string" defaultValue="some string">
+            <camunda:validation>
+              <camunda:constraint name="required" config="true" />
+            </camunda:validation>
+          </camunda:formField>
+          <camunda:formField id="string_not_required" type="string" defaultValue="If ya like, I don't care." />
+        </camunda:formData>
+      </bpmn:extensionElements>
+      <bpmn:incoming>SequenceFlow_0lvudp8</bpmn:incoming>
+      <bpmn:outgoing>SequenceFlow_02vev7n</bpmn:outgoing>
+    </bpmn:userTask>
+  </bpmn:process>
+  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
+    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Required">
+      <bpmndi:BPMNEdge id="SequenceFlow_02vev7n_di" bpmnElement="SequenceFlow_02vev7n">
+        <di:waypoint x="370" y="117" />
+        <di:waypoint x="432" y="117" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNEdge id="SequenceFlow_0lvudp8_di" bpmnElement="SequenceFlow_0lvudp8">
+        <di:waypoint x="215" y="117" />
+        <di:waypoint x="270" y="117" />
+      </bpmndi:BPMNEdge>
+      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
+        <dc:Bounds x="179" y="99" width="36" height="36" />
+      </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="EndEvent_0q4qzl9_di" bpmnElement="EndEvent_0q4qzl9">
+        <dc:Bounds x="432" y="99" width="36" height="36" />
+      </bpmndi:BPMNShape>
+      <bpmndi:BPMNShape id="UserTask_18ly1yq_di" bpmnElement="Task_Required_Fields">
+        <dc:Bounds x="270" y="77" width="100" height="80" />
+      </bpmndi:BPMNShape>
+    </bpmndi:BPMNPlane>
+  </bpmndi:BPMNDiagram>
+</bpmn:definitions>

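A spec like this is usually exercised both ways: posting the form with the required field present, and posting it empty and expecting a validation failure. A sketch using the BaseTest helper above (the spec directory name and error code string are assumptions):

    # Hypothetical negative test against the Required form.
    workflow = self.create_workflow('required_fields')
    workflow_api = self.get_workflow_api(workflow)
    self.complete_form(workflow, workflow_api.next_task, {}, error_code='invalid_form_data')
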
@@ -1,113 +1,260 @@
 import json
+import random
+import string
+
+from flask import g
 
 from tests.base_test import BaseTest
-from crc import app, db, session
-from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
-
-APPROVAL_PAYLOAD = {
-    'id': None,
-    'approver': {
-        'uid': 'bgb22',
-        'display_name': 'Billy Bob (bgb22)',
-        'title': 'E42:He\'s a hoopy frood',
-        'department': 'E0:EN-Eng Study of Parallel Universes'
-    },
-    'title': 'El Study',
-    'status': 'DECLINED',
-    'version': 1,
-    'message': 'Incorrect documents',
-    'associated_files': [
-        {
-            'id': 42,
-            'name': 'File 1',
-            'content_type': 'document'
-        },
-        {
-            'id': 43,
-            'name': 'File 2',
-            'content_type': 'document'
-        }
-    ],
-    'workflow_id': 1,
-    'study_id': 1
-}
+from crc import session, db
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.study import StudyModel
+from crc.models.workflow import WorkflowModel
 
 
 class TestApprovals(BaseTest):
     def setUp(self):
         """Initial setup shared by all TestApprovals tests"""
         self.load_example_data()
-        self.study = self.create_study()
-        self.workflow = self.create_workflow('random_fact')
-        # TODO: Move to base_test as a helper
-        self.approval = ApprovalModel(
-            study=self.study,
-            workflow=self.workflow,
-            approver_uid='arc93',
-            status=ApprovalStatus.WAITING.value,
-            version=1
-        )
-        session.add(self.approval)
-
-        self.approval_2 = ApprovalModel(
-            study=self.study,
-            workflow=self.workflow,
-            approver_uid='dhf8r',
-            status=ApprovalStatus.WAITING.value,
-            version=1
-        )
-        session.add(self.approval_2)
-
-        session.commit()
+
+        # Add a study with 2 approvers
+        study_workflow_approvals_1 = self._create_study_workflow_approvals(
+            user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp",
+            approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value]
+        )
+        self.study = study_workflow_approvals_1['study']
+        self.workflow = study_workflow_approvals_1['workflow']
+        self.approval = study_workflow_approvals_1['approvals'][0]
+        self.approval_2 = study_workflow_approvals_1['approvals'][1]
+
+        # Add a study with 1 approver
+        study_workflow_approvals_2 = self._create_study_workflow_approvals(
+            user_uid="dhf8r", title="second study", primary_investigator_id="dhf8r",
+            approver_uids=["lb3dp"], statuses=[ApprovalStatus.PENDING.value]
+        )
+        self.unrelated_study = study_workflow_approvals_2['study']
+        self.unrelated_workflow = study_workflow_approvals_2['workflow']
+        self.approval_3 = study_workflow_approvals_2['approvals'][0]
 
     def test_list_approvals_per_approver(self):
         """Only approvals associated with approver should be returned"""
         approver_uid = self.approval_2.approver_uid
-        rv = self.app.get(f'/v1.0/approval?approver_uid={approver_uid}', headers=self.logged_in_headers())
+        rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
         self.assert_success(rv)
 
         response = json.loads(rv.get_data(as_text=True))
 
-        # Stored approvals are 2
+        # Stored approvals are 3
         approvals_count = ApprovalModel.query.count()
-        self.assertEqual(approvals_count, 2)
+        self.assertEqual(approvals_count, 3)
 
         # but Dan's approvals should be only 1
         self.assertEqual(len(response), 1)
 
         # Confirm approver UID matches returned payload
-        approval = ApprovalSchema().load(response[0])
-        self.assertEqual(approval.approver['uid'], approver_uid)
+        approval = response[0]
+        self.assertEqual(approval['approver']['uid'], approver_uid)
 
-    def test_list_approvals_per_admin(self):
-        """All approvals will be returned"""
-        rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
+    def test_list_approvals_as_user(self):
+        """All approvals as different user"""
+        rv = self.app.get('/v1.0/approval?as_user=lb3dp', headers=self.logged_in_headers())
         self.assert_success(rv)
 
         response = json.loads(rv.get_data(as_text=True))
 
-        # Returned approvals should match what's in the db
-        approvals_count = ApprovalModel.query.count()
-        response_count = len(response)
-        self.assertEqual(approvals_count, response_count)
-
-    def test_update_approval(self):
-        """Approval status will be updated"""
-        approval_id = self.approval.id
-        data = dict(APPROVAL_PAYLOAD)
-        data['id'] = approval_id
-
-        self.assertEqual(self.approval.status, ApprovalStatus.WAITING.value)
-
-        rv = self.app.put(f'/v1.0/approval/{approval_id}',
-                          content_type="application/json",
-                          headers=self.logged_in_headers(),
-                          data=json.dumps(data))
-        self.assert_success(rv)
-
-        session.refresh(self.approval)
-
-        # Updated record should now have the data sent to the endpoint
-        self.assertEqual(self.approval.message, data['message'])
-        self.assertEqual(self.approval.status, ApprovalStatus.DECLINED.value)
+        # Returned approvals should match what's in the db for user lb3dp: we should get one
+        # approval back per study (2 studies), and that approval should have one related approval.
+        response_count = len(response)
+        self.assertEqual(2, response_count)
+
+        rv = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
+        self.assert_success(rv)
+        response = json.loads(rv.get_data(as_text=True))
+        response_count = len(response)
+        self.assertEqual(1, response_count)
+        self.assertEqual(1, len(response[0]['related_approvals']))  # this approval has a related approval.
+
+    def test_update_approval_fails_if_not_the_approver(self):
+        approval = session.query(ApprovalModel).filter_by(approver_uid='lb3dp').first()
+        data = {'id': approval.id,
+                "approver_uid": "dhf8r",
+                'message': "Approved. I like the cut of your jib.",
+                'status': ApprovalStatus.APPROVED.value}
+
+        self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+        rv = self.app.put(f'/v1.0/approval/{approval.id}',
+                          content_type="application/json",
+                          headers=self.logged_in_headers(),  # As dhf8r
+                          data=json.dumps(data))
+        self.assert_failure(rv)
+
+    def test_accept_approval(self):
+        approval = session.query(ApprovalModel).filter_by(approver_uid='dhf8r').first()
+        data = {'id': approval.id,
+                "approver": {"uid": "dhf8r"},
+                'message': "Approved. I like the cut of your jib.",
+                'status': ApprovalStatus.APPROVED.value}
+
+        self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+        rv = self.app.put(f'/v1.0/approval/{approval.id}',
+                          content_type="application/json",
+                          headers=self.logged_in_headers(),  # As dhf8r
+                          data=json.dumps(data))
+        self.assert_success(rv)
+
+        session.refresh(approval)
+
+        # Updated record should now have the data sent to the endpoint
+        self.assertEqual(approval.message, data['message'])
+        self.assertEqual(approval.status, ApprovalStatus.APPROVED.value)
+
+    def test_decline_approval(self):
+        approval = session.query(ApprovalModel).filter_by(approver_uid='dhf8r').first()
+        data = {'id': approval.id,
+                "approver": {"uid": "dhf8r"},
+                'message': "Declined. I find the cut of your jib lacking.",
+                'status': ApprovalStatus.DECLINED.value}
+
+        self.assertEqual(approval.status, ApprovalStatus.PENDING.value)
+
+        rv = self.app.put(f'/v1.0/approval/{approval.id}',
+                          content_type="application/json",
+                          headers=self.logged_in_headers(),  # As dhf8r
+                          data=json.dumps(data))
+        self.assert_success(rv)
+
+        session.refresh(approval)
+
+        # Updated record should now have the data sent to the endpoint
+        self.assertEqual(approval.message, data['message'])
+        self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)
+
+    def test_csv_export(self):
+        self.load_test_spec('two_forms')
+        self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')
+
+        # Get all workflows
+        workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()
+
+        # For each workflow, complete all tasks
+        for workflow in workflows:
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual('two_forms', workflow_api.workflow_spec_id)
+
+            # Log current user out.
+            g.user = None
+            self.assertIsNone(g.user)
+
+            # Complete the form for Step one and post it.
+            self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)
+
+            # Get the next Task
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual("StepTwo", workflow_api.next_task.name)
+
+            # Get all user Tasks and check that the data have been saved
+            task = workflow_api.next_task
+            self.assertIsNotNone(task.data)
+            for val in task.data.values():
+                self.assertIsNotNone(val)
+
+        rv = self.app.get('/v1.0/approval/csv', headers=self.logged_in_headers())
+        self.assert_success(rv)
+
+    def test_all_approvals(self):
+        self._add_lots_of_random_approvals()
+
+        not_canceled = session.query(ApprovalModel).filter(ApprovalModel.status != 'CANCELED').all()
+        not_canceled_study_ids = []
+        for a in not_canceled:
+            if a.study_id not in not_canceled_study_ids:
+                not_canceled_study_ids.append(a.study_id)
+
+        rv_all = self.app.get('/v1.0/all_approvals?status=false', headers=self.logged_in_headers())
+        self.assert_success(rv_all)
+        all_data = json.loads(rv_all.get_data(as_text=True))
+        self.assertEqual(len(all_data), len(not_canceled_study_ids), 'Should return all non-canceled approvals, grouped by study')
+
+        all_approvals = session.query(ApprovalModel).all()
+        all_approvals_study_ids = []
+        for a in all_approvals:
+            if a.study_id not in all_approvals_study_ids:
+                all_approvals_study_ids.append(a.study_id)
+
+        rv_all = self.app.get('/v1.0/all_approvals?status=true', headers=self.logged_in_headers())
+        self.assert_success(rv_all)
+        all_data = json.loads(rv_all.get_data(as_text=True))
+        self.assertEqual(len(all_data), len(all_approvals_study_ids), 'Should return all approvals, grouped by study')
+
+    def test_approvals_counts(self):
+        statuses = [name for name, value in ApprovalStatus.__members__.items()]
+        self._add_lots_of_random_approvals()
+
+        # Get the counts
+        rv_counts = self.app.get('/v1.0/approval-counts', headers=self.logged_in_headers())
+        self.assert_success(rv_counts)
+        counts = json.loads(rv_counts.get_data(as_text=True))
+
+        # Get the actual approvals
+        rv_approvals = self.app.get('/v1.0/approval', headers=self.logged_in_headers())
+        self.assert_success(rv_approvals)
+        approvals = json.loads(rv_approvals.get_data(as_text=True))
+
+        # Tally up the number of approvals in each status category
+        manual_counts = {}
+        for status in statuses:
+            manual_counts[status] = 0
+
+        for approval in approvals:
+            manual_counts[approval['status']] += 1
+
+        # Numbers in each category should match
+        for status in statuses:
+            self.assertEqual(counts[status], manual_counts[status], 'Approval counts for status %s should match' % status)
+
+        # Total number of approvals should match
+        total_counts = sum(counts[status] for status in statuses)
+        self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')
+
+    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
+                                         workflow_spec_name="random_fact"):
+        study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
+        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
+        approvals = []
+
+        for i in range(len(approver_uids)):
+            approvals.append(self.create_approval(
+                study=study,
+                workflow=workflow,
+                approver_uid=approver_uids[i],
+                status=statuses[i],
+                version=1
+            ))
+
+        return {
+            'study': study,
+            'workflow': workflow,
+            'approvals': approvals,
+        }
+
+    def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
+        num_studies_before = db.session.query(StudyModel).count()
+        statuses = [name for name, value in ApprovalStatus.__members__.items()]
+
+        # Add a whole bunch of approvals with random statuses
+        for i in range(n):
+            approver_uids = random.choices(["lb3dp", "dhf8r"])
+            self._create_study_workflow_approvals(
+                user_uid=random.choice(["lb3dp", "dhf8r"]),
+                title="".join(random.choices(string.ascii_lowercase, k=64)),
+                primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
+                approver_uids=approver_uids,
+                statuses=random.choices(statuses, k=len(approver_uids)),
+                workflow_spec_name=workflow_spec_name
+            )
+
+        session.flush()
+        num_studies_after = db.session.query(StudyModel).count()
+        self.assertEqual(num_studies_after, num_studies_before + n)

@@ -15,13 +15,14 @@ class TestApprovalsService(BaseTest):
                              name="anything.png", content_type="text",
                              binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr" )
 
+
         ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
-        self.assertEquals(1, db.session.query(ApprovalModel).count())
+        self.assertEqual(1, db.session.query(ApprovalModel).count())
         model = db.session.query(ApprovalModel).first()
-        self.assertEquals(workflow.study_id, model.study_id)
-        self.assertEquals(workflow.id, model.workflow_id)
-        self.assertEquals("dhf8r", model.approver_uid)
-        self.assertEquals(1, model.version)
+        self.assertEqual(workflow.study_id, model.study_id)
+        self.assertEqual(workflow.id, model.workflow_id)
+        self.assertEqual("dhf8r", model.approver_uid)
+        self.assertEqual(1, model.version)
 
     def test_new_requests_dont_add_if_approval_exists_for_current_workflow(self):
         self.create_reference_document()
@@ -32,9 +33,9 @@ class TestApprovalsService(BaseTest):
 
         ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
         ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
-        self.assertEquals(1, db.session.query(ApprovalModel).count())
+        self.assertEqual(1, db.session.query(ApprovalModel).count())
         model = db.session.query(ApprovalModel).first()
-        self.assertEquals(1, model.version)
+        self.assertEqual(1, model.version)
 
     def test_new_approval_requests_after_file_modification_create_new_requests(self):
         self.load_example_data()
@@ -51,9 +52,20 @@ class TestApprovalsService(BaseTest):
                              binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")
 
         ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
-        self.assertEquals(2, db.session.query(ApprovalModel).count())
+        self.assertEqual(2, db.session.query(ApprovalModel).count())
         models = db.session.query(ApprovalModel).order_by(ApprovalModel.version).all()
-        self.assertEquals(1, models[0].version)
-        self.assertEquals(2, models[1].version)
+        self.assertEqual(1, models[0].version)
+        self.assertEqual(2, models[1].version)
+
+    def test_new_approval_sends_proper_emails(self):
+        self.assertEqual(1, 1)
+
+    def test_new_approval_failed_ldap_lookup(self):
+        # failed lookup should send email to sartographysupport@googlegroups.com + Cheryl
+        self.assertEqual(1, 1)
+
+    def test_approve_approval_sends_proper_emails(self):
+        self.assertEqual(1, 1)
+
+    def test_deny_approval_sends_proper_emails(self):
+        self.assertEqual(1, 1)

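The four new email tests are placeholders. When they are filled in, Flask-Mail's testing hook can capture the outgoing messages; a sketch, assuming the app exposes its Flask-Mail instance as `mail` (an assumption, not shown in this commit):

    # Sketch: capture outgoing approval mail with Flask-Mail's outbox.
    from crc import mail  # assumed export of the Flask-Mail instance

    with mail.record_messages() as outbox:
        ApprovalService.add_approval(study_id=workflow.study_id,
                                     workflow_id=workflow.id, approver_uid="dhf8r")
        self.assertEqual(1, len(outbox))              # one notification per new request
        self.assertIn("Approval", outbox[0].subject)  # subject wording is assumed
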
@@ -1,29 +1,73 @@
-from tests.base_test import BaseTest
-
-from crc import db
+import json
+from calendar import timegm
+from datetime import timezone, datetime, timedelta
+
+import jwt
+
+from tests.base_test import BaseTest
+from crc import db, app
+from crc.api.common import ApiError
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.study import StudySchema, StudyModel
 from crc.models.user import UserModel
 
 
 class TestAuthentication(BaseTest):
 
-    def test_auth_token(self):
-        self.load_example_data()
-        user = UserModel(uid="dhf8r")
-        auth_token = user.encode_auth_token()
-        self.assertTrue(isinstance(auth_token, bytes))
-        self.assertEqual("dhf8r", user.decode_auth_token(auth_token).get("sub"))
+    def tearDown(self):
+        # Assure we set the production flag back to false.
+        app.config['PRODUCTION'] = False
+        super().tearDown()
 
-    def test_backdoor_auth_creates_user(self):
-        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
+    def test_auth_token(self):
+        # Save the original timeout setting
+        orig_ttl = float(app.config['TOKEN_AUTH_TTL_HOURS'])
+
+        self.load_example_data()
+
+        # Set the timeout to something else
+        new_ttl = 4.0
+        app.config['TOKEN_AUTH_TTL_HOURS'] = new_ttl
+        user_1 = UserModel(uid="dhf8r")
+        expected_exp_1 = timegm((datetime.utcnow() + timedelta(hours=new_ttl)).utctimetuple())
+        auth_token_1 = user_1.encode_auth_token()
+        self.assertTrue(isinstance(auth_token_1, bytes))
+        self.assertEqual("dhf8r", user_1.decode_auth_token(auth_token_1).get("sub"))
+        actual_exp_1 = user_1.decode_auth_token(auth_token_1).get("exp")
+        self.assertTrue(expected_exp_1 - 1000 <= actual_exp_1 <= expected_exp_1 + 1000)
+
+        # Set the timeout to a negative value, so the token is expired on arrival
+        neg_ttl = -0.01
+        app.config['TOKEN_AUTH_TTL_HOURS'] = neg_ttl
+        user_2 = UserModel(uid="dhf8r")
+        expected_exp_2 = timegm((datetime.utcnow() + timedelta(hours=neg_ttl)).utctimetuple())
+        auth_token_2 = user_2.encode_auth_token()
+        self.assertTrue(isinstance(auth_token_2, bytes))
+        with self.assertRaises(ApiError) as api_error:
+            with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
+                user_2.decode_auth_token(auth_token_2)
+        self.assertEqual(api_error.exception.status_code, 400, 'Should raise an API Error if token is expired')
+
+        # Set the timeout back to where it was
+        app.config['TOKEN_AUTH_TTL_HOURS'] = orig_ttl
+        user_3 = UserModel(uid="dhf8r")
+        expected_exp_3 = timegm((datetime.utcnow() + timedelta(hours=orig_ttl)).utctimetuple())
+        auth_token_3 = user_3.encode_auth_token()
+        self.assertTrue(isinstance(auth_token_3, bytes))
+        actual_exp_3 = user_3.decode_auth_token(auth_token_3).get("exp")
+        self.assertTrue(expected_exp_3 - 1000 <= actual_exp_3 <= expected_exp_3 + 1000)
+
+    def test_non_production_auth_creates_user(self):
+        new_uid = 'lb3dp'  ## Assure this user id is in the fake responses from ldap.
         self.load_example_data()
         user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
         self.assertIsNone(user)
 
         user_info = {'uid': new_uid, 'first_name': 'Cordi', 'last_name': 'Nator',
                      'email_address': 'czn1z@virginia.edu'}
         redirect_url = 'http://worlds.best.website/admin'
         query_string = self.user_info_to_query_string(user_info, redirect_url)
-        url = '/v1.0/sso_backdoor%s' % query_string
+        url = '/v1.0/login%s' % query_string
         rv_1 = self.app.get(url, follow_redirects=False)
         self.assertTrue(rv_1.status_code == 302)
         self.assertTrue(str.startswith(rv_1.location, redirect_url))

@ -38,22 +82,30 @@ class TestAuthentication(BaseTest):
|
||||||
self.assertTrue(rv_2.status_code == 302)
|
self.assertTrue(rv_2.status_code == 302)
|
||||||
self.assertTrue(str.startswith(rv_2.location, redirect_url))
|
self.assertTrue(str.startswith(rv_2.location, redirect_url))
|
||||||
|
|
||||||
def test_normal_auth_creates_user(self):
|
def test_production_auth_creates_user(self):
|
||||||
new_uid = 'lb3dp' # This user is in the test ldap system.
|
# Switch production mode on
|
||||||
|
app.config['PRODUCTION'] = True
|
||||||
|
|
||||||
self.load_example_data()
|
self.load_example_data()
|
||||||
user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
|
|
||||||
|
new_uid = 'lb3dp' # This user is in the test ldap system.
|
||||||
|
user = db.session.query(UserModel).filter_by(uid=new_uid).first()
|
||||||
self.assertIsNone(user)
|
self.assertIsNone(user)
|
||||||
redirect_url = 'http://worlds.best.website/admin'
|
redirect_url = 'http://worlds.best.website/admin'
|
||||||
headers = dict(Uid=new_uid)
|
headers = dict(Uid=new_uid)
|
||||||
|
db.session.flush()
|
||||||
rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)
|
rv = self.app.get('v1.0/login', follow_redirects=False, headers=headers)
|
||||||
self.assert_success(rv)
|
|
||||||
user = db.session.query(UserModel).filter(UserModel.uid == new_uid).first()
|
|
||||||
self.assertIsNotNone(user)
|
|
||||||
self.assertEquals(new_uid, user.uid)
|
|
||||||
self.assertEquals("Laura Barnes", user.display_name)
|
|
||||||
self.assertEquals("lb3dp@virginia.edu", user.email_address)
|
|
||||||
self.assertEquals("E0:Associate Professor of Systems and Information Engineering", user.title)
|
|
||||||
|
|
||||||
|
self.assert_success(rv)
|
||||||
|
user = db.session.query(UserModel).filter_by(uid=new_uid).first()
|
||||||
|
self.assertIsNotNone(user)
|
||||||
|
self.assertEqual(new_uid, user.uid)
|
||||||
|
self.assertEqual("Laura Barnes", user.display_name)
|
||||||
|
self.assertEqual("lb3dp@virginia.edu", user.email_address)
|
||||||
|
self.assertEqual("E0:Associate Professor of Systems and Information Engineering", user.title)
|
||||||
|
|
||||||
|
# Switch production mode back off
|
||||||
|
app.config['PRODUCTION'] = False
|
||||||
|
|
||||||
def test_current_user_status(self):
|
def test_current_user_status(self):
|
||||||
self.load_example_data()
|
self.load_example_data()
|
||||||
|
@ -67,3 +119,108 @@ class TestAuthentication(BaseTest):
|
||||||
user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
|
user = UserModel(uid="dhf8r", first_name='Dan', last_name='Funk', email_address='dhf8r@virginia.edu')
|
||||||
rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
|
rv = self.app.get('/v1.0/user', headers=self.logged_in_headers(user, redirect_url='http://omg.edu/lolwut'))
|
||||||
self.assert_success(rv)
|
self.assert_success(rv)
|
||||||
|
|
||||||
|
def test_admin_can_access_admin_only_endpoints(self):
|
||||||
|
# Switch production mode on
|
||||||
|
app.config['PRODUCTION'] = True
|
||||||
|
|
||||||
|
self.load_example_data()
|
||||||
|
|
||||||
|
admin_uids = app.config['ADMIN_UIDS']
|
||||||
|
self.assertGreater(len(admin_uids), 0)
|
||||||
|
admin_uid = admin_uids[0]
|
||||||
|
self.assertEqual(admin_uid, 'dhf8r') # This user is in the test ldap system.
|
||||||
|
admin_headers = dict(Uid=admin_uid)
|
||||||
|
|
||||||
|
rv = self.app.get('v1.0/login', follow_redirects=False, headers=admin_headers)
|
||||||
|
self.assert_success(rv)
|
||||||
|
|
||||||
|
admin_user = db.session.query(UserModel).filter(UserModel.uid == admin_uid).first()
|
||||||
|
self.assertIsNotNone(admin_user)
|
||||||
|
self.assertEqual(admin_uid, admin_user.uid)
|
||||||
|
|
||||||
|
admin_study = self._make_fake_study(admin_uid)
|
||||||
|
|
||||||
|
admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token().decode())
|
||||||
|
|
||||||
|
rv_add_study = self.app.post(
|
||||||
|
'/v1.0/study',
|
||||||
|
content_type="application/json",
|
||||||
|
headers=admin_token_headers,
|
||||||
|
data=json.dumps(StudySchema().dump(admin_study)),
|
||||||
|
follow_redirects=False
|
||||||
|
)
|
||||||
|
self.assert_success(rv_add_study, 'Admin user should be able to add a study')
|
||||||
|
|
||||||
|
new_admin_study = json.loads(rv_add_study.get_data(as_text=True))
|
||||||
|
db_admin_study = db.session.query(StudyModel).filter_by(id=new_admin_study['id']).first()
|
||||||
|
self.assertIsNotNone(db_admin_study)
|
||||||
|
|
||||||
|
rv_del_study = self.app.delete(
|
||||||
|
'/v1.0/study/%i' % db_admin_study.id,
|
||||||
|
follow_redirects=False,
|
||||||
|
headers=admin_token_headers
|
||||||
|
)
|
||||||
|
self.assert_success(rv_del_study, 'Admin user should be able to delete a study')
|
||||||
|
|
||||||
|
# Switch production mode back off
|
||||||
|
app.config['PRODUCTION'] = False
|
||||||
|
|
||||||
|
def test_nonadmin_cannot_access_admin_only_endpoints(self):
|
||||||
|
# Switch production mode on
|
||||||
|
app.config['PRODUCTION'] = True
|
||||||
|
|
||||||
|
self.load_example_data()
|
||||||
|
|
||||||
|
# Non-admin user should not be able to delete a study
|
||||||
|
non_admin_uid = 'lb3dp'
|
||||||
|
admin_uids = app.config['ADMIN_UIDS']
|
||||||
|
self.assertGreater(len(admin_uids), 0)
|
||||||
|
self.assertNotIn(non_admin_uid, admin_uids)
|
||||||
|
|
||||||
|
non_admin_headers = dict(Uid=non_admin_uid)
|
||||||
|
|
||||||
|
rv = self.app.get(
|
||||||
|
'v1.0/login',
|
||||||
|
follow_redirects=False,
|
||||||
|
headers=non_admin_headers
|
||||||
|
)
|
||||||
|
self.assert_success(rv)
|
||||||
|
|
||||||
|
non_admin_user = db.session.query(UserModel).filter_by(uid=non_admin_uid).first()
|
||||||
|
self.assertIsNotNone(non_admin_user)
|
||||||
|
|
||||||
|
non_admin_token_headers = dict(Authorization='Bearer ' + non_admin_user.encode_auth_token().decode())
|
||||||
|
|
||||||
|
non_admin_study = self._make_fake_study(non_admin_uid)
|
||||||
|
|
||||||
|
rv_add_study = self.app.post(
|
||||||
|
'/v1.0/study',
|
||||||
|
content_type="application/json",
|
||||||
|
headers=non_admin_token_headers,
|
||||||
|
data=json.dumps(StudySchema().dump(non_admin_study))
|
||||||
|
)
|
||||||
|
self.assert_success(rv_add_study, 'Non-admin user should be able to add a study')
|
||||||
|
|
||||||
|
new_non_admin_study = json.loads(rv_add_study.get_data(as_text=True))
|
||||||
|
db_non_admin_study = db.session.query(StudyModel).filter_by(id=new_non_admin_study['id']).first()
|
||||||
|
self.assertIsNotNone(db_non_admin_study)
|
||||||
|
|
||||||
|
rv_non_admin_del_study = self.app.delete(
|
||||||
|
'/v1.0/study/%i' % db_non_admin_study.id,
|
||||||
|
follow_redirects=False,
|
||||||
|
headers=non_admin_token_headers
|
||||||
|
)
|
||||||
|
self.assert_failure(rv_non_admin_del_study, 401)
|
||||||
|
|
||||||
|
# Switch production mode back off
|
||||||
|
app.config['PRODUCTION'] = False
|
||||||
|
|
||||||
|
def _make_fake_study(self, uid):
|
||||||
|
return {
|
||||||
|
"title": "blah",
|
||||||
|
"last_updated": datetime.now(tz=timezone.utc),
|
||||||
|
"protocol_builder_status": ProtocolBuilderStatus.ACTIVE,
|
||||||
|
"primary_investigator_id": uid,
|
||||||
|
"user_uid": uid,
|
||||||
|
}
|
||||||
|
|
|
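The TTL tests above lean on the standard JWT `exp` claim: a token minted with a negative TTL is already expired, so decoding raises `ExpiredSignatureError`, which the API layer wraps in an `ApiError`. A minimal sketch of that mechanism with PyJWT, assuming an HS256 signing key (the `'secret'` key and the function name below are illustrative, not taken from `UserModel`):

    from datetime import datetime, timedelta

    import jwt  # PyJWT, as imported by the tests above

    def encode_auth_token(uid, token_ttl_hours, secret='secret'):
        # The 'exp' claim is what makes decoding fail once the TTL elapses.
        payload = {'sub': uid,
                   'exp': datetime.utcnow() + timedelta(hours=token_ttl_hours)}
        return jwt.encode(payload, secret, algorithm='HS256')

    token = encode_auth_token('dhf8r', token_ttl_hours=-0.01)
    try:
        jwt.decode(token, 'secret', algorithms=['HS256'])
    except jwt.ExpiredSignatureError:
        print('expired, as the negative-TTL test expects')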
@@ -17,7 +17,7 @@ class TestCompleteTemplate(unittest.TestCase):
         data = {"name": "Dan"}
         data_copy = copy.deepcopy(data)
         script.rich_text_update(data_copy)
-        self.assertEquals(data, data_copy)
+        self.assertEqual(data, data_copy)

     def test_rich_text_update_new_line(self):
         script = CompleteTemplate()
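This one-line change is the pattern repeated throughout the rest of the commit: `assertEquals` is a long-deprecated alias of `assertEqual` in `unittest`, identical in behavior but emitting a `DeprecationWarning` on every call:

    import unittest

    class AliasDemo(unittest.TestCase):
        def test_alias(self):
            self.assertEqual(1 + 1, 2)   # preferred spelling
            self.assertEquals(1 + 1, 2)  # same assertion, plus a DeprecationWarning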
@@ -1,8 +1,9 @@
 from tests.base_test import BaseTest

+from crc import db
 from crc.services.file_service import FileService
 from crc.services.workflow_processor import WorkflowProcessor


 class TestFileService(BaseTest):
     """Largely tested via the test_file_api, and time is tight, but adding new tests here."""

@@ -22,11 +23,11 @@ class TestFileService(BaseTest):
                                       binary_data=b'5678', irb_doc_code=irb_code)

         file_models = FileService.get_workflow_files(workflow_id=workflow.id)
-        self.assertEquals(1, len(file_models))
+        self.assertEqual(1, len(file_models))

         file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
-        self.assertEquals(1, len(file_data))
-        self.assertEquals(2, file_data[0].version)
+        self.assertEqual(1, len(file_data))
+        self.assertEqual(2, file_data[0].version)

     def test_add_file_from_form_increments_version_and_replaces_on_subsequent_add_with_same_name(self):

@@ -46,12 +47,43 @@ class TestFileService(BaseTest):
                                       name="anything.png", content_type="text",
                                       binary_data=b'5678')

+    def test_replace_archive_file_unarchives_the_file_and_updates(self):
+        self.load_example_data()
+        self.create_reference_document()
+        workflow = self.create_workflow('file_upload_form')
+        processor = WorkflowProcessor(workflow)
+        task = processor.next_task()
+        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      irb_doc_code=irb_code,
+                                      name="anything.png", content_type="text",
+                                      binary_data=b'1234')
+
+        # Archive the file
         file_models = FileService.get_workflow_files(workflow_id=workflow.id)
         self.assertEquals(1, len(file_models))
+        file_model = file_models[0]
+        file_model.archived = True
+        db.session.add(file_model)
+
+        # Assure that the file no longer comes back.
+        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
+        self.assertEquals(0, len(file_models))
+
+        # Add the file again with different data
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      irb_doc_code=irb_code,
+                                      name="anything.png", content_type="text",
+                                      binary_data=b'5678')
+
+        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
+        self.assertEqual(1, len(file_models))

         file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
-        self.assertEquals(1, len(file_data))
-        self.assertEquals(2, file_data[0].version)
+        self.assertEqual(1, len(file_data))
+        self.assertEqual(2, file_data[0].version)
+        self.assertEqual(b'5678', file_data[0].data)

     def test_add_file_from_form_allows_multiple_files_with_different_names(self):
         self.load_example_data()

@@ -70,4 +102,4 @@ class TestFileService(BaseTest):
                                       name="a_different_thing.png", content_type="text",
                                       binary_data=b'5678')
         file_models = FileService.get_workflow_files(workflow_id=workflow.id)
-        self.assertEquals(2, len(file_models))
+        self.assertEqual(2, len(file_models))
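The new test pins down two behaviors of the `archived` flag: setting it hides a file from `FileService.get_workflow_files`, and re-adding a file with the same name un-archives it and bumps its version instead of creating a duplicate row. A rough sketch of the listing side, assuming a SQLAlchemy `FileModel` with a boolean `archived` column (the query body is illustrative, not the service's actual code):

    from crc import db
    from crc.models.file import FileModel

    def get_workflow_files(workflow_id):
        # Archived files are treated as deleted for listing purposes.
        return db.session.query(FileModel) \
            .filter(FileModel.workflow_id == workflow_id) \
            .filter(FileModel.archived == False) \
            .all()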
@@ -3,12 +3,14 @@ import json

 from tests.base_test import BaseTest

-from crc import session
+from crc import session, db
 from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
 from crc.models.workflow import WorkflowSpecModel
 from crc.services.file_service import FileService
 from crc.services.workflow_processor import WorkflowProcessor
 from example_data import ExampleDataLoader
+from crc.services.approval_service import ApprovalService
+from crc.models.approval import ApprovalModel, ApprovalStatus


 class TestFilesApi(BaseTest):

@@ -46,6 +48,7 @@ class TestFilesApi(BaseTest):
         json_data = json.loads(rv.get_data(as_text=True))
         self.assertEqual(2, len(json_data))

+
     def test_create_file(self):
         self.load_example_data()
         spec = session.query(WorkflowSpecModel).first()

@@ -89,6 +92,39 @@ class TestFilesApi(BaseTest):
         self.assert_success(rv)

+    def test_archive_file_no_longer_shows_up(self):
+        self.load_example_data()
+        self.create_reference_document()
+        workflow = self.create_workflow('file_upload_form')
+        processor = WorkflowProcessor(workflow)
+        task = processor.next_task()
+        data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
+        correct_name = task.task_spec.form.fields[0].id
+
+        data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
+        rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_id=%i&form_field_key=%s' %
+                           (workflow.study_id, workflow.id, task.id, correct_name), data=data, follow_redirects=True,
+                           content_type='multipart/form-data', headers=self.logged_in_headers())
+
+        self.assert_success(rv)
+        rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
+        self.assert_success(rv)
+        self.assertEquals(1, len(json.loads(rv.get_data(as_text=True))))
+
+        file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all()
+        self.assertEquals(1, len(file_model))
+        file_model[0].archived = True
+        db.session.commit()
+
+        rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
+        self.assert_success(rv)
+        self.assertEquals(0, len(json.loads(rv.get_data(as_text=True))))
+
+
     def test_set_reference_file(self):
         file_name = "irb_document_types.xls"
         data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.xls")}

@@ -218,6 +254,41 @@ class TestFilesApi(BaseTest):
         rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
         self.assertEqual(404, rv.status_code)

+    def test_delete_file_after_approval(self):
+        self.create_reference_document()
+        workflow = self.create_workflow("empty_workflow")
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      name="anything.png", content_type="text",
+                                      binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr")
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      name="anotother_anything.png", content_type="text",
+                                      binary_data=b'1234', irb_doc_code="Study_App_Doc")
+
+        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
+
+        file = session.query(FileModel).\
+            filter(FileModel.workflow_id == workflow.id).\
+            filter(FileModel.name == "anything.png").first()
+        self.assertFalse(file.archived)
+        rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
+        self.assert_success(rv)
+
+        rv = self.app.delete('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
+        self.assert_success(rv)
+
+        session.refresh(file)
+        self.assertTrue(file.archived)
+
+        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
+
+        approvals = session.query(ApprovalModel)\
+            .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\
+            .filter(ApprovalModel.study_id == workflow.study_id).all()
+
+        self.assertEquals(1, len(approvals))
+        self.assertEquals(1, len(approvals[0].approval_files))
+
+
     def test_change_primary_bpmn(self):
         self.load_example_data()
         spec = session.query(WorkflowSpecModel).first()
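Taken together, the two new tests describe a soft-delete contract: DELETE on a file that an approval references flips `archived` to True rather than removing the row, and archived files drop out of the file listing and of any newly created approval. A hedged sketch of the endpoint's likely core (view wiring omitted, names illustrative):

    from crc import session
    from crc.models.file import FileModel

    def delete_file(file_id):
        # Soft delete: keep the row so existing approvals still point at it,
        # but hide it from listings and from future approvals.
        file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
        file_model.archived = True
        session.commit()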
@@ -1,22 +1,19 @@
-import os
+from tests.base_test import BaseTest

-from crc import app
 from crc.api.common import ApiError
 from crc.services.ldap_service import LdapService
-from tests.base_test import BaseTest
-from ldap3 import Server, Connection, ALL, MOCK_SYNC


 class TestLdapService(BaseTest):

     def setUp(self):
-        self.ldap_service = LdapService()
+        pass

     def tearDown(self):
         pass

     def test_get_single_user(self):
-        user_info = self.ldap_service.user_info("lb3dp")
+        user_info = LdapService.user_info("lb3dp")
         self.assertIsNotNone(user_info)
         self.assertEqual("lb3dp", user_info.uid)
         self.assertEqual("Laura Barnes", user_info.display_name)

@@ -30,7 +27,7 @@ class TestLdapService(BaseTest):

     def test_find_missing_user(self):
         try:
-            user_info = self.ldap_service.user_info("nosuch")
+            user_info = LdapService.user_info("nosuch")
             self.assertFalse(True, "An API error should be raised.")
         except ApiError as ae:
-            self.assertEquals("missing_ldap_record", ae.code)
+            self.assertEqual("missing_ldap_record", ae.code)
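The substance of this file's change is a refactor from instance to static usage: the tests no longer build an `LdapService` (or the `ldap3` mock plumbing) in `setUp`; they call `LdapService.user_info(...)` on the class, so connection handling lives inside the service. In miniature (an illustrative stand-in, not the real service):

    class LdapServiceSketch:
        """Stand-in for crc.services.ldap_service.LdapService."""

        _directory = {'lb3dp': 'Laura Barnes'}  # plays the role of the LDAP connection

        @staticmethod
        def user_info(uid):
            if uid not in LdapServiceSketch._directory:
                raise KeyError('missing_ldap_record')  # the real code raises ApiError
            return LdapServiceSketch._directory[uid]

    print(LdapServiceSketch.user_info('lb3dp'))  # no instance needed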
@@ -31,7 +31,7 @@ class TestLookupService(BaseTest):
         self.assertEqual(1, len(lookup_records))
         lookup_record = lookup_records[0]
         lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
-        self.assertEquals(28, len(lookup_data))
+        self.assertEqual(28, len(lookup_data))

     def test_updates_to_file_cause_lookup_rebuild(self):
         spec = BaseTest.load_test_spec('enum_options_with_search')

@@ -43,7 +43,7 @@ class TestLookupService(BaseTest):
         self.assertEqual(1, len(lookup_records))
         lookup_record = lookup_records[0]
         lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
-        self.assertEquals(28, len(lookup_data))
+        self.assertEqual(28, len(lookup_data))

         # Update the workflow specification file.
         file_path = os.path.join(app.root_path, '..', 'tests', 'data',

@@ -59,7 +59,7 @@ class TestLookupService(BaseTest):
         lookup_records = session.query(LookupFileModel).all()
         lookup_record = lookup_records[0]
         lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
-        self.assertEquals(4, len(lookup_data))
+        self.assertEqual(4, len(lookup_data))

@@ -70,49 +70,50 @@ class TestLookupService(BaseTest):
         processor.do_engine_steps()

         results = LookupService.lookup(workflow, "AllTheNames", "", limit=10)
-        self.assertEquals(10, len(results), "Blank queries return everything, to the limit")
+        self.assertEqual(10, len(results), "Blank queries return everything, to the limit")

         results = LookupService.lookup(workflow, "AllTheNames", "medicines", limit=10)
-        self.assertEquals(1, len(results), "words in the middle of label are detected.")
-        self.assertEquals("The Medicines Company", results[0].label)
+        self.assertEqual(1, len(results), "words in the middle of label are detected.")
+        self.assertEqual("The Medicines Company", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "UVA", limit=10)
-        self.assertEquals(1, len(results), "Beginning of label is found.")
-        self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+        self.assertEqual(1, len(results), "Beginning of label is found.")
+        self.assertEqual("UVA - INTERNAL - GM USE ONLY", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "uva", limit=10)
-        self.assertEquals(1, len(results), "case does not matter.")
-        self.assertEquals("UVA - INTERNAL - GM USE ONLY", results[0].label)
+        self.assertEqual(1, len(results), "case does not matter.")
+        self.assertEqual("UVA - INTERNAL - GM USE ONLY", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "medici", limit=10)
-        self.assertEquals(1, len(results), "partial words are picked up.")
-        self.assertEquals("The Medicines Company", results[0].label)
+        self.assertEqual(1, len(results), "partial words are picked up.")
+        self.assertEqual("The Medicines Company", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "Genetics Savings", limit=10)
-        self.assertEquals(1, len(results), "multiple terms are picked up..")
-        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+        self.assertEqual(1, len(results), "multiple terms are picked up..")
+        self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "Genetics Sav", limit=10)
-        self.assertEquals(1, len(results), "prefix queries still work with partial terms")
-        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+        self.assertEqual(1, len(results), "prefix queries still work with partial terms")
+        self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "Gen Sav", limit=10)
-        self.assertEquals(1, len(results), "prefix queries still work with ALL the partial terms")
-        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+        self.assertEqual(1, len(results), "prefix queries still work with ALL the partial terms")
+        self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "Inc", limit=10)
-        self.assertEquals(7, len(results), "short terms get multiple correct results.")
-        self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+        self.assertEqual(7, len(results), "short terms get multiple correct results.")
+        self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)

         results = LookupService.lookup(workflow, "AllTheNames", "reaction design", limit=10)
-        self.assertEquals(5, len(results), "all results come back for two terms.")
-        self.assertEquals("Reaction Design", results[0].label, "Exact matches come first.")
+        self.assertEqual(5, len(results), "all results come back for two terms.")
+        self.assertEqual("Reaction Design", results[0].label, "Exact matches come first.")

         results = LookupService.lookup(workflow, "AllTheNames", "1 Something", limit=10)
-        self.assertEquals("1 Something", results[0].label, "Exact matches are prefered")
+        self.assertEqual("1 Something", results[0].label, "Exact matches are prefered")

         results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10)
-        self.assertEquals("1 Something", results[0].label, "special characters don't flake out")
+        self.assertEqual("1 Something", results[0].label, "special characters don't flake out")

         # 1018 10000 Something Industry

@@ -123,6 +124,6 @@ class TestLookupService(BaseTest):

         # Fixme: Stop words are taken into account on the query side, and haven't found a fix yet.
         #results = WorkflowService.run_lookup_query(lookup_table.id, "in", limit=10)
-        #self.assertEquals(7, len(results), "stop words are not removed.")
-        #self.assertEquals("Genetics Savings & Clone, Inc.", results[0].label)
+        #self.assertEqual(7, len(results), "stop words are not removed.")
+        #self.assertEqual("Genetics Savings & Clone, Inc.", results[0].label)
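Read as a group, the assertions define the lookup contract: a blank query returns everything up to `limit`, matching is case-insensitive, terms can sit mid-label, partial terms act as prefixes, every term must match, and exact matches sort first even with punctuation thrown in. A toy model of that behavior (pure Python; the real service presumably runs a full-text SQL query):

    import re

    def lookup(labels, query, limit=10):
        terms = re.findall(r'\w+', query.lower())

        def matches(label):
            words = re.findall(r'\w+', label.lower())
            # every query term must prefix-match some word in the label
            return all(any(w.startswith(t) for w in words) for t in terms)

        hits = [label for label in labels if matches(label)]
        hits.sort(key=lambda label: label.lower() != query.lower())  # exact matches first
        return hits[:limit]

    names = ["The Medicines Company", "UVA - INTERNAL - GM USE ONLY",
             "Genetics Savings & Clone, Inc.", "1 Something"]
    assert lookup(names, "Gen Sav") == ["Genetics Savings & Clone, Inc."]
    assert lookup(names, "1 (!-Something")[0] == "1 Something"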
@@ -0,0 +1,55 @@
+from tests.base_test import BaseTest
+
+from crc.services.mails import (
+    send_ramp_up_submission_email,
+    send_ramp_up_approval_request_email,
+    send_ramp_up_approval_request_first_review_email,
+    send_ramp_up_approved_email,
+    send_ramp_up_denied_email,
+    send_ramp_up_denied_email_to_approver
+)
+
+
+class TestMails(BaseTest):
+
+    def setUp(self):
+        self.sender = 'sender@sartography.com'
+        self.recipients = ['recipient@sartography.com']
+        self.primary_investigator = 'Dr. Bartlett'
+        self.approver_1 = 'Max Approver'
+        self.approver_2 = 'Close Reviewer'
+
+    def test_send_ramp_up_submission_email(self):
+        send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1)
+        self.assertTrue(True)
+
+        send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2)
+        self.assertTrue(True)
+
+    def test_send_ramp_up_approval_request_email(self):
+        send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator)
+        self.assertTrue(True)
+
+    def test_send_ramp_up_approval_request_first_review_email(self):
+        send_ramp_up_approval_request_first_review_email(
+            self.sender, self.recipients, self.primary_investigator
+        )
+        self.assertTrue(True)
+
+    def test_send_ramp_up_approved_email(self):
+        send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1)
+        self.assertTrue(True)
+
+        send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2)
+        self.assertTrue(True)
+
+    def test_send_ramp_up_denied_email(self):
+        send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1)
+        self.assertTrue(True)
+
+    def test_send_send_ramp_up_denied_email_to_approver(self):
+        send_ramp_up_denied_email_to_approver(
+            self.sender, self.recipients, self.primary_investigator, self.approver_2
+        )
+        self.assertTrue(True)
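These smoke tests only assert that each sender runs without raising; the `crc.services.mails` module they exercise is the kind of thin wrapper one typically writes over Flask-Mail. A plausible shape for one sender, assuming a `Mail` instance initialized on the app elsewhere (the subject and body text here are invented for illustration):

    from flask_mail import Mail, Message

    mail = Mail()  # wired up with mail.init_app(app) at application startup

    def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None):
        approvers = approver_1 if approver_2 is None else '%s and %s' % (approver_1, approver_2)
        msg = Message('Research Ramp-up Plan Submitted',
                      sender=sender, recipients=recipients)
        msg.body = 'Your plan has been submitted to %s for approval.' % approvers
        mail.send(msg)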
@@ -1,6 +1,6 @@
-from crc.services.file_service import FileService
 from tests.base_test import BaseTest

+from crc.services.file_service import FileService
 from crc.scripts.request_approval import RequestApproval
 from crc.services.workflow_processor import WorkflowProcessor
 from crc.api.common import ApiError

@@ -24,7 +24,23 @@ class TestRequestApprovalScript(BaseTest):
                                       binary_data=b'1234')
         script = RequestApproval()
         script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
-        self.assertEquals(2, db.session.query(ApprovalModel).count())
+        self.assertEqual(2, db.session.query(ApprovalModel).count())
+
+    def test_do_task_with_blank_second_approver(self):
+        self.load_example_data()
+        self.create_reference_document()
+        workflow = self.create_workflow('empty_workflow')
+        processor = WorkflowProcessor(workflow)
+        task = processor.next_task()
+        task.data = {"study": {"approval1": "dhf8r", 'approval2':''}}
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      irb_doc_code="UVACompl_PRCAppr",
+                                      name="anything.png", content_type="text",
+                                      binary_data=b'1234')
+        script = RequestApproval()
+        script.do_task(task, workflow.study_id, workflow.id, "study.approval1", "study.approval2")
+        self.assertEqual(1, db.session.query(ApprovalModel).count())

     def test_do_task_with_incorrect_argument(self):
         """This script should raise an error if it can't figure out the approvers."""

@@ -48,5 +64,5 @@ class TestRequestApprovalScript(BaseTest):

         script = RequestApproval()
         script.do_task_validate_only(task, workflow.study_id, workflow.id, "study.approval1")
-        self.assertEquals(0, db.session.query(ApprovalModel).count())
+        self.assertEqual(0, db.session.query(ApprovalModel).count())
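The added test covers the edge the script has to get right: when the second approver expression resolves to an empty string, only one `ApprovalModel` row should be created. The natural implementation filters falsy uids before creating approvals; a sketch (the helper name is illustrative):

    def requested_approvers(uids):
        # '' (a blank second approver) and None both drop out here, so the
        # blank-approver case yields exactly one approval record.
        return [uid for uid in uids if uid]

    assert requested_approvers(['dhf8r', '']) == ['dhf8r']
    assert requested_approvers(['dhf8r', 'lb3dp']) == ['dhf8r', 'lb3dp']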
@@ -1,5 +1,6 @@
 import json
 from tests.base_test import BaseTest
+
 from datetime import datetime, timezone
 from unittest.mock import patch

@@ -8,8 +9,9 @@ from crc.models.protocol_builder import ProtocolBuilderStatus, \
     ProtocolBuilderStudySchema
 from crc.models.stats import TaskEventModel
 from crc.models.study import StudyModel, StudySchema
-from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecCategoryModel
-from crc.services.protocol_builder import ProtocolBuilderService
+from crc.models.workflow import WorkflowSpecModel, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.workflow_processor import WorkflowProcessor


 class TestStudyApi(BaseTest):

@@ -68,6 +70,34 @@ class TestStudyApi(BaseTest):
         self.assertEqual(0, workflow["total_tasks"])
         self.assertEqual(0, workflow["completed_tasks"])

+    def test_get_study_has_details_about_files(self):
+
+        # Set up the study and attach a file to it.
+        self.load_example_data()
+        self.create_reference_document()
+        workflow = self.create_workflow('file_upload_form')
+        processor = WorkflowProcessor(workflow)
+        task = processor.next_task()
+        irb_code = "UVACompl_PRCAppr"  # The first file referenced in pb required docs.
+        FileService.add_workflow_file(workflow_id=workflow.id,
+                                      name="anything.png", content_type="png",
+                                      binary_data=b'1234', irb_doc_code=irb_code)
+
+        api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
+                                    headers=self.logged_in_headers(), content_type="application/json")
+        self.assert_success(api_response)
+        study = StudySchema().loads(api_response.get_data(as_text=True))
+        self.assertEqual(1, len(study.files))
+        self.assertEqual("UVA Compliance/PRC Approval", study.files[0]["category"])
+        self.assertEqual("Cancer Center's PRC Approval Form", study.files[0]["description"])
+        self.assertEqual("UVA Compliance/PRC Approval.png", study.files[0]["download_name"])
+
+    # TODO: WRITE A TEST FOR STUDY FILES
+
+    def test_get_study_has_details_about_approvals(self):
+        # TODO: WRITE A TEST FOR STUDY APPROVALS
+        pass
+
     def test_add_study(self):
         self.load_example_data()
         study = self.add_test_study()

@@ -150,10 +180,10 @@ class TestStudyApi(BaseTest):
         db_studies_after = session.query(StudyModel).all()
         num_db_studies_after = len(db_studies_after)
         self.assertGreater(num_db_studies_after, num_db_studies_before)
-        self.assertEquals(num_abandoned, 1)
-        self.assertEquals(num_open, 1)
-        self.assertEquals(num_active, 1)
-        self.assertEquals(num_incomplete, 1)
+        self.assertEqual(num_abandoned, 1)
+        self.assertEqual(num_open, 1)
+        self.assertEqual(num_active, 1)
+        self.assertEqual(num_incomplete, 1)
         self.assertEqual(len(json_data), num_db_studies_after)
         self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after)
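Note how the new study test reads its response: it rehydrates the JSON through `StudySchema().loads(...)` and then checks display-oriented fields on `study.files[0]` (`category`, `description`, `download_name`) that come from the IRB document reference sheet rather than from the uploaded file itself. A sketch of that derivation, with the mapping dictionary reconstructed from the values the test asserts (the real source is the reference spreadsheet):

    # Values mirror the assertions for irb_doc_code "UVACompl_PRCAppr".
    IRB_DOCS = {
        "UVACompl_PRCAppr": {
            "category": "UVA Compliance/PRC Approval",
            "description": "Cancer Center's PRC Approval Form",
        }
    }

    def file_details(irb_doc_code, file_name):
        doc = IRB_DOCS[irb_doc_code]
        extension = file_name.rsplit('.', 1)[-1]
        return {"category": doc["category"],
                "description": doc["description"],
                "download_name": "%s.%s" % (doc["category"], extension)}

    assert file_details("UVACompl_PRCAppr", "anything.png")["download_name"] == \
        "UVA Compliance/PRC Approval.png"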
@@ -153,14 +153,16 @@ class TestStudyService(BaseTest):
         self.assertEqual(1, docs["UVACompl_PRCAppr"]['count'])
         self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0])
         self.assertIsNotNone(docs["UVACompl_PRCAppr"]['files'][0]['file_id'])
-        self.assertEquals(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])
+        self.assertEqual(workflow.id, docs["UVACompl_PRCAppr"]['files'][0]['workflow_id'])

     def test_get_all_studies(self):
         user = self.create_user_with_study_and_workflow()
+        study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
+        self.assertIsNotNone(study)

         # Add a document to the study with the correct code.
-        workflow1 = self.create_workflow('docx')
-        workflow2 = self.create_workflow('empty_workflow')
+        workflow1 = self.create_workflow('docx', study=study)
+        workflow2 = self.create_workflow('empty_workflow', study=study)

         # Add files to both workflows.
         FileService.add_workflow_file(workflow_id=workflow1.id,

@@ -174,8 +176,8 @@ class TestStudyService(BaseTest):
                                       binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )

         studies = StudyService().get_all_studies_with_files()
-        self.assertEquals(1, len(studies))
-        self.assertEquals(3, len(studies[0].files))
+        self.assertEqual(1, len(studies))
+        self.assertEqual(3, len(studies[0].files))

@@ -191,17 +193,17 @@ class TestStudyService(BaseTest):
         workflow = self.create_workflow('docx') # The workflow really doesnt matter in this case.
         investigators = StudyService().get_investigators(workflow.study_id)

-        self.assertEquals(9, len(investigators))
+        self.assertEqual(9, len(investigators))

         # dhf8r is in the ldap mock data.
-        self.assertEquals("dhf8r", investigators['PI']['user_id'])
-        self.assertEquals("Dan Funk", investigators['PI']['display_name'])  # Data from ldap
-        self.assertEquals("Primary Investigator", investigators['PI']['label'])  # Data from xls file.
-        self.assertEquals("Always", investigators['PI']['display'])  # Data from xls file.
+        self.assertEqual("dhf8r", investigators['PI']['user_id'])
+        self.assertEqual("Dan Funk", investigators['PI']['display_name'])  # Data from ldap
+        self.assertEqual("Primary Investigator", investigators['PI']['label'])  # Data from xls file.
+        self.assertEqual("Always", investigators['PI']['display'])  # Data from xls file.

         # asd3v is not in ldap, so an error should be returned.
-        self.assertEquals("asd3v", investigators['DC']['user_id'])
-        self.assertEquals("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error'])  # Data from ldap
+        self.assertEqual("asd3v", investigators['DC']['user_id'])
+        self.assertEqual("Unable to locate a user with id asd3v in LDAP", investigators['DC']['error'])  # Data from ldap

         # No value is provided for Department Chair
         self.assertIsNone(investigators['DEPT_CH']['user_id'])
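The fix in `test_get_all_studies` is in the test setup itself: both workflows are now created against the study that `create_user_with_study_and_workflow` produced, so `get_all_studies_with_files` sees one study holding all three files instead of files scattered across implicitly created studies. A self-contained illustration of the helper signature this relies on (an assumption about `BaseTest.create_workflow`, inferred from the call sites):

    class BaseTestSketch:
        """Illustrative stand-in for tests.base_test.BaseTest."""

        def create_study(self):
            return object()  # a fresh study per call

        def create_workflow(self, workflow_name, study=None):
            # Passing study= lets several workflows share one study.
            study = study or self.create_study()
            return (workflow_name, study)

    t = BaseTestSketch()
    shared = t.create_study()
    w1 = t.create_workflow('docx', study=shared)
    w2 = t.create_workflow('empty_workflow', study=shared)
    assert w1[1] is w2[1]  # both workflows hang off the same study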
@@ -4,86 +4,14 @@ import random
 from unittest.mock import patch

 from tests.base_test import BaseTest

 from crc import session, app
 from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
 from crc.models.file import FileModelSchema
-from crc.models.stats import TaskEventModel
 from crc.models.workflow import WorkflowStatus
-from crc.services.protocol_builder import ProtocolBuilderService
-from crc.services.workflow_service import WorkflowService


 class TestTasksApi(BaseTest):

-    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
-        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
-                          (workflow.id, str(soft_reset), str(hard_reset)),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json")
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-        workflow_api = WorkflowApiSchema().load(json_data)
-        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
-        return workflow_api
-
-    def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
-        prev_completed_task_count = workflow_in.completed_tasks
-        if isinstance(task_in, dict):
-            task_id = task_in["id"]
-        else:
-            task_id = task_in.id
-        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json",
-                          data=json.dumps(dict_data))
-        if error_code:
-            self.assert_failure(rv, error_code=error_code)
-            return
-
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-
-        # Assure stats are updated on the model
-        workflow = WorkflowApiSchema().load(json_data)
-        # The total number of tasks may change over time, as users move through gateways
-        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
-        self.assertIsNotNone(workflow.total_tasks)
-        self.assertEquals(prev_completed_task_count + 1, workflow.completed_tasks)
-        # Assure a record exists in the Task Events
-        task_events = session.query(TaskEventModel) \
-            .filter_by(workflow_id=workflow.id) \
-            .filter_by(task_id=task_id) \
-            .order_by(TaskEventModel.date.desc()).all()
-        self.assertGreater(len(task_events), 0)
-        event = task_events[0]
-        self.assertIsNotNone(event.study_id)
-        self.assertEquals("dhf8r", event.user_uid)
-        self.assertEquals(workflow.id, event.workflow_id)
-        self.assertEquals(workflow.workflow_spec_id, event.workflow_spec_id)
-        self.assertEquals(workflow.spec_version, event.spec_version)
-        self.assertEquals(WorkflowService.TASK_ACTION_COMPLETE, event.action)
-        self.assertEquals(task_in.id, task_id)
-        self.assertEquals(task_in.name, event.task_name)
-        self.assertEquals(task_in.title, event.task_title)
-        self.assertEquals(task_in.type, event.task_type)
-        self.assertEquals("COMPLETED", event.task_state)
-        # Not sure what vodoo is happening inside of marshmallow to get me in this state.
-        if isinstance(task_in.multi_instance_type, MultiInstanceType):
-            self.assertEquals(task_in.multi_instance_type.value, event.mi_type)
-        else:
-            self.assertEquals(task_in.multi_instance_type, event.mi_type)
-
-        self.assertEquals(task_in.multi_instance_count, event.mi_count)
-        self.assertEquals(task_in.multi_instance_index, event.mi_index)
-        self.assertEquals(task_in.process_name, event.process_name)
-        self.assertIsNotNone(event.date)
-
-        workflow = WorkflowApiSchema().load(json_data)
-        return workflow
-
     def test_get_current_user_tasks(self):
         self.load_example_data()
         workflow = self.create_workflow('random_fact')

@@ -156,14 +84,14 @@ class TestTasksApi(BaseTest):

         self.assertIsNotNone(workflow_api.navigation)
         nav = workflow_api.navigation
-        self.assertEquals(5, len(nav))
-        self.assertEquals("Do You Have Bananas", nav[0]['title'])
-        self.assertEquals("Bananas?", nav[1]['title'])
-        self.assertEquals("FUTURE", nav[1]['state'])
-        self.assertEquals("yes", nav[2]['title'])
-        self.assertEquals("NOOP", nav[2]['state'])
-        self.assertEquals("no", nav[3]['title'])
-        self.assertEquals("NOOP", nav[3]['state'])
+        self.assertEqual(5, len(nav))
+        self.assertEqual("Do You Have Bananas", nav[0]['title'])
+        self.assertEqual("Bananas?", nav[1]['title'])
+        self.assertEqual("FUTURE", nav[1]['state'])
+        self.assertEqual("yes", nav[2]['title'])
+        self.assertEqual("NOOP", nav[2]['state'])
+        self.assertEqual("no", nav[3]['title'])
+        self.assertEqual("NOOP", nav[3]['state'])

     def test_navigation_with_exclusive_gateway(self):
         self.load_example_data()

@@ -173,19 +101,20 @@ class TestTasksApi(BaseTest):
         workflow_api = self.get_workflow_api(workflow)
         self.assertIsNotNone(workflow_api.navigation)
         nav = workflow_api.navigation
-        self.assertEquals(7, len(nav))
-        self.assertEquals("Task 1", nav[0]['title'])
-        self.assertEquals("Which Branch?", nav[1]['title'])
-        self.assertEquals("a", nav[2]['title'])
-        self.assertEquals("Task 2a", nav[3]['title'])
-        self.assertEquals("b", nav[4]['title'])
-        self.assertEquals("Task 2b", nav[5]['title'])
-        self.assertEquals("Task 3", nav[6]['title'])
+        self.assertEqual(7, len(nav))
+        self.assertEqual("Task 1", nav[0]['title'])
+        self.assertEqual("Which Branch?", nav[1]['title'])
+        self.assertEqual("a", nav[2]['title'])
+        self.assertEqual("Task 2a", nav[3]['title'])
+        self.assertEqual("b", nav[4]['title'])
+        self.assertEqual("Task 2b", nav[5]['title'])
+        self.assertEqual("Task 3", nav[6]['title'])

     def test_document_added_to_workflow_shows_up_in_file_list(self):
         self.load_example_data()
         self.create_reference_document()
         workflow = self.create_workflow('docx')

         # get the first form in the two form workflow.
         task = self.get_workflow_api(workflow).next_task
         data = {

@@ -204,12 +133,12 @@ class TestTasksApi(BaseTest):
         json_data = json.loads(rv.get_data(as_text=True))
         files = FileModelSchema(many=True).load(json_data, session=session)
         self.assertTrue(len(files) == 1)

         # Assure we can still delete the study even when there is a file attached to a workflow.
         rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
         self.assert_success(rv)

     def test_get_documentation_populated_in_end(self):
         self.load_example_data()
         workflow = self.create_workflow('random_fact')

@@ -287,8 +216,8 @@ class TestTasksApi(BaseTest):
         workflow_api = self.complete_form(workflow, task, {"name": "Dan"})

         workflow = self.get_workflow_api(workflow)
-        self.assertEquals('Task_Manual_One', workflow.next_task.name)
-        self.assertEquals('ManualTask', workflow_api.next_task.type)
+        self.assertEqual('Task_Manual_One', workflow.next_task.name)
+        self.assertEqual('ManualTask', workflow_api.next_task.type)
         self.assertTrue('Markdown' in workflow_api.next_task.documentation)
         self.assertTrue('Dan' in workflow_api.next_task.documentation)

@@ -298,7 +227,7 @@ class TestTasksApi(BaseTest):

         # get the first form in the two form workflow.
         task = self.get_workflow_api(workflow).next_task
-        self.assertEquals("JustAValue", task.properties['JustAKey'])
+        self.assertEqual("JustAValue", task.properties['JustAKey'])

     @patch('crc.services.protocol_builder.requests.get')

@@ -318,13 +247,13 @@ class TestTasksApi(BaseTest):
         # get the first form in the two form workflow.
         workflow = self.get_workflow_api(workflow)
         navigation = self.get_workflow_api(workflow).navigation
-        self.assertEquals(4, len(navigation))  # Start task, form_task, multi_task, end task
-        self.assertEquals("UserTask", workflow.next_task.type)
-        self.assertEquals(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type)
-        self.assertEquals(9, workflow.next_task.multi_instance_count)
+        self.assertEqual(4, len(navigation))  # Start task, form_task, multi_task, end task
+        self.assertEqual("UserTask", workflow.next_task.type)
+        self.assertEqual(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type)
+        self.assertEqual(9, workflow.next_task.multi_instance_count)

         # Assure that the names for each task are properly updated, so they aren't all the same.
-        self.assertEquals("Primary Investigator", workflow.next_task.properties['display_name'])
+        self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name'])

     def test_lookup_endpoint_for_task_field_enumerations(self):

@@ -366,18 +295,18 @@ class TestTasksApi(BaseTest):
         navigation = workflow_api.navigation
         task = workflow_api.next_task

-        self.assertEquals(2, len(navigation))
-        self.assertEquals("UserTask", task.type)
-        self.assertEquals("Activity_A", task.name)
-        self.assertEquals("My Sub Process", task.process_name)
+        self.assertEqual(2, len(navigation))
+        self.assertEqual("UserTask", task.type)
+        self.assertEqual("Activity_A", task.name)
+        self.assertEqual("My Sub Process", task.process_name)
         workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
         task = workflow_api.next_task
         self.assertIsNotNone(task)

-        self.assertEquals("Activity_B", task.name)
-        self.assertEquals("Sub Workflow Example", task.process_name)
+        self.assertEqual("Activity_B", task.name)
+        self.assertEqual("Sub Workflow Example", task.process_name)
         workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
-        self.assertEquals(WorkflowStatus.complete, workflow_api.status)
+        self.assertEqual(WorkflowStatus.complete, workflow_api.status)

     def test_update_task_resets_token(self):
         self.load_example_data()

@@ -387,7 +316,7 @@ class TestTasksApi(BaseTest):
         first_task = self.get_workflow_api(workflow).next_task
         self.complete_form(workflow, first_task, {"has_bananas": True})
         workflow = self.get_workflow_api(workflow)
-        self.assertEquals('Task_Num_Bananas', workflow.next_task.name)
+        self.assertEqual('Task_Num_Bananas', workflow.next_task.name)

         # Trying to re-submit the initial task, and answer differently, should result in an error.
         self.complete_form(workflow, first_task, {"has_bananas": False}, error_code="invalid_state")

@@ -408,18 +337,18 @@ class TestTasksApi(BaseTest):
         workflow = WorkflowApiSchema().load(json_data)

         # Assure the Next Task is the one we just reset the token to be on.
-        self.assertEquals("Task_Has_Bananas", workflow.next_task.name)
+        self.assertEqual("Task_Has_Bananas", workflow.next_task.name)

         # Go ahead and get that workflow one more time, it should still be right.
         workflow = self.get_workflow_api(workflow)

         # Assure the Next Task is the one we just reset the token to be on.
-        self.assertEquals("Task_Has_Bananas", workflow.next_task.name)
+        self.assertEqual("Task_Has_Bananas", workflow.next_task.name)

         # The next task should be a different value.
         self.complete_form(workflow, workflow.next_task, {"has_bananas": False})
         workflow = self.get_workflow_api(workflow)
-        self.assertEquals('Task_Why_No_Bananas', workflow.next_task.name)
+        self.assertEqual('Task_Why_No_Bananas', workflow.next_task.name)

     @patch('crc.services.protocol_builder.requests.get')
     def test_parallel_multi_instance(self, mock_get):

@@ -434,13 +363,13 @@ class TestTasksApi(BaseTest):
         workflow = self.create_workflow('multi_instance_parallel')

         workflow_api = self.get_workflow_api(workflow)
-        self.assertEquals(12, len(workflow_api.navigation))
+        self.assertEqual(12, len(workflow_api.navigation))
         ready_items = [nav for nav in workflow_api.navigation if nav['state'] == "READY"]
-        self.assertEquals(9, len(ready_items))
+        self.assertEqual(9, len(ready_items))

-        self.assertEquals("UserTask", workflow_api.next_task.type)
-        self.assertEquals("MutiInstanceTask",workflow_api.next_task.name)
-        self.assertEquals("more information", workflow_api.next_task.title)
+        self.assertEqual("UserTask", workflow_api.next_task.type)
+        self.assertEqual("MutiInstanceTask",workflow_api.next_task.name)
+        self.assertEqual("more information", workflow_api.next_task.title)

         for i in random.sample(range(9), 9):
|
for i in random.sample(range(9), 9):
|
||||||
task = TaskSchema().load(ready_items[i]['task'])
|
task = TaskSchema().load(ready_items[i]['task'])
|
||||||
|
@ -448,5 +377,5 @@ class TestTasksApi(BaseTest):
|
||||||
#tasks = self.get_workflow_api(workflow).user_tasks
|
#tasks = self.get_workflow_api(workflow).user_tasks
|
||||||
|
|
||||||
workflow = self.get_workflow_api(workflow)
|
workflow = self.get_workflow_api(workflow)
|
||||||
self.assertEquals(WorkflowStatus.complete, workflow.status)
|
self.assertEqual(WorkflowStatus.complete, workflow.status)
|
||||||
|
|
||||||
|
|
|
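Note: nearly every hunk above makes the same mechanical change. In unittest, `assertEquals` is only a deprecated alias of `assertEqual` and emits a DeprecationWarning on every call, which the rename avoids. A minimal, self-contained sketch of the behavior:

import unittest
import warnings

class AliasDemo(unittest.TestCase):
    def test_deprecated_alias_warns(self):
        # assertEquals still works, but only as a deprecated alias of
        # assertEqual; each call raises a DeprecationWarning.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEquals(1, 1)
        self.assertTrue(any(issubclass(w.category, DeprecationWarning)
                            for w in caught))

if __name__ == '__main__':
    unittest.main()
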
@@ -28,7 +28,7 @@ class TestStudyApi(BaseTest):
                               content_type='multipart/form-data')
         self.assert_success(rv)
         self.assertIsNotNone(rv.data)
-        self.assertEquals('application/octet-stream', rv.content_type)
+        self.assertEqual('application/octet-stream', rv.content_type)

     def test_list_scripts(self):
         rv = self.app.get('/v1.0/list_scripts')

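Note: this hunk asserts that a file download comes back as `application/octet-stream`. For reference, a minimal Flask sketch (illustrative route and names, not the crc API) showing how the test client exposes `content_type` for exactly this kind of assertion:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/download')
def download():
    # Serve raw bytes with the generic binary content type checked above.
    return Response(b'\x00\x01', content_type='application/octet-stream')

client = app.test_client()
rv = client.get('/download')
assert rv.content_type == 'application/octet-stream'
assert rv.data  # non-empty body, like assertIsNotNone(rv.data)
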
@@ -19,5 +19,5 @@ class TestUpdateStudyScript(BaseTest):

         script = UpdateStudy()
         script.do_task(task, workflow.study_id, workflow.id, "title:details.label", "pi:details.value")
-        self.assertEquals("My New Title", workflow.study.title)
-        self.assertEquals("dhf8r", workflow.study.primary_investigator_id)
+        self.assertEqual("My New Title", workflow.study.title)
+        self.assertEqual("dhf8r", workflow.study.primary_investigator_id)

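Note: the `UpdateStudy` call above takes argument pairs of the form `attribute:dotted.path`. The script internals are not shown in this diff, so the following is only an illustrative sketch, with a hypothetical `apply_updates` helper and a direct attribute mapping, of how such pairs can copy values out of task data onto a study:

# Illustrative only -- not the crc UpdateStudy implementation.
class Study:
    title = None
    pi = None

def apply_updates(task_data, study, *args):
    for arg in args:
        attr, path = arg.split(":", 1)
        value = task_data
        for key in path.split("."):   # walk the dotted path into task data
            value = value[key]
        setattr(study, attr, value)

task_data = {"details": {"label": "My New Title", "value": "dhf8r"}}
study = Study()
apply_updates(task_data, study, "title:details.label", "pi:details.value")
print(study.title, study.pi)  # My New Title dhf8r
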
@@ -25,7 +25,7 @@ class TestWorkflowProcessor(BaseTest):

     def _populate_form_with_random_data(self, task):
         api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
-        WorkflowService.populate_form_with_random_data(task, api_task)
+        WorkflowService.populate_form_with_random_data(task, api_task, required_only=False)

     def get_processor(self, study_model, spec_model):
         workflow_model = StudyService._create_workflow_model(study_model, spec_model)

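Note: the new `required_only=False` argument reflects a flag added to `WorkflowService.populate_form_with_random_data`; this test helper opts into filling every field, required or not. A rough sketch, using made-up field dictionaries rather than the real form model, of what such a flag implies:

# Sketch with invented data shapes; the real method works on spiff form
# field objects, not plain dicts.
import random
import string

def populate_form_with_random_data(data, fields, required_only=False):
    for field in fields:
        if required_only and not field.get("required", False):
            continue  # optional fields stay unset in required-only mode
        data[field["id"]] = ''.join(random.choices(string.ascii_lowercase, k=8))
    return data

fields = [{"id": "string_required", "required": True},
          {"id": "string_not_required", "required": False}]
print(sorted(populate_form_with_random_data({}, fields, required_only=True)))
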
@@ -57,13 +57,13 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         task = next_user_tasks[0]

         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
-        self.assertEquals("dhf8r", task.data["investigator"]["user_id"])
+        self.assertEqual("dhf8r", task.data["investigator"]["user_id"])

         self.assertEqual("MutiInstanceTask", task.get_name())
         api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEquals(MultiInstanceType.sequential, api_task.multi_instance_type)
-        self.assertEquals(3, api_task.multi_instance_count)
-        self.assertEquals(1, api_task.multi_instance_index)
+        self.assertEqual(MultiInstanceType.sequential, api_task.multi_instance_type)
+        self.assertEqual(3, api_task.multi_instance_count)
+        self.assertEqual(1, api_task.multi_instance_index)
         task.update_data({"investigator":{"email":"asd3v@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
@@ -72,8 +72,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         api_task = WorkflowService.spiff_task_to_api_task(task)
         self.assertEqual("MutiInstanceTask", api_task.name)
         task.update_data({"investigator":{"email":"asdf32@virginia.edu"}})
-        self.assertEquals(3, api_task.multi_instance_count)
-        self.assertEquals(2, api_task.multi_instance_index)
+        self.assertEqual(3, api_task.multi_instance_count)
+        self.assertEqual(2, api_task.multi_instance_index)
         processor.complete_task(task)
         processor.do_engine_steps()

@@ -81,8 +81,8 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         api_task = WorkflowService.spiff_task_to_api_task(task)
         self.assertEqual("MutiInstanceTask", task.get_name())
         task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
-        self.assertEquals(3, api_task.multi_instance_count)
-        self.assertEquals(3, api_task.multi_instance_index)
+        self.assertEqual(3, api_task.multi_instance_count)
+        self.assertEqual(3, api_task.multi_instance_index)
         processor.complete_task(task)
         processor.do_engine_steps()
         task = processor.bpmn_workflow.last_task
@@ -91,7 +91,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         expected['PI']['email'] = "asd3v@virginia.edu"
         expected['SC_I']['email'] = "asdf32@virginia.edu"
         expected['DC']['email'] = "dhf8r@virginia.edu"
-        self.assertEquals(expected,
+        self.assertEqual(expected,
                           task.data['StudyInfo']['investigators'])

         self.assertEqual(WorkflowStatus.complete, processor.get_status())
@@ -117,10 +117,10 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         task = next_user_tasks[2]

         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
-        self.assertEquals("asd3v", task.data["investigator"]["user_id"])  # The last of the tasks
+        self.assertEqual("asd3v", task.data["investigator"]["user_id"])  # The last of the tasks

         api_task = WorkflowService.spiff_task_to_api_task(task)
-        self.assertEquals(MultiInstanceType.parallel, api_task.multi_instance_type)
+        self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type)
         task.update_data({"investigator":{"email":"dhf8r@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()
@@ -144,7 +144,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
         expected['PI']['email'] = "asd3v@virginia.edu"
         expected['SC_I']['email'] = "asdf32@virginia.edu"
         expected['DC']['email'] = "dhf8r@virginia.edu"
-        self.assertEquals(expected,
+        self.assertEqual(expected,
                           task.data['StudyInfo']['investigators'])

         self.assertEqual(WorkflowStatus.complete, processor.get_status())

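Note: both multi-instance tests lean on the same contract: `multi_instance_count` stays fixed at the number of instances while `multi_instance_index` identifies the instance currently presented (1-based, advancing with each completion in the sequential case). A plain-data sketch of that invariant, not the SpiffWorkflow API:

# Plain-data illustration of the sequential multi-instance assertions above.
investigators = ["PI", "SC_I", "DC"]

for index, role in enumerate(investigators, start=1):
    api_task = {
        "name": "MutiInstanceTask",                  # (sic) task id from the BPMN file
        "multi_instance_count": len(investigators),  # fixed at 3 throughout
        "multi_instance_index": index,               # 1, 2, 3 as tasks complete
    }
    assert api_task["multi_instance_count"] == 3
    print(api_task["multi_instance_index"], "of 3:", role)
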
@@ -66,9 +66,9 @@ class TestWorkflowService(BaseTest):
         task = processor.next_task()
         WorkflowService.process_options(task, task.task_spec.form.fields[0])
         options = task.task_spec.form.fields[0].options
-        self.assertEquals(28, len(options))
-        self.assertEquals('1000', options[0]['id'])
-        self.assertEquals("UVA - INTERNAL - GM USE ONLY", options[0]['name'])
+        self.assertEqual(28, len(options))
+        self.assertEqual('1000', options[0]['id'])
+        self.assertEqual("UVA - INTERNAL - GM USE ONLY", options[0]['name'])

     def test_random_data_populate_form_on_auto_complete(self):
         self.load_example_data()
@@ -77,5 +77,5 @@ class TestWorkflowService(BaseTest):
         processor.do_engine_steps()
         task = processor.next_task()
         task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
-        WorkflowService.populate_form_with_random_data(task, task_api)
+        WorkflowService.populate_form_with_random_data(task, task_api, required_only=False)
         self.assertTrue(isinstance(task.data["sponsor"], dict))

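Note: in the first TestWorkflowService hunk, `process_options` resolves a form field's lookup reference into a concrete list of option dicts, which is what the assertions index into. A toy sketch of that resulting shape only; the funding-source values come from the test, while the loader itself is invented:

# Invented loader; only the {id, name} option shape asserted above matters.
def load_options():
    return [{"id": "1000", "name": "UVA - INTERNAL - GM USE ONLY"},
            {"id": "1001", "name": "Example Sponsor"}]

options = load_options()
assert options[0]['id'] == '1000'
assert options[0]['name'] == "UVA - INTERNAL - GM USE ONLY"
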
@@ -3,17 +3,16 @@ from unittest.mock import patch

 from tests.base_test import BaseTest

-from crc.services.protocol_builder import ProtocolBuilderService
 from crc import session, app
 from crc.api.common import ApiErrorSchema
 from crc.models.protocol_builder import ProtocolBuilderStudySchema
 from crc.models.workflow import WorkflowSpecModel
+from crc.services.workflow_service import WorkflowService


 class TestWorkflowSpecValidation(BaseTest):

     def validate_workflow(self, workflow_name):
-        self.load_example_data()
         spec_model = self.load_test_spec(workflow_name)
         rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
         self.assert_success(rv)
@@ -22,6 +21,7 @@ class TestWorkflowSpecValidation(BaseTest):

     def test_successful_validation_of_test_workflows(self):
         app.config['PB_ENABLED'] = False # Assure this is disabled.
+        self.load_example_data()
         self.assertEqual(0, len(self.validate_workflow("parallel_tasks")))
         self.assertEqual(0, len(self.validate_workflow("decision_table")))
         self.assertEqual(0, len(self.validate_workflow("docx")))
@@ -49,6 +49,13 @@ class TestWorkflowSpecValidation(BaseTest):

         self.load_example_data(use_crc_data=True)
         app.config['PB_ENABLED'] = True
+        self.validate_all_loaded_workflows()
+
+    def test_successful_validation_of_rrt_workflows(self):
+        self.load_example_data(use_rrt_data=True)
+        self.validate_all_loaded_workflows()
+
+    def validate_all_loaded_workflows(self):
         workflows = session.query(WorkflowSpecModel).all()
         errors = []
         for w in workflows:

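Note: the refactor above moves `load_example_data()` out of the shared `validate_workflow` helper so each test can choose its own fixture set, and extracts a `validate_all_loaded_workflows` helper that the CRC and RRT datasets both reuse: query every spec, validate it, and fail with the accumulated JSON if anything comes back. A self-contained sketch of that collect-then-assert pattern, where the `validate` stub stands in for the HTTP call:

import json

def validate(spec_id):
    # stand-in for GET /v1.0/workflow-specification/<id>/validate
    return [{"code": "workflow_validation_error"}] if "invalid" in spec_id else []

workflows = ["parallel_tasks", "decision_table", "docx"]
errors = []
for w in workflows:
    errors.extend(validate(w))

# Mirrors assertEqual(0, len(errors), json.dumps(errors)): the dump makes a
# failure print every collected error at once instead of just the count.
assert len(errors) == 0, json.dumps(errors)
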
@@ -59,28 +66,54 @@ class TestWorkflowSpecValidation(BaseTest):
             errors.extend(ApiErrorSchema(many=True).load(json_data))
         self.assertEqual(0, len(errors), json.dumps(errors))


     def test_invalid_expression(self):
+        self.load_example_data()
         errors = self.validate_workflow("invalid_expression")
-        self.assertEqual(1, len(errors))
-        self.assertEqual("workflow_execution_exception", errors[0]['code'])
+        self.assertEqual(2, len(errors))
+        self.assertEqual("workflow_validation_exception", errors[0]['code'])
         self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
         self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
         self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
-        self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
+        self.assertEqual('When populating all fields ... ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
                          'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
+        self.assertIsNotNone(errors[0]['task_data'])
+        self.assertIn("has_bananas", errors[0]['task_data'])

     def test_validation_error(self):
+        self.load_example_data()
         errors = self.validate_workflow("invalid_spec")
-        self.assertEqual(1, len(errors))
+        self.assertEqual(2, len(errors))
         self.assertEqual("workflow_validation_error", errors[0]['code'])
         self.assertEqual("StartEvent_1", errors[0]['task_id'])
         self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])

     def test_invalid_script(self):
+        self.load_example_data()
         errors = self.validate_workflow("invalid_script")
-        self.assertEqual(1, len(errors))
-        self.assertEqual("workflow_execution_exception", errors[0]['code'])
+        self.assertEqual(2, len(errors))
+        self.assertEqual("workflow_validation_exception", errors[0]['code'])
         self.assertTrue("NoSuchScript" in errors[0]['message'])
         self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
         self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
         self.assertEqual("invalid_script.bpmn", errors[0]['file_name'])
+
+    def test_repeating_sections_correctly_populated(self):
+        self.load_example_data()
+        spec_model = self.load_test_spec('repeat_form')
+        final_data = WorkflowService.test_spec(spec_model.id)
+        self.assertIsNotNone(final_data)
+        self.assertIn('cats', final_data)
+
+    def test_required_fields(self):
+        self.load_example_data()
+        spec_model = self.load_test_spec('required_fields')
+        final_data = WorkflowService.test_spec(spec_model.id)
+        self.assertIsNotNone(final_data)
+        self.assertIn('string_required', final_data)
+        self.assertIn('string_not_required', final_data)
+
+        final_data = WorkflowService.test_spec(spec_model.id, required_only=True)
+        self.assertIsNotNone(final_data)
+        self.assertIn('string_required', final_data)
+        self.assertNotIn('string_not_required', final_data)

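Note: the expected error counts double from one to two, and the expected message gains a 'When populating all fields ...' prefix. This is consistent with validation now running each spec twice, once with every field populated and once with only required fields, which is the `required_only=True` path that `test_required_fields` exercises directly through `WorkflowService.test_spec`. A hedged sketch of that two-pass shape; the first label and the error code come from the diff, the second label and the rest are assumed:

def run_spec(required_only):
    # stand-in for WorkflowService.test_spec; fails the same way on both passes
    raise NameError("name 'this_value_does_not_exist' is not defined")

errors = []
for required_only, label in ((False, "When populating all fields ..."),
                             (True, "When populating required fields ...")):  # second label assumed
    try:
        run_spec(required_only)
    except Exception as e:
        errors.append({"code": "workflow_validation_exception",
                       "message": "%s %s" % (label, e)})

print(len(errors))           # 2 -- one error per validation pass
print(errors[0]["message"])  # prefixed like the assertion in the diff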