commit cff270071a
Merge branch 'rrt/dev' into fix/empty_task
@@ -16,9 +16,7 @@ before_install:
  - psql -c 'create database crc_test;' -U postgres

install:
  - pip install pipenv pytest coverage
  - export PATH=$PATH:$HOME/.local/bin;
  - pipenv install
  - pipenv install --dev

env:
  global:
@@ -9,7 +9,6 @@ RUN set -xe \
    && apt-get autoremove -y \
    && apt-get clean -y \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir -p /app \
    && useradd _gunicorn --no-create-home --user-group

COPY . /app/
Pipfile
@@ -6,6 +6,7 @@ verify_ssl = true
[dev-packages]
pytest = "*"
pbr = "*"
coverage = "*"

[packages]
connexion = {extras = ["swagger-ui"],version = "*"}

@@ -40,6 +41,8 @@ gunicorn = "*"
werkzeug = "*"
sentry-sdk = {extras = ["flask"],version = "==0.14.4"}
flask-mail = "*"
flask-admin = "*"
markdown = "*"

[requires]
python_version = "3.7"
Pipfile.lock
@@ -1,7 +1,7 @@
{
    "_meta": {
        "hash": {
            "sha256": "6c89585086260ebcb41918b8ef3b1d9e189e1b492208d3ff000a138bc2f2fcee"
            "sha256": "deb9e257fe8240d12bf82940ff22f5ddb338b305491f33655a82adda9438990f"
        },
        "pipfile-spec": 6,
        "requires": {
@@ -104,17 +104,17 @@
        },
        "celery": {
            "hashes": [
                "sha256:c3f4173f83ceb5a5c986c5fdaefb9456de3b0729a72a5776e46bd405fda7b647",
                "sha256:d1762d6065522879f341c3d67c2b9fe4615eb79756d59acb1434601d4aca474b"
                "sha256:ef17d7dffde7fc73ecab3a3b6389d93d3213bac53fa7f28e68e33647ad50b916",
                "sha256:fd77e4248bb1b7af5f7922dd8e81156f540306e3a5c4b1c24167c1f5f06025da"
            ],
            "version": "==4.4.5"
            "version": "==4.4.6"
        },
        "certifi": {
            "hashes": [
                "sha256:5ad7e9a056d25ffa5082862e36f119f7f7cec6457fa07ee2f8c339814b80c9b1",
                "sha256:9cd41137dc19af6a5e03b630eefe7d1f458d964d406342dd3edf625839b944cc"
                "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3",
                "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41"
            ],
            "version": "==2020.4.5.2"
            "version": "==2020.6.20"
        },
        "cffi": {
            "hashes": [
@@ -261,6 +261,13 @@
            "index": "pypi",
            "version": "==1.1.2"
        },
        "flask-admin": {
            "hashes": [
                "sha256:68c761d8582d59b1f7702013e944a7ad11d7659a72f3006b89b68b0bd8df61b8"
            ],
            "index": "pypi",
            "version": "==1.5.6"
        },
        "flask-bcrypt": {
            "hashes": [
                "sha256:d71c8585b2ee1c62024392ebdbc447438564e2c8c02b4e57b56a4cafd8d13c5f"
@@ -394,10 +401,10 @@
        },
        "kombu": {
            "hashes": [
                "sha256:437b9cdea193cc2ed0b8044c85fd0f126bb3615ca2f4d4a35b39de7cacfa3c1a",
                "sha256:dc282bb277197d723bccda1a9ba30a27a28c9672d0ab93e9e51bb05a37bd29c3"
                "sha256:be48cdffb54a2194d93ad6533d73f69408486483d189fe9f5990ee24255b0e0a",
                "sha256:ca1b45faac8c0b18493d02a8571792f3c40291cf2bcf1f55afed3d8f3aa7ba74"
            ],
            "version": "==4.6.10"
            "version": "==4.6.11"
        },
        "ldap3": {
            "hashes": [
@@ -446,6 +453,14 @@
            ],
            "version": "==1.1.3"
        },
        "markdown": {
            "hashes": [
                "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17",
                "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"
            ],
            "index": "pypi",
            "version": "==3.2.2"
        },
        "markupsafe": {
            "hashes": [
                "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
@@ -510,29 +525,34 @@
        },
        "numpy": {
            "hashes": [
                "sha256:0172304e7d8d40e9e49553901903dc5f5a49a703363ed756796f5808a06fc233",
                "sha256:34e96e9dae65c4839bd80012023aadd6ee2ccb73ce7fdf3074c62f301e63120b",
                "sha256:3676abe3d621fc467c4c1469ee11e395c82b2d6b5463a9454e37fe9da07cd0d7",
                "sha256:3dd6823d3e04b5f223e3e265b4a1eae15f104f4366edd409e5a5e413a98f911f",
                "sha256:4064f53d4cce69e9ac613256dc2162e56f20a4e2d2086b1956dd2fcf77b7fac5",
                "sha256:4674f7d27a6c1c52a4d1aa5f0881f1eff840d2206989bae6acb1c7668c02ebfb",
                "sha256:7d42ab8cedd175b5ebcb39b5208b25ba104842489ed59fbb29356f671ac93583",
                "sha256:965df25449305092b23d5145b9bdaeb0149b6e41a77a7d728b1644b3c99277c1",
                "sha256:9c9d6531bc1886454f44aa8f809268bc481295cf9740827254f53c30104f074a",
                "sha256:a78e438db8ec26d5d9d0e584b27ef25c7afa5a182d1bf4d05e313d2d6d515271",
                "sha256:a7acefddf994af1aeba05bbbafe4ba983a187079f125146dc5859e6d817df824",
                "sha256:a87f59508c2b7ceb8631c20630118cc546f1f815e034193dc72390db038a5cb3",
                "sha256:ac792b385d81151bae2a5a8adb2b88261ceb4976dbfaaad9ce3a200e036753dc",
                "sha256:b03b2c0badeb606d1232e5f78852c102c0a7989d3a534b3129e7856a52f3d161",
                "sha256:b39321f1a74d1f9183bf1638a745b4fd6fe80efbb1f6b32b932a588b4bc7695f",
                "sha256:cae14a01a159b1ed91a324722d746523ec757357260c6804d11d6147a9e53e3f",
                "sha256:cd49930af1d1e49a812d987c2620ee63965b619257bd76eaaa95870ca08837cf",
                "sha256:e15b382603c58f24265c9c931c9a45eebf44fe2e6b4eaedbb0d025ab3255228b",
                "sha256:e91d31b34fc7c2c8f756b4e902f901f856ae53a93399368d9a0dc7be17ed2ca0",
                "sha256:ef627986941b5edd1ed74ba89ca43196ed197f1a206a3f18cc9faf2fb84fd675",
                "sha256:f718a7949d1c4f622ff548c572e0c03440b49b9531ff00e4ed5738b459f011e8"
                "sha256:13af0184177469192d80db9bd02619f6fa8b922f9f327e077d6f2a6acb1ce1c0",
                "sha256:26a45798ca2a4e168d00de75d4a524abf5907949231512f372b217ede3429e98",
                "sha256:26f509450db547e4dfa3ec739419b31edad646d21fb8d0ed0734188b35ff6b27",
                "sha256:30a59fb41bb6b8c465ab50d60a1b298d1cd7b85274e71f38af5a75d6c475d2d2",
                "sha256:33c623ef9ca5e19e05991f127c1be5aeb1ab5cdf30cb1c5cf3960752e58b599b",
                "sha256:356f96c9fbec59974a592452ab6a036cd6f180822a60b529a975c9467fcd5f23",
                "sha256:3c40c827d36c6d1c3cf413694d7dc843d50997ebffbc7c87d888a203ed6403a7",
                "sha256:4d054f013a1983551254e2379385e359884e5af105e3efe00418977d02f634a7",
                "sha256:63d971bb211ad3ca37b2adecdd5365f40f3b741a455beecba70fd0dde8b2a4cb",
                "sha256:658624a11f6e1c252b2cd170d94bf28c8f9410acab9f2fd4369e11e1cd4e1aaf",
                "sha256:76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598",
                "sha256:7b57f26e5e6ee2f14f960db46bd58ffdca25ca06dd997729b1b179fddd35f5a3",
                "sha256:7b852817800eb02e109ae4a9cef2beda8dd50d98b76b6cfb7b5c0099d27b52d4",
                "sha256:8cde829f14bd38f6da7b2954be0f2837043e8b8d7a9110ec5e318ae6bf706610",
                "sha256:a2e3a39f43f0ce95204beb8fe0831199542ccab1e0c6e486a0b4947256215632",
                "sha256:a86c962e211f37edd61d6e11bb4df7eddc4a519a38a856e20a6498c319efa6b0",
                "sha256:a8705c5073fe3fcc297fb8e0b31aa794e05af6a329e81b7ca4ffecab7f2b95ef",
                "sha256:b6aaeadf1e4866ca0fdf7bb4eed25e521ae21a7947c59f78154b24fc7abbe1dd",
                "sha256:be62aeff8f2f054eff7725f502f6228298891fd648dc2630e03e44bf63e8cee0",
                "sha256:c2edbb783c841e36ca0fa159f0ae97a88ce8137fb3a6cd82eae77349ba4b607b",
                "sha256:cbe326f6d364375a8e5a8ccb7e9cd73f4b2f6dc3b2ed205633a0db8243e2a96a",
                "sha256:d34fbb98ad0d6b563b95de852a284074514331e6b9da0a9fc894fb1cdae7a79e",
                "sha256:d97a86937cf9970453c3b62abb55a6475f173347b4cde7f8dcdb48c8e1b9952d",
                "sha256:dd53d7c4a69e766e4900f29db5872f5824a06827d594427cf1a4aa542818b796",
                "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a",
                "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596"
            ],
            "version": "==1.18.5"
            "version": "==1.19.0"
        },
        "openapi-spec-validator": {
            "hashes": [
@@ -544,10 +564,10 @@
        },
        "openpyxl": {
            "hashes": [
                "sha256:547a9fc6aafcf44abe358b89ed4438d077e9d92e4f182c87e2dc294186dc4b64"
                "sha256:6e62f058d19b09b95d20ebfbfb04857ad08d0833190516c1660675f699c6186f"
            ],
            "index": "pypi",
            "version": "==3.0.3"
            "version": "==3.0.4"
        },
        "packaging": {
            "hashes": [
@@ -558,25 +578,25 @@
        },
        "pandas": {
            "hashes": [
                "sha256:034185bb615dc96d08fa13aacba8862949db19d5e7804d6ee242d086f07bcc46",
                "sha256:0c9b7f1933e3226cc16129cf2093338d63ace5c85db7c9588e3e1ac5c1937ad5",
                "sha256:1f6fcf0404626ca0475715da045a878c7062ed39bc859afc4ccf0ba0a586a0aa",
                "sha256:1fc963ba33c299973e92d45466e576d11f28611f3549469aec4a35658ef9f4cc",
                "sha256:29b4cfee5df2bc885607b8f016e901e63df7ffc8f00209000471778f46cc6678",
                "sha256:2a8b6c28607e3f3c344fe3e9b3cd76d2bf9f59bc8c0f2e582e3728b80e1786dc",
                "sha256:2bc2ff52091a6ac481cc75d514f06227dc1b10887df1eb72d535475e7b825e31",
                "sha256:415e4d52fcfd68c3d8f1851cef4d947399232741cc994c8f6aa5e6a9f2e4b1d8",
                "sha256:519678882fd0587410ece91e3ff7f73ad6ded60f6fcb8aa7bcc85c1dc20ecac6",
                "sha256:51e0abe6e9f5096d246232b461649b0aa627f46de8f6344597ca908f2240cbaa",
                "sha256:698e26372dba93f3aeb09cd7da2bb6dd6ade248338cfe423792c07116297f8f4",
                "sha256:83af85c8e539a7876d23b78433d90f6a0e8aa913e37320785cf3888c946ee874",
                "sha256:982cda36d1773076a415ec62766b3c0a21cdbae84525135bdb8f460c489bb5dd",
                "sha256:a647e44ba1b3344ebc5991c8aafeb7cca2b930010923657a273b41d86ae225c4",
                "sha256:b35d625282baa7b51e82e52622c300a1ca9f786711b2af7cbe64f1e6831f4126",
                "sha256:bab51855f8b318ef39c2af2c11095f45a10b74cbab4e3c8199efcc5af314c648"
                "sha256:02f1e8f71cd994ed7fcb9a35b6ddddeb4314822a0e09a9c5b2d278f8cb5d4096",
                "sha256:13f75fb18486759da3ff40f5345d9dd20e7d78f2a39c5884d013456cec9876f0",
                "sha256:35b670b0abcfed7cad76f2834041dcf7ae47fd9b22b63622d67cdc933d79f453",
                "sha256:4c73f373b0800eb3062ffd13d4a7a2a6d522792fa6eb204d67a4fad0a40f03dc",
                "sha256:5759edf0b686b6f25a5d4a447ea588983a33afc8a0081a0954184a4a87fd0dd7",
                "sha256:5a7cf6044467c1356b2b49ef69e50bf4d231e773c3ca0558807cdba56b76820b",
                "sha256:69c5d920a0b2a9838e677f78f4dde506b95ea8e4d30da25859db6469ded84fa8",
                "sha256:8778a5cc5a8437a561e3276b85367412e10ae9fff07db1eed986e427d9a674f8",
                "sha256:9871ef5ee17f388f1cb35f76dc6106d40cb8165c562d573470672f4cdefa59ef",
                "sha256:9c31d52f1a7dd2bb4681d9f62646c7aa554f19e8e9addc17e8b1b20011d7522d",
                "sha256:ab8173a8efe5418bbe50e43f321994ac6673afc5c7c4839014cf6401bbdd0705",
                "sha256:ae961f1f0e270f1e4e2273f6a539b2ea33248e0e3a11ffb479d757918a5e03a9",
                "sha256:b3c4f93fcb6e97d993bf87cdd917883b7dab7d20c627699f360a8fb49e9e0b91",
                "sha256:c9410ce8a3dee77653bc0684cfa1535a7f9c291663bd7ad79e39f5ab58f67ab3",
                "sha256:f69e0f7b7c09f1f612b1f8f59e2df72faa8a6b41c5a436dde5b615aaf948f107",
                "sha256:faa42a78d1350b02a7d2f0dbe3c80791cf785663d6997891549d0f86dc49125e"
            ],
            "index": "pypi",
            "version": "==1.0.4"
            "version": "==1.0.5"
        },
        "psycopg2-binary": {
            "hashes": [
@@ -711,11 +731,11 @@
        },
        "requests": {
            "hashes": [
                "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee",
                "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"
                "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b",
                "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898"
            ],
            "index": "pypi",
            "version": "==2.23.0"
            "version": "==2.24.0"
        },
        "sentry-sdk": {
            "extras": [
@@ -751,11 +771,11 @@
        },
        "sphinx": {
            "hashes": [
                "sha256:1c445320a3310baa5ccb8d957267ef4a0fc930dc1234db5098b3d7af14fbb242",
                "sha256:7d3d5087e39ab5a031b75588e9859f011de70e213cd0080ccbc28079fb0786d1"
                "sha256:74fbead182a611ce1444f50218a1c5fc70b6cc547f64948f5182fb30a2a20258",
                "sha256:97c9e3bcce2f61d9f5edf131299ee9d1219630598d9f9a8791459a4d9e815be5"
            ],
            "index": "pypi",
            "version": "==3.1.0"
            "version": "==3.1.1"
        },
        "sphinxcontrib-applehelp": {
            "hashes": [
@@ -802,7 +822,7 @@
        "spiffworkflow": {
            "editable": true,
            "git": "https://github.com/sartography/SpiffWorkflow.git",
            "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0"
            "ref": "5450dc0463a95811d386b7de063d950bf6179d2b"
        },
        "sqlalchemy": {
            "hashes": [
@@ -890,6 +910,13 @@
            "index": "pypi",
            "version": "==1.0.1"
        },
        "wtforms": {
            "hashes": [
                "sha256:6ff8635f4caeed9f38641d48cfe019d0d3896f41910ab04494143fc027866e1b",
                "sha256:861a13b3ae521d6700dac3b2771970bd354a63ba7043ecc3a82b5288596a1972"
            ],
            "version": "==2.3.1"
        },
        "xlrd": {
            "hashes": [
                "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2",
@@ -922,6 +949,43 @@
            ],
            "version": "==19.3.0"
        },
        "coverage": {
            "hashes": [
                "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a",
                "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355",
                "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65",
                "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7",
                "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9",
                "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1",
                "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0",
                "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55",
                "sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c",
                "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6",
                "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef",
                "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019",
                "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e",
                "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0",
                "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf",
                "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24",
                "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2",
                "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c",
                "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4",
                "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0",
                "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd",
                "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04",
                "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e",
                "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730",
                "sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2",
                "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768",
                "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796",
                "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7",
                "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a",
                "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489",
                "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"
            ],
            "index": "pypi",
            "version": "==5.1"
        },
        "importlib-metadata": {
            "hashes": [
                "sha256:0505dd08068cfec00f53a74a0ad927676d7757da81b7436a6eefe4c7cf75c545",
@@ -932,10 +996,10 @@
        },
        "more-itertools": {
            "hashes": [
                "sha256:558bb897a2232f5e4f8e2399089e35aecb746e1f9191b6584a151647e89267be",
                "sha256:7818f596b1e87be009031c7653d01acc46ed422e6656b394b0f765ce66ed4982"
                "sha256:68c70cc7167bdf5c7c9d8f6954a7837089c6a36bf565383919bb595efb8a17e5",
                "sha256:b78134b2063dd214000685165d81c154522c3ee0a1c0d4d113c80361c234c5a2"
            ],
            "version": "==8.3.0"
            "version": "==8.4.0"
        },
        "packaging": {
            "hashes": [
@@ -961,10 +1025,10 @@
        },
        "py": {
            "hashes": [
                "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa",
                "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0"
                "sha256:a673fa23d7000440cc885c17dbd34fafcb7d7a6e230b29f6766400de36a33c44",
                "sha256:f3b3a4c36512a4c4f024041ab51866f11761cc169670204b235f6b20523d4e6b"
            ],
            "version": "==1.8.1"
            "version": "==1.8.2"
        },
        "pyparsing": {
            "hashes": [
@@ -990,10 +1054,10 @@
        },
        "wcwidth": {
            "hashes": [
                "sha256:79375666b9954d4a1a10739315816324c3e73110af9d0e102d906fdb0aec009f",
                "sha256:8c6b5b6ee1360b842645f336d9e5d68c55817c26d3050f46b235ef2bc650e48f"
                "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
                "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
            ],
            "version": "==0.2.4"
            "version": "==0.2.5"
        },
        "zipp": {
            "hashes": [
@@ -12,7 +12,7 @@ CORS_ALLOW_ORIGINS = re.split(r',\s*', environ.get('CORS_ALLOW_ORIGINS', default
TESTING = environ.get('TESTING', default="false") == "true"
PRODUCTION = (environ.get('PRODUCTION', default="false") == "true")
TEST_UID = environ.get('TEST_UID', default="dhf8r")
ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah13us,cl3wf"))
ADMIN_UIDS = re.split(r',\s*', environ.get('ADMIN_UIDS', default="dhf8r,ajl2j,cah3us,cl3wf"))

# Sentry flag
ENABLE_SENTRY = environ.get('ENABLE_SENTRY', default="false") == "true"
@@ -46,6 +46,7 @@ LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/')  # No
LDAP_TIMEOUT_SEC = int(environ.get('LDAP_TIMEOUT_SEC', default=1))

# Email configuration
DEFAULT_SENDER = 'askresearch@virginia.edu'
FALLBACK_EMAILS = ['askresearch@virginia.edu', 'sartographysupport@googlegroups.com']
MAIL_DEBUG = environ.get('MAIL_DEBUG', default=True)
MAIL_SERVER = environ.get('MAIL_SERVER', default='smtp.mailtrap.io')
@@ -4,6 +4,8 @@ import sentry_sdk

import connexion
from jinja2 import Environment, FileSystemLoader
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_cors import CORS
from flask_marshmallow import Marshmallow
from flask_mail import Mail
@@ -32,18 +34,24 @@ db = SQLAlchemy(app)
session = db.session
""":type: sqlalchemy.orm.Session"""

# Mail settings
mail = Mail(app)

migrate = Migrate(app, db)
ma = Marshmallow(app)

from crc import models
from crc import api
from crc.api import admin

connexion_app.add_api('api.yml', base_path='/v1.0')


# Convert list of allowed origins to list of regexes
origins_re = [r"^https?:\/\/%s(.*)" % o.replace('.', '\.') for o in app.config['CORS_ALLOW_ORIGINS']]
cors = CORS(connexion_app.app, origins=origins_re)

# Sentry error handling
if app.config['ENABLE_SENTRY']:
    sentry_sdk.init(
        dsn="https://25342ca4e2d443c6a5c49707d68e9f40@o401361.ingest.sentry.io/5260915",
@@ -53,8 +61,6 @@ if app.config['ENABLE_SENTRY']:
# Jinja environment definition, used to render mail templates
template_dir = os.getcwd() + '/crc/static/templates/mails'
env = Environment(loader=FileSystemLoader(template_dir))
# Mail settings
mail = Mail(app)

print('=== USING THESE CONFIG SETTINGS: ===')
print('APPLICATION_ROOT = ', app.config['APPLICATION_ROOT'])
@@ -88,3 +94,10 @@ def clear_db():
    """Load example data into the database."""
    from example_data import ExampleDataLoader
    ExampleDataLoader.clean_db()

@app.cli.command()
def rrt_data_fix():
    """Finds all the empty task event logs, and populates
    them with good wholesome data."""
    from crc.services.workflow_service import WorkflowService
    WorkflowService.fix_legacy_data_model_for_rrt()
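A note on the new CLI command above: Flask's CLI derives the command name from the function name with underscores converted to hyphens, so the fix would presumably be invoked from the project root as:

    flask rrt-data-fix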
crc/api.yml
@@ -917,6 +917,21 @@ paths:
            application/json:
              schema:
                type: object
  /health_attesting:
    get:
      operationId: crc.api.approval.get_health_attesting_csv
      summary: Returns a CSV file with health attesting records
      tags:
        - Approvals
      responses:
        '200':
          description: A CSV file
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: "#/components/schemas/Approval"
components:
  securitySchemes:
    jwt:
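For illustration, the new endpoint could be exercised with something like the sketch below (the host and bearer token are hypothetical; the API is mounted under /v1.0, per crc/__init__.py above):

    import requests

    # Fetch the health-attesting CSV and save it locally.
    resp = requests.get('https://example.org/v1.0/health_attesting',
                        headers={'Authorization': 'Bearer <token>'})
    with open('health_attesting.csv', 'wb') as f:
        f.write(resp.content)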
@@ -0,0 +1,72 @@
# Admin app
import json

from flask import url_for
from flask_admin import Admin
from flask_admin.contrib import sqla
from flask_admin.contrib.sqla import ModelView
from werkzeug.utils import redirect
from jinja2 import Markup

from crc import db, app
from crc.api.user import verify_token, verify_token_admin
from crc.models.approval import ApprovalModel
from crc.models.file import FileModel
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel


class AdminModelView(sqla.ModelView):
    can_create = False
    can_edit = False
    can_delete = False
    page_size = 50  # the number of entries to display on the list view
    column_exclude_list = ['bpmn_workflow_json', ]
    column_display_pk = True
    can_export = True

    def is_accessible(self):
        return verify_token_admin()

    def inaccessible_callback(self, name, **kwargs):
        # redirect to login page if user doesn't have access
        return redirect(url_for('home'))

class UserView(AdminModelView):
    column_filters = ['uid']

class StudyView(AdminModelView):
    column_filters = ['id', 'primary_investigator_id']
    column_searchable_list = ['title']

class ApprovalView(AdminModelView):
    column_filters = ['study_id', 'approver_uid']

class WorkflowView(AdminModelView):
    column_filters = ['study_id', 'id']

class FileView(AdminModelView):
    column_filters = ['workflow_id']

def json_formatter(view, context, model, name):
    value = getattr(model, name)
    json_value = json.dumps(value, ensure_ascii=False, indent=2)
    return Markup('<pre>{}</pre>'.format(json_value))

class TaskEventView(AdminModelView):
    column_filters = ['workflow_id', 'action']
    column_list = ['study_id', 'user_id', 'workflow_id', 'action', 'task_title', 'form_data', 'date']
    column_formatters = {
        'form_data': json_formatter,
    }

admin = Admin(app)

admin.add_view(StudyView(StudyModel, db.session))
admin.add_view(ApprovalView(ApprovalModel, db.session))
admin.add_view(UserView(UserModel, db.session))
admin.add_view(WorkflowView(WorkflowModel, db.session))
admin.add_view(FileView(FileModel, db.session))
admin.add_view(TaskEventView(TaskEventModel, db.session))
@@ -1,9 +1,11 @@
import csv
import io
import json
import pickle
from base64 import b64decode
from datetime import datetime

from flask import g
from flask import g, make_response

from crc import db, session
from crc.api.common import ApiError
@@ -88,71 +90,25 @@ def get_approvals_for_study(study_id=None):
    return results


def get_health_attesting_csv():
    records = ApprovalService.get_health_attesting_records()
    si = io.StringIO()
    cw = csv.writer(si)
    cw.writerows(records)
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=health_attesting.csv"
    output.headers["Content-type"] = "text/csv"
    return output


# ----- Begin descent into madness ---- #
def get_csv():
    """A damn lie, it's a json file. A huge bit of a one-off for RRT, but 3 weeks of midnight work can convince a
    man to do just about anything"""
    approvals = ApprovalService.get_all_approvals(include_cancelled=False)
    output = []
    errors = []
    for approval in approvals:
        try:
            if approval.status != ApprovalStatus.APPROVED.value:
                continue
            for related_approval in approval.related_approvals:
                if related_approval.status != ApprovalStatus.APPROVED.value:
                    continue
            workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first()
            data = json.loads(workflow.bpmn_workflow_json)
            last_task = find_task(data['last_task']['__uuid__'], data['task_tree'])
            personnel = extract_value(last_task, 'personnel')
            training_val = extract_value(last_task, 'RequiredTraining')
            pi_supervisor = extract_value(last_task, 'PISupervisor')['value']
            review_complete = 'AllRequiredTraining' in training_val
            pi_uid = workflow.study.primary_investigator_id
            pi_details = LdapService.user_info(pi_uid)
            details = []
            details.append(pi_details)
            for person in personnel:
                uid = person['PersonnelComputingID']['value']
                details.append(LdapService.user_info(uid))
    content = ApprovalService.get_not_really_csv_content()

            for person in details:
                record = {
                    "study_id": approval.study_id,
                    "pi_uid": pi_details.uid,
                    "pi": pi_details.display_name,
                    "name": person.display_name,
                    "uid": person.uid,
                    "email": person.email_address,
                    "supervisor": "",
                    "review_complete": review_complete,
                }
                # We only know the PI's supervisor.
                if person.uid == pi_details.uid:
                    record["supervisor"] = pi_supervisor
    return content

                output.append(record)

        except Exception as e:
            errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))
    return {"results": output, "errors": errors }


def extract_value(task, key):
    if key in task['data']:
        return pickle.loads(b64decode(task['data'][key]['__bytes__']))
    else:
        return ""


def find_task(uuid, task):
    if task['id']['__uuid__'] == uuid:
        return task
    for child in task['children']:
        task = find_task(uuid, child)
        if task:
            return task
# ----- come back to the world of the living ---- #
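To make the serialization handled by extract_value concrete, here is a minimal self-contained sketch (the key and payload are invented; the '__bytes__' wrapper mirrors how the serialized workflow JSON above stores pickled task data):

    import pickle
    from base64 import b64decode, b64encode

    # A task-data entry as it would appear in the stored workflow JSON:
    stored = {'data': {'personnel': {'__bytes__': b64encode(pickle.dumps([{'uid': 'abc1d'}]))}}}
    # extract_value() reverses the encoding:
    value = pickle.loads(b64decode(stored['data']['personnel']['__bytes__']))
    print(value)  # [{'uid': 'abc1d'}]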
@@ -14,7 +14,7 @@ from crc.services.mails import send_test_email

def render_markdown(data, template):
    """
    Provides a quick way to verify that a Jinja markdown template will work properly on a given json
    Provides a quick way to verify that a Jinja markdown template will work properly on a given json
    data structure. Useful for folks that are building these markdown templates.
    """
    try:
@@ -65,4 +65,4 @@ def send_email(address):
    """Just sends a quick test email to assure the system is working."""
    if not address:
        address = "dan@sartography.com"
    return send_test_email(address, [address])
    return send_test_email(address, [address])
@@ -1,7 +1,7 @@
import uuid

from SpiffWorkflow.util.deep_merge import DeepMerge
from flask import g

from crc import session, app
from crc.api.common import ApiError, ApiErrorSchema
from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
@@ -96,59 +96,10 @@ def delete_workflow_specification(spec_id):
    session.commit()


def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
    """Returns an API model representing the state of the current workflow, if requested, and
    possible, next_task is set to the current_task."""

    nav_dict = processor.bpmn_workflow.get_nav_list()
    navigation = []
    for nav_item in nav_dict:
        spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id'])
        if 'description' in nav_item:
            nav_item['title'] = nav_item.pop('description')
            # fixme: duplicate code from the workflow_service. Should only do this in one place.
            if ' ' in nav_item['title']:
                nav_item['title'] = nav_item['title'].partition(' ')[2]
        else:
            nav_item['title'] = ""
        if spiff_task:
            nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False)
            nav_item['title'] = nav_item['task'].title  # Prefer the task title.
        else:
            nav_item['task'] = None
        if not 'is_decision' in nav_item:
            nav_item['is_decision'] = False

        navigation.append(NavigationItem(**nav_item))
        NavigationItemSchema().dump(nav_item)

    spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
    workflow_api = WorkflowApi(
        id=processor.get_workflow_id(),
        status=processor.get_status(),
        next_task=None,
        navigation=navigation,
        workflow_spec_id=processor.workflow_spec_id,
        spec_version=processor.get_version_string(),
        is_latest_spec=processor.is_latest_spec,
        total_tasks=len(navigation),
        completed_tasks=processor.workflow_model.completed_tasks,
        last_updated=processor.workflow_model.last_updated,
        title=spec.display_name
    )
    if not next_task:  # The Next Task can be requested to be a certain task, useful for parallel tasks.
        # This may or may not work, sometimes there is no next task to complete.
        next_task = processor.next_task()
    if next_task:
        workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True)

    return workflow_api


def get_workflow(workflow_id, soft_reset=False, hard_reset=False):
    workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first()
    processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset)
    workflow_api_model = __get_workflow_api_model(processor)
    workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
    return WorkflowApiSchema().dump(workflow_api_model)

@@ -161,17 +112,20 @@ def set_current_task(workflow_id, task_id):
    user_uid = __get_user_uid(workflow_model.study.user_uid)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    task = processor.bpmn_workflow.get_task(task_id)
    if task.state != task.COMPLETED and task.state != task.READY:
    spiff_task = processor.bpmn_workflow.get_task(task_id)
    if spiff_task.state != spiff_task.COMPLETED and spiff_task.state != spiff_task.READY:
        raise ApiError("invalid_state", "You may not move the token to a task whose state is not "
                                        "currently set to COMPLETE or READY.")

    # Only reset the token if the task doesn't already have it.
    if task.state == task.COMPLETED:
        task.reset_token(reset_data=False)  # we could optionally clear the previous data.
    if spiff_task.state == spiff_task.COMPLETED:
        spiff_task.reset_token(reset_data=True)  # Don't try to copy the existing data back into this task.

    processor.save()
    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
    workflow_api_model = __get_workflow_api_model(processor, task)
    WorkflowService.log_task_action(user_uid, workflow_model, spiff_task,
                                    WorkflowService.TASK_ACTION_TOKEN_RESET,
                                    version=processor.get_version_string())
    workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task)
    return WorkflowApiSchema().dump(workflow_api_model)

@@ -187,19 +141,21 @@ def update_task(workflow_id, task_id, body):
    user_uid = __get_user_uid(workflow_model.study.user_uid)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    task = processor.bpmn_workflow.get_task(task_id)
    if not task:
    spiff_task = processor.bpmn_workflow.get_task(task_id)
    if not spiff_task:
        raise ApiError("empty_task", "Processor failed to obtain task.", status_code=404)
    if task.state != task.READY:
    if spiff_task.state != spiff_task.READY:
        raise ApiError("invalid_state", "You may not update a task unless it is in the READY state. "
                                        "Consider calling a token reset to make this task Ready.")
    task.update_data(body)
    processor.complete_task(task)
    if body:  # IF and only if we get the body back, update the task data with the content.
        spiff_task.data = body  # Accept the data from the front end as complete. Do not merge it in, as then it is impossible to remove items.
    processor.complete_task(spiff_task)
    processor.do_engine_steps()
    processor.save()
    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)

    workflow_api_model = __get_workflow_api_model(processor)
    WorkflowService.log_task_action(user_uid, workflow_model, spiff_task, WorkflowService.TASK_ACTION_COMPLETE,
                                    version=processor.get_version_string())
    workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
    return WorkflowApiSchema().dump(workflow_api_model)

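The switch from task.update_data(body) to a plain assignment is the behavioral core of this hunk: a merge can never delete a key, while replacement lets the front end drop fields. A minimal illustration of the difference (sample data invented):

    existing = {'name': 'Alice', 'obsolete_field': 1}
    body = {'name': 'Alice'}

    merged = dict(existing)
    merged.update(body)  # {'name': 'Alice', 'obsolete_field': 1} -- the stale key survives a merge
    replaced = body      # {'name': 'Alice'}                      -- replacement removes the stale key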
@@ -36,6 +36,7 @@ class Task(object):
    PROP_OPTIONS_FILE = "spreadsheet.name"
    PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column"
    PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column"
    PROP_OPTIONS_READ_ONLY = "read_only"
    PROP_LDAP_LOOKUP = "ldap.lookup"
    VALIDATION_REQUIRED = "required"
    FIELD_TYPE_AUTO_COMPLETE = "autocomplete"
@@ -0,0 +1,18 @@
from flask_marshmallow.sqla import SQLAlchemyAutoSchema
from marshmallow import EXCLUDE
from sqlalchemy import func

from crc import db
from crc.models.study import StudyModel


class EmailModel(db.Model):
    __tablename__ = 'email'
    id = db.Column(db.Integer, primary_key=True)
    subject = db.Column(db.String)
    sender = db.Column(db.String)
    recipients = db.Column(db.String)
    content = db.Column(db.String)
    content_html = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=True)
    study = db.relationship(StudyModel)
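A hedged usage sketch for the new model (field values are invented; db comes from crc as imported above):

    email = EmailModel(subject='Research Ramp-up Plan Approved',
                       sender='askresearch@virginia.edu',
                       recipients="['pi@virginia.edu']",  # stored as a string, per the column type
                       content='Plain-text body',
                       content_html='<p>HTML body</p>')
    db.session.add(email)
    db.session.commit()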
@@ -29,6 +29,9 @@ class LdapModel(db.Model):
            affiliation=", ".join(entry.uvaPersonIAMAffiliation),
            sponsor_type=", ".join(entry.uvaPersonSponsoredType))

    def proper_name(self):
        return f'{self.display_name} - ({self.uid})'


class LdapSchema(SQLAlchemyAutoSchema):
    class Meta:
|
@ -17,6 +17,7 @@ class TaskEventModel(db.Model):
|
|||
task_title = db.Column(db.String)
|
||||
task_type = db.Column(db.String)
|
||||
task_state = db.Column(db.String)
|
||||
form_data = db.Column(db.JSON) # And form data submitted when the task was completed.
|
||||
mi_type = db.Column(db.String)
|
||||
mi_count = db.Column(db.Integer)
|
||||
mi_index = db.Column(db.Integer)
|
||||
|
|
|
@@ -0,0 +1,91 @@
import markdown
from jinja2 import Template

from crc import app
from crc.api.common import ApiError
from crc.scripts.script import Script
from crc.services.ldap_service import LdapService
from crc.services.mails import send_mail


class Email(Script):
    """This Script allows to be introduced as part of a workflow and called from there, specifying
    recipients and content """

    def get_description(self):
        return """
        Creates an email, using the provided arguments (a list of UIDs).
        Each argument will be used to look up personal information needed for
        the email creation.

        Example:
        Email Subject ApprvlApprvr1 PIComputingID
        """

    def do_task_validate_only(self, task, *args, **kwargs):
        self.get_subject(task, args)
        self.get_users_info(task, args)
        self.get_content(task)

    def do_task(self, task, *args, **kwargs):
        args = [arg for arg in args if type(arg) == str]
        subject = self.get_subject(task, args)
        recipients = self.get_users_info(task, args)
        content, content_html = self.get_content(task)
        if recipients:
            send_mail(
                subject=subject,
                sender=app.config['DEFAULT_SENDER'],
                recipients=recipients,
                content=content,
                content_html=content_html
            )

    def get_users_info(self, task, args):
        if len(args) < 1:
            raise ApiError(code="missing_argument",
                           message="Email script requires at least one argument. The "
                                   "name of the variable in the task data that contains user"
                                   "id to process. Multiple arguments are accepted.")
        emails = []
        for arg in args:
            try:
                uid = task.workflow.script_engine.evaluate_expression(task, arg)
            except Exception as e:
                app.logger.error(f'Workflow engines could not parse {arg}')
                app.logger.error(str(e))
                continue
            user_info = LdapService.user_info(uid)
            email = user_info.email_address
            emails.append(user_info.email_address)
            if not isinstance(email, str):
                raise ApiError(code="invalid_argument",
                               message="The Email script requires at least 1 UID argument. The "
                                       "name of the variable in the task data that contains subject and"
                                       " user ids to process. This must point to an array or a string, but "
                                       "it currently points to a %s " % emails.__class__.__name__)

        return emails

    def get_subject(self, task, args):
        if len(args) < 1:
            raise ApiError(code="missing_argument",
                           message="Email script requires at least one subject argument. The "
                                   "name of the variable in the task data that contains subject"
                                   " to process. Multiple arguments are accepted.")
        subject = args[0]
        if not isinstance(subject, str):
            raise ApiError(code="invalid_argument",
                           message="The Email script requires 1 argument. The "
                                   "the name of the variable in the task data that contains user"
                                   "ids to process. This must point to an array or a string, but "
                                   "it currently points to a %s " % subject.__class__.__name__)

        return subject

    def get_content(self, task):
        content = task.task_spec.documentation
        template = Template(content)
        rendered = template.render(task.data)
        rendered_markdown = markdown.markdown(rendered).replace('\n', '<br>')
        return rendered, rendered_markdown
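The get_content flow above is simply "Jinja render, then Markdown to HTML"; a self-contained sketch of the same pipeline (the sample template and data are invented):

    import markdown
    from jinja2 import Template

    doc = "Dear {{ pi_name }},\n\nYour *ramp-up plan* has been submitted."
    rendered = Template(doc).render({'pi_name': 'Dr. Smith'})
    rendered_markdown = markdown.markdown(rendered).replace('\n', '<br>')
    # 'rendered' becomes the plain-text body; 'rendered_markdown' the HTML body.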
@@ -5,7 +5,7 @@ from crc.scripts.script import Script

class FactService(Script):
    def get_description(self):
        return """Just your basic class that can pull in data from a few api endpoints and
        return """Just your basic class that can pull in data from a few api endpoints and
        do a basic task."""

    def get_cat(self):
@@ -1,6 +1,9 @@
from datetime import datetime
import json
import pickle
from base64 import b64decode
from datetime import datetime, timedelta

from sqlalchemy import desc
from sqlalchemy import desc, func

from crc import app, db, session
from crc.api.common import ApiError
@@ -109,16 +112,129 @@ class ApprovalService(object):
        db_approvals = query.all()
        return [Approval.from_model(approval_model) for approval_model in db_approvals]

    @staticmethod
    def get_approval_details(approval):
        """Returns a list of packed approval details, obtained from
        the task data sent during the workflow """
        def extract_value(task, key):
            if key in task['data']:
                return pickle.loads(b64decode(task['data'][key]['__bytes__']))
            else:
                return ""

        def find_task(uuid, task):
            if task['id']['__uuid__'] == uuid:
                return task
            for child in task['children']:
                task = find_task(uuid, child)
                if task:
                    return task

        if approval.status != ApprovalStatus.APPROVED.value:
            return {}
        for related_approval in approval.related_approvals:
            if related_approval.status != ApprovalStatus.APPROVED.value:
                continue
        workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == approval.workflow_id).first()
        data = json.loads(workflow.bpmn_workflow_json)
        last_task = find_task(data['last_task']['__uuid__'], data['task_tree'])
        personnel = extract_value(last_task, 'personnel')
        training_val = extract_value(last_task, 'RequiredTraining')
        pi_supervisor = extract_value(last_task, 'PISupervisor')['value']
        review_complete = 'AllRequiredTraining' in training_val
        pi_uid = workflow.study.primary_investigator_id
        pi_details = LdapService.user_info(pi_uid)
        details = {
            'Supervisor': pi_supervisor,
            'PI_Details': pi_details,
            'Review': review_complete
        }
        details['person_details'] = []
        details['person_details'].append(pi_details)
        for person in personnel:
            uid = person['PersonnelComputingID']['value']
            details['person_details'].append(LdapService.user_info(uid))

        return details

    @staticmethod
    def get_health_attesting_records():
        """Return a list with prepared information related to all approvals """

        approvals = ApprovalService.get_all_approvals(include_cancelled=False)

        health_attesting_rows = [
            ['university_computing_id',
             'last_name',
             'first_name',
             'department',
             'job_title',
             'supervisor_university_computing_id']
        ]

        for approval in approvals:
            try:
                details = ApprovalService.get_approval_details(approval)
                if not details:
                    continue

                for person in details['person_details']:
                    first_name = person.given_name
                    last_name = person.display_name.replace(first_name, '').strip()
                    record = [
                        person.uid,
                        last_name,
                        first_name,
                        '',
                        'Academic Researcher',
                        details['Supervisor'] if person.uid == details['person_details'][0].uid else 'askresearch'
                    ]

                    if record not in health_attesting_rows:
                        health_attesting_rows.append(record)

            except Exception as e:
                app.logger.error("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))

        return health_attesting_rows

    @staticmethod
    def get_not_really_csv_content():
        approvals = ApprovalService.get_all_approvals(include_cancelled=False)
        output = []
        errors = []
        for approval in approvals:
            try:
                details = ApprovalService.get_approval_details(approval)

                for person in details['person_details']:
                    record = {
                        "study_id": approval.study_id,
                        "pi_uid": details['PI_Details'].uid,
                        "pi": details['PI_Details'].display_name,
                        "name": person.display_name,
                        "uid": person.uid,
                        "email": person.email_address,
                        "supervisor": details['Supervisor'] if person.uid == details['person_details'][0].uid else "",
                        "review_complete": details['Review'],
                    }

                    output.append(record)

            except Exception as e:
                errors.append("Error pulling data for workflow #%i: %s" % (approval.workflow_id, str(e)))
        return {"results": output, "errors": errors }

    @staticmethod
    def update_approval(approval_id, approver_uid):
        """Update a specific approval"""
        """Update a specific approval
        NOTE: Actual update happens in the API layer, this
        function is currently in charge of only sending
        corresponding emails
        """
        db_approval = session.query(ApprovalModel).get(approval_id)
        status = db_approval.status
        if db_approval:
            # db_approval.status = status
            # session.add(db_approval)
            # session.commit()
            if status == ApprovalStatus.APPROVED.value:
                # second_approval = ApprovalModel().query.filter_by(
                #     study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
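For orientation, get_health_attesting_records produces rows shaped like the following (the data values are invented for illustration):

    [
        ['university_computing_id', 'last_name', 'first_name', 'department',
         'job_title', 'supervisor_university_computing_id'],
        ['abc1d', 'Smith', 'Jane', '', 'Academic Researcher', 'sup2x'],        # PI row carries the supervisor uid
        ['def2e', 'Jones', 'Tom', '', 'Academic Researcher', 'askresearch'],   # other personnel default to 'askresearch'
    ]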
@@ -0,0 +1,42 @@
from datetime import datetime
from flask_mail import Message
from sqlalchemy import desc

from crc import app, db, mail, session
from crc.api.common import ApiError

from crc.models.study import StudyModel
from crc.models.email import EmailModel


class EmailService(object):
    """Provides common tools for working with an Email"""

    @staticmethod
    def add_email(subject, sender, recipients, content, content_html, study_id):
        """We will receive all data related to an email and store it"""

        # Find corresponding study - if any
        study = None
        if type(study_id) == int:
            study = db.session.query(StudyModel).get(study_id)

        # Create EmailModel
        email_model = EmailModel(subject=subject, sender=sender, recipients=str(recipients),
                                 content=content, content_html=content_html, study=study)

        # Send mail
        try:
            msg = Message(subject,
                          sender=sender,
                          recipients=recipients)

            msg.body = content
            msg.html = content_html

            mail.send(msg)
        except Exception as e:
            app.logger.error(str(e))

        db.session.add(email_model)
        db.session.commit()
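A hedged usage sketch (the addresses and study id are hypothetical; this mirrors how send_mail in crc/services/mails.py delegates to the service):

    EmailService.add_email(subject='Research Ramp-up Plan Approved',
                           sender='askresearch@virginia.edu',
                           recipients=['pi@virginia.edu'],
                           content='Plain-text body',
                           content_html='<p>HTML body</p>',
                           study_id=42)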
@@ -58,7 +58,7 @@ class FileService(object):
                "irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code)

        """Assure this is unique to the workflow, task, and document code AND the Name
        Because we will allow users to upload multiple files for the same form field
        Because we will allow users to upload multiple files for the same form field
        in some cases """
        file_model = session.query(FileModel)\
            .filter(FileModel.workflow_id == workflow_id)\
@@ -24,7 +24,7 @@ class LdapService(object):
    @staticmethod
    def __get_conn():
        if not LdapService.conn:
            if app.config['TESTING']:
            if app.config['TESTING'] or app.config['LDAP_URL'] == 'mock':
                server = Server('my_fake_server')
                conn = Connection(server, client_strategy=MOCK_SYNC)
                file_path = os.path.abspath(os.path.join(app.root_path, '..', 'tests', 'data', 'ldap_response.json'))
@@ -3,13 +3,15 @@ import os
from flask import render_template, render_template_string
from flask_mail import Message

from crc.services.email_service import EmailService


# TODO: Extract common mailing code into its own function
def send_test_email(sender, recipients):
    try:
        msg = Message('Research Ramp-up Plan test',
                      sender=sender,
                      recipients=recipients)
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
        from crc import env, mail
        template = env.get_template('ramp_up_approval_request_first_review.txt')
        template_vars = {'primary_investigator': "test"}
@@ -20,109 +22,84 @@ def send_test_email(sender, recipients):
    except Exception as e:
        return str(e)


def send_mail(subject, sender, recipients, content, content_html, study_id=None):
    EmailService.add_email(subject=subject, sender=sender, recipients=recipients,
                           content=content, content_html=content_html, study_id=study_id)

def send_ramp_up_submission_email(sender, recipients, approver_1, approver_2=None):
    try:
        msg = Message('Research Ramp-up Plan Submitted',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
        from crc import env, mail
        template = env.get_template('ramp_up_submission.txt')
        template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_submission.html')
        msg.html = template.render(template_vars)
    from crc import env
    subject = 'Research Ramp-up Plan Submitted'

        mail.send(msg)
    except Exception as e:
        return str(e)
    template = env.get_template('ramp_up_submission.txt')
    template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_submission.html')
    content_html = template.render(template_vars)

    result = send_mail(subject, sender, recipients, content, content_html)
    return result

def send_ramp_up_approval_request_email(sender, recipients, primary_investigator):
    try:
        msg = Message('Research Ramp-up Plan Approval Request',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
        from crc import env, mail
        template = env.get_template('ramp_up_approval_request.txt')
        template_vars = {'primary_investigator': primary_investigator}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_approval_request.html')
        msg.html = template.render(template_vars)
    from crc import env
    subject = 'Research Ramp-up Plan Approval Request'

        mail.send(msg)
    except Exception as e:
        return str(e)
    template = env.get_template('ramp_up_approval_request.txt')
    template_vars = {'primary_investigator': primary_investigator}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_approval_request.html')
    content_html = template.render(template_vars)

    result = send_mail(subject, sender, recipients, content, content_html)
    return result

def send_ramp_up_approval_request_first_review_email(sender, recipients, primary_investigator):
    try:
        msg = Message('Research Ramp-up Plan Approval Request',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
        from crc import env, mail
        template = env.get_template('ramp_up_approval_request_first_review.txt')
        template_vars = {'primary_investigator': primary_investigator}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_approval_request_first_review.html')
        msg.html = template.render(template_vars)
    from crc import env
    subject = 'Research Ramp-up Plan Approval Request'

        mail.send(msg)
    except Exception as e:
        return str(e)
    template = env.get_template('ramp_up_approval_request_first_review.txt')
    template_vars = {'primary_investigator': primary_investigator}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_approval_request_first_review.html')
    content_html = template.render(template_vars)

    result = send_mail(subject, sender, recipients, content, content_html)
    return result

def send_ramp_up_approved_email(sender, recipients, approver_1, approver_2=None):
    try:
        msg = Message('Research Ramp-up Plan Approved',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
    from crc import env
    subject = 'Research Ramp-up Plan Approved'

        from crc import env, mail
        template = env.get_template('ramp_up_approved.txt')
        template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_approved.html')
        msg.html = template.render(template_vars)
    template = env.get_template('ramp_up_approved.txt')
    template_vars = {'approver_1': approver_1, 'approver_2': approver_2}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_approved.html')
    content_html = template.render(template_vars)

        mail.send(msg)
    except Exception as e:
        return str(e)
    result = send_mail(subject, sender, recipients, content, content_html)
    return result

def send_ramp_up_denied_email(sender, recipients, approver):
    try:
        msg = Message('Research Ramp-up Plan Denied',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
    from crc import env
    subject = 'Research Ramp-up Plan Denied'

        from crc import env, mail
        template = env.get_template('ramp_up_denied.txt')
        template_vars = {'approver': approver}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_denied.html')
        msg.html = template.render(template_vars)
    template = env.get_template('ramp_up_denied.txt')
    template_vars = {'approver': approver}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_denied.html')
    content_html = template.render(template_vars)

        mail.send(msg)
    except Exception as e:
        return str(e)
    result = send_mail(subject, sender, recipients, content, content_html)
    return result

def send_ramp_up_denied_email_to_approver(sender, recipients, primary_investigator, approver_2):
    try:
        msg = Message('Research Ramp-up Plan Denied',
                      sender=sender,
                      recipients=recipients,
                      bcc=['rrt_emails@googlegroups.com'])
    from crc import env
    subject = 'Research Ramp-up Plan Denied'

        from crc import env, mail
        template = env.get_template('ramp_up_denied_first_approver.txt')
        template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2}
        msg.body = template.render(template_vars)
        template = env.get_template('ramp_up_denied_first_approver.html')
        msg.html = template.render(template_vars)
    template = env.get_template('ramp_up_denied_first_approver.txt')
    template_vars = {'primary_investigator': primary_investigator, 'approver_2': approver_2}
    content = template.render(template_vars)
    template = env.get_template('ramp_up_denied_first_approver.html')
    content_html = template.render(template_vars)

        mail.send(msg)
    except Exception as e:
        return str(e)
    result = send_mail(subject, sender, recipients, content, content_html)
    return result
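All of the ramp-up helpers above now funnel through send_mail, which records an EmailModel row and then delivers via Flask-Mail. A hedged sketch of a direct call (template names and addresses invented):

    from crc import env
    from crc.services.mails import send_mail

    template_vars = {'approver_1': 'sup1ab'}
    content = env.get_template('ramp_up_approved.txt').render(template_vars)
    content_html = env.get_template('ramp_up_approved.html').render(template_vars)
    send_mail('Research Ramp-up Plan Approved', 'askresearch@virginia.edu',
              ['pi@virginia.edu'], content, content_html)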
@@ -181,8 +181,6 @@ class StudyService(object):
                documents[code] = doc
            return documents



    @staticmethod
    def get_investigators(study_id):
@@ -224,7 +222,6 @@ class StudyService(object):

        return FileModelSchema().dump(file)


    @staticmethod
    def synch_with_protocol_builder_if_enabled(user):
        """Assures that the studies we have locally for the given user are
@@ -1,4 +1,5 @@
import re
import shlex
import xml.etree.ElementTree as ElementTree
from datetime import datetime
from typing import List
@@ -36,7 +37,9 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):

        This allows us to reference custom code from the BPMN diagram.
        """
        commands = script.split(" ")
        # Shlex splits the whole string while respecting double quoted strings within
        commands = shlex.split(script)
        printable_comms = commands
        path_and_command = commands[0].rsplit(".", 1)
        if len(path_and_command) == 1:
            module_name = "crc.scripts." + self.camel_to_snake(path_and_command[0])
@ -60,7 +63,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
|
|||
"does not properly implement the CRC Script class.",
|
||||
task=task)
|
||||
if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
|
||||
"""If this is running a validation, and not a normal process, then we want to
|
||||
"""If this is running a validation, and not a normal process, then we want to
|
||||
mimic running the script, but not make any external calls or database changes."""
|
||||
klass().do_task_validate_only(task, study_id, workflow_id, *commands[1:])
|
||||
else:
|
||||
|
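For reference, the engine resolves the first token of a BPMN script line (for example, Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID) to a class under crc.scripts. A hedged sketch of that lookup, mirroring the shlex/rsplit/camel_to_snake logic above; the camel_to_snake body and the two-part module branch are assumptions:

import importlib
import re
import shlex


def camel_to_snake(name):
    # Assumed helper: 'Email' -> 'email', 'StudyInfo' -> 'study_info'
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()


def resolve_script(script):
    # Sketch of the lookup performed by CustomBpmnScriptEngine above.
    commands = shlex.split(script)                 # keeps "Camunda Email Subject" as one token
    path_and_command = commands[0].rsplit(".", 1)
    if len(path_and_command) == 1:
        module_name = "crc.scripts." + camel_to_snake(path_and_command[0])
        class_name = path_and_command[0]
    else:
        # Assumed: dotted names map to a sub-package plus class
        module_name = "crc.scripts." + path_and_command[0] + "." + camel_to_snake(path_and_command[1])
        class_name = path_and_command[1]
    module = importlib.import_module(module_name)
    return getattr(module, class_name), commands[1:]  # the class plus its arguments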
@ -102,14 +105,15 @@ class WorkflowProcessor(object):

    def __init__(self, workflow_model: WorkflowModel, soft_reset=False, hard_reset=False, validate_only=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model.
        If soft_reset is set to true, it will try to use the latest version of the workflow specification.
        If hard_reset is set to true, it will create a new Workflow, but embed the data from the last
        completed task in the previous workflow.
        If soft_reset is set to true, it will try to use the latest version of the workflow specification
        without resetting to the beginning of the workflow. This will work for some minor changes to the spec.
        If hard_reset is set to true, it will use the latest spec, and start the workflow over from the beginning,
        which should work in cases where a soft reset fails.
        If neither flag is set, it will use the same version of the specification that was used to originally
        create the workflow model. """
        self.workflow_model = workflow_model

        if soft_reset or len(workflow_model.dependencies) == 0:
        if soft_reset or len(workflow_model.dependencies) == 0:  # Dependencies of 0 means the workflow was never started.
            self.spec_data_files = FileService.get_spec_data_files(
                workflow_spec_id=workflow_model.workflow_spec_id)
        else:
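A short usage sketch of the three construction modes this docstring describes, using the constructor arguments exactly as shown in this hunk:

# Default: replay with the same spec version the workflow was created with.
processor = WorkflowProcessor(workflow_model)

# Soft reset: adopt the latest spec files but keep the current position;
# suitable for minor edits to the specification.
processor = WorkflowProcessor(workflow_model, soft_reset=True)

# Hard reset: start over on the latest spec, carrying forward the data
# from the last completed task (see hard_reset() below).
processor = WorkflowProcessor(workflow_model, hard_reset=True)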
@ -216,8 +220,6 @@ class WorkflowProcessor(object):
        full_version = "v%s (%s)" % (version, files)
        return full_version



    def update_dependencies(self, spec_data_files):
        existing_dependencies = FileService.get_spec_data_files(
            workflow_spec_id=self.workflow_model.workflow_spec_id,

@ -299,25 +301,12 @@ class WorkflowProcessor(object):
        return WorkflowStatus.waiting

    def hard_reset(self):
        """Recreate this workflow, but keep the data from the last completed task and add
        it back into the first task. This may be useful when a workflow specification changes,
        and users need to review all the prior steps, but they don't need to reenter all the previous data.

        Returns the new version.
        """Recreate this workflow. This will be useful when a workflow specification changes.
        """

        # Create a new workflow based on the latest specs.
        self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id)
        new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
        new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine)
        new_bpmn_workflow.data = self.bpmn_workflow.data

        # Reset the current workflow to the beginning - which we will consider to be the first task after the root
        # element. This feels a little sketchy, but I think it is safe to assume root will have one child.
        first_task = self.bpmn_workflow.task_tree.children[0]
        first_task.reset_token(reset_data=False)
        for task in new_bpmn_workflow.get_tasks(SpiffTask.READY):
            task.data = first_task.data
        new_bpmn_workflow.do_engine_steps()
        self.bpmn_workflow = new_bpmn_workflow
@ -1,3 +1,4 @@
import copy
import string
from datetime import datetime
import random

@ -5,25 +6,26 @@ import random
import jinja2
from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
from SpiffWorkflow.bpmn.specs.MultiInstanceTask import MultiInstanceTask
from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
from SpiffWorkflow.bpmn.specs.UserTask import UserTask
from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from flask import g
from SpiffWorkflow.util.deep_merge import DeepMerge
from jinja2 import Template

from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType
from crc.models.api_models import Task, MultiInstanceType, NavigationItem, NavigationItemSchema, WorkflowApi
from crc.models.file import LookupDataModel
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus
from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine
from crc.services.workflow_processor import WorkflowProcessor


class WorkflowService(object):

@ -37,7 +39,7 @@ class WorkflowService(object):
    the workflow Processor should be hidden behind this service.
    This will help maintain a structure that avoids circular dependencies.
    But for now, this contains tools for converting spiff-workflow models into our
    own API models with additional information and capabilities and
    own API models with additional information and capabilities and
    handles the testing of a workflow specification by completing it with
    random selections, attempting to mimic a front end as much as possible. """

@ -180,13 +182,83 @@ class WorkflowService(object):
    def __get_options(self):
        pass


    @staticmethod
    def _random_string(string_length=10):
        """Generate a random string of fixed length """
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for i in range(string_length))

    @staticmethod
    def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None):
        """Returns an API model representing the state of the current workflow, if requested, and
        possible, next_task is set to the current_task."""

        nav_dict = processor.bpmn_workflow.get_nav_list()
        navigation = []
        for nav_item in nav_dict:
            spiff_task = processor.bpmn_workflow.get_task(nav_item['task_id'])
            if 'description' in nav_item:
                nav_item['title'] = nav_item.pop('description')
                # fixme: duplicate code from the workflow_service. Should only do this in one place.
                if ' ' in nav_item['title']:
                    nav_item['title'] = nav_item['title'].partition(' ')[2]
            else:
                nav_item['title'] = ""
            if spiff_task:
                nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False)
                nav_item['title'] = nav_item['task'].title  # Prefer the task title.
            else:
                nav_item['task'] = None
            if not 'is_decision' in nav_item:
                nav_item['is_decision'] = False

            navigation.append(NavigationItem(**nav_item))
            NavigationItemSchema().dump(nav_item)

        spec = db.session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
        workflow_api = WorkflowApi(
            id=processor.get_workflow_id(),
            status=processor.get_status(),
            next_task=None,
            navigation=navigation,
            workflow_spec_id=processor.workflow_spec_id,
            spec_version=processor.get_version_string(),
            is_latest_spec=processor.is_latest_spec,
            total_tasks=len(navigation),
            completed_tasks=processor.workflow_model.completed_tasks,
            last_updated=processor.workflow_model.last_updated,
            title=spec.display_name
        )
        if not next_task:  # The Next Task can be requested to be a certain task, useful for parallel tasks.
            # This may or may not work, sometimes there is no next task to complete.
            next_task = processor.next_task()
        if next_task:
            previous_form_data = WorkflowService.get_previously_submitted_data(processor.workflow_model.id, next_task)
            DeepMerge.merge(next_task.data, previous_form_data)
            workflow_api.next_task = WorkflowService.spiff_task_to_api_task(next_task, add_docs_and_forms=True)

        return workflow_api

    @staticmethod
    def get_previously_submitted_data(workflow_id, task):
        """ If the user has completed this task previously, find the form data for the last submission."""
        latest_event = db.session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow_id) \
            .filter_by(task_name=task.task_spec.name) \
            .filter_by(action=WorkflowService.TASK_ACTION_COMPLETE) \
            .order_by(TaskEventModel.date.desc()).first()
        if latest_event:
            if latest_event.form_data is not None:
                return latest_event.form_data
            else:
                app.logger.error("missing_form_data", "We have lost data for workflow %i, task %s, it is not "
                                                      "in the task event model, "
                                                      "and it should be." % (workflow_id, task.task_spec.name))
                return {}
        else:
            return {}


    @staticmethod
    def spiff_task_to_api_task(spiff_task, add_docs_and_forms=False):
        task_type = spiff_task.task_spec.__class__.__name__
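To illustrate the DeepMerge step above with invented values: the previously submitted form data is folded into the next task's data before the task is serialized for the front end, so a revisited form comes back pre-populated.

from SpiffWorkflow.util.deep_merge import DeepMerge

next_task_data = {'color': 'blue'}                      # what the engine already set
previous_form_data = {'color': 'green', 'size': 'large'}  # from the latest TaskEventModel row

# Merges previous_form_data into next_task_data in place; exact conflict
# behavior is whatever DeepMerge implements.
DeepMerge.merge(next_task_data, previous_form_data)
# next_task_data now carries the previously submitted keys ('size', and a
# value for 'color') alongside the engine-provided data.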
@ -318,21 +390,22 @@ class WorkflowService(object):
                    field.options.append({"id": d.value, "name": d.label})

    @staticmethod
    def log_task_action(user_uid, processor, spiff_task, action):
    def log_task_action(user_uid, workflow_model, spiff_task, action, version):
        task = WorkflowService.spiff_task_to_api_task(spiff_task)
        workflow_model = processor.workflow_model
        form_data = WorkflowService.extract_form_data(spiff_task.data, spiff_task)
        task_event = TaskEventModel(
            study_id=workflow_model.study_id,
            user_uid=user_uid,
            workflow_id=workflow_model.id,
            workflow_spec_id=workflow_model.workflow_spec_id,
            spec_version=processor.get_version_string(),
            spec_version=version,
            action=action,
            task_id=task.id,
            task_name=task.name,
            task_title=task.title,
            task_type=str(task.type),
            task_state=task.state,
            form_data=form_data,
            mi_type=task.multi_instance_type.value,  # Some tasks have a repeat behavior.
            mi_count=task.multi_instance_count,  # This is the number of times the task could repeat.
            mi_index=task.multi_instance_index,  # And the index of the currently repeating task.
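The TaskEventModel populated here is defined in crc/models/stats.py, outside this diff. A sketch of the columns implied by this constructor call and by the form_data migration below; the column types and foreign keys are assumptions:

from crc import db


class TaskEventModel(db.Model):
    # Sketch only; see crc/models/stats.py for the committed definition.
    __tablename__ = 'task_event'
    id = db.Column(db.Integer, primary_key=True)
    study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
    user_uid = db.Column(db.String)
    workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'))
    workflow_spec_id = db.Column(db.String)
    spec_version = db.Column(db.String)
    action = db.Column(db.String)
    task_id = db.Column(db.String)
    task_name = db.Column(db.String)
    task_title = db.Column(db.String)
    task_type = db.Column(db.String)
    task_state = db.Column(db.String)
    form_data = db.Column(db.JSON)  # added by migration de30304ff5e6 below
    mi_type = db.Column(db.String)
    mi_count = db.Column(db.Integer)
    mi_index = db.Column(db.Integer)
    process_name = db.Column(db.String)
    date = db.Column(db.DateTime)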
@ -342,3 +415,64 @@ class WorkflowService(object):
        db.session.add(task_event)
        db.session.commit()

    @staticmethod
    def fix_legacy_data_model_for_rrt():
        """ Remove this after use! This is just to fix RRT so the data is handled correctly.

        Utility that is likely called via the flask command line, it will loop through all the
        workflows in the system and attempt to add the right data into the task action log so that
        users do not have to re-fill out all of the forms if they start over or go back in the workflow.
        Viciously inefficient, but should only have to run one time for RRT"""
        workflows = db.session.query(WorkflowModel).all()
        for workflow_model in workflows:
            task_logs = db.session.query(TaskEventModel) \
                .filter(TaskEventModel.workflow_id == workflow_model.id) \
                .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \
                .order_by(TaskEventModel.date.desc()).all()

            processor = WorkflowProcessor(workflow_model)
            # Grab all the data from last task completed, which will be everything in this
            # rrt situation because of how we were keeping all the data at the time.
            latest_data = processor.next_task().data

            # Move forward in the task spec tree, dropping any data that would have been
            # added in subsequent tasks, just looking at form data, will not track the automated
            # task data additions, hopefully this doesn't hang us.
            for log in task_logs:
                # if log.task_data is not None:  # Only do this if the task event does not have data populated in it.
                #     continue
                data = copy.deepcopy(latest_data)  # Or you end up with insane crazy issues.
                # In the simple case of RRT, there is exactly one task for the given task_spec
                task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0]
                data = WorkflowService.extract_form_data(data, task)
                log.form_data = data
                db.session.add(log)

            db.session.commit()

    @staticmethod
    def extract_form_data(latest_data, task):
        """Removes data from latest_data that would be added by the child task or any of its children."""
        data = {}

        if hasattr(task.task_spec, 'form'):
            for field in task.task_spec.form.fields:
                if field.has_property(Task.PROP_OPTIONS_READ_ONLY) and \
                        field.get_property(Task.PROP_OPTIONS_READ_ONLY).lower().strip() == "true":
                    continue  # Don't add read-only data
                elif field.has_property(Task.PROP_OPTIONS_REPEAT):
                    group = field.get_property(Task.PROP_OPTIONS_REPEAT)
                    if group in latest_data:
                        data[group] = latest_data[group]
                elif isinstance(task.task_spec, MultiInstanceTask):
                    group = task.task_spec.elementVar
                    if group in latest_data:
                        data[group] = latest_data[group]
                else:
                    if field.id in latest_data:
                        data[field.id] = latest_data[field.id]

        return data
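A worked illustration of extract_form_data with hypothetical values; only keys contributed by this task's own form survive:

# Suppose the task's form has one plain field ('PIComputingID') and one field
# carrying a "repeat" property whose group name is 'trainings' (both invented):
latest_data = {
    'PIComputingID': 'dhf8r',
    'trainings': [{'date': '2020-06-01'}, {'date': '2020-06-15'}],
    'unrelated_key': 'set by some other task',
}

# WorkflowService.extract_form_data(latest_data, task) would keep only what
# this task's form contributed:
#   {'PIComputingID': 'dhf8r',
#    'trainings': [{'date': '2020-06-01'}, {'date': '2020-06-15'}]}
# 'unrelated_key' is dropped, as are any read-only fields.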
@ -1 +1 @@
Your Research Ramp-up Plan has been denied by {{ approver_1 }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver_1 }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval.
Your Research Ramp-up Plan has been denied by {{ approver }}. Please return to the Research Ramp-up Plan application and review the comments from {{ approver }} on the home page. Next, open the application and locate the first step where changes are needed. Continue to complete additional steps saving your work along the way. Review your revised Research Ramp-up Plan and re-submit for approval.
@ -23,8 +23,16 @@ if [ "$RESET_DB_RRT" = "true" ]; then
    pipenv run flask load-example-rrt-data
fi

if [ "$FIX_RRT_DATA" = "true" ]; then
    echo 'Fixing RRT data...'
    pipenv run flask rrt-data-fix
fi


# THIS MUST BE THE LAST COMMAND!
if [ "$APPLICATION_ROOT" = "/" ]; then
    pipenv run gunicorn --bind 0.0.0.0:$PORT0 wsgi:app
else
    pipenv run gunicorn -e SCRIPT_NAME="$APPLICATION_ROOT" --bind 0.0.0.0:$PORT0 wsgi:app
fi
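The `flask rrt-data-fix` command invoked above is registered elsewhere in the repository; presumably it is a thin wrapper around the service method in this diff, roughly (the command name comes from the script, the wiring is an assumption):

from crc import app
from crc.services.workflow_service import WorkflowService


@app.cli.command("rrt-data-fix")
def rrt_data_fix():
    # Back-fill form_data on historic task events; see
    # WorkflowService.fix_legacy_data_model_for_rrt above.
    WorkflowService.fix_legacy_data_model_for_rrt()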
@ -0,0 +1,28 @@
"""empty message

Revision ID: 1fdd1bdb600e
Revises: 17597692d0b0
Create Date: 2020-06-17 16:44:16.427988

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '1fdd1bdb600e'
down_revision = '17597692d0b0'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task_event', sa.Column('task_data', sa.JSON(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('task_event', 'task_data')
    # ### end Alembic commands ###
@ -0,0 +1,38 @@
"""empty message

Revision ID: 5acd138e969c
Revises: de30304ff5e6
Create Date: 2020-06-24 21:36:15.128632

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '5acd138e969c'
down_revision = 'de30304ff5e6'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('email',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('subject', sa.String(), nullable=True),
    sa.Column('sender', sa.String(), nullable=True),
    sa.Column('recipients', sa.String(), nullable=True),
    sa.Column('content', sa.String(), nullable=True),
    sa.Column('content_html', sa.String(), nullable=True),
    sa.Column('study_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['study_id'], ['study.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('email')
    # ### end Alembic commands ###
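The EmailModel that maps onto this new table is not shown in the diff; a sketch consistent with the columns above and the assertions in TestEmailService further down (the relationship attribute is an assumption):

from crc import db


class EmailModel(db.Model):
    # Sketch only; see crc/models/email.py for the committed definition.
    __tablename__ = 'email'
    id = db.Column(db.Integer, primary_key=True)
    subject = db.Column(db.String)
    sender = db.Column(db.String)
    recipients = db.Column(db.String)  # stored as str(list), per the test below
    content = db.Column(db.String)
    content_html = db.Column(db.String)
    study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=True)
    study = db.relationship('StudyModel')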
@ -0,0 +1,30 @@
"""empty message

Revision ID: de30304ff5e6
Revises: 1fdd1bdb600e
Create Date: 2020-06-18 16:19:11.133665

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = 'de30304ff5e6'
down_revision = '1fdd1bdb600e'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task_event', sa.Column('form_data', sa.JSON(), nullable=True))
    op.drop_column('task_event', 'task_data')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task_event', sa.Column('task_data', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True))
    op.drop_column('task_event', 'form_data')
    # ### end Alembic commands ###
@ -57,6 +57,32 @@ class TestApprovalsService(BaseTest):
        self.assertEqual(1, models[0].version)
        self.assertEqual(2, models[1].version)

    def test_get_health_attesting_records(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="AD_CoCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        records = ApprovalService.get_health_attesting_records()

        self.assertEqual(len(records), 1)

    def test_get_not_really_csv_content(self):
        self.load_example_data()
        self.create_reference_document()
        workflow = self.create_workflow('empty_workflow')
        FileService.add_workflow_file(workflow_id=workflow.id,
                                      name="anything.png", content_type="text",
                                      binary_data=b'5678', irb_doc_code="AD_CoCAppr")

        ApprovalService.add_approval(study_id=workflow.study_id, workflow_id=workflow.id, approver_uid="dhf8r")
        records = ApprovalService.get_not_really_csv_content()

        self.assertEqual(len(records), 2)

    def test_new_approval_sends_proper_emails(self):
        self.assertEqual(1, 1)
@ -0,0 +1,67 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_0y2dq4f" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
  <bpmn:process id="Process_0tad5ma" name="Set Recipients" isExecutable="true">
    <bpmn:startEvent id="StartEvent_1">
      <bpmn:outgoing>Flow_1synsig</bpmn:outgoing>
    </bpmn:startEvent>
    <bpmn:endEvent id="Event_0izrcj4">
      <bpmn:incoming>Flow_1xlrgne</bpmn:incoming>
    </bpmn:endEvent>
    <bpmn:scriptTask id="Activity_0s5v97n" name="Email Recipients">
      <bpmn:documentation># Dear Approver
## you have been requested for approval


---
New request submitted by {{ PIComputingID }}

Email content to be delivered to {{ ApprvlApprvr1 }}

---</bpmn:documentation>
      <bpmn:incoming>Flow_08n2npe</bpmn:incoming>
      <bpmn:outgoing>Flow_1xlrgne</bpmn:outgoing>
      <bpmn:script>Email "Camunda Email Subject" ApprvlApprvr1 PIComputingID</bpmn:script>
    </bpmn:scriptTask>
    <bpmn:sequenceFlow id="Flow_1synsig" sourceRef="StartEvent_1" targetRef="Activity_1l9vih3" />
    <bpmn:sequenceFlow id="Flow_1xlrgne" sourceRef="Activity_0s5v97n" targetRef="Event_0izrcj4" />
    <bpmn:sequenceFlow id="Flow_08n2npe" sourceRef="Activity_1l9vih3" targetRef="Activity_0s5v97n" />
    <bpmn:userTask id="Activity_1l9vih3" name="Set Recipients">
      <bpmn:extensionElements>
        <camunda:formData>
          <camunda:formField id="ApprvlApprvr1" label="Approver" type="string" />
          <camunda:formField id="PIComputingID" label="Primary Investigator" type="string" />
        </camunda:formData>
      </bpmn:extensionElements>
      <bpmn:incoming>Flow_1synsig</bpmn:incoming>
      <bpmn:outgoing>Flow_08n2npe</bpmn:outgoing>
    </bpmn:userTask>
  </bpmn:process>
  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_0tad5ma">
      <bpmndi:BPMNEdge id="Flow_08n2npe_di" bpmnElement="Flow_08n2npe">
        <di:waypoint x="370" y="117" />
        <di:waypoint x="450" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1xlrgne_di" bpmnElement="Flow_1xlrgne">
        <di:waypoint x="550" y="117" />
        <di:waypoint x="662" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1synsig_di" bpmnElement="Flow_1synsig">
        <di:waypoint x="215" y="117" />
        <di:waypoint x="270" y="117" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
        <dc:Bounds x="179" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Event_0izrcj4_di" bpmnElement="Event_0izrcj4">
        <dc:Bounds x="662" y="99" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_04imfm6_di" bpmnElement="Activity_0s5v97n">
        <dc:Bounds x="450" y="77" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_0xugr62_di" bpmnElement="Activity_1l9vih3">
        <dc:Bounds x="270" y="77" width="100" height="80" />
      </bpmndi:BPMNShape>
    </bpmndi:BPMNPlane>
  </bpmndi:BPMNDiagram>
</bpmn:definitions>
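The scriptTask above runs the Email script (crc.scripts.email.Email in the tests below) through the custom engine shown earlier. Only the method names follow from the engine's dispatch; the base-class import and the body here are assumptions:

from crc.scripts.script import Script  # assumed base class (the "CRC Script class" above)


class Email(Script):
    # BPMN usage: Email "Subject" field_with_uid_1 field_with_uid_2 ...

    def do_task_validate_only(self, task, study_id, workflow_id, *args):
        # Validation mode makes no external calls, per the engine comment above.
        pass

    def do_task(self, task, study_id, workflow_id, *args):
        subject = args[0]  # the quoted string from the BPMN script line
        # Remaining arguments name task-data fields holding recipient uids.
        recipient_uids = [task.data[arg] for arg in args[1:]]
        # Rendering the task documentation and the actual send/persist steps
        # are omitted here; see EmailService and the mails module above.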
@ -175,9 +175,6 @@ Your random fact is:
      <bpmndi:BPMNShape id="UserTask_186s7tw_di" bpmnElement="Task_User_Select_Type">
        <dc:Bounds x="270" y="210" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ScriptTask_10keafb_di" bpmnElement="Task_Get_Fact_From_API">
        <dc:Bounds x="470" y="210" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="EndEvent_0u1cgrf_di" bpmnElement="EndEvent_0u1cgrf">
        <dc:Bounds x="692" y="232" width="36" height="36" />
      </bpmndi:BPMNShape>

@ -187,6 +184,9 @@ Your random fact is:
      <bpmndi:BPMNShape id="TextAnnotation_1234e5n_di" bpmnElement="TextAnnotation_1234e5n">
        <dc:Bounds x="570" y="120" width="100" height="68" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ScriptTask_10keafb_di" bpmnElement="Task_Get_Fact_From_API">
        <dc:Bounds x="470" y="210" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Association_1cfasjp_di" bpmnElement="Association_1cfasjp">
        <di:waypoint x="344" y="210" />
        <di:waypoint x="359" y="184" />
@ -0,0 +1,39 @@
from tests.base_test import BaseTest

from crc.models.email import EmailModel
from crc.services.file_service import FileService
from crc.scripts.email import Email
from crc.services.workflow_processor import WorkflowProcessor
from crc.api.common import ApiError

from crc import db, mail


class TestEmailScript(BaseTest):

    def test_do_task(self):
        workflow = self.create_workflow('email')

        task_data = {
            'PIComputingID': 'dhf8r',
            'ApprvlApprvr1': 'lb3dp'
        }
        task = self.get_workflow_api(workflow).next_task

        with mail.record_messages() as outbox:

            self.complete_form(workflow, task, task_data)

            self.assertEqual(len(outbox), 1)
            self.assertEqual(outbox[0].subject, 'Camunda Email Subject')

            # PI is present
            self.assertIn(task_data['PIComputingID'], outbox[0].body)
            self.assertIn(task_data['PIComputingID'], outbox[0].html)

            # Approver is present
            self.assertIn(task_data['ApprvlApprvr1'], outbox[0].body)
            self.assertIn(task_data['ApprvlApprvr1'], outbox[0].html)

        db_emails = EmailModel.query.count()
        self.assertEqual(db_emails, 1)
@ -0,0 +1,34 @@
from tests.base_test import BaseTest

from crc import session
from crc.models.approval import ApprovalModel, ApprovalStatus
from crc.models.email import EmailModel
from crc.services.email_service import EmailService


class TestEmailService(BaseTest):

    def test_add_email(self):
        self.load_example_data()
        study = self.create_study()
        workflow = self.create_workflow('random_fact')

        subject = 'Email Subject'
        sender = 'sender@sartography.com'
        recipients = ['recipient@sartography.com', 'back@sartography.com']
        content = 'Content for this email'
        content_html = '<p>Hypertext Markup Language content for this email</p>'

        EmailService.add_email(subject=subject, sender=sender, recipients=recipients,
                               content=content, content_html=content_html, study_id=study.id)

        email_model = EmailModel.query.first()

        self.assertEqual(email_model.subject, subject)
        self.assertEqual(email_model.sender, sender)
        self.assertEqual(email_model.recipients, str(recipients))
        self.assertEqual(email_model.content, content)
        self.assertEqual(email_model.content_html, content_html)
        self.assertEqual(email_model.study, study)

        # TODO: Create email model without study
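EmailService.add_email itself is not in this diff; a sketch that would satisfy the assertions above (note the recipient list is persisted as str(list)):

from crc import db
from crc.models.email import EmailModel


class EmailService(object):

    @staticmethod
    def add_email(subject, sender, recipients, content, content_html, study_id=None):
        # Sketch inferred from the test above: persist one EmailModel row.
        email = EmailModel(subject=subject, sender=sender, recipients=str(recipients),
                           content=content, content_html=content_html, study_id=study_id)
        db.session.add(email)
        db.session.commit()
        return email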
@ -0,0 +1,117 @@

from tests.base_test import BaseTest

from crc import mail, session
from crc.models.approval import ApprovalModel, ApprovalStatus
from crc.models.email import EmailModel
from crc.services.mails import (
    send_ramp_up_submission_email,
    send_ramp_up_approval_request_email,
    send_ramp_up_approval_request_first_review_email,
    send_ramp_up_approved_email,
    send_ramp_up_denied_email,
    send_ramp_up_denied_email_to_approver
)


class TestMails(BaseTest):

    def setUp(self):
        """Initial setup shared by all TestApprovals tests"""
        self.load_example_data()
        self.study = self.create_study()
        self.workflow = self.create_workflow('random_fact')

        self.sender = 'sender@sartography.com'
        self.recipients = ['recipient@sartography.com']
        self.primary_investigator = 'Dr. Bartlett'
        self.approver_1 = 'Max Approver'
        self.approver_2 = 'Close Reviewer'

    def test_send_ramp_up_submission_email(self):
        with mail.record_messages() as outbox:

            send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1)
            self.assertEqual(len(outbox), 1)
            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Submitted')
            self.assertIn(self.approver_1, outbox[0].body)
            self.assertIn(self.approver_1, outbox[0].html)

            send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2)
            self.assertEqual(len(outbox), 2)
            self.assertIn(self.approver_1, outbox[1].body)
            self.assertIn(self.approver_1, outbox[1].html)
            self.assertIn(self.approver_2, outbox[1].body)
            self.assertIn(self.approver_2, outbox[1].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 2)

    def test_send_ramp_up_approval_request_email(self):
        with mail.record_messages() as outbox:
            send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator)

            self.assertEqual(len(outbox), 1)
            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request')
            self.assertIn(self.primary_investigator, outbox[0].body)
            self.assertIn(self.primary_investigator, outbox[0].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 1)

    def test_send_ramp_up_approval_request_first_review_email(self):
        with mail.record_messages() as outbox:
            send_ramp_up_approval_request_first_review_email(
                self.sender, self.recipients, self.primary_investigator
            )

            self.assertEqual(len(outbox), 1)
            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approval Request')
            self.assertIn(self.primary_investigator, outbox[0].body)
            self.assertIn(self.primary_investigator, outbox[0].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 1)

    def test_send_ramp_up_approved_email(self):
        with mail.record_messages() as outbox:
            send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1)
            self.assertEqual(len(outbox), 1)
            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Approved')
            self.assertIn(self.approver_1, outbox[0].body)
            self.assertIn(self.approver_1, outbox[0].html)

            send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2)
            self.assertEqual(len(outbox), 2)
            self.assertIn(self.approver_1, outbox[1].body)
            self.assertIn(self.approver_1, outbox[1].html)
            self.assertIn(self.approver_2, outbox[1].body)
            self.assertIn(self.approver_2, outbox[1].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 2)

    def test_send_ramp_up_denied_email(self):
        with mail.record_messages() as outbox:
            send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1)
            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied')
            self.assertIn(self.approver_1, outbox[0].body)
            self.assertIn(self.approver_1, outbox[0].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 1)

    def test_send_send_ramp_up_denied_email_to_approver(self):
        with mail.record_messages() as outbox:
            send_ramp_up_denied_email_to_approver(
                self.sender, self.recipients, self.primary_investigator, self.approver_2
            )

            self.assertEqual(outbox[0].subject, 'Research Ramp-up Plan Denied')
            self.assertIn(self.primary_investigator, outbox[0].body)
            self.assertIn(self.primary_investigator, outbox[0].html)
            self.assertIn(self.approver_2, outbox[0].body)
            self.assertIn(self.approver_2, outbox[0].html)

            db_emails = EmailModel.query.count()
            self.assertEqual(db_emails, 1)
@ -61,14 +61,14 @@ class TestFileService(BaseTest):

        # Archive the file
        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
        self.assertEquals(1, len(file_models))
        self.assertEqual(1, len(file_models))
        file_model = file_models[0]
        file_model.archived = True
        db.session.add(file_model)

        # Assure that the file no longer comes back.
        file_models = FileService.get_workflow_files(workflow_id=workflow.id)
        self.assertEquals(0, len(file_models))
        self.assertEqual(0, len(file_models))

        # Add the file again with different data
        FileService.add_workflow_file(workflow_id=workflow.id,
@ -91,7 +91,6 @@ class TestFilesApi(BaseTest):
                           content_type='multipart/form-data', headers=self.logged_in_headers())
        self.assert_success(rv)


    def test_archive_file_no_longer_shows_up(self):
        self.load_example_data()
        self.create_reference_document()

@ -109,21 +108,16 @@ class TestFilesApi(BaseTest):
        self.assert_success(rv)
        rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
        self.assert_success(rv)
        self.assertEquals(1, len(json.loads(rv.get_data(as_text=True))))
        self.assertEqual(1, len(json.loads(rv.get_data(as_text=True))))

        file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all()
        self.assertEquals(1, len(file_model))
        self.assertEqual(1, len(file_model))
        file_model[0].archived = True
        db.session.commit()

        rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
        self.assert_success(rv)
        self.assertEquals(0, len(json.loads(rv.get_data(as_text=True))))





        self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))

    def test_set_reference_file(self):
        file_name = "irb_document_types.xls"

@ -285,8 +279,8 @@ class TestFilesApi(BaseTest):
            .filter(ApprovalModel.status == ApprovalStatus.PENDING.value)\
            .filter(ApprovalModel.study_id == workflow.study_id).all()

        self.assertEquals(1, len(approvals))
        self.assertEquals(1, len(approvals[0].approval_files))
        self.assertEqual(1, len(approvals))
        self.assertEqual(1, len(approvals[0].approval_files))


    def test_change_primary_bpmn(self):
@ -1,55 +0,0 @@

from tests.base_test import BaseTest

from crc.services.mails import (
    send_ramp_up_submission_email,
    send_ramp_up_approval_request_email,
    send_ramp_up_approval_request_first_review_email,
    send_ramp_up_approved_email,
    send_ramp_up_denied_email,
    send_ramp_up_denied_email_to_approver
)


class TestMails(BaseTest):

    def setUp(self):
        self.sender = 'sender@sartography.com'
        self.recipients = ['recipient@sartography.com']
        self.primary_investigator = 'Dr. Bartlett'
        self.approver_1 = 'Max Approver'
        self.approver_2 = 'Close Reviewer'

    def test_send_ramp_up_submission_email(self):
        send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1)
        self.assertTrue(True)

        send_ramp_up_submission_email(self.sender, self.recipients, self.approver_1, self.approver_2)
        self.assertTrue(True)

    def test_send_ramp_up_approval_request_email(self):
        send_ramp_up_approval_request_email(self.sender, self.recipients, self.primary_investigator)
        self.assertTrue(True)

    def test_send_ramp_up_approval_request_first_review_email(self):
        send_ramp_up_approval_request_first_review_email(
            self.sender, self.recipients, self.primary_investigator
        )
        self.assertTrue(True)

    def test_send_ramp_up_approved_email(self):
        send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1)
        self.assertTrue(True)

        send_ramp_up_approved_email(self.sender, self.recipients, self.approver_1, self.approver_2)
        self.assertTrue(True)

    def test_send_ramp_up_denied_email(self):
        send_ramp_up_denied_email(self.sender, self.recipients, self.approver_1)
        self.assertTrue(True)

    def test_send_send_ramp_up_denied_email_to_approver(self):
        send_ramp_up_denied_email_to_approver(
            self.sender, self.recipients, self.primary_investigator, self.approver_2
        )
        self.assertTrue(True)
@ -4,14 +4,86 @@ import random
from unittest.mock import patch

from tests.base_test import BaseTest

from crc import session, app
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
from crc.models.file import FileModelSchema
from crc.models.workflow import WorkflowStatus

from crc.services.workflow_service import WorkflowService
from crc.models.stats import TaskEventModel

class TestTasksApi(BaseTest):

    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
                          (workflow.id, str(soft_reset), str(hard_reset)),
                          headers=self.logged_in_headers(),
                          content_type="application/json")
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        workflow_api = WorkflowApiSchema().load(json_data)
        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
        return workflow_api

    def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
        prev_completed_task_count = workflow_in.completed_tasks
        if isinstance(task_in, dict):
            task_id = task_in["id"]
        else:
            task_id = task_in.id
        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
                          headers=self.logged_in_headers(),
                          content_type="application/json",
                          data=json.dumps(dict_data))
        if error_code:
            self.assert_failure(rv, error_code=error_code)
            return

        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))

        # Assure stats are updated on the model
        workflow = WorkflowApiSchema().load(json_data)
        # The total number of tasks may change over time, as users move through gateways
        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
        self.assertIsNotNone(workflow.total_tasks)
        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
        # Assure a record exists in the Task Events
        task_events = session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow.id) \
            .filter_by(task_id=task_id) \
            .order_by(TaskEventModel.date.desc()).all()
        self.assertGreater(len(task_events), 0)
        event = task_events[0]
        self.assertIsNotNone(event.study_id)
        self.assertEqual("dhf8r", event.user_uid)
        self.assertEqual(workflow.id, event.workflow_id)
        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
        self.assertEqual(workflow.spec_version, event.spec_version)
        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
        self.assertEqual(task_in.id, task_id)
        self.assertEqual(task_in.name, event.task_name)
        self.assertEqual(task_in.title, event.task_title)
        self.assertEqual(task_in.type, event.task_type)
        self.assertEqual("COMPLETED", event.task_state)
        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
        if isinstance(task_in.multi_instance_type, MultiInstanceType):
            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
        else:
            self.assertEqual(task_in.multi_instance_type, event.mi_type)

        self.assertEqual(task_in.multi_instance_count, event.mi_count)
        self.assertEqual(task_in.multi_instance_index, event.mi_index)
        self.assertEqual(task_in.process_name, event.process_name)
        self.assertIsNotNone(event.date)

        # Assure that there is data in the form_data
        self.assertIsNotNone(event.form_data)

        workflow = WorkflowApiSchema().load(json_data)
        return workflow


    def test_get_current_user_tasks(self):
        self.load_example_data()
        workflow = self.create_workflow('random_fact')

@ -299,13 +371,13 @@ class TestTasksApi(BaseTest):
        self.assertEqual("UserTask", task.type)
        self.assertEqual("Activity_A", task.name)
        self.assertEqual("My Sub Process", task.process_name)
        workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
        workflow_api = self.complete_form(workflow, task, {"FieldA": "Dan"})
        task = workflow_api.next_task
        self.assertIsNotNone(task)

        self.assertEqual("Activity_B", task.name)
        self.assertEqual("Sub Workflow Example", task.process_name)
        workflow_api = self.complete_form(workflow, task, {"name": "Dan"})
        workflow_api = self.complete_form(workflow, task, {"FieldB": "Dan"})
        self.assertEqual(WorkflowStatus.complete, workflow_api.status)

    def test_update_task_resets_token(self):

@ -373,7 +445,9 @@ class TestTasksApi(BaseTest):

        for i in random.sample(range(9), 9):
            task = TaskSchema().load(ready_items[i]['task'])
            self.complete_form(workflow, task, {"investigator":{"email": "dhf8r@virginia.edu"}})
            data = workflow_api.next_task.data
            data['investigator']['email'] = "dhf8r@virginia.edu"
            self.complete_form(workflow, task, data)
            #tasks = self.get_workflow_api(workflow).user_tasks

        workflow = self.get_workflow_api(workflow)
@ -270,53 +270,6 @@ class TestWorkflowProcessor(BaseTest):
        processor = self.get_processor(study, workflow_spec_model)
        self.assertTrue(processor.get_version_string().startswith('v2.1.1'))

    def test_restart_workflow(self):
        self.load_example_data()
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("two_forms")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
        task = processor.next_task()
        task.data = {"key": "Value"}
        processor.complete_task(task)
        task_before_restart = processor.next_task()
        processor.hard_reset()
        task_after_restart = processor.next_task()

        self.assertNotEqual(task.get_name(), task_before_restart.get_name())
        self.assertEqual(task.get_name(), task_after_restart.get_name())
        self.assertEqual(task.data, task_after_restart.data)

    def test_soft_reset(self):
        self.load_example_data()

        # Start the two_forms workflow, and enter some data in the first form.
        study = session.query(StudyModel).first()
        workflow_spec_model = self.load_test_spec("two_forms")
        processor = self.get_processor(study, workflow_spec_model)
        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
        task = processor.next_task()
        task.data = {"color": "blue"}
        processor.complete_task(task)

        # Modify the specification, with a minor text change.
        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_text_mod.bpmn')
        self.replace_file("two_forms.bpmn", file_path)

        # Setting up another processor should not error out, but doesn't pick up the update.
        processor.workflow_model.bpmn_workflow_json = processor.serialize()
        processor2 = WorkflowProcessor(processor.workflow_model)
        self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
        self.assertNotEqual("# This is some documentation I wanted to add.",
                            processor2.bpmn_workflow.last_task.task_spec.documentation)

        # You can do a soft update and get the right response.
        processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True)
        self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
        self.assertEqual("# This is some documentation I wanted to add.",
                         processor3.bpmn_workflow.last_task.task_spec.documentation)



    def test_hard_reset(self):
        self.load_example_data()

@ -344,8 +297,10 @@ class TestWorkflowProcessor(BaseTest):
        # Do a hard reset, which should bring us back to the beginning, but retain the data.
        processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True)
        self.assertEqual("Step 1", processor3.next_task().task_spec.description)
        self.assertEqual({"color": "blue"}, processor3.next_task().data)
        processor3.complete_task(processor3.next_task())
        self.assertTrue(processor3.is_latest_spec)  # Now at version 2.
        task = processor3.next_task()
        task.data = {"color": "blue"}
        processor3.complete_task(task)
        self.assertEqual("New Step", processor3.next_task().task_spec.description)
        self.assertEqual("blue", processor3.next_task().data["color"])
@ -1,7 +1,14 @@
import json

from tests.base_test import BaseTest

from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from SpiffWorkflow import Task as SpiffTask, WorkflowException
from example_data import ExampleDataLoader
from crc import db
from crc.models.stats import TaskEventModel
from crc.models.api_models import Task


class TestWorkflowService(BaseTest):

@ -78,4 +85,50 @@ class TestWorkflowService(BaseTest):
        task = processor.next_task()
        task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
        WorkflowService.populate_form_with_random_data(task, task_api, required_only=False)
        self.assertTrue(isinstance(task.data["sponsor"], dict))
        self.assertTrue(isinstance(task.data["sponsor"], dict))

    def test_fix_legacy_data_model_for_rrt(self):
        ExampleDataLoader().load_rrt()  # Make sure the research_rampup is loaded, as it's not a test spec.
        workflow = self.create_workflow('research_rampup')
        processor = WorkflowProcessor(workflow, validate_only=True)

        # Use the test spec code to complete the workflow of research rampup.
        while not processor.bpmn_workflow.is_completed():
            processor.bpmn_workflow.do_engine_steps()
            tasks = processor.bpmn_workflow.get_tasks(SpiffTask.READY)
            for task in tasks:
                task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
                WorkflowService.populate_form_with_random_data(task, task_api, False)
                task.complete()
                # create the task events
                WorkflowService.log_task_action('dhf8r', workflow, task,
                                                WorkflowService.TASK_ACTION_COMPLETE,
                                                version=processor.get_version_string())
            processor.save()
            db.session.commit()

        WorkflowService.fix_legacy_data_model_for_rrt()

        # All tasks should now have data associated with them.
        task_logs = db.session.query(TaskEventModel) \
            .filter(TaskEventModel.workflow_id == workflow.id) \
            .filter(TaskEventModel.action == WorkflowService.TASK_ACTION_COMPLETE) \
            .order_by(TaskEventModel.date).all()  # Get them back in order.

        self.assertEqual(17, len(task_logs))
        for log in task_logs:
            task = processor.bpmn_workflow.get_tasks_from_spec_name(log.task_name)[0]
            self.assertIsNotNone(log.form_data)
            # Each task should have the data in the form for that task in the task event.
            if hasattr(task.task_spec, 'form'):
                for field in task.task_spec.form.fields:
                    if field.has_property(Task.PROP_OPTIONS_REPEAT):
                        self.assertIn(field.get_property(Task.PROP_OPTIONS_REPEAT), log.form_data)
                    else:
                        self.assertIn(field.id, log.form_data)

        # Some spot checks:
        # The first task should be empty, with all the data removed.
        self.assertEqual({}, task_logs[0].form_data)